language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/projects.py
|
{
"start": 68512,
"end": 79019
}
|
class ____(Response):
"""
Response of projects.get_all endpoint.
:param projects: Projects list
:type projects: Sequence[ProjectsGetAllResponseSingle]
:param scroll_id: Scroll ID that can be used with the next calls to get_all_ex
to retrieve more data
:type scroll_id: str
"""
_service = "projects"
_action = "get_all"
_version = "2.23"
_schema = {
"definitions": {
"projects_get_all_response_single": {
"properties": {
"basename": {
"description": "Project base name",
"type": ["string", "null"],
},
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Creation time",
"format": "date-time",
"type": ["string", "null"],
},
"dataset_stats": {
"description": "Project dataset statistics",
"properties": {
"file_count": {
"description": "The number of files stored in the dataset",
"type": "integer",
},
"total_size": {
"description": "The total dataset size in bytes",
"type": "integer",
},
},
"type": ["object", "null"],
},
"default_output_destination": {
"description": "The default output destination URL for new tasks under this project",
"type": ["string", "null"],
},
"description": {
"description": "Project description",
"type": ["string", "null"],
},
"id": {"description": "Project id", "type": ["string", "null"]},
"last_update": {
"description": "Last project update time. Reflects the last time the project metadata was changed or a task in this project has changed status",
"format": "date-time",
"type": ["string", "null"],
},
"name": {"description": "Project name", "type": ["string", "null"]},
"own_models": {
"description": "The amount of models under this project (without children projects). Returned if 'check_own_contents' flag is set in the request",
"type": ["integer", "null"],
},
"own_tasks": {
"description": "The amount of tasks under this project (without children projects). Returned if 'check_own_contents' flag is set in the request",
"type": ["integer", "null"],
},
"stats": {
"description": "Additional project stats",
"oneOf": [{"$ref": "#/definitions/stats"}, {"type": "null"}],
},
"sub_projects": {
"description": "The list of sub projects",
"items": {
"properties": {
"id": {
"description": "Subproject ID",
"type": "string",
},
"name": {
"description": "Subproject name",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
},
"stats": {
"properties": {
"active": {
"description": "Stats for active tasks",
"oneOf": [
{"$ref": "#/definitions/stats_status_count"},
{"type": "null"},
],
},
"archived": {
"description": "Stats for archived tasks",
"oneOf": [
{"$ref": "#/definitions/stats_status_count"},
{"type": "null"},
],
},
},
"type": "object",
},
"stats_status_count": {
"properties": {
"completed_tasks_24h": {
"description": "Number of tasks completed in the last 24 hours",
"type": ["integer", "null"],
},
"last_task_run": {
"description": "The most recent started time of a task",
"type": ["integer", "null"],
},
"status_count": {
"description": "Status counts",
"properties": {
"closed": {
"description": "Number of 'closed' tasks in project",
"type": "integer",
},
"completed": {
"description": "Number of 'completed' tasks in project",
"type": "integer",
},
"created": {
"description": "Number of 'created' tasks in project",
"type": "integer",
},
"failed": {
"description": "Number of 'failed' tasks in project",
"type": "integer",
},
"in_progress": {
"description": "Number of 'in_progress' tasks in project",
"type": "integer",
},
"published": {
"description": "Number of 'published' tasks in project",
"type": "integer",
},
"queued": {
"description": "Number of 'queued' tasks in project",
"type": "integer",
},
"stopped": {
"description": "Number of 'stopped' tasks in project",
"type": "integer",
},
"unknown": {
"description": "Number of 'unknown' tasks in project",
"type": "integer",
},
},
"type": ["object", "null"],
},
"total_runtime": {
"description": "Total run time of all tasks in project (in seconds)",
"type": ["integer", "null"],
},
"total_tasks": {
"description": "Number of tasks",
"type": ["integer", "null"],
},
},
"type": "object",
},
},
"properties": {
"projects": {
"description": "Projects list",
"items": {"$ref": "#/definitions/projects_get_all_response_single"},
"type": ["array", "null"],
},
"scroll_id": {
"description": "Scroll ID that can be used with the next calls to get_all_ex to retrieve more data",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, projects: Optional[List[Any]] = None, scroll_id: Optional[str] = None, **kwargs: Any) -> None:
super(GetAllResponse, self).__init__(**kwargs)
self.projects = projects
self.scroll_id = scroll_id
@schema_property("projects")
def projects(self) -> Optional[List[Any]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [ProjectsGetAllResponseSingle.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "projects", ProjectsGetAllResponseSingle, is_array=True)
self._property_projects = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
|
GetAllResponse
|
python
|
cython__cython
|
tests/run/ext_auto_richcmp.py
|
{
"start": 7951,
"end": 9512
}
|
class ____(X):
"""
>>> a = ClassLtGt(1)
>>> b = ClassLtGt(2)
>>> c = ClassLtGt(1)
>>> a < b
True
>>> b > a
True
>>> b < a
False
>>> a > b
False
>>> a < c
False
>>> c > a
False
>>> c < a
False
>>> a > c
False
>>> b < c
False
>>> c > b
False
>>> c < b
True
>>> b > c
True
>>> sorted([a, b, c])
[<1>, <1>, <2>]
>>> sorted([b, a, c])
[<1>, <1>, <2>]
>>> 2 > a
True
>>> 2 < a
False
>>> a < 2
True
>>> a > 2
False
>>> 'x' > a # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError...
>>> 'x' < a # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError...
>>> a < 'x' # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError...
>>> a > 'x' # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError...
"""
def __lt__(self, other):
assert 1 <= self.x <= 2
assert isinstance(self, ClassLtGt), type(self)
if isinstance(other, X):
return self.x < x_of(other)
elif isinstance(other, int):
return self.x < other
return NotImplemented
def __gt__(self, other):
assert 1 <= self.x <= 2
assert isinstance(self, ClassLtGt), type(self)
if isinstance(other, X):
return self.x > x_of(other)
elif isinstance(other, int):
return self.x > other
return NotImplemented
@cython.cclass
|
ClassLtGt
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/dagster_run.py
|
{
"start": 4184,
"end": 4498
}
|
class ____(IHaveNew):
run_id: str
steps_succeeded: int
steps_failed: int
materializations: int
expectations: int
enqueued_time: Optional[float]
launch_time: Optional[float]
start_time: Optional[float]
end_time: Optional[float]
@whitelist_for_serdes
@record
|
DagsterRunStatsSnapshot
|
python
|
pytorch__pytorch
|
benchmarks/functional_autograd_benchmark/torchaudio_models.py
|
{
"start": 20634,
"end": 25344
}
|
class ____(torch.nn.Module):
def __init__(self, dropout=0.0):
r"""Processes a projected query and key-value pair to apply
scaled dot product attention.
Args:
dropout (float): probability of dropping an attention weight.
Examples::
>>> SDP = torchtext.models.ScaledDotProduct(0.1)
>>> q = torch.randn(256, 21, 3)
>>> k = v = torch.randn(256, 21, 3)
>>> attn_output, attn_weights = SDP(q, k, v)
>>> print(attn_output.shape, attn_weights.shape)
torch.Size([256, 21, 3]) torch.Size([256, 21, 21])
"""
super().__init__()
self.dropout = dropout
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
bias_k: Optional[torch.Tensor] = None,
bias_v: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
r"""Uses a scaled dot product with the projected key-value pair to update
the projected query.
Args:
query (Tensor): Projected query
key (Tensor): Projected key
value (Tensor): Projected value
attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions.
bias_k and bias_v: (Tensor, optional): one more key and value sequence to be added at
sequence dim (dim=-3). Those are used for incremental decoding. Users should provide
non-None to both arguments in order to activate them.
Shape:
- query: :math:`(L, N * H, E / H)`
- key: :math:`(S, N * H, E / H)`
- value: :math:`(S, N * H, E / H)`
- attn_mask: :math:`(N * H, L, S)`, positions with ``True`` are not allowed to attend
while ``False`` values will be unchanged.
- bias_k and bias_v:bias: :math:`(1, N * H, E / H)`
- Output: :math:`(L, N * H, E / H)`, :math:`(N * H, L, S)`
where L is the target length, S is the source length, H is the number
of attention heads, N is the batch size, and E is the embedding dimension.
"""
if bias_k is not None and bias_v is not None:
assert (
key.size(-1) == bias_k.size(-1)
and key.size(-2) == bias_k.size(-2)
and bias_k.size(-3) == 1
), "Shape of bias_k is not supported"
assert (
value.size(-1) == bias_v.size(-1)
and value.size(-2) == bias_v.size(-2)
and bias_v.size(-3) == 1
), "Shape of bias_v is not supported"
key = torch.cat([key, bias_k])
value = torch.cat([value, bias_v])
if attn_mask is not None:
_attn_mask = attn_mask
attn_mask = torch.nn.functional.pad(_attn_mask, [0, 1])
tgt_len, head_dim = query.size(-3), query.size(-1)
assert query.size(-1) == key.size(-1) == value.size(-1), (
"The feature dim of query, key, value must be equal."
)
assert key.size() == value.size(), "Shape of key, value must match"
src_len = key.size(-3)
batch_heads = max(query.size(-2), key.size(-2))
# Scale query
query, key, value = (
query.transpose(-2, -3),
key.transpose(-2, -3),
value.transpose(-2, -3),
)
query = query * (float(head_dim) ** -0.5)
if attn_mask is not None:
if attn_mask.dim() != 3:
raise RuntimeError("attn_mask must be a 3D tensor.")
if (
(attn_mask.size(-1) != src_len)
or (attn_mask.size(-2) != tgt_len)
or (attn_mask.size(-3) != 1 and attn_mask.size(-3) != batch_heads)
):
raise RuntimeError("The size of the attn_mask is not correct.")
if attn_mask.dtype != torch.bool:
raise RuntimeError("Only bool tensor is supported for attn_mask")
# Dot product of q, k
attn_output_weights = torch.matmul(query, key.mT)
if attn_mask is not None:
attn_output_weights.masked_fill_(
attn_mask,
-1e8,
)
attn_output_weights = torch.nn.functional.softmax(attn_output_weights, dim=-1)
attn_output_weights = torch.nn.functional.dropout(
attn_output_weights, p=self.dropout, training=self.training
)
attn_output = torch.matmul(attn_output_weights, value)
return attn_output.transpose(-2, -3), attn_output_weights
|
ScaledDotProduct
|
python
|
pytorch__pytorch
|
test/fx/test_z3_gradual_types.py
|
{
"start": 2553,
"end": 39519
}
|
class ____(unittest.TestCase):
def test_eq_dim(self):
"""
test dimensions and equalities
"""
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([32, 4, 4])):
eq = x.dim() == 3
return eq
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
# The node we are considering is the gt node
for n in graph.nodes:
if n.target == operator.eq:
node = n
positive, negative = evaluate_conditional_with_constraints(
ast_rewriter.root, graph, node
)
self.assertEqual(positive, z3.sat)
self.assertEqual(negative, z3.unsat)
def test_conditional_ne_1(self):
"""
This test case is for the HFmodels interface.
A function takes a node and a graph and considers
the conditional the node represents and its negation
and solves each formula with the remaining sets of constraints
Returns:
"""
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([32, 4, 4]), y: TensorType([32, 4, 4])):
size_5 = x.size()
getitem_7 = size_5[0]
getitem_8 = size_5[1]
getitem_9 = size_5[2]
ne_1 = y != (getitem_7, getitem_8, getitem_9)
return ne_1
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
# The node we are considering is the gt node
for n in graph.nodes:
if n.target == operator.ne:
node = n
# since x and y are equal, the requirement that x != y cannot be true, so we should get unsat
# for the positive condition and sat for the negative condition
positive, negative = evaluate_conditional_with_constraints(
ast_rewriter.root, graph, node
)
self.assertEqual(positive, z3.unsat)
self.assertEqual(negative, z3.sat)
def test_bmm(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([Dyn, 2, 3]), y: TensorType([1, 3, 2])):
bmm = torch.bmm(x, y)
return bmm
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
b = BasicBlock().forward(torch.rand(1, 2, 3), torch.rand(1, 3, 2))
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
output = z3.Const(3, tensor_type)
self.assertEqual(s.check(), z3.sat)
self.assertEqual(s.model()[output].arg(0).arg(1), b.shape[0])
self.assertEqual(s.model()[output].arg(1).arg(1), b.shape[1])
self.assertEqual(s.model()[output].arg(2).arg(1), b.shape[2])
def test_bmm2(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: Dyn, y: TensorType([1, 3, 2])):
bmm = torch.bmm(x, y)
return bmm
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
b = BasicBlock().forward(torch.rand(1, 2, 3), torch.rand(1, 3, 2))
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
output = z3.Const(3, tensor_type)
self.assertEqual(s.check(), z3.sat)
self.assertEqual(s.model()[output].arg(0).arg(1), b.shape[0])
self.assertEqual(s.model()[output].arg(1).arg(0), 0)
self.assertEqual(s.model()[output].arg(2).arg(1), b.shape[2])
def test_bmm3(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([2, 3, 3]), y: TensorType([1, 3, 2])):
bmm = torch.bmm(x, y)
return bmm
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.unsat)
def test_transpose(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([1, 2, 3, 4])):
transpose = x.transpose(0, 1)
return transpose
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
b = BasicBlock().forward(torch.rand(1, 2, 3, 4))
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
output = z3.Const(2, tensor_type)
self.assertEqual(s.check(), z3.sat)
self.assertEqual(s.model()[output].arg(0).arg(1), b.shape[0])
self.assertEqual(s.model()[output].arg(1).arg(1), b.shape[1])
self.assertEqual(s.model()[output].arg(2).arg(1), b.shape[2])
self.assertEqual(s.model()[output].arg(3).arg(1), b.shape[3])
# change the annotation to Dyn
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
n.type = Dyn
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
def test_index_select(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([2050, 1024]), y: Dyn):
index_select = x.index_select(0, y)
return index_select
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
# print(symbolic_traced)
b = BasicBlock().forward(torch.rand(2050, 1024), torch.ones(8).int())
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
index_select = z3.Const(3, tensor_type)
# the second dimension of the result should not be affected since
# the index is 0
self.assertEqual(s.model()[index_select].arg(1).arg(1), b.shape[1])
replacement_vector = z3.Const(2, tensor_type)
# we set the vector to Dyn
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
index_select = z3.Const(3, tensor_type)
s.add(replacement_vector == z3_dyn)
self.assertEqual(s.check(), z3.sat)
# this implies that the index at 0 should be dyn
self.assertEqual(s.model()[index_select].arg(0).arg(0), 0)
def test_get_attr(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([1, 2, 3])):
getattr = x.device
to = x.to(getattr)
return to
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
b = BasicBlock().forward(torch.rand(1, 2, 3))
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
attr_res = z3.Const(3, tensor_type)
assert s.model()[attr_res].arg(0).arg(1) == b.shape[0]
assert s.model()[attr_res].arg(1).arg(1) == b.shape[1]
assert s.model()[attr_res].arg(2).arg(1) == b.shape[2]
def test_expand(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([1, 4])):
size = x.size()
getitem = size[-1]
expand = x.expand(getitem, 4)
return expand
b = BasicBlock().forward(torch.rand(1, 4))
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
expand_res = z3.Const(4, tensor_type)
assert s.model()[expand_res].arg(0).arg(1) == b.shape[0]
assert s.model()[expand_res].arg(1).arg(1) == b.shape[1]
# change the annotation on the input to Dyn.
# the last dimension should still be 4
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
n.type = Dyn
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
assert s.model()[expand_res].arg(1).arg(1) == b.shape[1]
def test_getitem_tensor(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([4, 4])):
getitem = x[
(None, None, slice(None, None, None), slice(None, None, None))
]
return getitem
B = BasicBlock()
b = B.forward(torch.rand(4, 4))
symbolic_traced: torch.fx.GraphModule = symbolic_trace(B)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
get_item_res = z3.Const(2, tensor_type)
assert s.model()[get_item_res].arg(0).arg(1) == b.shape[0]
assert s.model()[get_item_res].arg(1).arg(1) == b.shape[1]
assert s.model()[get_item_res].arg(2).arg(1) == b.shape[2]
assert s.model()[get_item_res].arg(3).arg(1) == b.shape[3]
# change the annotation on the input to make sure it propagates
# to the output
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
n.type = TensorType([Dyn, 4])
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# dyn check
assert s.model()[get_item_res].arg(2).arg(0) == 0
def test_getitem_tensor2(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([4, 4])):
getitem = x[(None, None)]
return getitem
B = BasicBlock()
b = B.forward(torch.rand(4, 4))
symbolic_traced: torch.fx.GraphModule = symbolic_trace(B)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
get_item_res = z3.Const(2, tensor_type)
assert s.model()[get_item_res].arg(0).arg(1) == b.shape[0]
assert s.model()[get_item_res].arg(1).arg(1) == b.shape[1]
assert s.model()[get_item_res].arg(2).arg(1) == b.shape[2]
assert s.model()[get_item_res].arg(3).arg(1) == b.shape[3]
def test_getitem_tensor_3(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([4, 4])):
getitem = x[
(None, slice(None, None, None), None, slice(None, None, None))
]
return getitem
B = BasicBlock()
b = B.forward(torch.rand(4, 4))
symbolic_traced: torch.fx.GraphModule = symbolic_trace(B)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
get_item_res = z3.Const(2, tensor_type)
assert s.model()[get_item_res].arg(0).arg(1) == b.shape[0]
assert s.model()[get_item_res].arg(1).arg(1) == b.shape[1]
assert s.model()[get_item_res].arg(2).arg(1) == b.shape[2]
assert s.model()[get_item_res].arg(3).arg(1) == b.shape[3]
def test_layer_norm(self):
class BasicBlock(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.l = torch.nn.LayerNorm((1024,))
def forward(self, x: Dyn):
return self.l(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# make the output a size 1 tensor which should result
# in the migration of the input
b = BasicBlock().forward(torch.rand(1024))
input = z3.Const(1, tensor_type)
output = z3.Const(2, tensor_type)
s.add(output == tensor_type.tensor1(D(1, 1024)))
s.check()
self.assertEqual(s.model()[input], s.model()[output])
# input shape = output shape
self.assertEqual(b.shape[0], s.model()[input].arg(0).arg(1))
# change annotation to the wrong shape
for n in graph.nodes:
if n.op == "placeholder":
n.type = TensorType([10, 10])
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.unsat)
# fix the annotation
for n in graph.nodes:
if n.op == "placeholder":
n.type = TensorType([10, 1024])
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
s.check()
b = BasicBlock().forward(torch.rand(10, 1024)).shape
self.assertEqual(s.model()[output].arg(0).arg(1), b[0])
self.assertEqual(s.model()[output].arg(1).arg(1), b[1])
def test_layer_norm_functional(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: Dyn):
return torch.nn.functional.layer_norm(x, (1024,))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# make the output a size 1 tensor which should result
# in the migration of the input
b = BasicBlock().forward(torch.rand(1024))
input = z3.Const(1, tensor_type)
output = z3.Const(2, tensor_type)
s.add(output == tensor_type.tensor1(D(1, 1024)))
s.check()
self.assertEqual(s.model()[input], s.model()[output])
# input shape = output shape
self.assertEqual(b.shape[0], s.model()[input].arg(0).arg(1))
def test_ne_int_long_type_as(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([Dyn, Dyn]), y: TensorType([Dyn, Dyn])):
ne_int = torch.ne(x, y).int()
type_as = ne_int.type_as(y)
long = type_as.long()
return long
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# migrate one of the parameters to a fully static shape so we can compare
input = z3.Const(1, tensor_type)
input_2 = z3.Const(2, tensor_type)
s1, s2 = z3.Ints("s1 s2")
output_long = z3.Const(8, tensor_type)
s.add(input == tensor_type.tensor2(D(1, 2), D(1, 4)))
s.add(input_2 == tensor_type.tensor2(D(1, s1), D(1, s2)))
self.assertEqual(s.check(), z3.sat)
actual_shape = BasicBlock().forward(torch.rand(2, 4), torch.rand(2, 4)).shape
self.assertEqual(s.model()[output_long].arg(0).arg(1), actual_shape[0])
self.assertEqual(s.model()[output_long].arg(1).arg(1), actual_shape[1])
def test_ne(self):
s1, s2 = z3.Ints("s1 s2")
s11, s22 = z3.Ints("s11 s22")
d1, d2 = D(s11, s1), D(0, s2)
class BasicBlock(torch.nn.Module):
def forward(self, x: Dyn, y: Dyn):
return torch.ne(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# change the annotations
for n in graph.nodes:
if n.name == "x":
n.type = TensorType([1, 2])
if n.name == "y":
n.type = TensorType([2, Dyn])
# resulting type should be TensorType([2, 2])
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# force the second dimension to be Dyn
# output should still be TensorType([2, 2])
input = z3.Const(2, tensor_type)
s.add(input == tensor_type.tensor2(d1, d2))
self.assertEqual(s.check(), z3.sat)
B = BasicBlock().forward(torch.rand(1, 2), torch.rand(2, 1))
output = z3.Const(3, tensor_type)
self.assertEqual(s.model()[output].arg(0).arg(1), B.shape[0])
self.assertEqual(s.model()[output].arg(1).arg(1), B.shape[0])
def test_cumsum(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([Dyn, 4, 3])):
t = torch.cumsum(x, 3)
return t
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(
BasicBlock(), meta_args={}
)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
# should be unsat since the index is not valid for this annotation
self.assertEqual(s.check(), z3.unsat)
# modify the annotation to Dyn which should give sat
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
n.type = Dyn
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# # modify the annotation to the right tensor size
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
n.type = TensorType([1, 2, 3, 4])
# verify that the input is equal to the output
B = BasicBlock().forward(torch.rand(1, 2, 3, 4))
res_shape = B.shape
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# confirm the output matches the expected tensor
result = z3.Const(2, tensor_type)
self.assertEqual(s.model()[result].arg(0).arg(1), res_shape[0])
self.assertEqual(s.model()[result].arg(1).arg(1), res_shape[1])
self.assertEqual(s.model()[result].arg(2).arg(1), res_shape[2])
self.assertEqual(s.model()[result].arg(3).arg(1), res_shape[3])
# confirm the output is not dyn
self.assertNotEqual(s.model()[result].arg(0).arg(0).as_long(), 0)
self.assertNotEqual(s.model()[result].arg(1).arg(0).as_long(), 0)
self.assertNotEqual(s.model()[result].arg(2).arg(0).as_long(), 0)
self.assertNotEqual(s.model()[result].arg(3).arg(0).as_long(), 0)
def test_cumsum_kwargs(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([Dyn, 4, 3])):
t = torch.cumsum(x, dim=3)
return t
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(
BasicBlock(), meta_args={}
)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
# should be unsat since the index is not valid for this annotation
self.assertEqual(s.check(), z3.unsat)
# modify the annotation to Dyn which should give sat
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
n.type = Dyn
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
def test_arange(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([2, 4])):
size = x.size()
getitem = size[-1]
arange = torch.arange(getitem)
return arange
B = BasicBlock().forward(torch.rand(2, 4))
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(
BasicBlock(), meta_args={}
)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
arange_result = z3.Const(5, tensor_type)
self.assertNotEqual(s.model()[arange_result].arg(0).arg(0).as_long(), 0)
self.assertEqual(s.model()[arange_result].arg(0).arg(1).as_long(), B.size()[0])
# change the annotation to Dyn. This will migrate to an arbitrary type
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
n.type = Dyn
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
n.type = TensorType([Dyn, Dyn, Dyn, Dyn])
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
def test_scalar_add(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([2, 4])):
size = x.size()
getitem = size[-1]
arange = torch.arange(getitem)
add = arange + 1
return add
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(
BasicBlock(), meta_args={}
)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
arange_result = z3.Const(5, tensor_type)
add_result = z3.Const(6, tensor_type)
self.assertEqual(s.model()[arange_result], s.model()[add_result])
def test_regular_add_2(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([2, 4])):
to = x.to()
size = to.size()
getitem = size[-1]
add = getitem + 1
return add
b = BasicBlock().forward(torch.rand(2, 4))
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(
BasicBlock(), meta_args={}
)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
res = z3.Int(5)
self.assertEqual(s.model()[res], b)
def test_regular_add_3(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([2, 4])):
to = x.to()
size = to.size()
getitem = size[-1]
add = 1 + getitem
return add
b = BasicBlock().forward(torch.rand(2, 4))
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(
BasicBlock(), meta_args={}
)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
res = z3.Int(5)
self.assertEqual(s.model()[res], b)
def test_embedding(self):
class BasicBlock(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.embedding = torch.nn.Embedding(256008, 1024, padding_idx=1)
def forward(self, x: TensorType([2, 4])):
return self.embedding(x)
B = BasicBlock().forward(torch.ones([2, 4], dtype=torch.long)).size()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
embedding_result = z3.Const(2, tensor_type)
assert s.model()[embedding_result].arg(0).arg(1) == B[0]
assert s.model()[embedding_result].arg(1).arg(1) == B[1]
assert s.model()[embedding_result].arg(2).arg(1) == B[2]
# change the type. This should still be satisfiable
for n in traced.graph.nodes:
if n.op == "placeholder":
n.type = TensorType([Dyn, Dyn])
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
assert s.model()[embedding_result].arg(0).arg(0) == 0
assert s.model()[embedding_result].arg(1).arg(0) == 0
assert s.model()[embedding_result].arg(2).arg(1) == B[2]
# change the type to Dyn. Here, we will get an arbitrary migration
for n in traced.graph.nodes:
if n.op == "placeholder":
n.type = Dyn
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
def test_embedding_2(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([2, 4]), y: TensorType([Dyn, 1024])):
return torch.nn.functional.embedding(x, y)
B = (
BasicBlock()
.forward(torch.ones([2, 4], dtype=torch.long), torch.rand(256008, 1024))
.size()
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
embedding_result = z3.Const(5, tensor_type)
assert s.model()[embedding_result].arg(0).arg(1) == B[0]
assert s.model()[embedding_result].arg(1).arg(1) == B[1]
assert s.model()[embedding_result].arg(2).arg(1) == B[2]
def test_size_two_args(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([Dyn, 2, Dyn])):
size = x.size(-1)
return size
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
d1, d2 = z3.Int(39), z3.Int(2)
d4, d5 = z3.Int("input_d1"), z3.Int("input_d2")
# migrate the third dimension
s.add(d1 != 0)
self.assertEqual(s.check(), z3.sat)
input = z3.Const(1, tensor_type)
s.add(input == tensor_type.tensor3(D(3, 39), D(1, 2), D(d4, d5)))
# check if the item we got is the right one
self.assertEqual(s.check(), z3.sat)
self.assertEqual(s.model()[d5], s.model()[d2])
self.assertEqual(s.model()[d1], s.model()[d4])
def test_size_getitem(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: Dyn):
size = x.size()
getitem = size[-1]
return getitem
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# force the input to be of size 4
s1, s2, s3, s4 = z3.Ints("x1 x2 x3 x4")
s11, s22, s33, s44 = z3.Ints("x11 x22 x33 x44")
d1, d2, d3, d4 = (
D(s11, s1),
D(s22, s2),
D(s33, s3),
D(s44, s4),
)
input = z3.Const(1, tensor_type)
s.add(input == tensor_type.tensor4(d1, d2, d3, d4))
# check if the model is still SAT
self.assertEqual(s.check(), z3.sat)
s1, s2 = z3.Int(23), z3.Int(3)
# check that the item is correct
self.assertEqual(s.model()[s1], s.model()[s2])
# invalid index but should still be SAT because input will be Dyn
class BasicBlock(torch.nn.Module):
def forward(self, x: Dyn):
size = x.size()
getitem = size[-10]
return getitem
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
s.add(input != z3_dyn)
self.assertEqual(s.check(), z3.unsat)
def test_view_mul(self):
class BasicBlock(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.embed_tokens = torch.nn.Embedding(256008, 1024, padding_idx=1)
def forward(self, x: TensorType([2, 4])):
size = x.size()
getitem = size[-1]
view = x.view(-1, getitem)
embed_tokens = self.embed_tokens(view)
mul = embed_tokens * 32.0
return mul
# print(B)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
# print(traced)
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# print(s.model())
embedding_result = z3.Const(6, tensor_type)
# note that the view output will be: tensor3(dim(0, 0), dim(1, 4), dim(1, 1024))
# this is due to the reshape constraints. This can be lifted
# but would require revising the type rules accordingly so we leave it for now
assert (s.model()[embedding_result].arg(1).arg(1)) == 4
assert (s.model()[embedding_result].arg(2).arg(1)) == 1024
mul_result = z3.Const(13, tensor_type)
assert s.model()[mul_result] == s.model()[embedding_result]
def test_gt(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([Dyn, 4])):
size = x.size()
getitem_1 = size[-1]
gt = getitem_1 > 1
return gt
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
res = z3.Bool(4)
self.assertEqual(s.model()[res], True)
def test_view(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([2, 4])):
view = x.view(-1, 8)
return view
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
def test_lt_tensor(self):
class BasicBlock(torch.nn.Module):
def forward(self, x: TensorType([2, 4]), y: Dyn):
lt = x > y
return lt
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
def test_conditional_wrong_assumption(self):
"""
Test condition after making the wrong assumption about the input
"""
class BasicBlock(torch.nn.Module):
def forward(self, x: Dyn):
gt = x > 1
return gt
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
# The node we are considering is the gt node
for n in graph.nodes:
if n.target == operator.gt:
node = n
positive, negative = evaluate_conditional_with_constraints(
ast_rewriter.root, graph, node
)
self.assertEqual(positive, z3.sat)
self.assertEqual(negative, z3.sat)
def test_conditional(self):
"""
This test case is for the HFmodels interface.
A function takes a node and a graph and considers
the conditional the node represents and its negation
and solves each formula with the remaining sets of constraints
Returns:
"""
class BasicBlock(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.embed_tokens = torch.nn.Embedding(256008, 1024, padding_idx=1)
def forward(self, x: TensorType([Dyn, 4])):
size = x.size()
getitem = size[-1]
view = x.view(-1, getitem)
_embed_tokens = self.embed_tokens(view)
getitem_1 = size[-1]
gt = getitem_1 > 1
return gt
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
# The node we are considering is the gt node
for n in graph.nodes:
if n.target == operator.gt:
node = n
positive, negative = evaluate_conditional_with_constraints(
ast_rewriter.root, graph, node
)
self.assertEqual(positive, z3.sat)
self.assertEqual(negative, z3.unsat)
# change the annotation to Dyn
for n in graph.nodes:
if n.op == "placeholder":
n.type = Dyn
# here, both should be SAT since the input is Dyn
positive, negative = evaluate_conditional_with_constraints(
ast_rewriter.root, graph, node
)
self.assertEqual(positive, z3.sat)
self.assertEqual(negative, z3.sat)
# change the annotation to TensorType[Dyn, Dyn]
for n in graph.nodes:
if n.op == "placeholder":
n.type = TensorType([Dyn, Dyn])
# here, both should be SAT as well
positive, negative = evaluate_conditional_with_constraints(
ast_rewriter.root, graph, node
)
self.assertEqual(positive, z3.sat)
self.assertEqual(negative, z3.sat)
def test_conditional_2(self):
"""
This test case is for the HFmodels interface.
A function takes a node and a graph and considers
the conditional the node represents and its negation
and solves each formula with the remaining sets of constraints
Returns the opposite result of the above testcase
"""
class BasicBlock(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.embed_tokens = torch.nn.Embedding(256008, 1024, padding_idx=1)
def forward(self, x: TensorType([Dyn, 4])):
size = x.size()
getitem = size[-1]
view = x.view(-1, getitem)
_embed_tokens = self.embed_tokens(view)
getitem_1 = size[-1]
lt = getitem_1 < 1
return lt
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
# The node we are considering is the gt node
for n in graph.nodes:
if n.target == operator.lt:
node = n
positive, negative = evaluate_conditional_with_constraints(
ast_rewriter.root, graph, node
)
self.assertEqual(positive, z3.unsat)
self.assertEqual(negative, z3.sat)
|
HFOperations
|
python
|
numba__numba
|
numba/tests/test_dyn_array.py
|
{
"start": 38092,
"end": 39046
}
|
class ____(BaseTest):
def test_linspace_2(self):
def pyfunc(n, m):
return np.linspace(n, m)
self.check_outputs(pyfunc,
[(0, 4), (1, 100), (-3.5, 2.5), (-3j, 2+3j),
(2, 1), (1+0.5j, 1.5j)])
def test_linspace_3(self):
def pyfunc(n, m, p):
return np.linspace(n, m, p)
self.check_outputs(pyfunc,
[(0, 4, 9), (1, 4, 3), (-3.5, 2.5, 8),
(-3j, 2+3j, 7), (2, 1, 0),
(1+0.5j, 1.5j, 5), (1, 1e100, 1)])
def test_linspace_accuracy(self):
# Checking linspace reasonably replicates NumPy's algorithm
# see https://github.com/numba/numba/issues/6768
@nrtjit
def foo(n, m, p):
return np.linspace(n, m, p)
n, m, p = 0.0, 1.0, 100
self.assertPreciseEqual(foo(n, m, p), foo.py_func(n, m, p))
|
TestLinspace
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/callable1.py
|
{
"start": 149,
"end": 874
}
|
class ____:
pass
Callable2 = Callable[[A], None]
def func1(a: Callable1):
a(A())
def func2(a: Callable2):
a(A())
Callable3 = Callable[..., int]
def func3(a: Callable3) -> int:
return a(1, 2, 3) + a() + a("hello") + a([])
# This should generate an error (... not allowed in param list).
Callable4 = Callable[[...], int]
# This should generate an error (too many arguments).
Callable5 = Callable[..., int, int]
# Test Callable with no parameters
Callable6 = Callable[[], str]
def func6(a: Callable6):
a()
# This should generate an error.
a(1)
def func7(a: Callable):
reveal_type(a, expected_text="(...) -> Unknown")
b = a(3, 4, 5)
reveal_type(b, expected_text="Unknown")
|
A
|
python
|
falconry__falcon
|
tests/test_uri_templates.py
|
{
"start": 1152,
"end": 1391
}
|
class ____:
def __init__(self):
self.id = None
self.name = None
self.called = False
def on_get(self, req, resp, id, name):
self.id = id
self.name = name
self.called = True
|
NameResource
|
python
|
allegroai__clearml
|
clearml/storage/manager.py
|
{
"start": 537,
"end": 25821
}
|
class ____(object):
"""
StorageManager is helper interface for downloading & uploading files to supported remote storage
Support remote servers: http(s)/S3/GS/Azure/File-System-Folder
Cache is enabled by default for all downloaded remote urls/files
"""
_file_upload_retries = deferred_config("network.file_upload_retries", 3)
storage_helper = StorageHelper
@classmethod
def get_local_copy(
cls,
remote_url: str,
cache_context: Optional[str] = None,
extract_archive: bool = True,
name: Optional[str] = None,
force_download: bool = False,
) -> Optional[str]:
"""
Get a local copy of the remote file. If the remote URL is a direct file access,
the returned link is the same, otherwise a link to a local copy of the url file is returned.
Caching is enabled by default, cache limited by number of stored files per cache context.
Oldest accessed files are deleted when cache is full.
One can also use this function to prevent the deletion of a file that has been cached,
as the respective file will have its timestamp refreshed
:param str remote_url: remote url link (string)
:param str cache_context: Optional caching context identifier (string), default context 'global'
:param bool extract_archive: if True, returned path will be a cached folder containing the archive's content,
currently only zip files are supported.
:param str name: name of the target file
:param bool force_download: download file from remote even if exists in local cache
:return: Full path to local copy of the requested url. Return None on Error.
"""
if bool(cls.storage_helper.use_disk_space_file_size_strategy):
cached_file = cls.storage_helper.get_local_copy(remote_url=remote_url, force_download=force_download)
if cached_file:
# noinspection PyProtectedMember
CacheManager._add_remote_url(remote_url=remote_url, local_copy_path=cached_file)
if not extract_archive or not cached_file:
return cached_file
cache = CacheManager.get_cache_manager(cache_context=cache_context)
cache_path_encoding = Path(cached_file).parent / cache.get_hashed_url_file(remote_url)
return cls._extract_to_cache(
cached_file,
name='cache',
cache_context=cache_context,
cache_path_encoding=cache_path_encoding.as_posix()
)
cache = CacheManager.get_cache_manager(cache_context=cache_context)
cached_file = cache.get_local_copy(remote_url=remote_url, force_download=force_download)
if extract_archive and cached_file:
# this will get us the actual cache (even with direct access)
cache_path_encoding = Path(cache.get_cache_folder()) / cache.get_hashed_url_file(remote_url)
return cls._extract_to_cache(
cached_file,
name,
cache_context,
cache_path_encoding=cache_path_encoding.as_posix(),
)
return cached_file
@classmethod
def upload_file(
cls,
local_file: str,
remote_url: str,
wait_for_upload: bool = True,
retries: Optional[int] = None,
) -> str:
"""
Upload a local file to a remote location. remote url is the final destination of the uploaded file.
Examples:
.. code-block:: py
upload_file('/tmp/artifact.yaml', 'http://localhost:8081/manual_artifacts/my_artifact.yaml')
upload_file('/tmp/artifact.yaml', 's3://a_bucket/artifacts/my_artifact.yaml')
upload_file('/tmp/artifact.yaml', '/mnt/share/folder/artifacts/my_artifact.yaml')
:param str local_file: Full path of a local file to be uploaded
:param str remote_url: Full path or remote url to upload to (including file name)
:param bool wait_for_upload: If False, return immediately and upload in the background. Default True.
:param int retries: Number of retries before failing to upload file.
:return: Newly uploaded remote URL.
"""
return CacheManager.get_cache_manager().upload_file(
local_file=local_file,
remote_url=remote_url,
wait_for_upload=wait_for_upload,
retries=retries if retries else cls._file_upload_retries,
)
@classmethod
def set_cache_file_limit(cls, cache_file_limit: int, cache_context: Optional[str] = None) -> int:
"""
Set the cache context file limit. File limit is the maximum number of files the specific cache context holds.
Notice, there is no limit on the size of these files, only the total number of cached files.
:param int cache_file_limit: New maximum number of cached files
:param str cache_context: Optional cache context identifier, default global context
:return: The new cache context file limit.
"""
return CacheManager.get_cache_manager(
cache_context=cache_context, cache_file_limit=cache_file_limit
).set_cache_limit(cache_file_limit)
@classmethod
def _extract_to_cache(
cls,
cached_file: str,
name: str,
cache_context: Optional[str] = None,
target_folder: Optional[str] = None,
cache_path_encoding: Optional[str] = None,
force: bool = False,
) -> str:
"""
Extract cached file to cache folder
:param str cached_file: local copy of archive file
:param str name: name of the target file
:param str cache_context: cache context id
:param str target_folder: specify target path to use for archive extraction
:param str cache_path_encoding: specify representation of the local path of the cached files,
this will always point to local cache folder, even if we have direct access file.
Used for extracting the cached archived based on cache_path_encoding
:param bool force: Force archive extraction even if target folder exists
:return: cached folder containing the extracted archive content
"""
if not cached_file:
return cached_file
cached_file = Path(cached_file)
cache_path_encoding = Path(cache_path_encoding) if cache_path_encoding else None
# we support zip and tar.gz files auto-extraction
suffix = cached_file.suffix.lower()
if suffix == ".gz":
suffix = "".join(a.lower() for a in cached_file.suffixes[-2:])
if suffix not in (".zip", ".tgz", ".tar.gz"):
return str(cached_file)
cache_folder = Path(cache_path_encoding or cached_file).parent
archive_suffix = (cache_path_encoding or cached_file).name[: -len(suffix)]
name = encode_string_to_filename(name) if name else name
if target_folder:
target_folder = Path(target_folder)
else:
target_folder = cache_folder / CacheManager.get_context_folder_lookup(cache_context).format(
archive_suffix, name
)
if target_folder.is_dir() and not force:
# noinspection PyBroadException
try:
target_folder.touch(exist_ok=True)
return target_folder.as_posix()
except Exception:
pass
base_logger = LoggerRoot.get_base_logger()
try:
# if target folder exists, meaning this is forced ao we extract directly into target folder
if target_folder.is_dir():
temp_target_folder = target_folder
else:
temp_target_folder = cache_folder / "{0}_{1}_{2}".format(
target_folder.name, time() * 1000, str(random()).replace(".", "")
)
temp_target_folder.mkdir(parents=True, exist_ok=True)
if suffix == ".zip":
zip_file = ZipFile(cached_file.as_posix())
create_zip_directories(zip_file, path=temp_target_folder.as_posix())
zip_file.extractall(path=temp_target_folder.as_posix())
elif suffix == ".tar.gz":
with tarfile.open(cached_file.as_posix()) as file:
safe_extract(file, temp_target_folder.as_posix())
elif suffix == ".tgz":
with tarfile.open(cached_file.as_posix(), mode="r:gz") as file:
safe_extract(file, temp_target_folder.as_posix())
if temp_target_folder != target_folder:
# we assume we will have such folder if we already extract the file
# noinspection PyBroadException
try:
# if rename fails, it means that someone else already manged to extract the file, delete the current
# folder and return the already existing cached zip folder
shutil.move(temp_target_folder.as_posix(), target_folder.as_posix())
except Exception:
if target_folder.exists():
target_folder.touch(exist_ok=True)
else:
base_logger.warning(
"Failed renaming {0} to {1}".format(temp_target_folder.as_posix(), target_folder.as_posix())
)
try:
shutil.rmtree(temp_target_folder.as_posix())
except Exception as ex:
base_logger.warning(
"Exception {}\nFailed deleting folder {}".format(ex, temp_target_folder.as_posix())
)
except Exception as ex:
# failed extracting the file:
base_logger.warning("Exception {}\nFailed extracting zip file {}".format(ex, cached_file.as_posix()))
# noinspection PyBroadException
try:
target_folder.rmdir()
except Exception:
pass
return cached_file.as_posix()
return target_folder.as_posix()
@classmethod
def get_files_server(cls) -> str:
from ..backend_api import Session
return Session.get_files_server_host()
@classmethod
def upload_folder(
cls,
local_folder: str,
remote_url: str,
match_wildcard: Optional[str] = None,
retries: Optional[int] = None,
) -> Optional[str]:
"""
Upload local folder recursively to a remote storage, maintaining the sub folder structure
in the remote storage.
.. note::
If we have a local file ``\\~/folder/sub/file.ext`` then
``StorageManager.upload_folder('\\~/folder/', 's3://bucket/')``
will create ``s3://bucket/sub/file.ext``
:param str local_folder: Local folder to recursively upload
:param str remote_url: Target remote storage location, tree structure of `local_folder` will
be created under the target remote_url. Supports Http/S3/GS/Azure and shared filesystem.
Example: 's3://bucket/data/'
:param str match_wildcard: If specified only upload files matching the `match_wildcard`
Example: `*.json`
Notice: target file size/date are not checked. Default True, always upload.
Notice if uploading to http, we will always overwrite the target.
:param int retries: Number of retries before failing to upload a file in the folder.
:return: Newly uploaded remote URL or None on error.
"""
base_logger = LoggerRoot.get_base_logger()
if not Path(local_folder).is_dir():
base_logger.error("Local folder '{}' does not exist".format(local_folder))
return
local_folder = str(Path(local_folder))
results = []
helper = cls.storage_helper.get(remote_url)
with ThreadPool() as pool:
for path in Path(local_folder).rglob(match_wildcard or "*"):
if not path.is_file():
continue
results.append(
pool.apply_async(
helper.upload,
args=(str(path), str(path).replace(local_folder, remote_url)),
kwds={"retries": retries if retries else cls._file_upload_retries},
)
)
success = 0
failed = 0
for res in results:
# noinspection PyBroadException
try:
res.get() # Reraise the exceptions from remote call (if any)
success += 1
except Exception:
failed += 1
if failed == 0:
return remote_url
base_logger.error("Failed uploading {}/{} files from {}".format(failed, success + failed, local_folder))
@classmethod
def download_file(
cls,
remote_url: str,
local_folder: Optional[str] = None,
overwrite: bool = False,
skip_zero_size_check: bool = False,
silence_errors: bool = False,
) -> Optional[str]:
"""
Download remote file to the local machine, maintaining the sub folder structure from the
remote storage.
.. note::
If we have a remote file `s3://bucket/sub/file.ext` then
`StorageManager.download_file('s3://bucket/sub/file.ext', '~/folder/')`
will create `~/folder/sub/file.ext`
:param str remote_url: Source remote storage location, path of `remote_url` will
be created under the target local_folder. Supports S3/GS/Azure and shared filesystem.
Example: 's3://bucket/data/'
:param bool overwrite: If False, and target files exist do not download.
If True, always download the remote files. Default False.
:param bool skip_zero_size_check: If True, no error will be raised for files with zero bytes size.
:param bool silence_errors: If True, silence errors that might pop up when trying to download
files stored remotely. Default False
:return: Path to downloaded file or None on error
"""
def remove_prefix_from_str(target_str: str, prefix_to_be_removed: str) -> str:
if target_str.startswith(prefix_to_be_removed):
return target_str[len(prefix_to_be_removed) :]
return target_str
longest_configured_url = cls.storage_helper._resolve_base_url(remote_url) # noqa
bucket_path = remove_prefix_from_str(remote_url[len(longest_configured_url) :], "/")
if not local_folder:
local_folder = CacheManager.get_cache_manager().get_cache_folder()
local_path = str(Path(local_folder).expanduser().absolute() / bucket_path)
helper = cls.storage_helper.get(remote_url)
return helper.download_to_file(
remote_url,
local_path,
overwrite_existing=overwrite,
skip_zero_size_check=skip_zero_size_check,
silence_errors=silence_errors,
)
@classmethod
def exists_file(cls, remote_url: str) -> bool:
"""
Check if remote file exists. Note that this function will return
False for directories.
:param str remote_url: The url where the file is stored.
E.g. 's3://bucket/some_file.txt', 'file://local/file'
:return: True is the remote_url stores a file and False otherwise
"""
# noinspection PyBroadException
try:
if remote_url.endswith("/"):
return False
helper = cls.storage_helper.get(remote_url)
return helper.exists_file(remote_url)
except Exception:
return False
@classmethod
def get_file_size_bytes(cls, remote_url: str, silence_errors: bool = False) -> Optional[int]:
"""
Get size of the remote file in bytes.
:param str remote_url: The url where the file is stored.
E.g. 's3://bucket/some_file.txt', 'file://local/file'
:param bool silence_errors: Silence errors that might occur
when fetching the size of the file. Default: False
:return: The size of the file in bytes.
None if the file could not be found or an error occurred.
"""
helper = cls.storage_helper.get(remote_url)
return helper.get_object_size_bytes(remote_url, silence_errors)
@classmethod
def download_folder(
cls,
remote_url: str,
local_folder: Optional[str] = None,
match_wildcard: Optional[str] = None,
overwrite: bool = False,
skip_zero_size_check: bool = False,
silence_errors: bool = False,
max_workers: Optional[int] = None,
) -> Optional[str]:
"""
Download remote folder recursively to the local machine, maintaining the sub folder structure
from the remote storage.
.. note::
If we have a remote file `s3://bucket/sub/file.ext` then
`StorageManager.download_folder('s3://bucket/', '~/folder/')`
will create `~/folder/sub/file.ext`
:param str remote_url: Source remote storage location, tree structure of `remote_url` will
be created under the target local_folder. Supports S3/GS/Azure and shared filesystem.
Example: 's3://bucket/data/'
:param str local_folder: Local target folder to create the full tree from remote_url.
If None, use the cache folder. (Default: use cache folder)
:param match_wildcard: If specified only download files matching the `match_wildcard`
Example: `*.json`
:param bool overwrite: If False, and target files exist do not download.
If True, always download the remote files. Default False.
:param bool skip_zero_size_check: If True, no error will be raised for files with zero bytes size.
:param bool silence_errors: If True, silence errors that might pop up when trying to download
files stored remotely. Default False
:param int max_workers: If value is set to a number,
it will spawn the specified number of worker threads
to download the contents of the folder in parallel. Otherwise, if set to None, it will
internally use as many threads as there are
logical CPU cores in the system (this is default Python behavior). Default None
:return: Target local folder
"""
base_logger = LoggerRoot.get_base_logger()
if local_folder:
try:
Path(local_folder).mkdir(parents=True, exist_ok=True)
except OSError as ex:
base_logger.error("Failed creating local folder '{}': {}".format(local_folder, ex))
return
else:
local_folder = CacheManager.get_cache_manager().get_cache_folder()
helper = cls.storage_helper.get(remote_url)
results = []
with ThreadPool(processes=max_workers) as pool:
for path in helper.list(prefix=remote_url):
remote_path = (
str(Path(helper.base_url) / Path(path))
if helper.get_driver_direct_access(helper.base_url)
else "{}/{}".format(helper.base_url.rstrip("/"), path.lstrip("/"))
)
if match_wildcard and not fnmatch.fnmatch(remote_path, match_wildcard):
continue
results.append(
pool.apply_async(
cls.download_file,
args=(remote_path, local_folder),
kwds={
"overwrite": overwrite,
"skip_zero_size_check": skip_zero_size_check,
"silence_errors": silence_errors,
},
)
)
for res in results:
res.wait()
if not results and not silence_errors:
LoggerRoot.get_base_logger().warning("Did not download any files matching {}".format(remote_url))
return local_folder
@classmethod
def list(
cls,
remote_url: str,
return_full_path: bool = False,
with_metadata: bool = False,
) -> Optional[List[Union[str, dict]]]:
"""
Return a list of object names inside the base path or dictionaries containing the corresponding
objects' metadata (in case `with_metadata` is True)
:param str remote_url: The base path.
For Google Storage, Azure and S3 it is the bucket of the path, for local files it is the root directory.
For example: AWS S3: `s3://bucket/folder_` will list all the files you have in
`s3://bucket-name/folder_*/*`. The same behaviour with Google Storage: `gs://bucket/folder_`,
Azure blob storage: `azure://bucket/folder_` and also file system listing: `/mnt/share/folder_`
:param bool return_full_path: If True, return a list of full object paths, otherwise return a list of
relative object paths (default False).
:param with_metadata: Instead of returning just the names of the objects, return a list of dictionaries
containing the name and metadata of the remote file. Thus, each dictionary will contain the following
keys: `name`, `size`.
`return_full_path` will modify the name of each dictionary entry to the full path.
:return: The paths of all the objects the storage base path under prefix or the dictionaries containing the objects' metadata, relative to the base path.
None in case of list operation is not supported (http and https protocols for example)
"""
helper = cls.storage_helper.get(remote_url)
try:
helper_list_result = helper.list(prefix=remote_url, with_metadata=with_metadata)
except Exception as ex:
LoggerRoot.get_base_logger().warning("Can not list files for '{}' - {}".format(remote_url, ex))
return None
prefix = remote_url.rstrip("/") if helper.base_url == "file://" else helper.base_url
if not with_metadata:
return (
["{}/{}".format(prefix, name) for name in helper_list_result]
if return_full_path
else helper_list_result
)
else:
if return_full_path:
for obj in helper_list_result:
obj["name"] = "{}/{}".format(prefix, obj.get("name"))
return helper_list_result
@classmethod
def get_metadata(cls, remote_url: str, return_full_path: bool = False) -> Optional[dict]:
"""
Get the metadata of the remote object.
The metadata is a dict containing the following keys: `name`, `size`.
:param str remote_url: Source remote storage location, tree structure of `remote_url` will
be created under the target local_folder. Supports S3/GS/Azure, shared filesystem and http(s).
Example: 's3://bucket/data/'
:param return_full_path: True for returning a full path (with the base url)
:return: A dict containing the metadata of the remote object. In case of an error, `None` is returned
"""
helper = cls.storage_helper.get(remote_url)
obj = helper.get_object(remote_url)
if not obj:
return None
metadata = helper.get_object_metadata(obj)
base_url = helper._resolve_base_url(remote_url)
if return_full_path and not metadata["name"].startswith(base_url):
metadata["name"] = base_url + ("/" if not base_url.endswith("/") else "") + metadata["name"]
return metadata
@classmethod
def set_report_upload_chunk_size(cls, chunk_size_mb: int) -> None:
"""
Set the upload progress report chunk size (in MB). The chunk size
determines how often the progress reports are logged:
every time a chunk of data with a size greater than `chunk_size_mb`
is uploaded, log the report.
This function overwrites the `sdk.storage.log.report_upload_chunk_size_mb`
config entry
:param chunk_size_mb: The chunk size, in megabytes
"""
ProgressReport.report_upload_chunk_size_mb = int(chunk_size_mb)
@classmethod
def set_report_download_chunk_size(cls, chunk_size_mb: int) -> None:
"""
Set the download progress report chunk size (in MB). The chunk size
determines how often the progress reports are logged:
every time a chunk of data with a size greater than `chunk_size_mb`
is downloaded, log the report.
This function overwrites the `sdk.storage.log.report_download_chunk_size_mb`
config entry
:param chunk_size_mb: The chunk size, in megabytes
"""
ProgressReport.report_download_chunk_size_mb = int(chunk_size_mb)
|
StorageManager
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingTypedDict1.py
|
{
"start": 230,
"end": 3280
}
|
class ____(TypedDict, total=False):
a: int
d: str
def f1(p: TD1 | TD2):
if "b" in p:
# This should technically be TD1 | TD2, but the
# current narrowing logic implements a not-entirely-safe
# narrowing behavior. We can fix this once PEP 728
# is accepted.
reveal_type(p, expected_text="TD1")
# reveal_type(p, expected_text="TD1 | TD2")
else:
reveal_type(p, expected_text="TD2")
def f2(p: TD1 | TD2):
if "b" not in p:
reveal_type(p, expected_text="TD2")
else:
# This should technically be TD1 | TD2, but the
# current narrowing logic implements a not-entirely-safe
# narrowing behavior. We can fix this once PEP 728
# is accepted.
reveal_type(p, expected_text="TD1")
# reveal_type(p, expected_text="TD1 | TD2")
def f3(p: TD1 | TD3):
if "d" in p:
# This should technically be TD1 | TD3, but the
# current narrowing logic implements a not-entirely-safe
# narrowing behavior. We can fix this once PEP 728
# is accepted.
reveal_type(p, expected_text="TD3")
# reveal_type(p, expected_text="TD1 | TD3")
else:
reveal_type(p, expected_text="TD1 | TD3")
def f4(p: TD1 | TD3):
if "d" not in p:
reveal_type(p, expected_text="TD1 | TD3")
else:
# This should technically be TD1 | TD3, but the
# current narrowing logic implements a not-entirely-safe
# narrowing behavior. We can fix this once PEP 728
# is accepted.
reveal_type(p, expected_text="TD3")
# reveal_type(p, expected_text="TD1 | TD3")
def f5(p: TD1 | TD3):
if "a" in p:
reveal_type(p, expected_text="TD1 | TD3")
else:
reveal_type(p, expected_text="TD3")
def f6(p: TD1 | TD2 | TD3):
# This should generate an error for TD3.
v1 = p["a"]
v2 = p.get("a")
if "c" in p:
# This should technicall generate two errors for TD1 and TD3
v3 = p["c"]
# This should technically be Unknown | str, but the
# current narrowing logic implements a not-entirely-safe
# narrowing behavior. We can fix this once PEP 728
# is accepted.
reveal_type(v3, expected_text="str")
# reveal_type(v3, expected_text="Unknown | str")
if "a" in p and "d" in p:
v4 = p["a"]
# This should technically be str | int, but the
# current narrowing logic implements a not-entirely-safe
# narrowing behavior. We can fix this once PEP 728
# is accepted.
reveal_type(v4, expected_text="int")
# reveal_type(v4, expected_text="str | int")
# This should generate an error for TD1 and TD2
v5 = p["d"]
reveal_type(v5, expected_text="Unknown | str")
# This should generate three errors, two for TD1 and TD2 (because
# "d" is not a valid key) and one for TD3 (because "d" is not required).
v6 = p["d"]
def f7(p: TD3):
pass
def f8(p: TD3):
if "a" in p:
f7(p)
|
TD3
|
python
|
mlflow__mlflow
|
mlflow/genai/scorers/builtin_scorers.py
|
{
"start": 67942,
"end": 68265
}
|
class ____(MlflowException):
def __init__(self, scorer: str, missing_columns: set[str]):
self.scorer = scorer
self.missing_columns = list(missing_columns)
super().__init__(
f"The following columns are required for the scorer {scorer}: {missing_columns}"
)
|
MissingColumnsException
|
python
|
plotly__plotly.py
|
plotly/graph_objs/choropleth/colorbar/_title.py
|
{
"start": 233,
"end": 3992
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "choropleth.colorbar"
_path_str = "choropleth.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.choropleth.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choropleth.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choropleth.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choropleth.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Title
|
python
|
huggingface__transformers
|
src/transformers/models/dpt/modeling_dpt.py
|
{
"start": 12943,
"end": 15370
}
|
class ____(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout_prob = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size**-0.5
self.is_causal = False
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
batch_size = hidden_states.shape[0]
new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size
key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(
self,
query_layer,
key_layer,
value_layer,
None,
is_causal=self.is_causal,
scaling=self.scaling,
dropout=0.0 if not self.training else self.dropout_prob,
)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
return context_layer, attention_probs
# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViTConfig->DPTConfig, ViTSelfOutput->DPTViTSelfOutput
|
DPTSelfAttention
|
python
|
crytic__slither
|
slither/slithir/operations/return_operation.py
|
{
"start": 330,
"end": 1885
}
|
class ____(Operation):
"""
Return
Only present as last operation in RETURN node
"""
def __init__(
self, values: Optional[Union[RVALUE, TupleVariable, Function, List[RVALUE]]]
) -> None:
# Note: Can return None
# ex: return call()
# where call() dont return
self._values: List[Union[RVALUE, TupleVariable, Function]]
if not isinstance(values, list):
assert (
is_valid_rvalue(values)
or isinstance(values, (TupleVariable, Function))
or values is None
)
if values is None:
self._values = []
else:
self._values = [values]
else:
# Remove None
# Prior Solidity 0.5
# return (0,)
# was valid for returns(uint)
self._values = [v for v in values if not v is None]
self._valid_value(self._values)
super().__init__()
def _valid_value(self, value: Any) -> bool:
if isinstance(value, list):
assert all(self._valid_value(v) for v in value)
else:
assert is_valid_rvalue(value) or isinstance(value, (TupleVariable, Function))
return True
@property
def read(self) -> List[Variable]:
return self._unroll(self.values)
@property
def values(self) -> List[Variable]:
return self._unroll(self._values)
def __str__(self) -> str:
return f"RETURN {','.join([f'{x}' for x in self.values])}"
|
Return
|
python
|
huggingface__transformers
|
src/transformers/models/owlvit/modeling_owlvit.py
|
{
"start": 34733,
"end": 36943
}
|
class ____(OwlViTPreTrainedModel):
config: OwlViTTextConfig
input_modalities = ("text",)
def __init__(self, config: OwlViTTextConfig):
super().__init__(config)
self.text_model = OwlViTTextTransformer(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.text_model.embeddings.token_embedding
def set_input_embeddings(self, value):
self.text_model.embeddings.token_embedding = value
@auto_docstring
def forward(
self,
input_ids: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
Examples:
```python
>>> from transformers import AutoProcessor, OwlViTTextModel
>>> model = OwlViTTextModel.from_pretrained("google/owlvit-base-patch32")
>>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
>>> inputs = processor(
... text=[["a photo of a cat", "a photo of a dog"], ["photo of a astranaut"]], return_tensors="pt"
... )
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled (EOS token) states
```"""
# Get embeddings for all text queries in all batch samples
return self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
|
OwlViTTextModel
|
python
|
getsentry__sentry
|
tests/sentry/middleware/test_subdomain.py
|
{
"start": 3401,
"end": 4182
}
|
class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.middleware = settings.MIDDLEWARE
def test_simple(self) -> None:
self.create_organization(name="albertos-apples")
response = self.client.get(
reverse("test-endpoint"),
HTTP_HOST="albertos-apples.testserver",
)
assert response.status_code == 200
assert response.data == {
"subdomain": "albertos-apples",
}
response = self.client.get(
reverse("test-endpoint"),
HTTP_HOST="albertos_apples.testserver",
)
assert isinstance(response, HttpResponseRedirect)
assert response.status_code == 302
assert response.url == "http://testserver"
|
End2EndTest
|
python
|
cython__cython
|
Cython/Debugger/libcython.py
|
{
"start": 46210,
"end": 47007
}
|
class ____(CythonCommand):
"""
Set a Cython variable to a certain value
cy set my_cython_c_variable = 10
cy set my_cython_py_variable = $cy_eval("{'doner': 'kebab'}")
This is equivalent to
set $cy_value("my_cython_variable") = 10
"""
name = 'cy set'
command_class = gdb.COMMAND_DATA
completer_class = gdb.COMPLETE_NONE
@libpython.dont_suppress_errors
@require_cython_frame
def invoke(self, expr, from_tty):
name_and_expr = expr.split('=', 1)
if len(name_and_expr) != 2:
raise gdb.GdbError("Invalid expression. Use 'cy set var = expr'.")
varname, expr = name_and_expr
cname = self.cy.cy_cname.invoke(varname.strip())
gdb.execute("set %s = %s" % (cname, expr))
# Functions
|
CySet
|
python
|
astropy__astropy
|
astropy/modeling/parameters.py
|
{
"start": 581,
"end": 695
}
|
class ____(Exception):
"""Generic exception class for all exceptions pertaining to Parameters."""
|
ParameterError
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/eks.py
|
{
"start": 1981,
"end": 3633
}
|
class ____(Enum):
"""Contains the possible State values of an EKS Managed Nodegroup."""
CREATING = "CREATING"
ACTIVE = "ACTIVE"
UPDATING = "UPDATING"
DELETING = "DELETING"
CREATE_FAILED = "CREATE_FAILED"
DELETE_FAILED = "DELETE_FAILED"
DEGRADED = "DEGRADED"
NONEXISTENT = "NONEXISTENT"
COMMAND = """
export PYTHON_OPERATORS_VIRTUAL_ENV_MODE=1
# Source credentials from secure file
source {credentials_file}
output=$({python_executable} -m airflow.providers.amazon.aws.utils.eks_get_token \
--cluster-name {eks_cluster_name} --sts-url '{sts_url}' {args} 2>&1)
status=$?
# Clear environment variables after use (defense in depth)
unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN
if [ "$status" -ne 0 ]; then
printf '%s' "$output" >&2
exit "$status"
fi
# Use pure bash below to parse so that it's posix compliant
last_line=${{output##*$'\\n'}} # strip everything up to the last newline
timestamp=${{last_line#expirationTimestamp: }} # drop the label
timestamp=${{timestamp%%,*}} # keep up to the first comma
token=${{last_line##*, token: }} # text after ", token: "
json_string=$(printf '{{"kind": "ExecCredential","apiVersion": \
"client.authentication.k8s.io/v1alpha1","spec": {{}},"status": \
{{"expirationTimestamp": "%s","token": "%s"}}}}' "$timestamp" "$token")
echo $json_string
"""
|
NodegroupStates
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
|
{
"start": 16301,
"end": 16563
}
|
class ____(graphene.Union):
class Meta:
types = (
GrapheneFailedToMaterializeEvent,
GrapheneMaterializationEvent,
GrapheneObservationEvent,
)
name = "AssetResultEventType"
|
GrapheneAssetResultEventType
|
python
|
django__django
|
django/test/testcases.py
|
{
"start": 50395,
"end": 56291
}
|
class ____(TransactionTestCase):
"""
Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Open atomic blocks for multiple databases."""
atomics = {}
for db_name in cls._databases_names():
atomic = transaction.atomic(using=db_name)
atomic._from_testcase = True
atomic.__enter__()
atomics[db_name] = atomic
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened by the previous method."""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def _databases_support_transactions(cls):
return connections_support_transactions(cls.databases)
@classmethod
def _databases_support_savepoints(cls):
return connections_support_savepoints(cls.databases)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not (
cls._databases_support_transactions()
and cls._databases_support_savepoints()
):
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command(
"loaddata",
*cls.fixtures,
verbosity=0,
database=db_name,
)
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
pre_attrs = cls.__dict__.copy()
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
for name, value in cls.__dict__.items():
if value is not pre_attrs.get(name):
setattr(cls, name, TestData(name, value))
@classmethod
def tearDownClass(cls):
if (
cls._databases_support_transactions()
and cls._databases_support_savepoints()
):
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all(initialized_only=True):
conn.close()
super().tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase."""
pass
def _should_reload_connections(self):
if self._databases_support_transactions():
return False
return super()._should_reload_connections()
@classmethod
def _fixture_setup(cls):
if not cls._databases_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
cls.setUpTestData()
return super()._fixture_setup()
if cls.reset_sequences:
raise TypeError("reset_sequences cannot be used on TestCase instances")
cls.atomics = cls._enter_atomics()
if not cls._databases_support_savepoints():
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
call_command(
"loaddata",
*cls.fixtures,
**{"verbosity": 0, "database": db_name},
)
cls.setUpTestData()
def _fixture_teardown(self):
if not self._databases_support_transactions():
return super()._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks
and not connection.needs_rollback
and connection.is_usable()
)
@classmethod
@contextmanager
def captureOnCommitCallbacks(cls, *, using=DEFAULT_DB_ALIAS, execute=False):
"""Context manager to capture transaction.on_commit() callbacks."""
callbacks = []
start_count = len(connections[using].run_on_commit)
try:
yield callbacks
finally:
while True:
callback_count = len(connections[using].run_on_commit)
for _, callback, robust in connections[using].run_on_commit[
start_count:
]:
callbacks.append(callback)
if execute:
if robust:
try:
callback()
except Exception as e:
logger.error(
f"Error calling {callback.__qualname__} in "
f"on_commit() (%s).",
e,
exc_info=True,
)
else:
callback()
if callback_count == len(connections[using].run_on_commit):
break
start_count = callback_count
|
TestCase
|
python
|
davidhalter__jedi
|
jedi/inference/gradual/typing.py
|
{
"start": 10166,
"end": 10883
}
|
class ____(BaseTypingInstance):
def py__call__(self, arguments):
"""
def x() -> Callable[[Callable[..., _T]], _T]: ...
"""
# The 0th index are the arguments.
try:
param_values = self._generics_manager[0]
result_values = self._generics_manager[1]
except IndexError:
debug.warning('Callable[...] defined without two arguments')
return NO_VALUES
else:
from jedi.inference.gradual.annotation import infer_return_for_callable
return infer_return_for_callable(arguments, param_values, result_values)
def py__get__(self, instance, class_value):
return ValueSet([self])
|
Callable
|
python
|
pytorch__pytorch
|
test/functorch/test_eager_transforms.py
|
{
"start": 174712,
"end": 176894
}
|
class ____(TestCase):
# torch.compile is not supported on Windows CUDA.
# Triton only supports GPU with SM70 or later.
@expectedFailureIf((IS_WINDOWS and TEST_CUDA) or (TEST_CUDA and not SM70OrLater))
@unittest.skipIf(
TEST_CUDA_MEM_LEAK_CHECK,
"Leaking memory, see https://github.com/pytorch/pytorch/pull/150059 for example",
)
def test_compile_vmap_hessian(self, device):
# The model and inputs are a smaller version
# of code at benchmark repo:
# https://github.com/pytorch/benchmark/blob/main/userbenchmark/functorch/vmap_hessian_fc.py
D = 2
B = 4
x = torch.randn(B, D, device=device)
model = nn.Sequential(nn.Linear(D, D), nn.ReLU()).to(device)
params_and_buffers = (
dict(model.named_parameters()),
dict(model.named_buffers()),
)
def predict(params_and_buffers, x):
out = torch.func.functional_call(model, params_and_buffers, x)
return out, out
fn = vmap(
jacfwd(jacrev(predict, argnums=1, has_aux=True), argnums=1, has_aux=True),
in_dims=(None, 0),
)
expected = fn(params_and_buffers, x)
opt_fn = torch.compile(traceable(fn))
actual = opt_fn(params_and_buffers, x)
self.assertEqual(actual, expected)
# torch.compile is not supported on Windows
@torch._dynamo.config.patch(suppress_errors=False)
def test_grad_deprecated_api(self, device):
x = torch.randn((), device=device)
y = torch.randn((), device=device)
def wrapper_fn(x, y):
return functorch.grad(torch.mul)(x, y)
actual = wrapper_fn(x, y)
expected = torch.compile(wrapper_fn, backend="eager", fullgraph=True)(x, y)
torch.compile(wrapper_fn, backend="eager", fullgraph=True)
self.assertEqual(actual, expected)
def wrapper_fn(x, y):
return functorch.grad(torch.mul, argnums=(0, 1))(x, y)
actual = wrapper_fn(x, y)
expected = torch.compile(wrapper_fn, backend="eager", fullgraph=True)(x, y)
self.assertEqual(actual, expected)
|
TestCompileTransforms
|
python
|
PrefectHQ__prefect
|
tests/server/orchestration/test_task_concurrency_v2_integration.py
|
{
"start": 13648,
"end": 28040
}
|
class ____:
"""Test ReleaseTaskConcurrencySlots with V2 Global Concurrency Limits.
Note: Some of these tests may fail due to a bug in the current implementation
where holder.id (task run ID) is used as lease_id in the release logic.
The correct behavior would require finding the lease_id associated with a holder.
"""
async def create_v2_concurrency_limit(
self, session: AsyncSession, tag: str, limit: int
) -> ConcurrencyLimitV2:
"""Helper to create a V2 concurrency limit."""
return await concurrency_limits_v2.create_concurrency_limit(
session=session,
concurrency_limit=actions.ConcurrencyLimitV2Create(
name=f"tag:{tag}",
limit=limit,
active=True,
),
)
async def test_v2_and_v1_integration_full_cycle(
self,
session: AsyncSession,
initialize_orchestration: Callable[..., Any],
) -> None:
"""Test full cycle: secure V2+V1 limits, then release both."""
# Set up both V2 and V1 limits
v2_limit = await self.create_v2_concurrency_limit(session, "cycle-v2", 2)
cl_create = actions.ConcurrencyLimitCreate(
tag="cycle-v1",
concurrency_limit=2,
).model_dump(mode="json")
cl_model = core.ConcurrencyLimit(**cl_create)
await concurrency_limits.create_concurrency_limit(
session=session, concurrency_limit=cl_model
)
# Test acquiring slots
secure_policy = [SecureTaskConcurrencySlots]
release_policy = [ReleaseTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
completed_transition = (states.StateType.RUNNING, states.StateType.COMPLETED)
# Task gets both V2 and V1 tags
ctx1 = await initialize_orchestration(
session, "task", *running_transition, run_tags=["cycle-v2", "cycle-v1"]
)
# Secure slots
async with contextlib.AsyncExitStack() as stack:
for rule in secure_policy:
ctx1 = await stack.enter_async_context(rule(ctx1, *running_transition))
await ctx1.validate_proposed_state()
assert ctx1.response_status == SetStateStatus.ACCEPT
# Verify both limits were used
await session.refresh(v2_limit)
assert v2_limit.active_slots == 1
v1_limit = await concurrency_limits.read_concurrency_limit_by_tag(
session, "cycle-v1"
)
assert str(ctx1.run.id) in v1_limit.active_slots
# Now complete the task to release slots
ctx2 = await initialize_orchestration(
session,
"task",
*completed_transition,
run_override=ctx1.run, # Same task run
run_tags=["cycle-v2", "cycle-v1"],
)
# Set validated state to completed (normally done by orchestration)
ctx2.validated_state = states.State(type=states.StateType.COMPLETED)
async with contextlib.AsyncExitStack() as stack:
for rule in release_policy:
ctx2 = await stack.enter_async_context(
rule(ctx2, *completed_transition)
)
# Verify slots were released
await session.refresh(v2_limit)
assert v2_limit.active_slots == 0
await session.refresh(v1_limit)
assert str(ctx1.run.id) not in v1_limit.active_slots
async def test_release_only_on_terminal_transitions(
self,
session: AsyncSession,
initialize_orchestration: Callable[..., Any],
) -> None:
"""Test that slots are only released on terminal transitions."""
v2_limit = await self.create_v2_concurrency_limit(session, "terminal-test", 2)
# First acquire a slot
secure_policy = [SecureTaskConcurrencySlots]
release_policy = [ReleaseTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx1 = await initialize_orchestration(
session, "task", *running_transition, run_tags=["terminal-test"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in secure_policy:
ctx1 = await stack.enter_async_context(rule(ctx1, *running_transition))
await ctx1.validate_proposed_state()
assert ctx1.response_status == SetStateStatus.ACCEPT
await session.refresh(v2_limit)
assert v2_limit.active_slots == 1
# Do a terminal transition (running to completed - should release)
terminal_transition = (states.StateType.RUNNING, states.StateType.COMPLETED)
ctx2 = await initialize_orchestration(
session,
"task",
*terminal_transition,
run_override=ctx1.run,
run_tags=["terminal-test"],
)
# Set validated state to completed (normally done by orchestration)
ctx2.validated_state = states.State(type=states.StateType.COMPLETED)
async with contextlib.AsyncExitStack() as stack:
for rule in release_policy:
ctx2 = await stack.enter_async_context(rule(ctx2, *terminal_transition))
# Verify slots were released on terminal transition
await session.refresh(v2_limit)
assert v2_limit.active_slots == 0
async def test_v2_release_with_no_matching_holders(
self,
session: AsyncSession,
initialize_orchestration: Callable[..., Any],
) -> None:
"""Test that release handles case where no holders match the task run."""
v2_limit = await self.create_v2_concurrency_limit(session, "no-match", 2)
release_policy = [ReleaseTaskConcurrencySlots]
completed_transition = (states.StateType.RUNNING, states.StateType.COMPLETED)
# Task that doesn't have any leases
ctx = await initialize_orchestration(
session, "task", *completed_transition, run_tags=["no-match"]
)
# This should not raise any errors
async with contextlib.AsyncExitStack() as stack:
for rule in release_policy:
ctx = await stack.enter_async_context(rule(ctx, *completed_transition))
# No slots should be affected since no leases existed
await session.refresh(v2_limit)
assert v2_limit.active_slots == 0
async def test_v2_limits_with_multiple_tags(
self,
session: AsyncSession,
initialize_orchestration: Callable[..., Any],
) -> None:
"""Test that a task with multiple V2 tags processes all limits."""
v2_limit1 = await self.create_v2_concurrency_limit(session, "multi-1", 2)
v2_limit2 = await self.create_v2_concurrency_limit(session, "multi-2", 3)
secure_policy = [SecureTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["multi-1", "multi-2"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in secure_policy:
ctx = await stack.enter_async_context(rule(ctx, *running_transition))
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
# Both limits should have active slots
await session.refresh(v2_limit1)
await session.refresh(v2_limit2)
assert v2_limit1.active_slots == 1
assert v2_limit2.active_slots == 1
async def test_v2_slot_increment_lease_creation_atomicity(
self,
session: AsyncSession,
initialize_orchestration: Callable[..., Any],
) -> None:
"""
Test that slot increments and lease creation are atomic in orchestration policy.
This test verifies the fix for the zombie slot bug where slots could be
incremented but leases not created due to session/transaction boundary issues.
The fix ensures both operations happen in a single transaction context.
"""
# Create two limits with different capacities
v2_limit_5 = await self.create_v2_concurrency_limit(session, "limit-5", 5)
v2_limit_10 = await self.create_v2_concurrency_limit(session, "limit-10", 10)
secure_policy = [SecureTaskConcurrencySlots]
release_policy = [ReleaseTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
completed_transition = (states.StateType.RUNNING, states.StateType.COMPLETED)
# Run 5 tasks to fill the smaller limit and use slots from both limits
task_contexts = []
for _ in range(5):
task_ctx = await initialize_orchestration(
session,
"task",
*running_transition,
run_tags=["limit-5", "limit-10"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in secure_policy:
task_ctx = await stack.enter_async_context(
rule(task_ctx, *running_transition)
)
await task_ctx.validate_proposed_state()
# Each task should be accepted and increment both limits
assert task_ctx.response_status == SetStateStatus.ACCEPT
task_contexts.append(task_ctx)
# Verify both limits have the expected slots and leases
await session.refresh(v2_limit_5)
await session.refresh(v2_limit_10)
assert v2_limit_5.active_slots == 5
assert v2_limit_10.active_slots == 5
# Count leases via lease storage
lease_storage = get_concurrency_lease_storage()
limit_5_holders = await lease_storage.list_holders_for_limit(v2_limit_5.id)
limit_10_holders = await lease_storage.list_holders_for_limit(v2_limit_10.id)
assert len(limit_5_holders) == 5
assert len(limit_10_holders) == 5
# 6th task should be blocked because limit-5 is full
blocked_task_ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["limit-5", "limit-10"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in secure_policy:
blocked_task_ctx = await stack.enter_async_context(
rule(blocked_task_ctx, *running_transition)
)
await blocked_task_ctx.validate_proposed_state()
# Should be blocked - no partial increments should occur
assert blocked_task_ctx.response_status == SetStateStatus.WAIT
# Slots should remain the same (no zombie slots created)
await session.refresh(v2_limit_5)
await session.refresh(v2_limit_10)
assert v2_limit_5.active_slots == 5
assert v2_limit_10.active_slots == 5
# Complete all tasks to verify proper cleanup atomicity
for task_ctx in task_contexts:
completed_ctx = await initialize_orchestration(
session,
"task",
*completed_transition,
run_override=task_ctx.run,
run_tags=["limit-5", "limit-10"],
)
# Set validated state to completed (normally done by orchestration)
completed_ctx.validated_state = states.State(
type=states.StateType.COMPLETED
)
async with contextlib.AsyncExitStack() as stack:
for rule in release_policy:
completed_ctx = await stack.enter_async_context(
rule(completed_ctx, *completed_transition)
)
# Both limits should be completely cleaned up
await session.refresh(v2_limit_5)
await session.refresh(v2_limit_10)
assert v2_limit_5.active_slots == 0
assert v2_limit_10.active_slots == 0
# Verify leases were cleaned up
limit_5_holders = await lease_storage.list_holders_for_limit(v2_limit_5.id)
limit_10_holders = await lease_storage.list_holders_for_limit(v2_limit_10.id)
assert len(limit_5_holders) == 0
assert len(limit_10_holders) == 0
# Now the previously blocked task should be able to run
retry_task_ctx = await initialize_orchestration(
session,
"task",
*running_transition,
run_override=blocked_task_ctx.run,
run_tags=["limit-5", "limit-10"],
)
async with contextlib.AsyncExitStack() as stack:
for rule in secure_policy:
retry_task_ctx = await stack.enter_async_context(
rule(retry_task_ctx, *running_transition)
)
await retry_task_ctx.validate_proposed_state()
assert retry_task_ctx.response_status == SetStateStatus.ACCEPT
await session.refresh(v2_limit_5)
await session.refresh(v2_limit_10)
assert v2_limit_5.active_slots == 1
assert v2_limit_10.active_slots == 1
async def test_v2_limit_prevents_exceeding_capacity(
self,
session: AsyncSession,
initialize_orchestration: Callable[..., Any],
) -> None:
"""Test that V2 limits prevent tasks from exceeding capacity."""
v2_limit = await self.create_v2_concurrency_limit(session, "capacity-test", 1)
# First, manually fill the limit to capacity
await concurrency_limits_v2.bulk_increment_active_slots(
session=session,
concurrency_limit_ids=[v2_limit.id],
slots=1,
)
secure_policy = [SecureTaskConcurrencySlots]
running_transition = (states.StateType.PENDING, states.StateType.RUNNING)
ctx = await initialize_orchestration(
session, "task", *running_transition, run_tags=["capacity-test"]
)
async with contextlib.AsyncExitStack() as stack:
for rule in secure_policy:
ctx = await stack.enter_async_context(rule(ctx, *running_transition))
await ctx.validate_proposed_state()
# Should be told to wait since capacity is already reached
assert ctx.response_status == SetStateStatus.WAIT
|
TestReleaseTaskConcurrencySlotsV2Integration
|
python
|
getsentry__sentry
|
tests/sentry/release_health/release_monitor/test_metrics.py
|
{
"start": 349,
"end": 516
}
|
class ____(
BaseFetchProjectsWithRecentSessionsTest, BaseMetricsTestCase
):
backend_class = MetricReleaseMonitorBackend
|
MetricFetchProjectsWithRecentSessionsTest
|
python
|
sphinx-doc__sphinx
|
tests/roots/test-ext-autodoc/target/enums.py
|
{
"start": 2893,
"end": 2966
}
|
class ____(Greeter, Overridden, enum.Enum):
"""docstring"""
|
_ParentEnum
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-stability-factor-of-array.py
|
{
"start": 102,
"end": 2116
}
|
class ____(object):
def minStable(self, nums, maxC):
"""
:type nums: List[int]
:type maxC: int
:rtype: int
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
def binary_search_right(left, right, check):
while left <= right:
mid = left + (right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return right
# RMQ - Sparse Table
# Template: https://github.com/kamyu104/GoogleCodeJam-Farewell-Rounds/blob/main/Round%20D/genetic_sequences2.py3
# Time: ctor: O(NlogN) * O(fn)
# query: O(fn)
# Space: O(NlogN)
class SparseTable(object):
def __init__(self, arr, fn):
self.fn = fn
self.bit_length = [0]
n = len(arr)
k = n.bit_length()-1 # log2_floor(n)
for i in xrange(k+1):
self.bit_length.extend(i+1 for _ in xrange(min(1<<i, (n+1)-len(self.bit_length))))
self.st = [[0]*n for _ in xrange(k+1)]
self.st[0] = arr[:]
for i in xrange(1, k+1): # Time: O(NlogN) * O(fn)
for j in xrange((n-(1<<i))+1):
self.st[i][j] = fn(self.st[i-1][j], self.st[i-1][j+(1<<(i-1))])
def query(self, L, R): # Time: O(fn)
i = self.bit_length[R-L+1]-1 # log2_floor(R-L+1)
return self.fn(self.st[i][L], self.st[i][R-(1<<i)+1])
def check(l):
cnt = 0
i = 0
while i+l-1 < len(nums):
if rmq.query(i, i+l-1) >= 2:
cnt += 1
i += l
else:
i += 1
return cnt > maxC
rmq = SparseTable(nums, gcd)
return binary_search_right(1, len(nums), check)
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/constraints.py
|
{
"start": 7620,
"end": 11444
}
|
class ____(Constraint):
"""Constrains `Conv2D` kernel weights to be the same for each radius.
Also available via the shortcut function
`tf.keras.constraints.radial_constraint`.
For example, the desired output for the following 4-by-4 kernel:
```
kernel = [[v_00, v_01, v_02, v_03],
[v_10, v_11, v_12, v_13],
[v_20, v_21, v_22, v_23],
[v_30, v_31, v_32, v_33]]
```
is this::
```
kernel = [[v_11, v_11, v_11, v_11],
[v_11, v_33, v_33, v_11],
[v_11, v_33, v_33, v_11],
[v_11, v_11, v_11, v_11]]
```
This constraint can be applied to any `Conv2D` layer version, including
`Conv2DTranspose` and `SeparableConv2D`, and with either `"channels_last"` or
`"channels_first"` data format. The method assumes the weight tensor is of
shape `(rows, cols, input_depth, output_depth)`.
"""
@doc_controls.do_not_generate_docs
def __call__(self, w):
w_shape = w.shape
if w_shape.rank is None or w_shape.rank != 4:
raise ValueError(
'The weight tensor must be of rank 4, but is of shape: %s' % w_shape)
height, width, channels, kernels = w_shape
w = backend.reshape(w, (height, width, channels * kernels))
# TODO(cpeter): Switch map_fn for a faster tf.vectorized_map once
# backend.switch is supported.
w = backend.map_fn(
self._kernel_constraint,
backend.stack(array_ops_stack.unstack(w, axis=-1), axis=0))
return backend.reshape(
backend.stack(array_ops_stack.unstack(w, axis=0), axis=-1),
(height, width, channels, kernels))
def _kernel_constraint(self, kernel):
"""Radially constraints a kernel with shape (height, width, channels)."""
padding = backend.constant([[1, 1], [1, 1]], dtype='int32')
kernel_shape = backend.shape(kernel)[0]
start = backend.cast(kernel_shape / 2, 'int32')
kernel_new = backend.switch(
backend.cast(math_ops.floormod(kernel_shape, 2), 'bool'),
lambda: kernel[start - 1:start, start - 1:start],
lambda: kernel[start - 1:start, start - 1:start] + backend.zeros( # pylint: disable=g-long-lambda
(2, 2), dtype=kernel.dtype))
index = backend.switch(
backend.cast(math_ops.floormod(kernel_shape, 2), 'bool'),
lambda: backend.constant(0, dtype='int32'),
lambda: backend.constant(1, dtype='int32'))
while_condition = lambda index, *args: backend.less(index, start)
def body_fn(i, array):
return i + 1, array_ops.pad(
array,
padding,
constant_values=kernel[start + i, start + i])
_, kernel_new = while_loop.while_loop(
while_condition,
body_fn, [index, kernel_new],
shape_invariants=[
index.get_shape(),
tensor_shape.TensorShape([None, None])
])
return kernel_new
# Aliases.
max_norm = MaxNorm
non_neg = NonNeg
unit_norm = UnitNorm
min_max_norm = MinMaxNorm
radial_constraint = RadialConstraint
# Legacy aliases.
maxnorm = max_norm
nonneg = non_neg
unitnorm = unit_norm
def serialize(constraint):
return serialize_keras_object(constraint)
def deserialize(config, custom_objects=None):
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='constraint')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, str):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret constraint identifier: ' +
str(identifier))
|
RadialConstraint
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_bar23.py
|
{
"start": 315,
"end": 1915
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_bar23.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [43706240, 43727104]
headers = ["Series 1", "Series 2", "Series 3"]
data = [
["Category 1", "Category 2", "Category 3", "Category 4"],
[4.3, 2.5, 3.5, 4.5],
[2.4, 4.5, 1.8, 2.8],
[2, 2, 3, 5],
]
worksheet.set_column("A:D", 11)
worksheet.write_row("B1", headers)
worksheet.write_column("A2", data[0])
worksheet.write_column("B2", data[1])
worksheet.write_column("C2", data[2])
worksheet.write_column("D2", data[3])
chart.add_series(
{
"categories": "=Sheet1!$A$2:$A$5",
"values": "=Sheet1!$B$2:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$2:$A$5",
"values": "=Sheet1!$C$2:$C$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$2:$A$5",
"values": "=Sheet1!$D$2:$D$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
jupyterlab__jupyterlab
|
jupyterlab/commands.py
|
{
"start": 10370,
"end": 19708
}
|
class ____(HasTraits):
"""Options object for build system"""
def __init__(self, logger=None, core_config=None, **kwargs):
if core_config is not None:
kwargs["core_config"] = core_config
if logger is not None:
kwargs["logger"] = logger
# use the default if app_dir is empty
if "app_dir" in kwargs and not kwargs["app_dir"]:
kwargs.pop("app_dir")
super().__init__(**kwargs)
app_dir = Unicode(help="The application directory")
use_sys_dir = Bool(
True,
help=("Whether to shadow the default app_dir if that is set to a non-default value"),
)
logger = Instance(logging.Logger, help="The logger to use")
core_config = Instance(CoreConfig, help="Configuration for core data")
kill_event = Instance(Event, args=(), help="Event for aborting call")
labextensions_path = List(
Unicode(), help="The paths to look in for prebuilt JupyterLab extensions"
)
registry = Unicode(help="NPM packages registry URL")
splice_source = Bool(False, help="Splice source packages into app directory.")
skip_full_build_check = Bool(
False,
help=(
"If true, perform only a quick check that the lab build is up to date."
" If false, perform a thorough check, which verifies extension contents."
),
)
verbose = Bool(False, help="Increase verbosity level.")
@default("logger")
def _default_logger(self):
return logging.getLogger("jupyterlab")
# These defaults need to be federated to pick up
# any changes to env vars:
@default("app_dir")
def _default_app_dir(self):
return get_app_dir()
@default("core_config")
def _default_core_config(self):
return CoreConfig()
@default("registry")
def _default_registry(self):
config = _yarn_config(self.logger)["yarn config"]
return config.get("registry", YARN_DEFAULT_REGISTRY)
def _ensure_options(options):
"""Helper to use deprecated kwargs for AppOption"""
if options is None:
return AppOptions()
elif issubclass(options.__class__, AppOptions):
return options
else:
return AppOptions(**options)
def watch(app_options=None):
"""Watch the application.
Parameters
----------
app_options: :class:`AppOptions`, optional
The application options.
Returns
-------
A list of processes to run asynchronously.
"""
app_options = _ensure_options(app_options)
_node_check(app_options.logger)
handler = _AppHandler(app_options)
package_procs = watch_packages(app_options.logger) if app_options.splice_source else []
return package_procs + handler.watch()
def install_extension(extension, app_options=None, pin=None):
"""Install an extension package into JupyterLab.
The extension is first validated.
Returns `True` if a rebuild is recommended, `False` otherwise.
"""
app_options = _ensure_options(app_options)
_node_check(app_options.logger)
handler = _AppHandler(app_options)
return handler.install_extension(extension, pin=pin)
def uninstall_extension(name=None, app_options=None, all_=False):
"""Uninstall an extension by name or path.
Returns `True` if a rebuild is recommended, `False` otherwise.
"""
app_options = _ensure_options(app_options)
_node_check(app_options.logger)
handler = _AppHandler(app_options)
if all_ is True:
return handler.uninstall_all_extensions()
return handler.uninstall_extension(name)
def update_extension(name=None, all_=False, app_dir=None, app_options=None):
"""Update an extension by name, or all extensions.
Either `name` must be given as a string, or `all_` must be `True`.
If `all_` is `True`, the value of `name` is ignored.
Returns `True` if a rebuild is recommended, `False` otherwise.
"""
app_options = _ensure_options(app_options)
_node_check(app_options.logger)
handler = _AppHandler(app_options)
if all_ is True:
return handler.update_all_extensions()
return handler.update_extension(name)
def clean(app_options=None):
"""Clean the JupyterLab application directory."""
app_options = _ensure_options(app_options)
logger = app_options.logger
app_dir = app_options.app_dir
logger.info(f"Cleaning {app_dir}...")
if app_dir == pjoin(HERE, "dev"):
msg = "Cannot clean the dev app"
raise ValueError(msg)
if app_dir == pjoin(HERE, "core"):
msg = "Cannot clean the core app"
raise ValueError(msg)
if getattr(app_options, "all", False):
logger.info(f"Removing everything in {app_dir}...")
_rmtree_star(app_dir, logger)
else:
possible_targets = ["extensions", "settings", "staging", "static"]
targets = [t for t in possible_targets if getattr(app_options, t)]
for name in targets:
target = pjoin(app_dir, name)
if osp.exists(target):
logger.info(f"Removing {name}...")
_rmtree(target, logger)
else:
logger.info(f"{name} not present, skipping...")
logger.info("Success!")
if getattr(app_options, "all", False) or getattr(app_options, "extensions", False):
logger.info("All of your extensions have been removed, and will need to be reinstalled")
def build(
name=None,
version=None,
static_url=None,
kill_event=None,
clean_staging=False,
app_options=None,
production=True,
minimize=True,
):
"""Build the JupyterLab application."""
app_options = _ensure_options(app_options)
_node_check(app_options.logger)
handler = _AppHandler(app_options)
return handler.build(
name=name,
version=version,
static_url=static_url,
production=production,
minimize=minimize,
clean_staging=clean_staging,
)
def get_app_info(app_options=None):
"""Get a dictionary of information about the app."""
handler = _AppHandler(app_options)
handler._ensure_disabled_info()
return handler.info
def enable_extension(extension, app_options=None, level="sys_prefix"):
"""Enable a JupyterLab extension/plugin.
Returns `True` if a rebuild is recommended, `False` otherwise.
"""
handler = _AppHandler(app_options)
return handler.toggle_extension(extension, False, level=level)
def disable_extension(extension, app_options=None, level="sys_prefix"):
"""Disable a JupyterLab extension/plugin.
Returns `True` if a rebuild is recommended, `False` otherwise.
"""
handler = _AppHandler(app_options)
return handler.toggle_extension(extension, True, level=level)
def check_extension(extension, installed=False, app_options=None):
"""Check if a JupyterLab extension is enabled or disabled."""
handler = _AppHandler(app_options)
return handler.check_extension(extension, installed)
def lock_extension(extension, app_options=None, level="sys_prefix"):
"""Lock a JupyterLab extension/plugin."""
handler = _AppHandler(app_options)
return handler.toggle_extension_lock(extension, True, level=level)
def unlock_extension(extension, app_options=None, level="sys_prefix"):
"""Unlock a JupyterLab extension/plugin."""
handler = _AppHandler(app_options)
return handler.toggle_extension_lock(extension, False, level=level)
def build_check(app_options=None):
"""Determine whether JupyterLab should be built.
Returns a list of messages.
"""
app_options = _ensure_options(app_options)
_node_check(app_options.logger)
handler = _AppHandler(app_options)
return handler.build_check()
def list_extensions(app_options=None):
"""List the extensions."""
handler = _AppHandler(app_options)
return handler.list_extensions()
def link_package(path, app_options=None):
"""Link a package against the JupyterLab build.
Returns `True` if a rebuild is recommended, `False` otherwise.
"""
handler = _AppHandler(app_options)
return handler.link_package(path)
def unlink_package(package, app_options=None):
"""Unlink a package from JupyterLab by path or name.
Returns `True` if a rebuild is recommended, `False` otherwise.
"""
handler = _AppHandler(app_options)
return handler.unlink_package(package)
def get_app_version(app_options=None):
"""Get the application version."""
handler = _AppHandler(app_options)
return handler.info["version"]
def get_latest_compatible_package_versions(names, app_options=None):
"""Get the latest compatible version of a list of packages."""
handler = _AppHandler(app_options)
return handler.latest_compatible_package_versions(names)
def read_package(target):
"""Read the package data in a given target tarball."""
with tarfile.open(target, "r") as tar:
with tar.extractfile("package/package.json") as f:
data = json.loads(f.read().decode("utf8"))
data["jupyterlab_extracted_files"] = [f.path[len("package/") :] for f in tar.getmembers()]
return data
# ----------------------------------------------------------------------
# Implementation details
# ----------------------------------------------------------------------
|
AppOptions
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/geo/_lonaxis.py
|
{
"start": 235,
"end": 7311
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.geo"
_path_str = "layout.geo.lonaxis"
_valid_props = {
"dtick",
"gridcolor",
"griddash",
"gridwidth",
"range",
"showgrid",
"tick0",
}
@property
def dtick(self):
"""
Sets the graticule's longitude/latitude tick step.
The 'dtick' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def gridcolor(self):
"""
Sets the graticule's stroke color.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
@property
def griddash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'griddash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["griddash"]
@griddash.setter
def griddash(self, val):
self["griddash"] = val
@property
def gridwidth(self):
"""
Sets the graticule's stroke width (in px).
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
@property
def range(self):
"""
Sets the range of this axis (in degrees), sets the map's
clipped coordinates.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property is a number and may be specified as:
- An int or float
(1) The 'range[1]' property is a number and may be specified as:
- An int or float
Returns
-------
list
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
@property
def showgrid(self):
"""
Sets whether or not graticule are shown on the map.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
@property
def tick0(self):
"""
Sets the graticule's starting tick longitude/latitude.
The 'tick0' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def _prop_descriptions(self):
return """\
dtick
Sets the graticule's longitude/latitude tick step.
gridcolor
Sets the graticule's stroke color.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the graticule's stroke width (in px).
range
Sets the range of this axis (in degrees), sets the
map's clipped coordinates.
showgrid
Sets whether or not graticule are shown on the map.
tick0
Sets the graticule's starting tick longitude/latitude.
"""
def __init__(
self,
arg=None,
dtick=None,
gridcolor=None,
griddash=None,
gridwidth=None,
range=None,
showgrid=None,
tick0=None,
**kwargs,
):
"""
Construct a new Lonaxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.geo.Lonaxis`
dtick
Sets the graticule's longitude/latitude tick step.
gridcolor
Sets the graticule's stroke color.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the graticule's stroke width (in px).
range
Sets the range of this axis (in degrees), sets the
map's clipped coordinates.
showgrid
Sets whether or not graticule are shown on the map.
tick0
Sets the graticule's starting tick longitude/latitude.
Returns
-------
Lonaxis
"""
super().__init__("lonaxis")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.geo.Lonaxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.geo.Lonaxis`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtick", arg, dtick)
self._set_property("gridcolor", arg, gridcolor)
self._set_property("griddash", arg, griddash)
self._set_property("gridwidth", arg, gridwidth)
self._set_property("range", arg, range)
self._set_property("showgrid", arg, showgrid)
self._set_property("tick0", arg, tick0)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Lonaxis
|
python
|
ray-project__ray
|
python/ray/data/_internal/execution/interfaces/physical_operator.py
|
{
"start": 32399,
"end": 34310
}
|
class ____(abc.ABC):
@abc.abstractmethod
def extra_resource_usage(self: PhysicalOperator) -> ExecutionResources:
"""Returns resources used by this operator beyond standard accounting."""
...
def estimate_total_num_of_blocks(
num_tasks_submitted: int,
upstream_op_num_outputs: int,
metrics: OpRuntimeMetrics,
total_num_tasks: Optional[int] = None,
) -> Tuple[int, int, int]:
"""This method is trying to estimate total number of blocks/rows based on
- How many outputs produced by the input deps
- How many blocks/rows produced by tasks of this operator
"""
if (
upstream_op_num_outputs > 0
and metrics.average_num_inputs_per_task
and metrics.average_num_outputs_per_task
and metrics.average_rows_outputs_per_task
):
estimated_num_tasks = total_num_tasks
if estimated_num_tasks is None:
estimated_num_tasks = (
upstream_op_num_outputs / metrics.average_num_inputs_per_task
)
estimated_num_output_bundles = round(
estimated_num_tasks * metrics.average_num_outputs_per_task
)
estimated_output_num_rows = round(
estimated_num_tasks * metrics.average_rows_outputs_per_task
)
return (
estimated_num_tasks,
estimated_num_output_bundles,
estimated_output_num_rows,
)
return (0, 0, 0)
def _create_sub_pb(
name: str, total_output_rows: Optional[int], position: int
) -> Tuple[ProgressBar, int]:
progress_bar = ProgressBar(
name,
total_output_rows or 1,
unit="row",
position=position,
)
# NOTE: call `set_description` to trigger the initial print of progress
# bar on console.
progress_bar.set_description(f" *- {name}")
position += 1
return progress_bar, position
|
ReportsExtraResourceUsage
|
python
|
pytorch__pytorch
|
torch/distributed/fsdp/api.py
|
{
"start": 5102,
"end": 11746
}
|
class ____:
"""
This configures FSDP-native mixed precision training.
Attributes:
param_dtype (Optional[torch.dtype]): This specifies the dtype for model
parameters during forward and backward and thus the dtype for
forward and backward computation. Outside forward and backward, the
*sharded* parameters are kept in full precision (e.g. for the
optimizer step), and for model checkpointing, the parameters are
always saved in full precision. (Default: ``None``)
reduce_dtype (Optional[torch.dtype]): This specifies the dtype for
gradient reduction (i.e. reduce-scatter or all-reduce). If this is
``None`` but ``param_dtype`` is not ``None``, then this takes on
the ``param_dtype`` value, still running gradient reduction in low
precision. This is permitted to differ from ``param_dtype``, e.g.
to force gradient reduction to run in full precision. (Default:
``None``)
buffer_dtype (Optional[torch.dtype]): This specifies the dtype for
buffers. FSDP does not shard buffers. Rather, FSDP casts them to
``buffer_dtype`` in the first forward pass and keeps them in that
dtype thereafter. For model checkpointing, the buffers are saved
in full precision except for ``LOCAL_STATE_DICT``. (Default:
``None``)
keep_low_precision_grads (bool): If ``False``, then FSDP upcasts
gradients to full precision after the backward pass in preparation
for the optimizer step. If ``True``, then FSDP keeps the gradients
in the dtype used for gradient reduction, which can save memory if
using a custom optimizer that supports running in low precision.
(Default: ``False``)
cast_forward_inputs (bool): If ``True``, then this FSDP module casts
its forward args and kwargs to ``param_dtype``. This is to ensure
that parameter and input dtypes match for forward computation, as
required by many ops. This may need to be set to ``True`` when only
applying mixed precision to some but not all FSDP modules, in which
case a mixed-precision FSDP submodule needs to recast its inputs.
(Default: ``False``)
cast_root_forward_inputs (bool): If ``True``, then the root FSDP module
casts its forward args and kwargs to ``param_dtype``, overriding
the value of ``cast_forward_inputs``. For non-root FSDP modules,
this does not do anything. (Default: ``True``)
_module_classes_to_ignore: (Sequence[Type[nn.Module]]): This specifies
module classes to ignore for mixed precision when using an
``auto_wrap_policy``: Modules of these classes will have FSDP
applied to them separately with mixed precision disabled (meaning
that the final FSDP construction would deviate from the specified
policy). If ``auto_wrap_policy`` is not specified, then this does
not do anything. This API is experimental and subject to change.
(Default: ``(_BatchNorm,)``)
.. note:: This API is experimental and subject to change.
.. note:: Only floating point tensors are cast to their specified dtypes.
.. note:: In ``summon_full_params``, parameters are forced to full
precision, but buffers are not.
.. note:: Layer norm and batch norm accumulate in ``float32`` even when
their inputs are in a low precision like ``float16`` or ``bfloat16``.
Disabling FSDP's mixed precision for those norm modules only means that
the affine parameters are kept in ``float32``. However, this incurs
separate all-gathers and reduce-scatters for those norm modules, which
may be inefficient, so if the workload permits, the user should prefer
to still apply mixed precision to those modules.
.. note:: By default, if the user passes a model with any ``_BatchNorm``
modules and specifies an ``auto_wrap_policy``, then the batch norm
modules will have FSDP applied to them separately with mixed precision
disabled. See the ``_module_classes_to_ignore`` argument.
.. note:: ``MixedPrecision`` has ``cast_root_forward_inputs=True`` and
``cast_forward_inputs=False`` by default. For the root FSDP instance,
its ``cast_root_forward_inputs`` takes precedence over its
``cast_forward_inputs``. For non-root FSDP instances, their
``cast_root_forward_inputs`` values are ignored. The default setting is
sufficient for the typical case where each FSDP instance has the same
``MixedPrecision`` configuration and only needs to cast inputs to the
``param_dtype`` at the beginning of the model's forward pass.
.. note:: For nested FSDP instances with different ``MixedPrecision``
configurations, we recommend setting individual ``cast_forward_inputs``
values to configure casting inputs or not before each instance's
forward. In such a case, since the casts happen before each FSDP
instance's forward, a parent FSDP instance should have its non-FSDP
submodules run before its FSDP submodules to avoid the activation dtype
being changed due to a different ``MixedPrecision`` configuration.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> model = nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3))
>>> model[1] = FSDP(
>>> model[1],
>>> mixed_precision=MixedPrecision(param_dtype=torch.float16, cast_forward_inputs=True),
>>> )
>>> model = FSDP(
>>> model,
>>> mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, cast_forward_inputs=True),
>>> )
The above shows a working example. On the other hand, if ``model[1]``
were replaced with ``model[0]``, meaning that the submodule using
different ``MixedPrecision`` ran its forward first, then ``model[1]``
would incorrectly see ``float16`` activations instead of ``bfloat16``
ones.
"""
param_dtype: Optional[torch.dtype] = None
reduce_dtype: Optional[torch.dtype] = None
buffer_dtype: Optional[torch.dtype] = None
keep_low_precision_grads: bool = False
cast_forward_inputs: bool = False
cast_root_forward_inputs: bool = True
_module_classes_to_ignore: Sequence[type[torch.nn.Module]] = (_BatchNorm,)
@dataclass
|
MixedPrecision
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/triggers/test_vertex_ai.py
|
{
"start": 11624,
"end": 14145
}
|
class ____:
def setup_method(self):
self.trigger = CreateBatchPredictionJobTrigger(
conn_id=TEST_CONN_ID,
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
job_id=TEST_HPT_JOB_ID,
poll_interval=TEST_POLL_INTERVAL,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
def test_class_attributes(self):
assert self.trigger.trigger_class_path == (
"airflow.providers.google.cloud.triggers.vertex_ai.CreateBatchPredictionJobTrigger"
)
assert self.trigger.job_type_verbose_name == "Batch Prediction Job"
assert self.trigger.job_serializer_class == BatchPredictionJob
@mock.patch(VERTEX_AI_TRIGGER_PATH.format("BatchPredictionJobAsyncHook"))
def test_async_hook(self, mock_async_hook):
async_hook_actual = self.trigger.async_hook
mock_async_hook.assert_called_once_with(
gcp_conn_id=self.trigger.conn_id,
impersonation_chain=self.trigger.impersonation_chain,
)
assert async_hook_actual == mock_async_hook.return_value
@pytest.mark.asyncio
@mock.patch(VERTEX_AI_TRIGGER_PATH.format("BatchPredictionJobAsyncHook.wait_batch_prediction_job"))
async def test_wait_job(self, mock_wait_batch_prediction_job):
job_expected = mock.MagicMock()
async_mock = mock.AsyncMock(return_value=job_expected)
mock_wait_batch_prediction_job.side_effect = async_mock
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_default_project_id
):
job_actual = await self.trigger._wait_job()
mock_wait_batch_prediction_job.assert_awaited_once_with(
project_id=self.trigger.project_id,
location=self.trigger.location,
job_id=self.trigger.job_id,
poll_interval=self.trigger.poll_interval,
)
assert job_actual == job_expected
def test_serialize(self):
classpath, kwargs = self.trigger.serialize()
assert (
classpath == "airflow.providers.google.cloud.triggers.vertex_ai.CreateBatchPredictionJobTrigger"
)
assert kwargs == dict(
conn_id=TEST_CONN_ID,
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
job_id=TEST_HPT_JOB_ID,
poll_interval=TEST_POLL_INTERVAL,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
|
TestCreateBatchPredictionJobTrigger
|
python
|
facelessuser__pymdown-extensions
|
pymdownx/superfences.py
|
{
"start": 2602,
"end": 6057
}
|
class ____:
"""
Stash code for later retrieval.
Store original fenced code here in case we were
too greedy and need to restore in an indented code
block.
"""
def __init__(self):
"""Initialize."""
self.stash = {}
def __len__(self): # pragma: no cover
"""Length of stash."""
return len(self.stash)
def get(self, key, default=None):
"""Get the code from the key."""
code = self.stash.get(key, default)
return code
def remove(self, key):
"""Remove the stashed code."""
del self.stash[key]
def store(self, key, code, indent_level):
"""Store the code in the stash."""
self.stash[key] = (code, indent_level)
def clear_stash(self):
"""Clear the stash."""
self.stash = {}
def fence_code_format(source, language, class_name, options, md, **kwargs):
"""Format source as code blocks."""
classes = kwargs['classes']
id_value = kwargs['id_value']
attrs = kwargs['attrs']
if class_name:
classes.insert(0, class_name)
id_value = f' id="{id_value}"' if id_value else ''
classes = ' class="{}"'.format(' '.join(classes)) if classes else ''
attrs = ' ' + ' '.join(f'{k}="{v}"' for k, v in attrs.items()) if attrs else ''
return '<pre{}{}{}><code>{}</code></pre>'.format(id_value, classes, attrs, _escape(source))
def fence_div_format(source, language, class_name, options, md, **kwargs):
"""Format source as div."""
classes = kwargs['classes']
id_value = kwargs['id_value']
attrs = kwargs['attrs']
if class_name:
classes.insert(0, class_name)
id_value = f' id="{id_value}"' if id_value else ''
classes = ' class="{}"'.format(' '.join(classes)) if classes else ''
attrs = ' ' + ' '.join(f'{k}="{v}"' for k, v in attrs.items()) if attrs else ''
return '<div{}{}{}>{}</div>'.format(id_value, classes, attrs, _escape(source))
def highlight_validator(language, inputs, options, attrs, md):
"""Highlight validator."""
use_pygments = md.preprocessors['fenced_code_block'].use_pygments
for k, v in inputs.items():
matched = False
if use_pygments:
if k.startswith('data-'):
attrs[k] = v
continue
for opt, validator in (('hl_lines', RE_HL_LINES), ('linenums', RE_LINENUMS), ('title', None)):
if k == opt:
if v is not True and (validator is None or validator.match(v) is not None):
options[k] = v
matched = True
break
if not matched:
attrs[k] = v
return True
def default_validator(language, inputs, options, attrs, md):
    """Accept every fence input verbatim by copying it into *attrs*."""
    attrs.update(inputs)
    return True
def _validator(language, inputs, options, attrs, md, validator=None):
    """Validator wrapper."""
    # Make sure highlight settings are resolved before delegating validation.
    md.preprocessors['fenced_code_block'].get_hl_settings()
    return validator(language, inputs, options, attrs, md)
def _formatter(src='', language='', options=None, md=None, class_name="", _fmt=None, **kwargs):
    """Formatter wrapper."""
    # Adapts the generic formatter signature to a concrete `_fmt` callback,
    # which takes `class_name` positionally.
    return _fmt(src, language, class_name, options, md, **kwargs)
def _test(language, test_language=None):
    """Return True when *language* satisfies the *test_language* filter.

    ``None`` and ``"*"`` act as wildcards that match any language.
    """
    if test_language is None or test_language == "*":
        return True
    return language == test_language
|
CodeStash
|
python
|
keras-team__keras
|
keras/src/layers/normalization/group_normalization.py
|
{
"start": 348,
"end": 9367
}
|
class ____(Layer):
    """Group normalization layer.

    Group Normalization divides the channels into groups and computes
    within each group the mean and variance for normalization.
    Empirically, its accuracy is more stable than batch norm in a wide
    range of small batch sizes, if learning rate is adjusted linearly
    with batch sizes.

    Relation to Layer Normalization:
    If the number of groups is set to 1, then this operation becomes nearly
    identical to Layer Normalization (see Layer Normalization docs for details).

    Relation to Instance Normalization:
    If the number of groups is set to the input dimension (number of groups is
    equal to number of channels), then this operation becomes identical to
    Instance Normalization. You can achieve this via `groups=-1`.

    Args:
        groups: Integer, the number of groups for Group Normalization. Can be in
            the range `[1, N]` where N is the input dimension. The input
            dimension must be divisible by the number of groups.
            Defaults to 32.
        axis: Integer or List/Tuple. The axis or axes to normalize across.
            Typically, this is the features axis/axes. The left-out axes are
            typically the batch axis/axes. -1 is the last dimension in the
            input. Defaults to `-1`.
        epsilon: Small float added to variance to avoid dividing by zero.
            Defaults to 1e-3.
        center: If `True`, add offset of `beta` to normalized tensor.
            If `False`, `beta` is ignored. Defaults to `True`.
        scale: If `True`, multiply by `gamma`. If `False`, `gamma` is not used.
            When the next layer is linear (also e.g. `relu`), this can be
            disabled since the scaling will be done by the next layer.
            Defaults to `True`.
        beta_initializer: Initializer for the beta weight. Defaults to zeros.
        gamma_initializer: Initializer for the gamma weight. Defaults to ones.
        beta_regularizer: Optional regularizer for the beta weight. None by
            default.
        gamma_regularizer: Optional regularizer for the gamma weight. None by
            default.
        beta_constraint: Optional constraint for the beta weight.
            None by default.
        gamma_constraint: Optional constraint for the gamma weight. None by
            default. Input shape: Arbitrary. Use the keyword argument
            `input_shape` (tuple of integers, does not include the samples
            axis) when using this layer as the first layer in a model.
            Output shape: Same shape as input.
        **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).

    Reference:
        - [Yuxin Wu & Kaiming He, 2018](https://arxiv.org/abs/1803.08494)
    """

    def __init__(
        self,
        groups=32,
        axis=-1,
        epsilon=1e-3,
        center=True,
        scale=True,
        beta_initializer="zeros",
        gamma_initializer="ones",
        beta_regularizer=None,
        gamma_regularizer=None,
        beta_constraint=None,
        gamma_constraint=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.groups = groups
        self.axis = axis
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)

    def build(self, input_shape):
        # Validate the channel dimension and create gamma/beta weights.
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError(
                f"Axis {self.axis} of input tensor should have a defined "
                "dimension but the layer received an input with shape "
                f"{input_shape}."
            )

        # groups == -1 means one group per channel (instance-norm behavior).
        if self.groups == -1:
            self.groups = dim

        if dim < self.groups:
            raise ValueError(
                f"Number of groups ({self.groups}) cannot be more than the "
                f"number of channels ({dim})."
            )

        if dim % self.groups != 0:
            raise ValueError(
                f"Number of groups ({self.groups}) must be a multiple "
                f"of the number of channels ({dim})."
            )

        self.input_spec = InputSpec(
            ndim=len(input_shape), axes={self.axis: dim}
        )

        if self.scale:
            self.gamma = self.add_weight(
                shape=(dim,),
                name="gamma",
                initializer=self.gamma_initializer,
                regularizer=self.gamma_regularizer,
                constraint=self.gamma_constraint,
            )
        else:
            self.gamma = None

        if self.center:
            self.beta = self.add_weight(
                shape=(dim,),
                name="beta",
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint,
            )
        else:
            self.beta = None

        super().build(input_shape)

    def call(self, inputs):
        # Reshape -> normalize per group -> restore the original shape.
        reshaped_inputs = self._reshape_into_groups(inputs)
        normalized_inputs = self._apply_normalization(
            reshaped_inputs, inputs.shape
        )
        return ops.reshape(normalized_inputs, ops.shape(inputs))

    def _reshape_into_groups(self, inputs):
        """Split the channel axis into (groups, channels_per_group)."""
        input_shape = ops.shape(inputs)
        group_shape = list(inputs.shape)
        group_shape[0] = -1
        # Replace unknown static dims with their dynamic counterparts.
        for i, e in enumerate(group_shape[1:]):
            if e is None:
                group_shape[i + 1] = input_shape[i + 1]

        group_shape[self.axis] = input_shape[self.axis] // self.groups
        group_shape.insert(self.axis, self.groups)
        reshaped_inputs = ops.reshape(inputs, group_shape)
        return reshaped_inputs

    def _apply_normalization(self, reshaped_inputs, input_shape):
        """Normalize per-group statistics and apply gamma/beta."""
        inputs_dtype = reshaped_inputs.dtype
        compute_dtype = backend.result_type(inputs_dtype, "float32")
        # GN is prone to overflow with float16/bfloat16 inputs, so we upcast to
        # float32 for the subsequent computations.
        reshaped_inputs = ops.cast(reshaped_inputs, compute_dtype)

        group_reduction_axes = list(range(1, len(reshaped_inputs.shape)))
        # Keep the group axis out of the reduction: after the reshape the
        # group axis sits just before the (split) channel axis.
        axis = -2 if self.axis == -1 else self.axis - 1
        group_reduction_axes.pop(axis)

        broadcast_shape = self._create_broadcast_shape(input_shape)
        mean, variance = ops.moments(
            reshaped_inputs, axes=group_reduction_axes, keepdims=True
        )

        # Compute the batch normalization.
        inv = ops.rsqrt(variance + self.epsilon)
        if self.scale:
            gamma = ops.reshape(self.gamma, broadcast_shape)
            gamma = ops.cast(gamma, reshaped_inputs.dtype)
            inv = inv * gamma

        res = -mean * inv
        if self.center:
            beta = ops.reshape(self.beta, broadcast_shape)
            beta = ops.cast(beta, reshaped_inputs.dtype)
            res = res + beta

        # x * inv + (beta - mean * inv)  ==  gamma * (x - mean) / std + beta
        normalized_inputs = reshaped_inputs * inv + res
        normalized_inputs = ops.cast(normalized_inputs, inputs_dtype)
        return normalized_inputs

    def _create_broadcast_shape(self, input_shape):
        """Shape that broadcasts per-channel weights over the grouped tensor."""
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
        broadcast_shape.insert(self.axis, self.groups)
        return broadcast_shape

    def compute_output_shape(self, input_shape):
        # Normalization never changes the shape; only validate the axes.
        if isinstance(self.axis, int):
            axes = [self.axis]
        else:
            axes = self.axis

        for axis in axes:
            if axis >= len(input_shape) or axis < -len(input_shape):
                raise ValueError(
                    f"Axis {axis} is out of bounds for "
                    f"input shape {input_shape}. "
                    f"Received: axis={self.axis}"
                )
        return input_shape

    def get_config(self):
        config = {
            "groups": self.groups,
            "axis": self.axis,
            "epsilon": self.epsilon,
            "center": self.center,
            "scale": self.scale,
            "beta_initializer": initializers.serialize(self.beta_initializer),
            "gamma_initializer": initializers.serialize(self.gamma_initializer),
            "beta_regularizer": regularizers.serialize(self.beta_regularizer),
            "gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
            "beta_constraint": constraints.serialize(self.beta_constraint),
            "gamma_constraint": constraints.serialize(self.gamma_constraint),
        }
        base_config = super().get_config()
        return {**base_config, **config}
|
GroupNormalization
|
python
|
PyCQA__pylint
|
tests/functional/n/non_ascii_name/non_ascii_name_staticmethod.py
|
{
"start": 48,
"end": 301
}
|
# Pylint functional-test fixture: the umlaut method name is expected to
# trigger exactly one [non-ascii-name] message, at its definition site.
class ____:
    """Class Docstring"""

    def public(self):
        """Say it load"""

    @staticmethod
    def umlaut_ä(): # [non-ascii-name]
        """Say ä"""
        return "ä"


# Usage should not raise a second error
OkayClass.umlaut_ä()
|
OkayClass
|
python
|
walkccc__LeetCode
|
solutions/2564. Substring XOR Queries/2564.py
|
{
"start": 0,
"end": 748
}
|
class ____:
def substringXorQueries(self, s: str, queries: list[list[int]]) -> list[list[int]]:
    """Answer XOR queries over the binary string *s*.

    For each query ``[first, second]``, find the leftmost (then shortest)
    substring of *s* whose binary value ``val`` satisfies
    ``val ^ first == second`` — i.e. ``val == first ^ second`` — and return
    its ``[left, right]`` index pair, or ``[-1, -1]`` when no such
    substring exists.
    """
    # Query values fit in ~30 bits, so substrings longer than 30 chars
    # (starting with '1') can never match any query.
    MAX_BIT = 30
    # {val: [left, right]} := s[left..right]'s decimal value = val,
    # keeping only the first (leftmost, shortest) occurrence.
    # defaultdict yields [-1, -1] for values never seen.
    valToLeftAndRight = collections.defaultdict(lambda: [-1, -1])

    for left, c in enumerate(s):
        val = 0
        if c == '0':
            # Substrings with leading zeros are redundant except for the
            # single-character "0" itself; remember its first position.
            if 0 not in valToLeftAndRight:
                valToLeftAndRight[0] = [left, left]
            continue
        for right in range(left, min(len(s), left + MAX_BIT)):
            val = val * 2 + int(s[right])
            if val not in valToLeftAndRight:
                valToLeftAndRight[val] = [left, right]

    # BUGFIX: look up first ^ second (the required substring value), not a
    # (first, second) tuple key, which always missed and returned [-1, -1].
    return [valToLeftAndRight[first ^ second]
            for first, second in queries]
|
Solution
|
python
|
encode__django-rest-framework
|
rest_framework/generics.py
|
{
"start": 6678,
"end": 6926
}
|
class ____(mixins.CreateModelMixin,
           GenericAPIView):
    """
    Concrete view for creating a model instance.
    """

    def post(self, request, *args, **kwargs):
        # POST delegates directly to CreateModelMixin.create().
        return self.create(request, *args, **kwargs)
|
CreateAPIView
|
python
|
ethereum__web3.py
|
web3/exceptions.py
|
{
"start": 3836,
"end": 4004
}
|
class ____(AttributeError, MismatchedABI):
    """
    Raised when an attempt is made to access a function
    that does not exist in the ABI.

    Also subclasses ``AttributeError`` so attribute-style access
    failures can be caught with either exception type.
    """
|
ABIFunctionNotFound
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 195964,
"end": 196658
}
|
class ____(sgqlc.types.Interface):
    """
    See source code for more info.
    """

    # Auto-generated sgqlc binding for a GraphQL interface exposing an
    # actor's avatar, login, resource path and URL.
    __schema__ = graphql_schema
    __field_names__ = ("avatar_url", "login", "resource_path", "url")

    # Avatar image URL; the optional `size` argument requests a scaled image.
    avatar_url = sgqlc.types.Field(
        sgqlc.types.non_null(URI),
        graphql_name="avatarUrl",
        args=sgqlc.types.ArgDict(
            (("size", sgqlc.types.Arg(Int, graphql_name="size", default=None)),)
        ),
    )
    login = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="login")
    resource_path = sgqlc.types.Field(
        sgqlc.types.non_null(URI), graphql_name="resourcePath"
    )
    url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
|
Actor
|
python
|
pytorch__pytorch
|
test/distributed/fsdp/test_fsdp_meta.py
|
{
"start": 4035,
"end": 14777
}
|
class ____(FSDPTest):
    """Checks that FSDP models built from meta-device (or torchdistX
    deferred-init) modules end up numerically identical to regularly
    initialized FSDP models, before and after a training step."""

    @property
    def world_size(self):
        return 2

    @property
    def process_group(self):
        return dist.distributed_c10d._get_default_group()

    def _compare_fsdp(self, fsdp1, fsdp2):
        # Unshard both models so full parameters can be compared elementwise.
        with FSDP.summon_full_params(fsdp1):
            with FSDP.summon_full_params(fsdp2):
                for p1, p2 in zip(fsdp1.parameters(), fsdp2.parameters()):
                    self.assertTrue(torch.allclose(p1, p2), f"{p1} vs {p2}")

    def _test_simple_model_with_meta_device(self, meta_module_fn, init_fn=None):
        # Create model on meta device and wrap with FSDP.
        model = meta_module_fn()
        is_meta = next(model.parameters()).is_meta
        fsdp_meta = FSDP(
            model,
            auto_wrap_policy=always_wrap,
            param_init_fn=init_fn,
        )

        meta_opt = torch.optim.SGD(fsdp_meta.parameters(), lr=1e-3)

        # Test to make sure it is the same model parameters as regular FSDP
        # approach.
        regular = MyModel(device=device_type)
        _reset_params_if_meta(is_meta, regular)
        fsdp_regular = FSDP(regular, auto_wrap_policy=always_wrap)
        regular_opt = torch.optim.SGD(fsdp_regular.parameters(), lr=1e-3)

        self._compare_fsdp(fsdp_meta, fsdp_regular)
        inp = torch.randn(10, 2, device=device_type)
        fsdp_meta(inp).sum().backward()
        fsdp_regular(inp).sum().backward()
        meta_opt.step()
        regular_opt.step()
        self._compare_fsdp(fsdp_meta, fsdp_regular)

        # Test that meta init works if all submodules are contained in only a
        # single FSDP unit.
        model = meta_module_fn()
        fsdp_meta = FSDP(model, param_init_fn=init_fn)
        meta_opt = torch.optim.SGD(fsdp_meta.parameters(), lr=1e-3)
        regular = MyModel(device=device_type)
        _reset_params_if_meta(is_meta, regular)
        fsdp_regular = FSDP(regular, auto_wrap_policy=always_wrap)
        regular_opt = torch.optim.SGD(fsdp_regular.parameters(), lr=1e-3)

        # Run a forward + backward pass + optimizer step
        fsdp_meta(inp).sum().backward()
        fsdp_regular(inp).sum().backward()
        meta_opt.step()
        regular_opt.step()
        self._compare_fsdp(fsdp_meta, fsdp_regular)

    @skip_if_lt_x_gpu(2)
    def test_simple_model_with_meta_device_reset_params(self):
        def meta_module_fn():
            return MyModel(device="meta")

        self._test_simple_model_with_meta_device(
            meta_module_fn, _init_with_reset_params
        )

    @skip_if_lt_x_gpu(2)
    def test_simple_model_with_meta_device_default_init(self):
        def meta_module_fn():
            return MyModel(device="meta")

        self._test_simple_model_with_meta_device(meta_module_fn)

    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        not _TORCHDISTX_AVAIL,
        "Test requires torchdistX: https://github.com/pytorch/torchdistX",
    )
    def test_simple_model_with_torchdistX_default_init(self):
        def meta_module_fn():
            return deferred_init.deferred_init(MyModel, device=device_type)

        self._test_simple_model_with_meta_device(meta_module_fn)

    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        not _TORCHDISTX_AVAIL,
        "Test requires torchdistX: https://github.com/pytorch/torchdistX",
    )
    def test_simple_model_with_torchdistX_init_fn(self):
        def meta_module_fn():
            return deferred_init.deferred_init(MyModel, device=device_type)

        self._test_simple_model_with_meta_device(
            meta_module_fn, init_fn=_init_with_torchdistX
        )

    def _test_nested_model_with_meta_device(
        self, auto_wrap, meta_module_fn, init_fn=None
    ):
        if auto_wrap:
            module = meta_module_fn()
            is_meta = (
                next(module.parameters()).is_meta or next(module.buffers()).is_meta
            )
            fsdp_meta = FSDP(
                module,
                auto_wrap_policy=always_wrap,
                param_init_fn=init_fn,
            )
            meta_opt = torch.optim.SGD(fsdp_meta.parameters(), lr=1e-3)
            module_regular = NestedModel(device=device_type)
            _reset_params_if_meta(is_meta, module_regular)
            fsdp_regular = FSDP(
                module_regular,
                auto_wrap_policy=always_wrap,
            )
            regular_opt = torch.optim.SGD(fsdp_regular.parameters(), lr=1e-3)
        else:
            with enable_wrap(
                wrapper_cls=FSDP,
                param_init_fn=init_fn,
            ):
                module = meta_module_fn()
                is_meta = next(module.parameters()).is_meta
                # Non FSDP modules will still be initialized because they bubble up
                # to be part of a larger FSDP unit.
                fsdp_meta = wrap(module)
                meta_opt = torch.optim.SGD(fsdp_meta.parameters(), lr=1e-3)

            # Init and reset parameters before wrapping so that reset_params
            # matches up with meta device's initialization.
            module_regular = NestedModel(device=device_type)
            _reset_params_if_meta(is_meta, module_regular)
            with enable_wrap(wrapper_cls=FSDP):
                module_regular.lin1 = wrap(module_regular.lin1)
                module_regular.l3 = wrap(module_regular.l3)
                fsdp_regular = wrap(module_regular)
                regular_opt = torch.optim.SGD(fsdp_regular.parameters(), lr=1e-3)

        # Compare it before training
        self._compare_fsdp(fsdp_meta, fsdp_regular)
        inp = torch.randn(10, 2, device=device_type)
        fsdp_meta(inp).sum().backward()
        fsdp_regular(inp).sum().backward()
        meta_opt.step()
        regular_opt.step()
        self._compare_fsdp(fsdp_meta, fsdp_regular)

    @skip_if_lt_x_gpu(2)
    @parametrize("auto_wrap", [True, False])
    def test_nested_model_with_meta_device_reset_params(self, auto_wrap):
        def meta_module_fn():
            return NestedModel(device="meta")

        self._test_nested_model_with_meta_device(
            auto_wrap=auto_wrap,
            meta_module_fn=meta_module_fn,
            init_fn=_init_with_reset_params,
        )

    @skip_if_lt_x_gpu(2)
    @parametrize("auto_wrap", [True, False])
    def test_nested_model_with_meta_device_default_init(self, auto_wrap):
        def meta_module_fn():
            return NestedModel(device="meta")

        self._test_nested_model_with_meta_device(
            auto_wrap=auto_wrap,
            meta_module_fn=meta_module_fn,
        )

    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        not _TORCHDISTX_AVAIL,
        "Test requires torchdistX: https://github.com/pytorch/torchdistX",
    )
    @parametrize("auto_wrap", [True, False])
    def test_nested_model_with_torchdistX_default_init(self, auto_wrap):
        def meta_module_fn():
            return deferred_init.deferred_init(NestedModel, device=device_type)

        self._test_nested_model_with_meta_device(
            auto_wrap=auto_wrap, meta_module_fn=meta_module_fn
        )

    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        not _TORCHDISTX_AVAIL,
        "Test requires torchdistX: https://github.com/pytorch/torchdistX",
    )
    @parametrize("auto_wrap", [True, False])
    def test_nested_model_with_torchdistX_init_fn(self, auto_wrap):
        def meta_module_fn():
            return deferred_init.deferred_init(NestedModel, device=device_type)

        self._test_nested_model_with_meta_device(
            auto_wrap=auto_wrap,
            meta_module_fn=meta_module_fn,
            init_fn=_init_with_torchdistX,
        )

    def _test_bad_arg(self, meta_module_fn):
        # A non-callable param_init_fn must be rejected up front.
        mod = meta_module_fn()
        with self.assertRaisesRegex(ValueError, "to be callable"):
            FSDP(mod, param_init_fn=42)

    @skip_if_lt_x_gpu(2)
    @skip_but_pass_in_sandcastle_if(
        not _TORCHDISTX_AVAIL,
        "Test requires torchdistX: https://github.com/pytorch/torchdistX",
    )
    def test_bad_arg_torchdistx(self):
        def meta_module_fn():
            return deferred_init.deferred_init(NestedModel, device_type)

        self._test_bad_arg(meta_module_fn)

    @skip_if_lt_x_gpu(2)
    def test_bad_arg_meta(self):
        def meta_module_fn():
            return NestedModel(device="meta")

        self._test_bad_arg(meta_module_fn)

    @skip_if_lt_x_gpu(2)
    def test_meta_device_with_mixed_precision(self):
        """
        Tests meta device initialization with a ``param_init_fn`` when
        specifying mixed precision with ``param_dtype=torch.float32``.
        """

        class FakeLinear(nn.Module):
            # Linear-like module whose parameter is not an nn.Linear, so the
            # default reset path cannot initialize it.
            def __init__(
                self, in_dim: int, out_dim: int, device: Union[torch.device, str]
            ) -> None:
                super().__init__()
                self.weight = nn.Parameter(
                    torch.randn((in_dim, out_dim), device=device)
                )

            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return x @ self.weight

        class Model(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.lin1 = nn.Linear(5, 5, device="meta")
                self.lin2 = FakeLinear(5, 5, device="meta")
                self.relu = nn.ReLU()

            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return self.lin2(self.relu(self.lin1(x)))

            def _module_init_fn(self, module: nn.Module):
                if isinstance(module, nn.Linear):
                    torch.nn.init.normal_(module.weight, mean=0.0, std=0.1)
                    if module.bias is not None:
                        torch.nn.init.zeros_(module.bias)

        def _param_init_fn(module: nn.Module) -> None:
            # TODO: `module.to_empty()` is not generally correct for meta
            # device initialization.
            # https://github.com/pytorch/pytorch/issues/90465
            module.to_empty(device=torch.device(device_type))
            module.apply(model._module_init_fn)

        model = Model()
        # Wrap `lin1` and the top level `model` to create nested FSDP instances
        # where each instance has parameters
        FSDP(
            model,
            auto_wrap_policy=ModuleWrapPolicy({nn.Linear}),
            mixed_precision=MixedPrecision(
                param_dtype=torch.float32, reduce_dtype=torch.float16
            ),
            param_init_fn=_param_init_fn,
            device_id=torch.accelerator.current_device_index(),
        )
# Materialize the @parametrize(auto_wrap) variants as concrete test methods.
instantiate_parametrized_tests(TestFSDPWithMetaDevice)

if __name__ == "__main__":
    run_tests()
|
TestFSDPWithMetaDevice
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_project_user_stats.py
|
{
"start": 233,
"end": 2233
}
|
class ____(APITestCase):
    """Exercises the project user-stats endpoint: daily unique-user counts."""

    def setUp(self) -> None:
        super().setUp()
        # Minimal org/team/project fixture with an authenticated member.
        self.user = self.create_user()
        self.org = self.create_organization(owner=None)
        self.team = self.create_team(organization=self.org)
        self.project = self.create_project(organization=self.org, teams=[self.team])
        self.create_member(user=self.user, organization=self.org, teams=[self.team])

        self.login_as(user=self.user)

        self.path = reverse(
            "sentry-api-0-project-userstats", args=[self.org.slug, self.project.slug]
        )

    def test_simple(self) -> None:
        # Set the time to yesterday at 10am. This ensures the time is not
        # in the future AND doesn't get affected by events and request being
        # on seperate days, which can occur at midnight without freezing time.
        now = before_now(hours=24).replace(hour=10)
        with freeze_time(now):
            # Two events for user_1 and one for user_2 -> 2 unique users.
            self.store_event(
                data={
                    "timestamp": before_now(minutes=10).isoformat(),
                    "tags": {"sentry:user": "user_1"},
                },
                project_id=self.project.id,
            )
            self.store_event(
                data={
                    "timestamp": before_now(minutes=10).isoformat(),
                    "tags": {"sentry:user": "user_1"},
                },
                project_id=self.project.id,
            )
            self.store_event(
                data={
                    "timestamp": before_now(minutes=10).isoformat(),
                    "tags": {"sentry:user": "user_2"},
                },
                project_id=self.project.id,
            )

            response = self.client.get(self.path)
            assert response.status_code == 200, response.content
            # Only the most recent bucket has activity; counts are unique users.
            assert response.data[-1][1] == 2, response.data
            for point in response.data[:-1]:
                assert point[1] == 0
            # 31 daily buckets returned — presumably a 30-day window plus
            # the current day; confirm against the endpoint implementation.
            assert len(response.data) == 31
|
ProjectUserDetailsTest
|
python
|
ansible__ansible
|
test/integration/targets/ansible-test-container/runme.py
|
{
"start": 35997,
"end": 38376
}
|
class ____(Bootstrapper):
    """Bootstrapper for dnf based systems."""

    @classmethod
    def install_podman(cls) -> bool:
        """Return True if podman will be installed."""
        return True

    @classmethod
    def install_docker(cls) -> bool:
        """Return True if docker will be installed."""
        # moby-engine is skipped on RHEL; podman is used there instead.
        return os_release.id != 'rhel'

    @classmethod
    def usable(cls) -> bool:
        """Return True if the bootstrapper can be used, otherwise False."""
        return bool(shutil.which('dnf'))

    @classmethod
    def run(cls) -> None:
        """Run the bootstrapper."""
        # NOTE: Install crun to make it available to podman, otherwise installing moby-engine can cause podman to use runc instead.
        packages = ['podman', 'crun']

        if cls.install_docker():
            packages.append('moby-engine')

        if os_release.id == 'rhel':
            # As of the release of RHEL 9.1, installing podman on RHEL 9.0 results in a non-fatal error at install time:
            #
            #   libsemanage.semanage_pipe_data: Child process /usr/libexec/selinux/hll/pp failed with code: 255. (No such file or directory).
            #   container: libsepol.policydb_read: policydb module version 21 does not match my version range 4-20
            #   container: libsepol.sepol_module_package_read: invalid module in module package (at section 0)
            #   container: Failed to read policy package
            #   libsemanage.semanage_direct_commit: Failed to compile hll files into cil files.
            #    (No such file or directory).
            #   /usr/sbin/semodule:  Failed!
            #
            # Unfortunately this is then fatal when running podman, resulting in no error message and a 127 return code.
            # The solution is to update the policycoreutils package *before* installing podman.
            #
            # NOTE: This work-around can probably be removed once we're testing on RHEL 9.1, as the updated packages should already be installed.
            #       Unfortunately at this time there is no RHEL 9.1 AMI available (other than the Beta release).
            run_command('dnf', 'update', '-y', 'policycoreutils')

        run_command('dnf', 'install', '-y', *packages)

        if cls.install_docker():
            run_command('systemctl', 'start', 'docker')

        super().run()
|
DnfBootstrapper
|
python
|
huggingface__transformers
|
src/transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py
|
{
"start": 1312,
"end": 10207
}
|
class ____(EncodecFeatureExtractor):
    r"""
    Constructs an KyutaiSpeechToText feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
    most of the main methods. Users should refer to this superclass for more information regarding those methods.

    Args:
        feature_size (`int`, *optional*, defaults to 1):
            The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
        sampling_rate (`int`, *optional*, defaults to 24000):
            The sampling rate at which the audio waveform should be digitalized expressed in hertz (Hz).
        padding_value (`float`, *optional*, defaults to 0.0):
            The value that is used to fill the padding values.
        chunk_length_s (`float`, *optional*):
            If defined the audio is pre-processed into chunks of lengths `chunk_length_s` and then encoded.
        overlap (`float`, *optional*):
            Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following
            formulae : `int((1.0 - self.overlap) * self.chunk_length)`.
        audio_delay_seconds (`float`, *optional*, defaults to 0.0):
            The delay in seconds to add after the audio (right padding).
        audio_silence_prefix_seconds (`float`, *optional*, defaults to 0.0):
            The silence prefix in seconds to add before the audio (left padding).
    """

    def __init__(
        self,
        audio_delay_seconds: Optional[float] = 0.0,
        audio_silence_prefix_seconds: Optional[float] = 0.0,
        **super_kwargs,
    ):
        super().__init__(**super_kwargs)
        self.audio_delay_seconds = audio_delay_seconds
        self.audio_silence_prefix_seconds = audio_silence_prefix_seconds

    def __call__(
        self,
        raw_audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s).

        Args:
            raw_audio (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
                The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape
                `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
                (`feature_size = 2`).
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence if provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            truncation (`bool`, *optional*, defaults to `False`):
                Activates truncation to cut input sequences longer than `max_length` to `max_length`.
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        # Normalize everything to float32 arrays of shape (length, channels).
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})

        # When chunking is configured and no explicit max_length is given,
        # derive max_length so it lines up on chunk-stride boundaries.
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        # now let's pad left and right
        # Left: silence prefix. Right: audio delay plus one extra second.
        pad_left = int(self.audio_silence_prefix_seconds * self.sampling_rate)
        pad_right = int((self.audio_delay_seconds + 1.0) * self.sampling_rate)
        padded_inputs["input_values"] = np.pad(
            padded_inputs["input_values"],
            ((0, 0), (pad_left, pad_right)),
            mode="constant",
            constant_values=0.0,
        )
        if padding:
            padded_inputs["padding_mask"] = np.pad(
                padded_inputs["padding_mask"],
                ((0, 0), (pad_left, pad_right)),
                mode="constant",
                constant_values=0,
            )

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                # Restore an explicit channel dimension for mono audio.
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
|
KyutaiSpeechToTextFeatureExtractor
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py
|
{
"start": 1925,
"end": 2513
}
|
class ____(CloudEnvironment):
    """HTTP Tester environment plugin. Updates integration test environment after delegation."""

    def get_environment_config(self) -> CloudEnvironmentConfig:
        """Return environment configuration for use in the test environment after delegation."""
        env_vars = dict(
            # Backwards compatibility for tests intended to work with or without HTTP Tester.
            HTTPTESTER='1',
            KRB5_PASSWORD=str(self._get_cloud_config(KRB5_PASSWORD_ENV)),
        )
        return CloudEnvironmentConfig(env_vars=env_vars)
|
HttptesterEnvironment
|
python
|
keon__algorithms
|
tests/test_backtrack.py
|
{
"start": 9276,
"end": 10041
}
|
class ____(unittest.TestCase):
    """Exercises permute_unique() with and without duplicate elements."""

    def test_permute_unique(self):
        cases = [
            ([1, 1, 2], [[2, 1, 1], [1, 2, 1], [1, 1, 2]]),
            ([1, 2, 1, 3],
             [[3, 1, 2, 1], [1, 3, 2, 1], [1, 2, 3, 1], [1, 2, 1, 3],
              [3, 2, 1, 1], [2, 3, 1, 1], [2, 1, 3, 1], [2, 1, 1, 3],
              [3, 1, 1, 2], [1, 3, 1, 2], [1, 1, 3, 2], [1, 1, 2, 3]]),
            ([1, 2, 3],
             [[3, 2, 1], [2, 3, 1], [2, 1, 3], [3, 1, 2],
              [1, 3, 2], [1, 2, 3]]),
        ]
        # Order of permutations is implementation-defined, so compare sorted.
        for nums, expected in cases:
            self.assertEqual(sorted(permute_unique(nums)), sorted(expected))
|
TestPermuteUnique
|
python
|
ray-project__ray
|
python/ray/serve/_private/benchmarks/serialization/common.py
|
{
"start": 605,
"end": 802
}
|
class ____:
    """Benchmark payload container; every field is optional.

    NOTE(review): presumably decorated with @dataclass (or similar) in the
    surrounding file — confirm before relying on generated __init__.
    """

    # Free-form text payload.
    text: Optional[str] = None
    # Numeric payloads used to vary serialized size.
    floats: Optional[List[float]] = None
    ints: Optional[List[int]] = None
    # Timestamp (seconds, float).
    ts: Optional[float] = None
    # Optional failure/annotation reason.
    reason: Optional[str] = None
|
PayloadDataclass
|
python
|
gevent__gevent
|
src/gevent/tests/test__order.py
|
{
"start": 742,
"end": 1125
}
|
class ____(greentest.TestCase):
    def test(self):
        """Greenlets spawned in order interleave across a sleep(0) yield."""
        lst = []
        gevent.spawn(sleep0, lst, '1')
        gevent.spawn(sleep0, lst, '2')
        gevent.wait()
        # Each greenlet appends 'A', yields to the hub, then appends 'B',
        # so the two runs interleave deterministically.
        self.assertEqual(' '.join(lst), '1A 2A 1B 2B')


def sleep0(lst, param):
    # Record entry, yield control with a zero-length sleep, record resumption.
    lst.append(param + 'A')
    gevent.sleep(0)
    lst.append(param + 'B')


if __name__ == '__main__':
    greentest.main()
|
TestSleep0
|
python
|
openai__openai-python
|
src/openai/types/realtime/audio_transcription_param.py
|
{
"start": 213,
"end": 1275
}
|
class ____(TypedDict, total=False):
language: str
"""The language of the input audio.
Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
"""
model: Literal["whisper-1", "gpt-4o-mini-transcribe", "gpt-4o-transcribe", "gpt-4o-transcribe-diarize"]
"""The model to use for transcription.
Current options are `whisper-1`, `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`,
and `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need
diarization with speaker labels.
"""
prompt: str
"""
An optional text to guide the model's style or continue a previous audio
segment. For `whisper-1`, the
[prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
For `gpt-4o-transcribe` models (excluding `gpt-4o-transcribe-diarize`), the
prompt is a free text string, for example "expect words related to technology".
"""
|
AudioTranscriptionParam
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/timeseries.py
|
{
"start": 7743,
"end": 8565
}
|
class ____:
params = [None, "US/Eastern", "UTC", dateutil.tz.tzutc()]
param_names = "tz"
def setup(self, tz):
N = 100000
self.series = Series(date_range(start="1/1/2000", periods=N, freq="min", tz=tz))
def time_dt_accessor(self, tz):
self.series.dt
def time_dt_accessor_normalize(self, tz):
self.series.dt.normalize()
def time_dt_accessor_month_name(self, tz):
self.series.dt.month_name()
def time_dt_accessor_day_name(self, tz):
self.series.dt.day_name()
def time_dt_accessor_time(self, tz):
self.series.dt.time
def time_dt_accessor_date(self, tz):
self.series.dt.date
def time_dt_accessor_year(self, tz):
self.series.dt.year
from .pandas_vb_common import setup # noqa: F401 isort:skip
|
DatetimeAccessor
|
python
|
getsentry__sentry
|
src/sentry/integrations/github_enterprise/webhook.py
|
{
"start": 3690,
"end": 3798
}
|
class ____(GitHubEnterpriseWebhook, PullRequestEventWebhook):
pass
|
GitHubEnterprisePullRequestEventWebhook
|
python
|
conda__conda
|
tests/plugins/test_manager.py
|
{
"start": 1442,
"end": 9479
}
|
class ____:
@plugins.hookimpl
def conda_virtual_packages(*args) -> Iterator[plugins.CondaVirtualPackage]:
yield DummyVirtualPackage
def test_load_without_plugins(plugin_manager: CondaPluginManager):
assert plugin_manager.load_plugins() == 0
def test_load_two_plugins_one_impls(plugin_manager: CondaPluginManager):
assert plugin_manager.load_plugins(this_module) == 1
assert plugin_manager.get_plugins() == {this_module}
assert plugin_manager.hook.conda_solvers.get_hookimpls() == []
assert plugin_manager.load_plugins(VerboseSolverPlugin) == 1
assert plugin_manager.get_plugins() == {this_module, VerboseSolverPlugin}
hooks_impls = plugin_manager.hook.conda_solvers.get_hookimpls()
assert len(hooks_impls) == 1
assert hooks_impls[0].plugin == VerboseSolverPlugin
def test_get_hook_results(plugin_manager: CondaPluginManager):
name = "virtual_packages"
assert plugin_manager.get_hook_results(name) == []
# loading the archspec plugin module and make sure it was loaded correctly
assert plugin_manager.load_plugins(virtual_packages.archspec) == 1
hook_result = plugin_manager.get_hook_results(name)
assert len(hook_result) == 1
assert hook_result[0].name == "archspec"
# loading an unknown hook result should raise an error
with pytest.raises(PluginError, match="Could not find requested `unknown` plugins"):
plugin_manager.get_hook_results("unknown")
# let's double-check the validation of conflicting plugins works
class SecondArchspec:
@plugins.hookimpl
def conda_virtual_packages():
yield plugins.CondaVirtualPackage("archspec", "", None)
plugin_manager.register(SecondArchspec)
with pytest.raises(
PluginError, match="Conflicting plugins found for `virtual_packages`"
):
plugin_manager.get_hook_results(name)
def test_load_plugins_error(plugin_manager: CondaPluginManager):
assert plugin_manager.load_plugins(VerboseSolverPlugin) == 1
assert plugin_manager.get_plugins() == {VerboseSolverPlugin}
def test_load_entrypoints_success(plugin_manager: CondaPluginManager):
assert plugin_manager.load_entrypoints("test_plugin", "success") == 1
assert len(plugin_manager.get_plugins()) == 1
assert plugin_manager.list_name_plugin()[0][0] == "test_plugin.success"
def test_load_entrypoints_importerror(
plugin_manager: CondaPluginManager,
mocker: MockerFixture,
):
mocked_warning = mocker.patch("conda.plugins.manager.log.warning")
assert plugin_manager.load_entrypoints("test_plugin", "importerror") == 0
assert len(plugin_manager.get_plugins()) == 0
assert mocked_warning.call_count == 1
assert mocked_warning.call_args.args[0] == (
"Error while loading conda entry point: importerror "
"(No module named 'package_that_does_not_exist')"
)
def test_load_entrypoints_blocked(plugin_manager: CondaPluginManager):
plugin_manager.set_blocked("test_plugin.blocked")
assert plugin_manager.load_entrypoints("test_plugin", "blocked") == 0
if pluggy_v100 or pluggy_v150:
assert plugin_manager.get_plugins() == set()
else:
assert plugin_manager.get_plugins() == {None}
assert plugin_manager.list_name_plugin() == [("test_plugin.blocked", None)]
def test_load_entrypoints_register_valueerror(plugin_manager: CondaPluginManager):
"""
Cover check when self.register() raises ValueError because the plugin
was loaded already.
"""
assert plugin_manager.load_entrypoints("test_plugin", "success") == 1
assert plugin_manager.load_entrypoints("test_plugin", "success") == 0
def test_unknown_solver(plugin_manager: CondaPluginManager):
"""
Cover getting a solver that doesn't exist.
"""
with pytest.raises(CondaValueError):
plugin_manager.get_solver_backend("p_equals_np")
def test_known_solver(plugin_manager: CondaPluginManager):
"""
Cover getting a solver that exists.
"""
assert plugin_manager.load_plugins(VerboseSolverPlugin) == 1
assert plugin_manager.get_solver_backend("verbose-classic") == VerboseSolver
def test_get_canonical_name_object(plugin_manager: CondaPluginManager):
canonical_name = plugin_manager.get_canonical_name(object())
assert re.match(r"<unknown_module>.object\[\d+\]", canonical_name), canonical_name
def test_get_canonical_name_module(plugin_manager: CondaPluginManager):
assert plugin_manager.get_canonical_name(this_module) == __name__
def test_get_canonical_name_class(plugin_manager: CondaPluginManager):
canonical_name = plugin_manager.get_canonical_name(VerboseSolverPlugin)
assert canonical_name == f"{__name__}.VerboseSolverPlugin"
def test_get_canonical_name_instance(plugin_manager: CondaPluginManager):
canonical_name = plugin_manager.get_canonical_name(VerboseSolverPlugin())
assert re.match(
rf"{__name__}.VerboseSolverPlugin\[\d+\]",
canonical_name,
)
@pytest.mark.parametrize("plugin", [this_module, VerboseSolverPlugin])
def test_disable_external_plugins(plugin_manager: CondaPluginManager, plugin: object):
"""
Run a test to ensure we can successfully disable externally registered plugins.
"""
assert plugin_manager.load_plugins(plugin) == 1
assert plugin_manager.get_plugins() == {plugin}
plugin_manager.disable_external_plugins()
if pluggy_v100 or pluggy_v150:
assert plugin_manager.get_plugins() == set()
else:
assert plugin_manager.get_plugins() == {None}
def test_plugin_name() -> None:
assert CondaPlugin("foo").name == "foo"
# name is lowercased
assert CondaPlugin("FOO").name == "foo"
# spaces are stripped
assert CondaPlugin(" foo ").name == "foo"
assert CondaPlugin("foo bar").name == "foo bar"
assert CondaPlugin(" foo bar ").name == "foo bar"
@pytest.mark.parametrize("name", [None, 42, True, False, [], {}])
def test_plugin_bad_names(name: Any) -> None:
with pytest.raises(PluginError, match="Invalid plugin name for"):
CondaPlugin(name)
def test_custom_plugin_name_validation(plugin_manager: CondaPluginManager) -> None:
@dataclass
class NoNameCondaPlugin(CondaPlugin):
name: str | None # type: ignore[assignment]
def __post_init__(self):
# do not normalize the name
pass
@dataclass
class NoNamePlugin:
name: str | None
class SpecialPlugin:
@plugins.hookimpl
def conda_virtual_packages(*args):
yield NoNameCondaPlugin(None)
yield NoNamePlugin(None)
plugin_manager.load_plugins(SpecialPlugin)
with pytest.raises(
PluginError,
match=r"(?s)Invalid plugin names found.+NoNameCondaPlugin.+NoNamePlugin",
):
plugin_manager.get_virtual_package_records()
def test_get_virtual_packages(plugin_manager: CondaPluginManager):
assert plugin_manager.load_plugins(DummyVirtualPackagePlugin) == 1
assert plugin_manager.get_virtual_package_records() == (
DummyVirtualPackage.to_virtual_package(),
)
def test_get_solvers(plugin_manager: CondaPluginManager):
assert plugin_manager.load_plugins(VerboseSolverPlugin) == 1
assert plugin_manager.get_plugins() == {VerboseSolverPlugin}
assert plugin_manager.get_solvers() == {"verbose-classic": VerboseCondaSolver}
def test_get_session_headers(plugin_manager: CondaPluginManager):
"""
Ensure that an empty dict is returned when no ``conda_request_headers`` plugin
hooks have been defined.
"""
url = urlparse("https://example.com")
assert plugin_manager.get_session_headers(host=url.netloc) == {}
def test_get_request_headers(plugin_manager: CondaPluginManager):
"""
Ensure that an empty dict is returned when no ``conda_request_headers`` plugin
hooks have been defined.
"""
url = urlparse("https://example.com")
assert plugin_manager.get_request_headers(host=url.netloc, path=url.path) == {}
|
DummyVirtualPackagePlugin
|
python
|
zarr-developers__zarr-python
|
src/zarr/core/dtype/npy/time.py
|
{
"start": 3974,
"end": 4608
}
|
class ____(DTypeConfig_V2[str, None]):
"""
A wrapper around the JSON representation of the ``TimeDelta64`` data type in Zarr V2.
The ``name`` field of this class contains the value that would appear under the
``dtype`` field in Zarr V2 array metadata.
References
----------
The structure of the ``name`` field is defined in the Zarr V2
[specification document](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v2/v2.0.rst#data-type-encoding).
Examples
--------
```python
{
"name": "<m8[1s]",
"object_codec_id": None
}
```
"""
|
TimeDelta64JSON_V2
|
python
|
jina-ai__jina
|
tests/integration/hot_reload/exec2/my_executor2.py
|
{
"start": 73,
"end": 295
}
|
class ____(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = get_doc_value()
|
MyExecutorToReload2
|
python
|
pytorch__pytorch
|
tools/experimental/torchfuzz/operators/nn_functional.py
|
{
"start": 33275,
"end": 34702
}
|
class ____(Operator):
"""Operator for torch.nn.functional.elu (Exponential Linear Unit)."""
def __init__(self):
super().__init__("torch.nn.functional.elu")
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.nn.functional.elu"
def can_produce(self, output_spec: Spec) -> bool:
"""ELU can produce tensor outputs with floating point dtypes."""
if not isinstance(output_spec, TensorSpec):
return False
return is_float_dtype(output_spec.dtype)
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for ELU operation.
ELU is element-wise, so input shape matches output shape.
"""
if not isinstance(output_spec, TensorSpec):
raise ValueError("ELUOperator can only produce TensorSpec outputs")
input_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
return [input_spec]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for ELU operation."""
if len(input_names) != 1:
raise ValueError("ELU requires exactly 1 input")
input_name = input_names[0]
return f"{output_name} = torch.nn.functional.elu({input_name})"
|
ELUOperator
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster_tests/core_tests/resource_tests/pythonic_resources/test_type_signatures.py
|
{
"start": 3041,
"end": 4221
}
|
class ____(ConfigurableResource):
a_string: ResourceDependency[str]
reveal_type(StringDependentResource.__init__)
my_str_resource = StringDependentResource(a_string="foo")
reveal_type(my_str_resource.a_string)
"""
)
pyright_out = get_pyright_reveal_type_output(filename)
mypy_out = get_mypy_type_output(filename)
# Ensure constructor signature supports str Resource, PartialResource, raw str, or a
# resource function that returns a str
assert (
pyright_out[0]
== "(self: StringDependentResource, *, a_string: ConfigurableResourceFactory[str] |"
" PartialResource[str] | ResourceDefinition | str) -> None"
)
# Ensure that the retrieved type is str
assert pyright_out[1] == "str"
assert mypy_out[1] == "builtins.str"
@pytest.mark.typesignature
def test_type_signatures_alias():
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, "test.py")
with open(filename, "w") as f:
f.write(
"""
from dagster import ConfigurableResource
from pydantic import Field
|
StringDependentResource
|
python
|
pennersr__django-allauth
|
allauth/headless/socialaccount/inputs.py
|
{
"start": 646,
"end": 1503
}
|
class ____(inputs.Input):
provider = inputs.CharField()
account = inputs.CharField()
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
def clean(self):
cleaned_data = super().clean()
uid = cleaned_data.get("account")
provider_id = cleaned_data.get("provider")
if uid and provider_id:
accounts = SocialAccount.objects.filter(user=self.user)
account = accounts.filter(
uid=uid,
provider=provider_id,
).first()
if not account:
raise get_adapter().validation_error("account_not_found")
validate_disconnect(context.request, account)
self.cleaned_data["account"] = account
return cleaned_data
|
DeleteProviderAccountInput
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_format26.py
|
{
"start": 315,
"end": 1642
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_format26.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [108652416, 108655744]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"trendline": {"type": "linear", "display_equation": True},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.set_legend({"delete_series": [2, 0]})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
walkccc__LeetCode
|
solutions/2489. Number of Substrings With Fixed Ratio/2489.py
|
{
"start": 0,
"end": 728
}
|
class ____:
def fixedRatio(self, s: str, num1: int, num2: int) -> int:
# Let x := the number of 0s and y := the number of 1s in the subarray.
# We want x : y = num1 : num2, so our goal is to find number of subarrays
# with x * num2 - y * num1 = 0. To achieve this, we can use a prefix count
# map to record the count of the running x * num2 - y * num1. If the
# running x * num2 - y * num1 = prefix, then add count[prefix] to the
# `ans`.
ans = 0
prefix = 0
prefixCount = collections.Counter({0: 1})
for c in s:
if c == '0':
prefix += num2
else: # c == '1'
prefix -= num1
ans += prefixCount[prefix]
prefixCount[prefix] += 1
return ans
|
Solution
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/core_api/datamodels/extra_links.py
|
{
"start": 937,
"end": 1119
}
|
class ____(BaseModel):
"""Extra Links Response."""
extra_links: dict[str, str | None]
total_entries: Annotated[int, Field(title="Total Entries")]
|
ExtraLinkCollectionResponse
|
python
|
pytorch__pytorch
|
benchmarks/dynamo/microbenchmarks/operator_inp_utils.py
|
{
"start": 7058,
"end": 10693
}
|
class ____:
def __init__(self, json_file_path):
self.operator_db = defaultdict(Counter)
with open(json_file_path) as f:
lines = f.readlines()
i = 0
while i < len(lines):
op_line = lines[i].strip("\n")
assert "Operator: " in op_line, op_line
operator = op_line[len("Operator: ") :]
operator = (
operator if operator != "aten.sum.SymInt" else "aten.sum.dim_IntList"
)
op_inps = Counter()
i += 1
while i < len(lines) and "Operator: " not in lines[i]:
line = lines[i]
cnt = eval(line[len("cnt: ") : line.find(",")])
inps = line[line.find(",") + 2 :].strip("'")
op_inps[inps] += cnt
i += 1
self.operator_db[operator] = op_inps
def get_inputs_for_operator(
self, operator, dtype=None, device="cuda"
) -> Generator[tuple[Iterable[Any], dict[str, Any]], None, None]:
assert str(operator) in self.operator_db, (
f"Could not find {operator}, must provide overload"
)
if "embedding" in str(operator):
log.warning("Embedding inputs NYI, input data cannot be randomized")
yield
return
# line[1] represents number of times these inputs occurred, ignored for now
for line in self.operator_db[str(operator)].items():
inps = line[0]
args, kwargs = deserialize_args(inps)
# Backwards require some inputs to be float16 and some to be float32
# So we record on half and upcast to float when specified
if dtype and dtype != torch.float16:
to_dtype = partial(map_to_dtype, dtype=dtype)
args, kwargs = tree_map(to_dtype, (args, kwargs))
if device:
to_device = partial(map_to_device, device=torch.device(device))
args, kwargs = tree_map(to_device, (args, kwargs))
yield args, kwargs
def get_all_ops(self):
for key in self.operator_db:
try:
op = eval(key)
except AttributeError:
log.warning("Evaluating an op name into an OpOverload", exc_info=True)
continue
yield op
def get_call_frequency(self, op):
assert str(op) in self.operator_db, (
f"Could not find {op}, must provide overload"
)
count = 0
for counter in self.operator_db[str(op)].values():
count += counter
return count
def merge(self, other):
for operator, counter_dict in other.operator_db.items():
for inps, cnt in counter_dict.items():
self.operator_db[operator][inps] += cnt
@staticmethod
def get_timm_loader():
return OperatorInputsLoader._load_directory(TIMM_DIR)
@staticmethod
def get_huggingface_loader():
return OperatorInputsLoader._load_directory(HF_DIR)
@staticmethod
def get_torchbench_loader():
return OperatorInputsLoader._load_directory(TORCHBENCH_DIR)
@staticmethod
def _load_directory(inp_dir):
assert os.path.isdir(inp_dir), inp_dir
union = None
for inp in os.listdir(inp_dir):
if inp[-4:] != ".txt":
continue
path = os.path.join(inp_dir, inp)
if union is None:
union = OperatorInputsLoader(path)
else:
union.merge(OperatorInputsLoader(path))
return union
|
OperatorInputsLoader
|
python
|
google__jax
|
tests/pallas/tpu_pallas_random_test.py
|
{
"start": 1259,
"end": 8891
}
|
class ____(jtu.JaxTestCase):
def setUp(self):
if not jtu.test_device_matches(["tpu"]):
self.skipTest("Need TPU devices")
super().setUp()
@parameterized.parameters(True, False)
@jax.legacy_prng_key('allow')
def test_to_pallas_key_under_vmap(self, use_legacy_key: bool):
if use_legacy_key:
key = jax.random.PRNGKey(42)
else:
key = jax.random.key(42, impl="rbg")
key = jax.random.split(key, 10)
batched_key = pltpu.to_pallas_key(key)
batched_key_data = jax.random.key_data(batched_key)
vmapped_key = jax.vmap(pltpu.to_pallas_key)(key)
vmapped_key_data = jax.random.key_data(vmapped_key)
np.testing.assert_array_equal(batched_key_data, vmapped_key_data)
def test_pallas_key_raise_not_implemented_outside_of_kernel(self):
key = jax_random.key(0, impl="rbg")
pallas_key = pltpu.to_pallas_key(key)
# Using a pallas key outside of a kernel should raise an error when
# trying to lower TPU-specific ops to XLA.
# TODO(justinfu): Make this error more specific to pallas PRNG usage.
with self.assertRaisesRegex(NotImplementedError,
"MLIR translation rule .* not found"):
jax.random.uniform(
pallas_key, shape=(1,), minval=0.0, maxval=1.0)
def test_seeded_reproducibility(self):
# Test whether generating random bits with the same seed
# produces the same result (and different seeds produce
# different results).
def seeded_body(seed: int):
def body(o_ref):
pltpu.prng_seed(seed)
o_ref[...] = pltpu.prng_random_bits(o_ref[...].shape)
return body
out = jax.ShapeDtypeStruct((8, 128), jnp.int32)
result_1a = pl.pallas_call(seeded_body(0), out_shape=out)()
result_1b = pl.pallas_call(seeded_body(0), out_shape=out)()
result_2 = pl.pallas_call(seeded_body(1), out_shape=out)()
with self.subTest("same_seed_same_result"):
np.testing.assert_array_equal(result_1a, result_1b)
with self.subTest("diff_seed_diff_result"):
np.testing.assert_array_compare(np.not_equal, result_1a, result_2)
@parameterized.parameters(
((32, 256),),
((8, 16),),
)
def test_prng_non_vreg_shape_output(self, shape):
# Tests that RNG generation works with output shapes
# not equal to a native-sized VREG.
# This test makes sure that vector layout tiling
# is implemented correctly.
def body(o_ref):
pltpu.prng_seed(0)
samples = pltpu.prng_random_bits(o_ref[...].shape)
o_ref[...] = samples
o_shape = jax.ShapeDtypeStruct(shape, jnp.int32)
result = pl.pallas_call(body, out_shape=o_shape)()
# Check that random_bits generates (mostly) unique values.
unique_frac = float(len(jnp.unique(result))) / np.prod(shape)
self.assertGreater(unique_frac, 0.99)
self.assertLessEqual(jnp.max(result), np.iinfo(jnp.int32).max)
self.assertGreaterEqual(jnp.min(result), np.iinfo(jnp.int32).min)
@parameterized.parameters(
(pltpu.stateful_uniform, jnp.float32),
(pltpu.stateful_normal, jnp.float32),
)
def test_stateful_sample(self, generator, dtype):
# Test stateful RNG using the jax.random API wrappers.
def body(key_ref, o_ref):
pltpu.prng_seed(key_ref[...])
o_ref[...] = generator(shape=o_ref[...].shape)
rbg_key = jax_random.key(0, impl="rbg")
key = pltpu.to_pallas_key(rbg_key)
o_shape = jax.ShapeDtypeStruct((8, 128), dtype)
result = pl.pallas_call(
body,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_shape=o_shape,
)(key)
# Check that the numbers are different.
self.assertGreaterEqual(jnp.max(result), jnp.min(result))
@parameterized.parameters(
(jax_random.uniform, jnp.float32, None),
(jax_random.uniform, jnp.float32, (1, 1)),
(jax_random.normal, jnp.float32, None),
(jax_random.bits, jnp.uint32, None),
)
def test_stateless_sample(self, generator, dtype, key_shape):
# Test keyed RNG using the jax.random API.
def body(key_ref, o_ref):
if key_shape:
key_ref = key_ref.at[*((0,) * len(key_shape))]
o_ref[...] = generator(
key_ref[...], shape=o_ref[...].shape
)
rbg_key = jax_random.key(0, impl="rbg")
key = pltpu.to_pallas_key(rbg_key)
if key_shape:
key = jnp.reshape(key, key_shape)
o_shape = jax.ShapeDtypeStruct((8, 128), dtype)
result = pl.pallas_call(
body,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_shape=o_shape,
)(key)
# Check that the numbers are different.
self.assertGreaterEqual(jnp.max(result), jnp.min(result))
def test_key_data(self):
def body(key_ref, o_ref):
x0, x1 = plrandom.unwrap_pallas_seed(key_ref[...])
o_ref[0, 0] = x0
o_ref[0, 1] = x1
rbg_key = jax_random.key(0, impl="rbg")
key = pltpu.to_pallas_key(rbg_key)
expected_key_data = jax.random.key_data(key)
o_shape = jax.ShapeDtypeStruct(expected_key_data.shape,
expected_key_data.dtype)
result = pl.pallas_call(
body,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.SMEM),
out_shape=o_shape,
)(key)
self.assertArraysEqual(result, expected_key_data)
def test_squeezed_blockspec(self):
@functools.partial(
pl.pallas_call,
grid=(),
in_specs=[
pl.BlockSpec((pl.squeezed,), lambda: (0,), memory_space=pltpu.SMEM)
],
out_specs=pl.BlockSpec((8, 128)),
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
)
def kernel(key_ref, o_ref):
o_ref[...] = jax_random.uniform(key_ref[...], shape=o_ref.shape)
# Just make sure this does not crash.
k = pltpu.to_pallas_key(jax_random.key(0, impl="rbg"))
kernel(k[None])
def test_fold_in(self):
# Test that folding in a value results in different random numbers.
def body(key_ref, o_ref):
key = key_ref[...]
o_ref[0, ...] = jax_random.uniform(
key, shape=o_ref[0, ...].shape, minval=0.0, maxval=1.0
)
key = jax_random.fold_in(key, 2)
o_ref[1, ...] = jax_random.uniform(
key, shape=o_ref[1, ...].shape, minval=0.0, maxval=1.0
)
rbg_key = jax_random.key(0, impl="rbg")
key = pltpu.to_pallas_key(rbg_key)
o_shape = jax.ShapeDtypeStruct((2, 8, 128), jnp.float32)
result = pl.pallas_call(
body,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_shape=o_shape,
)(key)
result_a = result[0]
result_b = result[1]
np.testing.assert_array_compare(np.not_equal, result_a, result_b)
def test_key_in_core_map(self):
if not jtu.is_device_tpu_at_least(4):
self.skipTest("Fails on TPU <= v3")
def main(refs):
key_hbm, o_ref = refs
@pl.core_map(pltpu.create_tensorcore_mesh('core'))
def _():
@functools.partial(pl.run_scoped,
key_smem=pltpu.SMEM((), key_hbm.dtype),
o_vmem=pltpu.VMEM(o_ref.shape, o_ref.dtype))
def _scoped(key_smem, o_vmem):
pltpu.sync_copy(key_hbm, key_smem)
o_vmem[...] = jax_random.uniform(
key_smem[...], shape=o_ref.shape, minval=0.0, maxval=1.0
)
pltpu.sync_copy(o_vmem, o_ref)
@jax.jit
def f(rng_key):
y = jnp.zeros((8, 128), dtype=jnp.float32)
_, y = pl.run_state(main)((rng_key, y))
return y
key = pltpu.to_pallas_key(jax_random.key(0, impl="rbg"))
y = f(key)
self.assertGreaterEqual(jnp.max(y), jnp.min(y))
|
PRNGTest
|
python
|
huggingface__transformers
|
src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py
|
{
"start": 20792,
"end": 23943
}
|
class ____(nn.Module):
def __init__(self, config: Sam3TrackerVideoMaskDecoderConfig, skip_first_layer_pe: bool = False):
"""
A transformer block with four layers:
(1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on
sparse inputs (4) cross attention of dense inputs -> sparse inputs
Arguments:
config (`Sam3TrackerVideoMaskDecoderConfig`):
The configuration file used to instantiate the block
attention_downsample_rate (*optionalk*, int, defaults to 2):
The downsample ratio of the block used to reduce the inner dim of the attention.
skip_first_layer_pe (*optional*, bool, defaults to `False`):
Whether or not to skip the addition of the query_point_embedding on the first layer.
"""
super().__init__()
self.self_attn = Sam3TrackerVideoAttention(config, downsample_rate=1)
self.layer_norm1 = nn.LayerNorm(config.hidden_size)
self.cross_attn_token_to_image = Sam3TrackerVideoAttention(config)
self.layer_norm2 = nn.LayerNorm(config.hidden_size)
self.mlp = Sam3TrackerVideoFeedForward(
config.hidden_size, config.mlp_dim, config.hidden_size, num_layers=config.num_hidden_layers
)
self.layer_norm3 = nn.LayerNorm(config.hidden_size)
self.layer_norm4 = nn.LayerNorm(config.hidden_size)
self.cross_attn_image_to_token = Sam3TrackerVideoAttention(config)
self.skip_first_layer_pe = skip_first_layer_pe
def forward(
self,
queries: Tensor,
keys: Tensor,
query_point_embedding: Tensor,
key_point_embedding: Tensor,
attention_similarity: Tensor,
**kwargs: Unpack[TransformersKwargs],
):
# Self attention block
if self.skip_first_layer_pe:
queries, _ = self.self_attn(query=queries, key=queries, value=queries)
else:
query = queries + query_point_embedding
attn_out, _ = self.self_attn(query=query, key=query, value=queries)
queries = queries + attn_out
queries = self.layer_norm1(queries)
# Cross attention block, tokens attending to image embedding
query = queries + query_point_embedding
key = keys + key_point_embedding
attn_out, _ = self.cross_attn_token_to_image(
query=query, key=key, value=keys, attention_similarity=attention_similarity
)
queries = queries + attn_out
queries = self.layer_norm2(queries)
# MLP block
mlp_out = self.mlp(queries)
queries = queries + mlp_out
queries = self.layer_norm3(queries)
# Cross attention block, image embedding attending to tokens
query = queries + query_point_embedding
key = keys + key_point_embedding
attn_out, _ = self.cross_attn_image_to_token(query=key, key=query, value=queries)
keys = keys + attn_out
keys = self.layer_norm4(keys)
return queries, keys, attn_out
|
Sam3TrackerVideoTwoWayAttentionBlock
|
python
|
PrefectHQ__prefect
|
src/prefect/_internal/concurrency/services.py
|
{
"start": 1773,
"end": 11815
}
|
class ____(abc.ABC, Generic[T]):
_instances: dict[int, Self] = {}
_instance_lock = threading.Lock()
def __init__(self, *args: Hashable) -> None:
self._queue: queue.Queue[Optional[T]] = queue.Queue()
self._loop: Optional[asyncio.AbstractEventLoop] = None
self._done_event: Optional[asyncio.Event] = None
self._task: Optional[asyncio.Task[None]] = None
self._stopped: bool = False
self._started: bool = False
self._key = hash((self.__class__, *args))
self._lock = threading.Lock()
self._queue_get_thread = WorkerThread(
# TODO: This thread should not need to be a daemon but when it is not, it
# can prevent the interpreter from exiting.
daemon=True,
name=f"{type(self).__name__}Thread",
)
self._logger = logging.getLogger(f"{type(self).__name__}")
# Track this instance for fork handling
_active_services.add(self)
def reset_for_fork(self) -> None:
"""Reset instance state after fork() to prevent deadlocks in child process."""
self._stopped = True
self._started = False
self._loop = None
self._done_event = None
self._task = None
self._queue = queue.Queue()
self._lock = threading.Lock()
@classmethod
def reset_instances_for_fork(cls) -> None:
"""Reset class-level state after fork() to prevent deadlocks in child process."""
cls._instances.clear()
cls._instance_lock = threading.Lock()
def start(self) -> None:
logger.debug("Starting service %r", self)
loop_thread = get_global_loop()
if not asyncio.get_running_loop() == getattr(loop_thread, "_loop"):
raise RuntimeError("Services must run on the global loop thread.")
self._loop = asyncio.get_running_loop()
self._done_event = asyncio.Event()
self._task = self._loop.create_task(self._run())
self._queue_get_thread.start()
self._started = True
# Ensure that we wait for worker completion before loop thread shutdown
loop_thread.add_shutdown_call(create_call(self.drain))
# Stop at interpreter exit by default
# Handling items may require spawning a thread and in 3.9 new threads
# cannot be spawned after the interpreter finalizes threads which
# happens _before_ the normal `atexit` hook is called resulting in
# failure to process items. This is particularly relevant for services
# which use an httpx client. See related issue at
# https://github.com/python/cpython/issues/86813
threading._register_atexit(self._at_exit) # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
def _at_exit(self) -> None:
self.drain(at_exit=True)
def _stop(self, at_exit: bool = False) -> None:
"""
Stop running this instance.
Does not wait for the instance to finish. See `drain`.
"""
if self._stopped:
return
with self._lock:
if not at_exit: # The logger may not be available during interpreter exit
logger.debug("Stopping service %r", self)
# Stop sending work to this instance
self._remove_instance()
self._stopped = True
# Allow asyncio task to be garbage-collected. Its context may contain
# references to all Prefect Task calls made during a flow run, through
# EngineContext. Issue #10338.
self._task = None
# Signal completion to the loop
self._queue.put_nowait(None)
@abc.abstractmethod
def send(self, item: Any) -> Any:
raise NotImplementedError
async def _run(self) -> None:
try:
async with self._lifespan():
await self._main_loop()
except BaseException:
self._remove_instance()
# The logging call yields to another thread, so we must remove the instance
# before reporting the failure to prevent retrieval of a dead instance
log_traceback = logger.isEnabledFor(logging.DEBUG)
logger.error(
"Service %r failed with %s pending items.",
type(self).__name__,
self._queue.qsize(),
exc_info=log_traceback,
)
finally:
self._remove_instance()
# Shutdown the worker thread
self._queue_get_thread.shutdown()
self._stopped = True
assert self._done_event is not None
self._done_event.set()
async def _main_loop(self) -> None:
last_log_time = 0
log_interval = 4 # log every 4 seconds
while True:
item: Optional[T] = await self._queue_get_thread.submit(
create_call(self._queue.get)
).aresult()
if self._stopped:
current_time = asyncio.get_event_loop().time()
queue_size = self._queue.qsize()
if current_time - last_log_time >= log_interval and queue_size > 0:
self._logger.warning(
f"Still processing items: {queue_size} items remaining..."
)
last_log_time = current_time
if item is None:
logger.debug("Exiting service %r", self)
self._queue.task_done()
break
try:
logger.debug("Service %r handling item %r", self, item)
await self._handle(item)
except Exception:
log_traceback = logger.isEnabledFor(logging.DEBUG)
logger.error(
"Service %r failed to process item %r",
type(self).__name__,
item,
exc_info=log_traceback,
)
finally:
self._queue.task_done()
@abc.abstractmethod
async def _handle(self, item: Any) -> Any:
raise NotImplementedError
@contextlib.asynccontextmanager
async def _lifespan(self) -> AsyncGenerator[None, Any]:
"""
Perform any setup and teardown for the service.
"""
yield
def _drain(self, at_exit: bool = False) -> concurrent.futures.Future[bool]:
"""
Internal implementation for `drain`. Returns a future for sync/async interfaces.
"""
if not at_exit: # The logger may not be available during interpreter exit
logger.debug("Draining service %r", self)
self._stop(at_exit=at_exit)
assert self._done_event is not None
if self._done_event.is_set():
future: concurrent.futures.Future[bool] = concurrent.futures.Future()
future.set_result(False)
return future
assert self._loop is not None
task = cast(Coroutine[Any, Any, bool], self._done_event.wait())
return asyncio.run_coroutine_threadsafe(task, self._loop)
def drain(self, at_exit: bool = False) -> Union[bool, Awaitable[bool]]:
"""
Stop this instance of the service and wait for remaining work to be completed.
Returns an awaitable if called from an async context.
"""
future = self._drain(at_exit=at_exit)
if get_running_loop() is not None:
return asyncio.wrap_future(future)
else:
return future.result()
@classmethod
def drain_all(
cls, timeout: Optional[float] = None, at_exit: bool = True
) -> Union[
tuple[
set[concurrent.futures.Future[bool]], set[concurrent.futures.Future[bool]]
],
Coroutine[
Any,
Any,
Optional[tuple[set[asyncio.Future[bool]], set[asyncio.Future[bool]]]],
],
]:
"""
Stop all instances of the service and wait for all remaining work to be
completed.
Returns an awaitable if called from an async context.
"""
futures: list[concurrent.futures.Future[bool]] = []
with cls._instance_lock:
instances = tuple(cls._instances.values())
for instance in instances:
futures.append(instance._drain(at_exit=at_exit))
if get_running_loop() is not None:
if futures:
return asyncio.wait(
[asyncio.wrap_future(fut) for fut in futures], timeout=timeout
)
# `wait` errors if it receives an empty list but we need to return a
# coroutine still
return asyncio.sleep(0)
else:
return concurrent.futures.wait(futures, timeout=timeout)
def wait_until_empty(self) -> None:
"""
Wait until the queue is empty and all items have been processed.
"""
self._queue.join()
@classmethod
def instance(cls, *args: Hashable) -> Self:
"""
Get an instance of the service.
If an instance already exists with the given arguments, it will be returned.
"""
with cls._instance_lock:
key = hash((cls, *args))
if key not in cls._instances:
cls._instances[key] = cls._new_instance(*args)
return cls._instances[key]
def _remove_instance(self):
self._instances.pop(self._key, None)
@classmethod
def _new_instance(cls, *args: Hashable) -> Self:
"""
Create and start a new instance of the service.
"""
instance = cls(*args)
# If already on the global loop, just start it here to avoid deadlock
if threading.get_ident() == get_global_loop().thread.ident:
instance.start()
# Otherwise, bind the service to the global loop
else:
from_sync.call_soon_in_loop_thread(create_call(instance.start)).result()
return instance
|
_QueueServiceBase
|
python
|
PrefectHQ__prefect
|
src/prefect/settings/models/experiments.py
|
{
"start": 1747,
"end": 2369
}
|
class ____(PrefectBaseSettings):
"""
Settings for configuring experimental features
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(("experiments",))
warn: bool = Field(
default=True,
description="If `True`, warn on usage of experimental features.",
validation_alias=AliasChoices(
AliasPath("warn"), "prefect_experiments_warn", "prefect_experimental_warn"
),
)
plugins: PluginsSettings = Field(
default_factory=PluginsSettings,
description="Settings for the experimental plugin system",
)
|
ExperimentsSettings
|
python
|
apache__airflow
|
airflow-core/tests/unit/api_fastapi/core_api/services/public/test_task_instances.py
|
{
"start": 1363,
"end": 4106
}
|
class ____(TestTaskInstanceEndpoint):
"""Tests for the categorize_task_instances method in BulkTaskInstanceService."""
def setup_method(self):
self.clear_db()
def teardown_method(self):
self.clear_db()
class MockUser:
def get_id(self) -> str:
return "test_user"
def get_name(self) -> str:
return "test_user"
@pytest.mark.parametrize(
(
"task_keys",
"expected_matched_keys",
"expected_not_found_keys",
"expected_matched_count",
"expected_not_found_count",
),
[
pytest.param(
{(TASK_ID_1, -1), (TASK_ID_2, -1)},
{(TASK_ID_1, -1), (TASK_ID_2, -1)},
set(),
2,
0,
id="all_found",
),
pytest.param(
{("nonexistent_task", -1), ("nonexistent_task", 0)},
set(),
{("nonexistent_task", -1), ("nonexistent_task", 0)},
0,
2,
id="none_found",
),
pytest.param(
{(TASK_ID_1, -1), (TASK_ID_1, 0)},
{(TASK_ID_1, -1)},
{(TASK_ID_1, 0)},
1,
1,
id="mixed_found_and_not_found",
),
pytest.param(set(), set(), set(), 0, 0, id="empty_input"),
],
)
def test_categorize_task_instances(
self,
session,
dag_maker,
task_keys,
expected_matched_keys,
expected_not_found_keys,
expected_matched_count,
expected_not_found_count,
):
"""Test categorize_task_instances with various scenarios."""
with dag_maker(dag_id=DAG_ID, session=session):
BashOperator(task_id=TASK_ID_1, bash_command="echo 1")
BashOperator(task_id=TASK_ID_2, bash_command="echo 2")
dag_maker.create_dagrun(run_id=DAG_RUN_ID)
session.commit()
user = self.MockUser()
bulk_request = BulkBody(actions=[])
service = BulkTaskInstanceService(
session=session,
request=bulk_request,
dag_id=DAG_ID,
dag_run_id=DAG_RUN_ID,
dag_bag=dag_maker.dagbag,
user=user,
)
_, matched_task_keys, not_found_task_keys = service.categorize_task_instances(task_keys)
assert len(matched_task_keys) == expected_matched_count
assert len(not_found_task_keys) == expected_not_found_count
assert matched_task_keys == expected_matched_keys
assert not_found_task_keys == expected_not_found_keys
|
TestCategorizeTaskInstances
|
python
|
pytorch__pytorch
|
test/mobile/test_quantize_fx_lite_script_module.py
|
{
"start": 472,
"end": 3121
}
|
class ____(QuantizationLiteTestCase):
# Tests from:
# ./caffe2/test/quantization/fx/test_quantize_fx.py
def test_embedding(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)
def forward(self, indices):
return self.emb(indices)
model = M().eval()
indices = torch.randint(low=0, high=10, size=(20,))
ns.call_module(nnq.Embedding)
configs = [
(float_qparams_weight_only_qconfig, ns.call_module(nnq.Embedding)),
(None, ns.call_module(nn.Embedding)),
(default_qconfig, ns.call_module(nn.Embedding)),
]
for qconfig, _ in configs:
qconfig_dict = {"": qconfig}
m = prepare_fx(
model,
qconfig_dict,
example_inputs=torch.randint(low=0, high=10, size=(20,)),
)
m = convert_fx(m)
self._compare_script_and_mobile(m, input=indices)
def test_conv2d(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
m = M().eval()
qconfig_dict = {"": default_qconfig, "module_name": [("conv1", None)]}
m = prepare_fx(m, qconfig_dict, example_inputs=torch.randn(1, 1, 1, 1))
data = torch.randn(1, 1, 1, 1)
m = convert_fx(m)
# first conv is quantized, second conv is not quantized
self._compare_script_and_mobile(m, input=data)
def test_submodule(self):
# test quantizing complete module, submodule and linear layer
configs = [
{},
{"module_name": [("subm", None)]},
{"module_name": [("fc", None)]},
]
for config in configs:
model = LinearModelWithSubmodule().eval()
qconfig_dict = {
"": torch.ao.quantization.get_default_qconfig("qnnpack"),
**config,
}
model = prepare_fx(
model,
qconfig_dict,
example_inputs=torch.randn(5, 5),
)
quant = convert_fx(model)
x = torch.randn(5, 5)
self._compare_script_and_mobile(quant, input=x)
if __name__ == "__main__":
run_tests() # noqa: F821
|
TestLiteFuseFx
|
python
|
pytransitions__transitions
|
tests/test_nesting.py
|
{
"start": 910,
"end": 1082
}
|
class ____(object):
pass
test_states = ['A', 'B',
{'name': 'C', 'children': ['1', '2', {'name': '3', 'children': ['a', 'b', 'c']}]}, 'D', 'E', 'F']
|
Dummy
|
python
|
great-expectations__great_expectations
|
great_expectations/data_context/types/resource_identifiers.py
|
{
"start": 11335,
"end": 13084
}
|
class ____(DataContextKey):
def __init__(
self,
resource_type: GXCloudRESTResource,
id: str | None = None,
resource_name: str | None = None,
) -> None:
super().__init__()
self._resource_type = resource_type
self._id = id
self._resource_name = resource_name
@property
def resource_type(self) -> GXCloudRESTResource:
return self._resource_type
@resource_type.setter
def resource_type(self, value: GXCloudRESTResource) -> None:
self._resource_type = value
@property
def id(self) -> str | None:
return self._id
@id.setter
def id(self, value: str) -> None:
self._id = value
@property
def resource_name(self) -> str | None:
return self._resource_name
def to_tuple(self): # type: ignore[explicit-override] # FIXME
return (self.resource_type, self.id, self.resource_name)
def to_fixed_length_tuple(self): # type: ignore[explicit-override] # FIXME
return self.to_tuple()
@classmethod
@override
def from_tuple(cls, tuple_):
# Only add resource name if it exists in the tuple_
if len(tuple_) == 3: # noqa: PLR2004 # FIXME CoP
return cls(resource_type=tuple_[0], id=tuple_[1], resource_name=tuple_[2])
return cls(resource_type=tuple_[0], id=tuple_[1])
@classmethod
@override
def from_fixed_length_tuple(cls, tuple_):
return cls.from_tuple(tuple_)
def __repr__(self): # type: ignore[explicit-override] # FIXME
repr = f"{self.__class__.__name__}::{self.resource_type}::{self.id}"
if self.resource_name:
repr += f"::{self.resource_name}"
return repr
|
GXCloudIdentifier
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/handlers/condition/test_new_high_priority_issue_handler.py
|
{
"start": 452,
"end": 2762
}
|
class ____(ConditionTestCase):
condition = Condition.NEW_HIGH_PRIORITY_ISSUE
payload = {"id": NewHighPriorityIssueCondition.id}
def setUp(self) -> None:
super().setUp()
self.event_data = WorkflowEventData(
event=self.group_event,
group=self.group_event.group,
group_state=GroupState(
{
"id": 1,
"is_regression": True,
"is_new": True,
"is_new_group_environment": True,
}
),
workflow_env=self.environment,
)
self.dc = self.create_data_condition(
type=self.condition,
comparison=True,
condition_result=True,
)
def test_dual_write(self) -> None:
dcg = self.create_data_condition_group()
dc = self.translate_to_data_condition(self.payload, dcg)
assert dc.type == self.condition
assert dc.comparison is True
assert dc.condition_result is True
assert dc.condition_group == dcg
def test_json_schema(self) -> None:
dc = self.create_data_condition(
type=self.condition,
comparison=True,
condition_result=True,
)
dc.comparison = False
dc.save()
dc.comparison = {"time": "asdf"}
with pytest.raises(ValidationError):
dc.save()
dc.comparison = "hello"
with pytest.raises(ValidationError):
dc.save()
def test_applies_correctly(self) -> None:
assert self.event_data.group_state
# This will only pass for new issues
self.group_event.group.update(priority=PriorityLevel.HIGH)
self.event_data.group_state["is_new_group_environment"] = True
self.assert_passes(self.dc, self.event_data)
# These will never pass
self.event_data.group_state["is_new_group_environment"] = False
self.assert_does_not_pass(self.dc, self.event_data)
self.group_event.group.update(priority=PriorityLevel.MEDIUM)
self.assert_does_not_pass(self.dc, self.event_data)
self.group_event.group.update(priority=PriorityLevel.LOW)
self.assert_does_not_pass(self.dc, self.event_data)
|
TestNewHighPriorityIssueCondition
|
python
|
davidhalter__jedi
|
test/completion/pep0484_comments.py
|
{
"start": 285,
"end": 544
}
|
class ____: pass
def test(a, b):
a = a # type: BB
c = a # type: str
d = a
# type: str
e = a # type: str # Should ignore long whitespace
#? BB()
a
#? str()
c
#? BB()
d
#? str()
e
|
BB
|
python
|
django-debug-toolbar__django-debug-toolbar
|
debug_toolbar/panels/timer.py
|
{
"start": 326,
"end": 4680
}
|
class ____(Panel):
"""
Panel that displays the time a response took in milliseconds.
"""
is_async = True
def nav_subtitle(self):
stats = self.get_stats()
if stats.get("utime"):
utime = stats.get("utime")
stime = stats.get("stime")
return _("CPU: %(cum)0.2fms (%(total)0.2fms)") % {
"cum": (utime + stime),
"total": stats["total_time"],
}
elif "total_time" in stats:
return _("Total: %0.2fms") % stats["total_time"]
else:
return ""
has_content = resource is not None
title = _("Time")
template = "debug_toolbar/panels/timer.html"
@property
def content(self):
stats = self.get_stats()
rows = (
(_("User CPU time"), _("%(utime)0.3f msec") % stats),
(_("System CPU time"), _("%(stime)0.3f msec") % stats),
(_("Total CPU time"), _("%(total)0.3f msec") % stats),
(_("Elapsed time"), _("%(total_time)0.3f msec") % stats),
(
_("Context switches"),
_("%(vcsw)d voluntary, %(ivcsw)d involuntary") % stats,
),
)
return render_to_string(self.template, {"rows": rows})
@property
def scripts(self):
scripts = super().scripts
scripts.append(static("debug_toolbar/js/timer.js"))
return scripts
def process_request(self, request):
self._start_time = perf_counter()
if self.has_content:
self._start_rusage = resource.getrusage(resource.RUSAGE_SELF)
return super().process_request(request)
def serialize_rusage(self, data):
fields_to_serialize = [
"ru_utime",
"ru_stime",
"ru_nvcsw",
"ru_nivcsw",
"ru_minflt",
"ru_majflt",
]
return {field: getattr(data, field) for field in fields_to_serialize}
def generate_stats(self, request, response):
stats = {}
if hasattr(self, "_start_time"):
stats["total_time"] = (perf_counter() - self._start_time) * 1000
if self.has_content:
self._end_rusage = resource.getrusage(resource.RUSAGE_SELF)
start = self.serialize_rusage(self._start_rusage)
end = self.serialize_rusage(self._end_rusage)
stats.update(
{
"utime": 1000 * self._elapsed_ru(start, end, "ru_utime"),
"stime": 1000 * self._elapsed_ru(start, end, "ru_stime"),
"vcsw": self._elapsed_ru(start, end, "ru_nvcsw"),
"ivcsw": self._elapsed_ru(start, end, "ru_nivcsw"),
"minflt": self._elapsed_ru(start, end, "ru_minflt"),
"majflt": self._elapsed_ru(start, end, "ru_majflt"),
}
)
stats["total"] = stats["utime"] + stats["stime"]
# these are documented as not meaningful under Linux. If you're
# running BSD feel free to enable them, and add any others that I
# hadn't gotten to before I noticed that I was getting nothing but
# zeroes and that the docs agreed. :-(
#
# stats['blkin'] = self._elapsed_ru(start, end, 'ru_inblock')
# stats['blkout'] = self._elapsed_ru(start, end, 'ru_oublock')
# stats['swap'] = self._elapsed_ru(start, end, 'ru_nswap')
# stats['rss'] = self._end_rusage.ru_maxrss
# stats['srss'] = self._end_rusage.ru_ixrss
# stats['urss'] = self._end_rusage.ru_idrss
# stats['usrss'] = self._end_rusage.ru_isrss
self.record_stats(stats)
def generate_server_timing(self, request, response):
stats = self.get_stats()
self.record_server_timing("utime", "User CPU time", stats.get("utime", 0))
self.record_server_timing("stime", "System CPU time", stats.get("stime", 0))
self.record_server_timing("total", "Total CPU time", stats.get("total", 0))
self.record_server_timing(
"total_time", "Elapsed time", stats.get("total_time", 0)
)
@staticmethod
def _elapsed_ru(start, end, name):
return end.get(name) - start.get(name)
|
TimerPanel
|
python
|
pytest-dev__pytest
|
src/_pytest/pytester.py
|
{
"start": 53348,
"end": 53993
}
|
class ____:
def __init__(self) -> None:
self.stringio = StringIO()
""":class:`python:io.StringIO()` instance used for input."""
def assert_contains_lines(self, lines2: Sequence[str]) -> None:
"""Assert that ``lines2`` are contained (linearly) in :attr:`stringio`'s value.
Lines are matched using :func:`LineMatcher.fnmatch_lines <pytest.LineMatcher.fnmatch_lines>`.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0)
self.stringio.seek(0)
lines1 = val.split("\n")
LineMatcher(lines1).fnmatch_lines(lines2)
|
LineComp
|
python
|
ray-project__ray
|
rllib/models/torch/attention_net.py
|
{
"start": 1620,
"end": 10401
}
|
class ____(RecurrentNetwork, nn.Module):
"""A GTrXL net Model described in [2].
This is still in an experimental phase.
Can be used as a drop-in replacement for LSTMs in PPO and IMPALA.
To use this network as a replacement for an RNN, configure your Algorithm
as follows:
Examples:
>> config["model"]["custom_model"] = GTrXLNet
>> config["model"]["max_seq_len"] = 10
>> config["model"]["custom_model_config"] = {
>> num_transformer_units=1,
>> attention_dim=32,
>> num_heads=2,
>> memory_tau=50,
>> etc..
>> }
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
num_outputs: Optional[int],
model_config: ModelConfigDict,
name: str,
*,
num_transformer_units: int = 1,
attention_dim: int = 64,
num_heads: int = 2,
memory_inference: int = 50,
memory_training: int = 50,
head_dim: int = 32,
position_wise_mlp_dim: int = 32,
init_gru_gate_bias: float = 2.0
):
"""Initializes a GTrXLNet.
Args:
num_transformer_units: The number of Transformer repeats to
use (denoted L in [2]).
attention_dim: The input and output dimensions of one
Transformer unit.
num_heads: The number of attention heads to use in parallel.
Denoted as `H` in [3].
memory_inference: The number of timesteps to concat (time
axis) and feed into the next transformer unit as inference
input. The first transformer unit will receive this number of
past observations (plus the current one), instead.
memory_training: The number of timesteps to concat (time
axis) and feed into the next transformer unit as training
input (plus the actual input sequence of len=max_seq_len).
The first transformer unit will receive this number of
past observations (plus the input sequence), instead.
head_dim: The dimension of a single(!) attention head within
a multi-head attention unit. Denoted as `d` in [3].
position_wise_mlp_dim: The dimension of the hidden layer
within the position-wise MLP (after the multi-head attention
block within one Transformer unit). This is the size of the
first of the two layers within the PositionwiseFeedforward. The
second layer always has size=`attention_dim`.
init_gru_gate_bias: Initial bias values for the GRU gates
(two GRUs per Transformer unit, one after the MHA, one after
the position-wise MLP).
"""
super().__init__(
observation_space, action_space, num_outputs, model_config, name
)
nn.Module.__init__(self)
self.num_transformer_units = num_transformer_units
self.attention_dim = attention_dim
self.num_heads = num_heads
self.memory_inference = memory_inference
self.memory_training = memory_training
self.head_dim = head_dim
self.max_seq_len = model_config["max_seq_len"]
self.obs_dim = observation_space.shape[0]
self.linear_layer = SlimFC(in_size=self.obs_dim, out_size=self.attention_dim)
self.layers = [self.linear_layer]
attention_layers = []
# 2) Create L Transformer blocks according to [2].
for i in range(self.num_transformer_units):
# RelativeMultiHeadAttention part.
MHA_layer = SkipConnection(
RelativeMultiHeadAttention(
in_dim=self.attention_dim,
out_dim=self.attention_dim,
num_heads=num_heads,
head_dim=head_dim,
input_layernorm=True,
output_activation=nn.ReLU,
),
fan_in_layer=GRUGate(self.attention_dim, init_gru_gate_bias),
)
# Position-wise MultiLayerPerceptron part.
E_layer = SkipConnection(
nn.Sequential(
torch.nn.LayerNorm(self.attention_dim),
SlimFC(
in_size=self.attention_dim,
out_size=position_wise_mlp_dim,
use_bias=False,
activation_fn=nn.ReLU,
),
SlimFC(
in_size=position_wise_mlp_dim,
out_size=self.attention_dim,
use_bias=False,
activation_fn=nn.ReLU,
),
),
fan_in_layer=GRUGate(self.attention_dim, init_gru_gate_bias),
)
# Build a list of all attanlayers in order.
attention_layers.extend([MHA_layer, E_layer])
# Create a Sequential such that all parameters inside the attention
# layers are automatically registered with this top-level model.
self.attention_layers = nn.Sequential(*attention_layers)
self.layers.extend(attention_layers)
# Final layers if num_outputs not None.
self.logits = None
self.values_out = None
# Last value output.
self._value_out = None
# Postprocess GTrXL output with another hidden layer.
if self.num_outputs is not None:
self.logits = SlimFC(
in_size=self.attention_dim,
out_size=self.num_outputs,
activation_fn=nn.ReLU,
)
# Value function used by all RLlib Torch RL implementations.
self.values_out = SlimFC(
in_size=self.attention_dim, out_size=1, activation_fn=None
)
else:
self.num_outputs = self.attention_dim
# Setup trajectory views (`memory-inference` x past memory outs).
for i in range(self.num_transformer_units):
space = Box(-1.0, 1.0, shape=(self.attention_dim,))
self.view_requirements["state_in_{}".format(i)] = ViewRequirement(
"state_out_{}".format(i),
shift="-{}:-1".format(self.memory_inference),
# Repeat the incoming state every max-seq-len times.
batch_repeat_value=self.max_seq_len,
space=space,
)
self.view_requirements["state_out_{}".format(i)] = ViewRequirement(
space=space, used_for_training=False
)
@override(ModelV2)
def forward(
self, input_dict, state: List[TensorType], seq_lens: TensorType
) -> (TensorType, List[TensorType]):
assert seq_lens is not None
# Add the needed batch rank (tf Models' Input requires this).
observations = input_dict[SampleBatch.OBS]
# Add the time dim to observations.
B = len(seq_lens)
T = observations.shape[0] // B
observations = torch.reshape(
observations, [-1, T] + list(observations.shape[1:])
)
all_out = observations
memory_outs = []
for i in range(len(self.layers)):
# MHA layers which need memory passed in.
if i % 2 == 1:
all_out = self.layers[i](all_out, memory=state[i // 2])
# Either self.linear_layer (initial obs -> attn. dim layer) or
# MultiLayerPerceptrons. The output of these layers is always the
# memory for the next forward pass.
else:
all_out = self.layers[i](all_out)
memory_outs.append(all_out)
# Discard last output (not needed as a memory since it's the last
# layer).
memory_outs = memory_outs[:-1]
if self.logits is not None:
out = self.logits(all_out)
self._value_out = self.values_out(all_out)
out_dim = self.num_outputs
else:
out = all_out
out_dim = self.attention_dim
return torch.reshape(out, [-1, out_dim]), [
torch.reshape(m, [-1, self.attention_dim]) for m in memory_outs
]
# TODO: (sven) Deprecate this once trajectory view API has fully matured.
@override(RecurrentNetwork)
def get_initial_state(self) -> List[np.ndarray]:
return []
@override(ModelV2)
def value_function(self) -> TensorType:
assert (
self._value_out is not None
), "Must call forward first AND must have value branch!"
return torch.reshape(self._value_out, [-1])
|
GTrXLNet
|
python
|
python-openxml__python-docx
|
src/docx/enum/dml.py
|
{
"start": 89,
"end": 779
}
|
class ____(BaseEnum):
"""Specifies the color specification scheme.
Example::
from docx.enum.dml import MSO_COLOR_TYPE
assert font.color.type == MSO_COLOR_TYPE.SCHEME
MS API name: `MsoColorType`
http://msdn.microsoft.com/en-us/library/office/ff864912(v=office.15).aspx
"""
RGB = (1, "Color is specified by an |RGBColor| value.")
"""Color is specified by an |RGBColor| value."""
THEME = (2, "Color is one of the preset theme colors.")
"""Color is one of the preset theme colors."""
AUTO = (101, "Color is determined automatically by the application.")
"""Color is determined automatically by the application."""
|
MSO_COLOR_TYPE
|
python
|
TheAlgorithms__Python
|
graphs/edmonds_karp_multiple_source_and_sink.py
|
{
"start": 2939,
"end": 6592
}
|
class ____(MaximumFlowAlgorithmExecutor):
def __init__(self, flow_network):
super().__init__(flow_network)
self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
self.heights = [0] * self.verticies_count
self.excesses = [0] * self.verticies_count
def _algorithm(self):
self.heights[self.source_index] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
vertices_list = [
i
for i in range(self.verticies_count)
if i not in {self.source_index, self.sink_index}
]
# move through list
i = 0
while i < len(vertices_list):
vertex_index = vertices_list[i]
previous_height = self.heights[vertex_index]
self.process_vertex(vertex_index)
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0, vertices_list.pop(i))
i = 0
else:
i += 1
self.maximum_flow = sum(self.preflow[self.source_index])
def process_vertex(self, vertex_index):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(vertex_index, neighbour_index)
self.relabel(vertex_index)
def push(self, from_index, to_index):
preflow_delta = min(
self.excesses[from_index],
self.graph[from_index][to_index] - self.preflow[from_index][to_index],
)
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def relabel(self, vertex_index):
min_height = None
for to_index in range(self.verticies_count):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
min_height = self.heights[to_index]
if min_height is not None:
self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
entrances = [0]
exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
maximum_flow = flow_network.find_maximum_flow()
print(f"maximum flow is {maximum_flow}")
|
PushRelabelExecutor
|
python
|
tensorflow__tensorflow
|
tensorflow/python/util/decorator_utils.py
|
{
"start": 4197,
"end": 4560
}
|
class ____(object): # pylint: disable=invalid-name
"""Class property decorator.
Example usage:
class MyClass(object):
@classproperty
def value(cls):
return '123'
> print MyClass.value
123
"""
def __init__(self, func):
self._func = func
def __get__(self, owner_self, owner_cls):
return self._func(owner_cls)
|
classproperty
|
python
|
arrow-py__arrow
|
tests/test_arrow.py
|
{
"start": 89478,
"end": 105153
}
|
class ____:
def test_now(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 6, 18, 5, 55, 0)
second_ago = arw.shift(seconds=-1)
second_future = arw.shift(seconds=1)
second_ago_string = second_ago.humanize(
arw, locale=lang, granularity=["second"]
)
second_future_string = second_future.humanize(
arw, locale=lang, granularity=["second"]
)
assert arw.dehumanize(second_ago_string, locale=lang) == arw
assert arw.dehumanize(second_future_string, locale=lang) == arw
def test_seconds(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 6, 18, 5, 55, 0)
second_ago = arw.shift(seconds=-5)
second_future = arw.shift(seconds=5)
second_ago_string = second_ago.humanize(
arw, locale=lang, granularity=["second"]
)
second_future_string = second_future.humanize(
arw, locale=lang, granularity=["second"]
)
assert arw.dehumanize(second_ago_string, locale=lang) == second_ago
assert arw.dehumanize(second_future_string, locale=lang) == second_future
def test_minute(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2001, 6, 18, 5, 55, 0)
minute_ago = arw.shift(minutes=-1)
minute_future = arw.shift(minutes=1)
minute_ago_string = minute_ago.humanize(
arw, locale=lang, granularity=["minute"]
)
minute_future_string = minute_future.humanize(
arw, locale=lang, granularity=["minute"]
)
assert arw.dehumanize(minute_ago_string, locale=lang) == minute_ago
assert arw.dehumanize(minute_future_string, locale=lang) == minute_future
def test_minutes(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2007, 1, 10, 5, 55, 0)
minute_ago = arw.shift(minutes=-5)
minute_future = arw.shift(minutes=5)
minute_ago_string = minute_ago.humanize(
arw, locale=lang, granularity=["minute"]
)
minute_future_string = minute_future.humanize(
arw, locale=lang, granularity=["minute"]
)
assert arw.dehumanize(minute_ago_string, locale=lang) == minute_ago
assert arw.dehumanize(minute_future_string, locale=lang) == minute_future
def test_hour(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2009, 4, 20, 5, 55, 0)
hour_ago = arw.shift(hours=-1)
hour_future = arw.shift(hours=1)
hour_ago_string = hour_ago.humanize(arw, locale=lang, granularity=["hour"])
hour_future_string = hour_future.humanize(
arw, locale=lang, granularity=["hour"]
)
assert arw.dehumanize(hour_ago_string, locale=lang) == hour_ago
assert arw.dehumanize(hour_future_string, locale=lang) == hour_future
def test_hours(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2010, 2, 16, 7, 55, 0)
hour_ago = arw.shift(hours=-3)
hour_future = arw.shift(hours=3)
hour_ago_string = hour_ago.humanize(arw, locale=lang, granularity=["hour"])
hour_future_string = hour_future.humanize(
arw, locale=lang, granularity=["hour"]
)
assert arw.dehumanize(hour_ago_string, locale=lang) == hour_ago
assert arw.dehumanize(hour_future_string, locale=lang) == hour_future
def test_week(self, locale_list_with_weeks: List[str]):
for lang in locale_list_with_weeks:
arw = arrow.Arrow(2012, 2, 18, 1, 52, 0)
week_ago = arw.shift(weeks=-1)
week_future = arw.shift(weeks=1)
week_ago_string = week_ago.humanize(arw, locale=lang, granularity=["week"])
week_future_string = week_future.humanize(
arw, locale=lang, granularity=["week"]
)
assert arw.dehumanize(week_ago_string, locale=lang) == week_ago
assert arw.dehumanize(week_future_string, locale=lang) == week_future
def test_weeks(self, locale_list_with_weeks: List[str]):
for lang in locale_list_with_weeks:
arw = arrow.Arrow(2020, 3, 18, 5, 3, 0)
week_ago = arw.shift(weeks=-7)
week_future = arw.shift(weeks=7)
week_ago_string = week_ago.humanize(arw, locale=lang, granularity=["week"])
week_future_string = week_future.humanize(
arw, locale=lang, granularity=["week"]
)
assert arw.dehumanize(week_ago_string, locale=lang) == week_ago
assert arw.dehumanize(week_future_string, locale=lang) == week_future
def test_year(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 1, 10, 5, 55, 0)
year_ago = arw.shift(years=-1)
year_future = arw.shift(years=1)
year_ago_string = year_ago.humanize(arw, locale=lang, granularity=["year"])
year_future_string = year_future.humanize(
arw, locale=lang, granularity=["year"]
)
assert arw.dehumanize(year_ago_string, locale=lang) == year_ago
assert arw.dehumanize(year_future_string, locale=lang) == year_future
def test_years(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 1, 10, 5, 55, 0)
year_ago = arw.shift(years=-10)
year_future = arw.shift(years=10)
year_ago_string = year_ago.humanize(arw, locale=lang, granularity=["year"])
year_future_string = year_future.humanize(
arw, locale=lang, granularity=["year"]
)
assert arw.dehumanize(year_ago_string, locale=lang) == year_ago
assert arw.dehumanize(year_future_string, locale=lang) == year_future
def test_gt_than_10_years(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 1, 10, 5, 55, 0)
year_ago = arw.shift(years=-25)
year_future = arw.shift(years=25)
year_ago_string = year_ago.humanize(arw, locale=lang, granularity=["year"])
year_future_string = year_future.humanize(
arw, locale=lang, granularity=["year"]
)
assert arw.dehumanize(year_ago_string, locale=lang) == year_ago
assert arw.dehumanize(year_future_string, locale=lang) == year_future
def test_mixed_granularity(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 1, 10, 5, 55, 0)
past = arw.shift(hours=-1, minutes=-1, seconds=-1)
future = arw.shift(hours=1, minutes=1, seconds=1)
past_string = past.humanize(
arw, locale=lang, granularity=["hour", "minute", "second"]
)
future_string = future.humanize(
arw, locale=lang, granularity=["hour", "minute", "second"]
)
assert arw.dehumanize(past_string, locale=lang) == past
assert arw.dehumanize(future_string, locale=lang) == future
def test_mixed_granularity_hours(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 1, 10, 5, 55, 0)
past = arw.shift(hours=-3, minutes=-1, seconds=-15)
future = arw.shift(hours=3, minutes=1, seconds=15)
past_string = past.humanize(
arw, locale=lang, granularity=["hour", "minute", "second"]
)
future_string = future.humanize(
arw, locale=lang, granularity=["hour", "minute", "second"]
)
assert arw.dehumanize(past_string, locale=lang) == past
assert arw.dehumanize(future_string, locale=lang) == future
def test_mixed_granularity_day(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 1, 10, 5, 55, 0)
past = arw.shift(days=-3, minutes=-1, seconds=-15)
future = arw.shift(days=3, minutes=1, seconds=15)
past_string = past.humanize(
arw, locale=lang, granularity=["day", "minute", "second"]
)
future_string = future.humanize(
arw, locale=lang, granularity=["day", "minute", "second"]
)
assert arw.dehumanize(past_string, locale=lang) == past
assert arw.dehumanize(future_string, locale=lang) == future
def test_mixed_granularity_day_hour(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 1, 10, 5, 55, 0)
past = arw.shift(days=-3, hours=-23, seconds=-15)
future = arw.shift(days=3, hours=23, seconds=15)
past_string = past.humanize(
arw, locale=lang, granularity=["day", "hour", "second"]
)
future_string = future.humanize(
arw, locale=lang, granularity=["day", "hour", "second"]
)
assert arw.dehumanize(past_string, locale=lang) == past
assert arw.dehumanize(future_string, locale=lang) == future
# Test to make sure unsupported locales error out
def test_unsupported_locale(self):
arw = arrow.Arrow(2000, 6, 18, 5, 55, 0)
second_ago = arw.shift(seconds=-5)
second_future = arw.shift(seconds=5)
second_ago_string = second_ago.humanize(
arw, locale="ko", granularity=["second"]
)
second_future_string = second_future.humanize(
arw, locale="ko", granularity=["second"]
)
# ko is an example of many unsupported locales currently
with pytest.raises(ValueError):
arw.dehumanize(second_ago_string, locale="ko")
with pytest.raises(ValueError):
arw.dehumanize(second_future_string, locale="ko")
# Test to ensure old style locale strings are supported
def test_normalized_locale(self):
arw = arrow.Arrow(2000, 6, 18, 5, 55, 0)
second_ago = arw.shift(seconds=-5)
second_future = arw.shift(seconds=5)
second_ago_string = second_ago.humanize(
arw, locale="zh_hk", granularity=["second"]
)
second_future_string = second_future.humanize(
arw, locale="zh_hk", granularity=["second"]
)
assert arw.dehumanize(second_ago_string, locale="zh_hk") == second_ago
assert arw.dehumanize(second_future_string, locale="zh_hk") == second_future
# Ensures relative units are required in string
def test_require_relative_unit(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 6, 18, 5, 55, 0)
second_ago = arw.shift(seconds=-5)
second_future = arw.shift(seconds=5)
second_ago_string = second_ago.humanize(
arw, locale=lang, granularity=["second"], only_distance=True
)
second_future_string = second_future.humanize(
arw, locale=lang, granularity=["second"], only_distance=True
)
with pytest.raises(ValueError):
arw.dehumanize(second_ago_string, locale=lang)
with pytest.raises(ValueError):
arw.dehumanize(second_future_string, locale=lang)
# Test for scrambled input
def test_scrambled_input(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 6, 18, 5, 55, 0)
second_ago = arw.shift(seconds=-5)
second_future = arw.shift(seconds=5)
second_ago_string = second_ago.humanize(
arw, locale=lang, granularity=["second"], only_distance=True
)
second_future_string = second_future.humanize(
arw, locale=lang, granularity=["second"], only_distance=True
)
# Scrambles input by sorting strings
second_ago_presort = sorted(second_ago_string)
second_ago_string = "".join(second_ago_presort)
second_future_presort = sorted(second_future_string)
second_future_string = "".join(second_future_presort)
with pytest.raises(ValueError):
arw.dehumanize(second_ago_string, locale=lang)
with pytest.raises(ValueError):
arw.dehumanize(second_future_string, locale=lang)
def test_no_units_modified(self, locale_list_no_weeks: List[str]):
for lang in locale_list_no_weeks:
arw = arrow.Arrow(2000, 6, 18, 5, 55, 0)
# Ensures we pass the first stage of checking whether relative units exist
locale_obj = locales.get_locale(lang)
empty_past_string = locale_obj.past
empty_future_string = locale_obj.future
with pytest.raises(ValueError):
arw.dehumanize(empty_past_string, locale=lang)
with pytest.raises(ValueError):
arw.dehumanize(empty_future_string, locale=lang)
def test_slavic_locales(self, slavic_locales: List[str]):
# Relevant units for Slavic locale plural logic
units = [
0,
1,
2,
5,
21,
22,
25,
]
# Only need to test on seconds as logic holds for all slavic plural units
for lang in slavic_locales:
for unit in units:
arw = arrow.Arrow(2000, 2, 18, 1, 50, 30)
past = arw.shift(minutes=-1 * unit, days=-1)
future = arw.shift(minutes=unit, days=1)
past_string = past.humanize(
arw, locale=lang, granularity=["minute", "day"]
)
future_string = future.humanize(
arw, locale=lang, granularity=["minute", "day"]
)
assert arw.dehumanize(past_string, locale=lang) == past
assert arw.dehumanize(future_string, locale=lang) == future
def test_czech_slovak(self):
# Relevant units for Slavic locale plural logic
units = [
0,
1,
2,
5,
]
# Only need to test on seconds as logic holds for all slavic plural units
for lang in ["cs"]:
for unit in units:
arw = arrow.Arrow(2000, 2, 18, 1, 50, 30)
past = arw.shift(minutes=-1 * unit, days=-1)
future = arw.shift(minutes=unit, days=1)
past_string = past.humanize(
arw, locale=lang, granularity=["minute", "day"]
)
future_string = future.humanize(
arw, locale=lang, granularity=["minute", "day"]
)
assert arw.dehumanize(past_string, locale=lang) == past
assert arw.dehumanize(future_string, locale=lang) == future
|
TestArrowDehumanize
|
python
|
openai__openai-python
|
src/openai/types/beta/realtime/session_update_event.py
|
{
"start": 2734,
"end": 3354
}
|
class ____(BaseModel):
group_id: Optional[str] = None
"""
The group id to attach to this trace to enable filtering and grouping in the
traces dashboard.
"""
metadata: Optional[object] = None
"""
The arbitrary metadata to attach to this trace to enable filtering in the traces
dashboard.
"""
workflow_name: Optional[str] = None
"""The name of the workflow to attach to this trace.
This is used to name the trace in the traces dashboard.
"""
SessionTracing: TypeAlias = Union[Literal["auto"], SessionTracingTracingConfiguration]
|
SessionTracingTracingConfiguration
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-instagram/components.py
|
{
"start": 12949,
"end": 14251
}
|
class ____(SubstreamPartitionRouter):
"""
The way the Python user_insights stream was a substream of the Api/Accounts parent stream, but it only incorporated
the business_account_id as the partition field. However, the actual parent stream slice is an account object made
up of a business_account_id and a page_id. This creates issues when trying to extract the state of the current
partition because the incoming state of an existing connection will not contain the page_id.
This custom component retains the existing behavior of the Python implementation by only saving the business_account_id
to the per-partition state while adding the page_id to the extra_fields so it can be used while readiing records
for a given partition.
"""
def stream_slices(self) -> Iterable[StreamSlice]:
for stream_slice in super().stream_slices():
account_object = stream_slice.partition.get("business_account_id")
stream_slice = StreamSlice(
partition={"business_account_id": account_object.get("business_account_id")},
cursor_slice=stream_slice.cursor_slice,
extra_fields={"page_id": account_object.get("page_id")},
)
yield stream_slice
|
UserInsightsSubstreamPartitionRouter
|
python
|
huggingface__transformers
|
tests/models/markuplm/test_modeling_markuplm.py
|
{
"start": 12022,
"end": 13201
}
|
class ____(unittest.TestCase):
@cached_property
def default_processor(self):
# TODO use from_pretrained here
feature_extractor = MarkupLMFeatureExtractor()
tokenizer = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base")
return MarkupLMProcessor(feature_extractor, tokenizer)
@slow
def test_forward_pass_no_head(self):
model = MarkupLMModel.from_pretrained("microsoft/markuplm-base").to(torch_device)
processor = self.default_processor
inputs = processor(prepare_html_string(), return_tensors="pt")
inputs = inputs.to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the last hidden states
expected_shape = torch.Size([1, 14, 768])
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[0.0675, -0.0052, 0.5001], [-0.2281, 0.0802, 0.2192], [-0.0583, -0.3311, 0.1185]]
).to(torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
|
MarkupLMModelIntegrationTest
|
python
|
great-expectations__great_expectations
|
tests/datasource/fluent/test_invalid_datasource.py
|
{
"start": 10356,
"end": 12173
}
|
class ____:
def test_connection_raises_informative_error(
self, invalid_datasource_factory: InvalidDSFactory
):
random_ds_type = random.choice([t for t in DataSourceManager.type_lookup.type_names()])
print(f"{random_ds_type=}")
invalid_datasource: InvalidDatasource = invalid_datasource_factory(
{
"name": "my invalid ds",
"type": random_ds_type,
"foo": "bar", # regardless of the type this extra field should make the datasource invalid # noqa: E501 # FIXME CoP
"assets": [
{"name": "definitely_invalid", "type": "NOT_A_VALID_TYPE"},
{"name": "maybe_valid", "type": "table", "table_name": "my_table"},
{"name": "maybe_valid_2", "type": "csv", "sep": "|"},
{"name": "missing type"},
],
}
)
print(invalid_datasource)
assert invalid_datasource.assets, "Expected assets to be present"
for invalid_asset in invalid_datasource.assets:
with pytest.raises(TestConnectionError) as conn_err:
invalid_asset.test_connection()
assert invalid_datasource.config_error == conn_err.value.__cause__
@pytest.mark.parametrize("attr_name", ["name", "id", "type"])
def test_base_data_asset_attribute_does_not_error(
self, rand_invalid_datasource_with_assets: InvalidDatasource, attr_name: str
):
assert rand_invalid_datasource_with_assets.assets, "Expected assets to be present"
for asset in rand_invalid_datasource_with_assets.assets:
value = getattr(asset, attr_name)
print(attr_name, value)
if __name__ == "__main__":
pytest.main(["-vv", __file__])
|
TestInvalidDataAsset
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/ghostwriter/test_ghostwriter.py
|
{
"start": 4237,
"end": 7729
}
|
class ____:
foo: str = attr.ib()
def takes_attrs_class(x: Foo) -> None:
pass
@varied_excepts
@pytest.mark.parametrize(
"func",
[
re.compile,
json.loads,
json.dump,
timsort,
ast.literal_eval,
non_type_annotation,
annotated_any,
space_in_name,
non_resolvable_arg,
takes_keys,
takes_values,
takes_match,
takes_pattern,
takes_sized,
takes_frozensets,
takes_attrs_class,
],
)
def test_ghostwriter_fuzz(func, ex):
source_code = ghostwriter.fuzz(func, except_=ex)
get_test_function(source_code)
def test_socket_module():
source_code = ghostwriter.magic(socket)
exec(source_code, {})
def test_binary_op_also_handles_frozensets():
# Using str.replace in a loop would convert `frozensets()` into
# `st.frozenst.sets()` instead of `st.frozensets()`; fixed with re.sub.
source_code = ghostwriter.binary_operation(takes_frozensets)
exec(source_code, {})
def test_binary_op_with_numpy_arrays_includes_imports():
# Regression test for issue #4576: binary_operation should include imports
# for numpy strategies like arrays(), scalar_dtypes(), and array_shapes()
pytest.importorskip("numpy")
import numpy as np
def numpy_add(a: np.ndarray, b: np.ndarray) -> np.ndarray:
return a + b
source_code = ghostwriter.binary_operation(
numpy_add, associative=True, commutative=True, identity=None
)
# Check that the necessary imports are present
assert "from hypothesis.extra.numpy import" in source_code
assert "arrays" in source_code
assert "scalar_dtypes" in source_code
assert "array_shapes" in source_code
# Most importantly: the code should execute without NameError
exec(source_code, {})
@varied_excepts
@pytest.mark.parametrize(
"func", [re.compile, json.loads, json.dump, timsort, ast.literal_eval]
)
def test_ghostwriter_unittest_style(func, ex):
source_code = ghostwriter.fuzz(func, except_=ex, style="unittest")
assert issubclass(get_test_function(source_code), unittest.TestCase)
def no_annotations(foo=None, *, bar=False):
pass
def test_inference_from_defaults_and_none_booleans_reprs_not_just_and_sampled_from():
source_code = ghostwriter.fuzz(no_annotations)
assert "@given(foo=st.none(), bar=st.booleans())" in source_code
def hopefully_hashable(foo: set[Decimal]):
pass
def test_no_hashability_filter():
# In from_type, we ordinarily protect users from really weird cases like
# `Decimal('snan')` - a unhashable value of a hashable type - but in the
# ghostwriter we instead want to present this to the user for an explicit
# decision. They can pass `allow_nan=False`, fix their custom type's
# hashing logic, or whatever else; simply doing nothing will usually work.
source_code = ghostwriter.fuzz(hopefully_hashable)
assert "@given(foo=st.sets(st.decimals()))" in source_code
assert "_can_hash" not in source_code
@pytest.mark.parametrize(
"gw,args",
[
(ghostwriter.fuzz, ["not callable"]),
(ghostwriter.idempotent, ["not callable"]),
(ghostwriter.roundtrip, []),
(ghostwriter.roundtrip, ["not callable"]),
(ghostwriter.equivalent, [sorted]),
(ghostwriter.equivalent, [sorted, "not callable"]),
],
)
def test_invalid_func_inputs(gw, args):
with pytest.raises(InvalidArgument):
gw(*args)
|
Foo
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/completion/base.py
|
{
"start": 12091,
"end": 13384
}
|
class ____(Completer):
"""
Wrapper around any other completer that will enable/disable the completions
depending on whether the received condition is satisfied.
:param completer: :class:`.Completer` instance.
:param filter: :class:`.Filter` instance.
"""
def __init__(self, completer: Completer, filter: FilterOrBool) -> None:
self.completer = completer
self.filter = to_filter(filter)
def __repr__(self) -> str:
return f"ConditionalCompleter({self.completer!r}, filter={self.filter!r})"
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
# Get all completions in a blocking way.
if self.filter():
yield from self.completer.get_completions(document, complete_event)
async def get_completions_async(
self, document: Document, complete_event: CompleteEvent
) -> AsyncGenerator[Completion, None]:
# Get all completions in a non-blocking way.
if self.filter():
async with aclosing(
self.completer.get_completions_async(document, complete_event)
) as async_generator:
async for item in async_generator:
yield item
|
ConditionalCompleter
|
python
|
google__pytype
|
pytype/overlays/functools_overlay.py
|
{
"start": 534,
"end": 999
}
|
class ____(overlay.Overlay):
"""An overlay for the functools std lib module."""
def __init__(self, ctx):
member_map = {
"cached_property": overlay.add_name(
"cached_property", special_builtins.Property.make_alias
),
}
if ctx.options.use_functools_partial_overlay:
member_map["partial"] = Partial
ast = ctx.loader.import_name(_MODULE_NAME)
super().__init__(ctx, _MODULE_NAME, member_map, ast)
|
FunctoolsOverlay
|
python
|
spyder-ide__spyder
|
spyder/plugins/completion/providers/languageserver/transport/tcp/consumer.py
|
{
"start": 731,
"end": 882
}
|
class ____(IncomingMessageThread):
"""TCP socket consumer."""
def read_num_bytes(self, n):
return self.fd.recv(n)
|
TCPIncomingMessageThread
|
python
|
scipy__scipy
|
scipy/stats/tests/test_stats.py
|
{
"start": 14937,
"end": 32906
}
|
class ____:
def test_pearsonr_result_attributes(self):
res = stats.pearsonr(X, X)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes)
assert_equal(res.correlation, res.statistic)
def test_r_almost_exactly_pos1(self, xp):
a = xp.arange(3.0)
r, prob = stats.pearsonr(a, a)
xp_assert_close(r, xp.asarray(1.0), atol=1e-15)
# With n = len(a) = 3, the error in prob grows like the
# square root of the error in r.
xp_assert_close(prob, xp.asarray(0.0), atol=np.sqrt(2*np.spacing(1.0)))
def test_r_almost_exactly_neg1(self, xp):
a = xp.arange(3.0)
r, prob = stats.pearsonr(a, -a)
xp_assert_close(r, xp.asarray(-1.0), atol=1e-15)
# With n = len(a) = 3, the error in prob grows like the
# square root of the error in r.
xp_assert_close(prob, xp.asarray(0.0), atol=np.sqrt(2*np.spacing(1.0)))
def test_basic(self, xp):
# A basic test, with a correlation coefficient
# that is not 1 or -1.
a = xp.asarray([-1, 0, 1])
b = xp.asarray([0, 0, 3])
r, prob = stats.pearsonr(a, b)
xp_assert_close(r, xp.asarray(3**0.5/2))
xp_assert_close(prob, xp.asarray(1/3))
def test_constant_input(self, xp):
# Zero variance input
# See https://github.com/scipy/scipy/issues/3728
x = xp.asarray([0.667, 0.667, 0.667])
y = xp.asarray([0.123, 0.456, 0.789])
msg = "An input array is constant"
with eager_warns(stats.ConstantInputWarning, match=msg, xp=xp):
r, p = stats.pearsonr(x, y)
xp_assert_close(r, xp.asarray(xp.nan))
xp_assert_close(p, xp.asarray(xp.nan))
@pytest.mark.parametrize('dtype', ['float32', 'float64'])
def test_near_constant_input(self, xp, dtype):
npdtype = getattr(np, dtype)
dtype = getattr(xp, dtype)
# Near constant input (but not constant):
x = xp.asarray([2, 2, 2 + np.spacing(2, dtype=npdtype)], dtype=dtype)
y = xp.asarray([3, 3, 3 + 6*np.spacing(3, dtype=npdtype)], dtype=dtype)
msg = "An input array is nearly constant; the computed"
with eager_warns(stats.NearConstantInputWarning, match=msg, xp=xp):
# r and p are garbage, so don't bother checking them in this case.
# (The exact value of r would be 1.)
stats.pearsonr(x, y)
def test_very_small_input_values(self, xp):
# Very small values in an input. A naive implementation will
# suffer from underflow.
# See https://github.com/scipy/scipy/issues/9353
x = xp.asarray([0.004434375, 0.004756007, 0.003911996, 0.0038005, 0.003409971],
dtype=xp.float64)
y = xp.asarray([2.48e-188, 7.41e-181, 4.09e-208, 2.08e-223, 2.66e-245],
dtype=xp.float64)
r, p = stats.pearsonr(x, y)
# The expected values were computed using mpmath with 80 digits
# of precision.
xp_assert_close(r, xp.asarray(0.7272930540750450, dtype=xp.float64))
xp_assert_close(p, xp.asarray(0.1637805429533202, dtype=xp.float64))
def test_very_large_input_values(self, xp):
# Very large values in an input. A naive implementation will
# suffer from overflow.
# See https://github.com/scipy/scipy/issues/8980
x = 1e90*xp.asarray([0, 0, 0, 1, 1, 1, 1], dtype=xp.float64)
y = 1e90*xp.arange(7, dtype=xp.float64)
r, p = stats.pearsonr(x, y)
# The expected values were computed using mpmath with 80 digits
# of precision.
xp_assert_close(r, xp.asarray(0.8660254037844386, dtype=xp.float64))
xp_assert_close(p, xp.asarray(0.011724811003954638, dtype=xp.float64))
def test_extremely_large_input_values(self, xp):
# Extremely large values in x and y. These values would cause the
# product sigma_x * sigma_y to overflow if the two factors were
# computed independently.
x = xp.asarray([2.3e200, 4.5e200, 6.7e200, 8e200], dtype=xp.float64)
y = xp.asarray([1.2e199, 5.5e200, 3.3e201, 1.0e200], dtype=xp.float64)
r, p = stats.pearsonr(x, y)
# The expected values were computed using mpmath with 80 digits
# of precision.
xp_assert_close(r, xp.asarray(0.351312332103289, dtype=xp.float64))
xp_assert_close(p, xp.asarray(0.648687667896711, dtype=xp.float64))
@pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning:dask")
@pytest.mark.filterwarnings("ignore:divide by zero encountered:RuntimeWarning:dask")
def test_length_two_pos1(self, xp):
# Inputs with length 2.
# See https://github.com/scipy/scipy/issues/7730
x = xp.asarray([1., 2.])
y = xp.asarray([3., 5.])
res = stats.pearsonr(x, y)
r, p = res
one = xp.asarray(1.)
xp_assert_equal(r, one)
xp_assert_equal(p, one)
low, high = res.confidence_interval()
xp_assert_equal(low, -one)
xp_assert_equal(high, one)
@pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning:dask")
@pytest.mark.filterwarnings("ignore:divide by zero encountered:RuntimeWarning:dask")
def test_length_two_neg1(self, xp):
# Inputs with length 2.
# See https://github.com/scipy/scipy/issues/7730
x = xp.asarray([2., 1.])
y = xp.asarray([3., 5.])
res = stats.pearsonr(x, y)
r, p = res
one = xp.asarray(1.)
xp_assert_equal(r, -one)
xp_assert_equal(p, one)
low, high = res.confidence_interval()
xp_assert_equal(low, -one)
xp_assert_equal(high, one)
@pytest.mark.filterwarnings("ignore:invalid value encountered in divide")
def test_length_two_constant_input(self, xp):
# Zero variance input
# See https://github.com/scipy/scipy/issues/3728
# and https://github.com/scipy/scipy/issues/7730
x = xp.asarray([0.667, 0.667])
y = xp.asarray([0.123, 0.456])
msg = "An input array is constant"
with eager_warns(stats.ConstantInputWarning, match=msg, xp=xp):
r, p = stats.pearsonr(x, y)
xp_assert_close(r, xp.asarray(xp.nan))
xp_assert_close(p, xp.asarray(xp.nan))
# Expected values computed with R 3.6.2 cor.test, e.g.
# options(digits=16)
# x <- c(1, 2, 3, 4)
# y <- c(0, 1, 0.5, 1)
# cor.test(x, y, method = "pearson", alternative = "g")
# correlation coefficient and p-value for alternative='two-sided'
# calculated with mpmath agree to 16 digits.
@skip_xp_backends(np_only=True)
@pytest.mark.parametrize('alternative, pval, rlow, rhigh, sign',
[('two-sided', 0.325800137536, -0.814938968841, 0.99230697523, 1),
('less', 0.8370999312316, -1, 0.985600937290653, 1),
('greater', 0.1629000687684, -0.6785654158217636, 1, 1),
('two-sided', 0.325800137536, -0.992306975236, 0.81493896884, -1),
('less', 0.1629000687684, -1.0, 0.6785654158217636, -1),
('greater', 0.8370999312316, -0.985600937290653, 1.0, -1)])
def test_basic_example(self, alternative, pval, rlow, rhigh, sign, xp):
x = [1, 2, 3, 4]
y = np.array([0, 1, 0.5, 1]) * sign
result = stats.pearsonr(x, y, alternative=alternative)
assert_allclose(result.statistic, 0.6741998624632421*sign, rtol=1e-12)
assert_allclose(result.pvalue, pval, rtol=1e-6)
ci = result.confidence_interval()
assert_allclose(ci, (rlow, rhigh), rtol=1e-6)
def test_negative_correlation_pvalue_gh17795(self, xp):
x = xp.arange(10.)
y = -x
test_greater = stats.pearsonr(x, y, alternative='greater')
test_less = stats.pearsonr(x, y, alternative='less')
xp_assert_close(test_greater.pvalue, xp.asarray(1.))
xp_assert_close(test_less.pvalue, xp.asarray(0.), atol=1e-20)
@pytest.mark.filterwarnings("ignore:divide by zero encountered:RuntimeWarning:dask")
def test_length3_r_exactly_negative_one(self, xp):
x = xp.asarray([1., 2., 3.])
y = xp.asarray([5., -4., -13.])
res = stats.pearsonr(x, y)
# The expected r and p are exact.
r, p = res
one = xp.asarray(1.0)
xp_assert_close(r, -one)
xp_assert_close(p, 0*one, atol=1e-7)
low, high = res.confidence_interval()
xp_assert_equal(low, -one)
xp_assert_equal(high, one)
def test_input_validation(self):
# Arraylike is np only
x = [1, 2, 3]
y = [4]
message = '`x` and `y` must have the same length along `axis`.'
with pytest.raises(ValueError, match=message):
stats.pearsonr(x, y)
x = [1, 2, 3]
y = [4, 5]
message = '`x` and `y` must be broadcastable.'
with pytest.raises(ValueError, match=message):
stats.pearsonr(x, y)
x = [1]
y = [2]
message = '`x` and `y` must have length at least 2.'
with pytest.raises(ValueError, match=message):
stats.pearsonr(x, y)
x = [-1j, -2j, -3.0j]
y = [-1j, -2j, -3.0j]
message = 'This function does not support complex data'
with pytest.raises(ValueError, match=message):
stats.pearsonr(x, y)
message = "`method` must be an instance of..."
with pytest.raises(ValueError, match=message):
stats.pearsonr([1, 2], [3, 4], method="asymptotic")
res = stats.pearsonr([1, 2], [3, 4])
with pytest.raises(ValueError, match=message):
res.confidence_interval(method="exact")
@pytest.mark.fail_slow(10)
@pytest.mark.xfail_on_32bit("Monte Carlo method needs > a few kB of memory")
@pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided'))
@pytest.mark.parametrize('method_name',
('permutation', 'monte_carlo', 'monte_carlo2'))
def test_resampling_pvalue(self, method_name, alternative):
rng = np.random.default_rng(24623935790378923)
size = (2, 100) if method_name == 'permutation' else (2, 1000)
x = rng.normal(size=size)
y = rng.normal(size=size)
methods = {'permutation': stats.PermutationMethod(rng=rng),
'monte_carlo': stats.MonteCarloMethod(rvs=(rng.normal,)*2),
'monte_carlo2': stats.MonteCarloMethod(rng=1294)}
method = methods[method_name]
res = stats.pearsonr(x, y, alternative=alternative, method=method, axis=-1)
ref = stats.pearsonr(x, y, alternative=alternative, axis=-1)
assert_allclose(res.statistic, ref.statistic, rtol=1e-15)
assert_allclose(res.pvalue, ref.pvalue, rtol=1e-2, atol=1e-3)
if method_name == 'monte_carlo2':
method = stats.MonteCarloMethod(rng=1294)
res2 = stats.pearsonr(x, y, alternative=alternative, method=method, axis=-1)
assert_equal(res2.statistic, res.statistic)
assert_equal(res2.pvalue, res.pvalue)
@pytest.mark.slow
@pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided'))
def test_bootstrap_ci(self, alternative):
rng = np.random.default_rng(2462935790378923)
x = rng.normal(size=(2, 100))
y = rng.normal(size=(2, 100))
res = stats.pearsonr(x, y, alternative=alternative, axis=-1)
# preserve use of old random_state during SPEC 7 transition
rng = np.random.default_rng(724358723498249852)
method = stats.BootstrapMethod(random_state=rng)
res_ci = res.confidence_interval(method=method)
ref_ci = res.confidence_interval()
assert_allclose(res_ci, ref_ci, atol=1.5e-2)
# `rng` is the new argument name`
rng = np.random.default_rng(724358723498249852)
method = stats.BootstrapMethod(rng=rng)
res_ci2 = res.confidence_interval(method=method)
assert_allclose(res_ci2, res_ci)
@pytest.mark.parametrize('axis', [0, 1])
def test_axis01(self, axis):
rng = np.random.default_rng(38572345825)
shape = (9, 10)
x, y = rng.normal(size=(2,) + shape)
res = stats.pearsonr(x, y, axis=axis)
ci = res.confidence_interval()
if axis == 0:
x, y = x.T, y.T
for i in range(x.shape[0]):
res_i = stats.pearsonr(x[i], y[i])
ci_i = res_i.confidence_interval()
assert_allclose(res.statistic[i], res_i.statistic)
assert_allclose(res.pvalue[i], res_i.pvalue)
assert_allclose(ci.low[i], ci_i.low)
assert_allclose(ci.high[i], ci_i.high)
def test_axis_None(self):
rng = np.random.default_rng(38572345825)
shape = (9, 10)
x, y = rng.normal(size=(2,) + shape)
res = stats.pearsonr(x, y, axis=None)
ci = res.confidence_interval()
ref = stats.pearsonr(x.ravel(), y.ravel())
ci_ref = ref.confidence_interval()
assert_allclose(res.statistic, ref.statistic)
assert_allclose(res.pvalue, ref.pvalue)
assert_allclose(ci, ci_ref)
def test_nd_input_validation(self, xp):
x = y = xp.ones((2, 5))
message = '`axis` must be an integer.'
with pytest.raises(ValueError, match=message):
stats.pearsonr(x, y, axis=1.5)
message = '`x` and `y` must have the same length along `axis`'
with pytest.raises(ValueError, match=message):
stats.pearsonr(x, xp.ones((2, 1)), axis=1)
message = '`x` and `y` must have length at least 2.'
with pytest.raises(ValueError, match=message):
stats.pearsonr(xp.ones((2, 1)), xp.ones((2, 1)), axis=1)
message = '`x` and `y` must be broadcastable.'
with pytest.raises(ValueError, match=message):
stats.pearsonr(x, xp.ones((3, 5)), axis=1)
message = '`method` must be `None` if arguments are not NumPy arrays.'
if not is_numpy(xp):
x = xp.arange(10)
with pytest.raises(ValueError, match=message):
stats.pearsonr(x, x, method=stats.PermutationMethod())
@pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning:dask")
@pytest.mark.filterwarnings("ignore:divide by zero encountered:RuntimeWarning:dask")
def test_nd_special_cases(self, xp):
rng = np.random.default_rng(34989235492245)
x0, y0 = rng.random((4, 5)), rng.random((4, 5))
x0[0, ...] = 1
y0[1, ...] = 2
x, y = xp.asarray(x0), xp.asarray(y0)
message = 'An input array is constant'
with eager_warns(stats.ConstantInputWarning, match=message, xp=xp):
res = stats.pearsonr(x, y, axis=1)
ci = res.confidence_interval()
nans = xp.asarray([xp.nan, xp.nan], dtype=xp.float64)
xp_assert_equal(res.statistic[0:2], nans)
xp_assert_equal(res.pvalue[0:2], nans)
xp_assert_equal(ci.low[0:2], nans)
xp_assert_equal(ci.high[0:2], nans)
assert xp.all(xp.isfinite(res.statistic[2:]))
assert xp.all(xp.isfinite(res.pvalue[2:]))
assert xp.all(xp.isfinite(ci.low[2:]))
assert xp.all(xp.isfinite(ci.high[2:]))
x0[0, 0], y0[1, 1] = 1 + 1e-15, 2 + 1e-15
x, y = xp.asarray(x0), xp.asarray(y0)
message = 'An input array is nearly constant'
with eager_warns(stats.NearConstantInputWarning, match=message, xp=xp):
stats.pearsonr(x, y, axis=1)
# length 2 along axis
x = xp.asarray([[1, 2], [1, 2], [2, 1], [2, 1.]])
y = xp.asarray([[1, 2], [2, 1], [1, 2], [2, 1.]])
ones = xp.ones(4)
res = stats.pearsonr(x, y, axis=-1)
ci = res.confidence_interval()
xp_assert_close(res.statistic, xp.asarray([1, -1, -1, 1.]))
xp_assert_close(res.pvalue, ones)
xp_assert_close(ci.low, -ones)
xp_assert_close(ci.high, ones)
def test_different_dimensionality(self, xp):
# For better or for worse, there is one difference between the broadcasting
# behavior of most stats functions and NumPy gufuncs / NEP 5: gufuncs `axis`
# refers to the core dimension *before* prepending `1`s to the array shapes
# to match dimensionality; SciPy's prepends `1`s first. For instance, in
# SciPy, `vecdot` would work just like `xp.sum(x * y, axis=axis)`, but this
# is NOT true of NumPy. The discrepancy only arises when there are multiple
# arguments with different dimensionality and positive indices are used,
# which is probably why it hasn't been a problem. There are pros and cons of
# each convention, and we might want to consider changing our behavior in
# SciPy 2.0. For now, preserve consistency / backward compatibility.
rng = np.random.default_rng(45834598265019344)
x = rng.random((3, 10))
y = rng.random(10)
res = stats.pearsonr(x, y, axis=1)
ref = stats.pearsonr(x, y, axis=-1)
assert_equal(res.statistic, ref.statistic)
@pytest.mark.parametrize('axis', [0, 1, None])
@pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided'])
def test_array_api(self, xp, axis, alternative):
x, y = rng.normal(size=(2, 10, 11))
res = stats.pearsonr(xp.asarray(x), xp.asarray(y),
axis=axis, alternative=alternative)
ref = stats.pearsonr(x, y, axis=axis, alternative=alternative)
xp_assert_close(res.statistic, xp.asarray(ref.statistic))
xp_assert_close(res.pvalue, xp.asarray(ref.pvalue))
res_ci = res.confidence_interval()
ref_ci = ref.confidence_interval()
xp_assert_close(res_ci.low, xp.asarray(ref_ci.low))
xp_assert_close(res_ci.high, xp.asarray(ref_ci.high))
|
TestPearsonr
|
python
|
django__django
|
django/db/models/lookups.py
|
{
"start": 26767,
"end": 26887
}
|
class ____(YearLookup, GreaterThanOrEqual):
def get_bound_params(self, start, finish):
return (start,)
|
YearGte
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/mirror_gnu/package.py
|
{
"start": 299,
"end": 578
}
|
class ____(AutotoolsPackage, GNUMirrorPackage):
"""Simple GNU package"""
homepage = "https://www.gnu.org/software/make/"
gnu_mirror_path = "make/make-4.2.1.tar.gz"
version("4.2.1", sha256="e40b8f018c1da64edd1cc9a6fce5fa63b2e707e404e20cad91fbae337c98a5b7")
|
MirrorGnu
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/schema.py
|
{
"start": 110974,
"end": 133335
}
|
class ____(DialectKWArgs, SchemaItem):
"""Defines a dependency between two columns.
``ForeignKey`` is specified as an argument to a :class:`_schema.Column`
object,
e.g.::
t = Table(
"remote_table",
metadata,
Column("remote_id", ForeignKey("main_table.id")),
)
Note that ``ForeignKey`` is only a marker object that defines
a dependency between two columns. The actual constraint
is in all cases represented by the :class:`_schema.ForeignKeyConstraint`
object. This object will be generated automatically when
a ``ForeignKey`` is associated with a :class:`_schema.Column` which
in turn is associated with a :class:`_schema.Table`. Conversely,
when :class:`_schema.ForeignKeyConstraint` is applied to a
:class:`_schema.Table`,
``ForeignKey`` markers are automatically generated to be
present on each associated :class:`_schema.Column`, which are also
associated with the constraint object.
Note that you cannot define a "composite" foreign key constraint,
that is a constraint between a grouping of multiple parent/child
columns, using ``ForeignKey`` objects. To define this grouping,
the :class:`_schema.ForeignKeyConstraint` object must be used, and applied
to the :class:`_schema.Table`. The associated ``ForeignKey`` objects
are created automatically.
The ``ForeignKey`` objects associated with an individual
:class:`_schema.Column`
object are available in the `foreign_keys` collection
of that column.
Further examples of foreign key configuration are in
:ref:`metadata_foreignkeys`.
"""
__visit_name__ = "foreign_key"
parent: Column[Any]
_table_column: Optional[Column[Any]]
_colspec: Union[str, Column[Any]]
def __init__(
self,
column: _DDLColumnReferenceArgument,
_constraint: Optional[ForeignKeyConstraint] = None,
use_alter: bool = False,
name: _ConstraintNameArgument = None,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
link_to_name: bool = False,
match: Optional[str] = None,
info: Optional[_InfoType] = None,
comment: Optional[str] = None,
_unresolvable: bool = False,
**dialect_kw: Any,
):
r"""
Construct a column-level FOREIGN KEY.
The :class:`_schema.ForeignKey` object when constructed generates a
:class:`_schema.ForeignKeyConstraint`
which is associated with the parent
:class:`_schema.Table` object's collection of constraints.
:param column: A single target column for the key relationship. A
:class:`_schema.Column` object or a column name as a string:
``tablename.columnkey`` or ``schema.tablename.columnkey``.
``columnkey`` is the ``key`` which has been assigned to the column
(defaults to the column name itself), unless ``link_to_name`` is
``True`` in which case the rendered name of the column is used.
:param name: Optional string. An in-database name for the key if
`constraint` is not provided.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
.. seealso::
:ref:`on_update_on_delete`
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT. Some dialects may allow for additional
syntaxes.
.. seealso::
:ref:`on_update_on_delete`
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally
assigned ``key``.
:param use_alter: passed to the underlying
:class:`_schema.ForeignKeyConstraint`
to indicate the constraint should
be generated/dropped externally from the CREATE TABLE/ DROP TABLE
statement. See :paramref:`_schema.ForeignKeyConstraint.use_alter`
for further description.
.. seealso::
:paramref:`_schema.ForeignKeyConstraint.use_alter`
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param comment: Optional string that will render an SQL comment on
foreign key constraint creation.
.. versionadded:: 2.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. The
arguments are ultimately handled by a corresponding
:class:`_schema.ForeignKeyConstraint`.
See the documentation regarding
an individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self._unresolvable = _unresolvable
self._colspec, self._table_column = self._parse_colspec_argument(
column
)
# the linked ForeignKeyConstraint.
# ForeignKey will create this when parent Column
# is attached to a Table, *or* ForeignKeyConstraint
# object passes itself in when creating ForeignKey
# markers.
self.constraint = _constraint
# .parent is not Optional under normal use
self.parent = None # type: ignore
self.use_alter = use_alter
self.name = name
self.onupdate = onupdate
self.ondelete = ondelete
self.deferrable = deferrable
self.initially = initially
self.link_to_name = link_to_name
self.match = match
self.comment = comment
if info:
self.info = info
self._unvalidated_dialect_kw = dialect_kw
def _resolve_colspec_argument(
self,
) -> Tuple[
Union[str, Column[Any]],
Optional[Column[Any]],
]:
argument = self._colspec
return self._parse_colspec_argument(argument)
def _parse_colspec_argument(
self,
argument: _DDLColumnArgument,
) -> Tuple[
Union[str, Column[Any]],
Optional[Column[Any]],
]:
_colspec = coercions.expect(roles.DDLReferredColumnRole, argument)
if isinstance(_colspec, str):
_table_column = None
else:
assert isinstance(_colspec, ColumnClause)
_table_column = _colspec
if not isinstance(_table_column.table, (type(None), TableClause)):
raise exc.ArgumentError(
"ForeignKey received Column not bound "
"to a Table, got: %r" % _table_column.table
)
return _colspec, _table_column
def __repr__(self) -> str:
return "ForeignKey(%r)" % self._get_colspec()
@util.deprecated(
"1.4",
"The :meth:`_schema.ForeignKey.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, *, schema: Optional[str] = None, **kw: Any) -> ForeignKey:
return self._copy(schema=schema, **kw)
def _copy(self, *, schema: Optional[str] = None, **kw: Any) -> ForeignKey:
"""Produce a copy of this :class:`_schema.ForeignKey` object.
The new :class:`_schema.ForeignKey` will not be bound
to any :class:`_schema.Column`.
This method is usually used by the internal
copy procedures of :class:`_schema.Column`, :class:`_schema.Table`,
and :class:`_schema.MetaData`.
:param schema: The returned :class:`_schema.ForeignKey` will
reference the original table and column name, qualified
by the given string schema name.
"""
fk = ForeignKey(
self._get_colspec(schema=schema),
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
comment=self.comment,
**self._unvalidated_dialect_kw,
)
return self._schema_item_copy(fk)
def _get_colspec(
self,
schema: Optional[
Union[
str,
Literal[SchemaConst.RETAIN_SCHEMA, SchemaConst.BLANK_SCHEMA],
]
] = None,
table_name: Optional[str] = None,
_is_copy: bool = False,
) -> str:
"""Return a string based 'column specification' for this
:class:`_schema.ForeignKey`.
This is usually the equivalent of the string-based "tablename.colname"
argument first passed to the object's constructor.
"""
_colspec, effective_table_column = self._resolve_colspec_argument()
if schema not in (None, RETAIN_SCHEMA):
_schema, tname, colname = self._column_tokens
if table_name is not None:
tname = table_name
if schema is BLANK_SCHEMA:
return "%s.%s" % (tname, colname)
else:
return "%s.%s.%s" % (schema, tname, colname)
elif table_name:
schema, tname, colname = self._column_tokens
if schema:
return "%s.%s.%s" % (schema, table_name, colname)
else:
return "%s.%s" % (table_name, colname)
elif effective_table_column is not None:
if effective_table_column.table is None:
if _is_copy:
raise exc.InvalidRequestError(
f"Can't copy ForeignKey object which refers to "
f"non-table bound Column {effective_table_column!r}"
)
else:
return effective_table_column.key
return "%s.%s" % (
effective_table_column.table.fullname,
effective_table_column.key,
)
else:
assert isinstance(_colspec, str)
return _colspec
@property
def _referred_schema(self) -> Optional[str]:
return self._column_tokens[0]
def _table_key_within_construction(self) -> Any:
"""get the table key but only safely"""
if self._table_column is not None:
if self._table_column.table is None:
return None
else:
return self._table_column.table.key
else:
schema, tname, colname = self._column_tokens
return _get_table_key(tname, schema)
target_fullname = property(_get_colspec)
def references(self, table: Table) -> bool:
"""Return True if the given :class:`_schema.Table`
is referenced by this
:class:`_schema.ForeignKey`."""
return table.corresponding_column(self.column) is not None
def get_referent(self, table: FromClause) -> Optional[Column[Any]]:
"""Return the :class:`_schema.Column` in the given
:class:`_schema.Table` (or any :class:`.FromClause`)
referenced by this :class:`_schema.ForeignKey`.
Returns None if this :class:`_schema.ForeignKey`
does not reference the given
:class:`_schema.Table`.
"""
# our column is a Column, and any subquery etc. proxying us
# would be doing so via another Column, so that's what would
# be returned here
return table.columns.corresponding_column(self.column) # type: ignore
@util.memoized_property
def _column_tokens(self) -> Tuple[Optional[str], str, Optional[str]]:
"""parse a string-based _colspec into its component parts."""
m = self._get_colspec().split(".")
if len(m) == 1:
tname = m.pop()
colname = None
else:
colname = m.pop()
tname = m.pop()
# A FK between column 'bar' and table 'foo' can be
# specified as 'foo', 'foo.bar', 'dbo.foo.bar',
# 'otherdb.dbo.foo.bar'. Once we have the column name and
# the table name, treat everything else as the schema
# name. Some databases (e.g. Sybase) support
# inter-database foreign keys. See tickets#1341 and --
# indirectly related -- Ticket #594. This assumes that '.'
# will never appear *within* any component of the FK.
if len(m) > 0:
schema = ".".join(m)
else:
schema = None
return schema, tname, colname
def _resolve_col_tokens(self) -> Tuple[Table, str, Optional[str]]:
if self.parent is None:
raise exc.InvalidRequestError(
"this ForeignKey object does not yet have a "
"parent Column associated with it."
)
elif self.parent.table is None:
raise exc.InvalidRequestError(
"this ForeignKey's parent column is not yet associated "
"with a Table."
)
parenttable = self.parent.table
if self._unresolvable:
schema, tname, colname = self._column_tokens
tablekey = _get_table_key(tname, schema)
return parenttable, tablekey, colname
# assertion
# basically Column._make_proxy() sends the actual
# target Column to the ForeignKey object, so the
# string resolution here is never called.
for c in self.parent.base_columns:
if isinstance(c, Column):
assert c.table is parenttable
break
else:
assert False
######################
schema, tname, colname = self._column_tokens
if schema is None and parenttable.metadata.schema is not None:
schema = parenttable.metadata.schema
tablekey = _get_table_key(tname, schema)
return parenttable, tablekey, colname
def _link_to_col_by_colstring(
self, parenttable: Table, table: Table, colname: Optional[str]
) -> Column[Any]:
_column = None
if colname is None:
# colname is None in the case that ForeignKey argument
# was specified as table name only, in which case we
# match the column name to the same column on the
# parent.
# this use case wasn't working in later 1.x series
# as it had no test coverage; fixed in 2.0
parent = self.parent
assert parent is not None
key = parent.key
_column = table.c.get(key, None)
elif self.link_to_name:
key = colname
for c in table.c:
if c.name == colname:
_column = c
else:
key = colname
_column = table.c.get(colname, None)
if _column is None:
raise exc.NoReferencedColumnError(
"Could not initialize target column "
f"for ForeignKey '{self._get_colspec()}' "
f"on table '{parenttable.name}': "
f"table '{table.name}' has no column named '{key}'",
table.name,
key,
)
return _column
def _set_target_column(self, column: Column[Any]) -> None:
assert self.parent is not None
# propagate TypeEngine to parent if it didn't have one
if self.parent.type._isnull:
self.parent.type = column.type
# super-edgy case, if other FKs point to our column,
# they'd get the type propagated out also.
def set_type(fk: ForeignKey) -> None:
if fk.parent.type._isnull:
fk.parent.type = column.type
self.parent._setup_on_memoized_fks(set_type)
self.column = column # type: ignore
@util.ro_memoized_property
def column(self) -> Column[Any]:
"""Return the target :class:`_schema.Column` referenced by this
:class:`_schema.ForeignKey`.
If no target column has been established, an exception
is raised.
"""
return self._resolve_column()
@overload
def _resolve_column(
self, *, raiseerr: Literal[True] = ...
) -> Column[Any]: ...
@overload
def _resolve_column(
self, *, raiseerr: bool = ...
) -> Optional[Column[Any]]: ...
def _resolve_column(
self, *, raiseerr: bool = True
) -> Optional[Column[Any]]:
_column: Column[Any]
_colspec, effective_table_column = self._resolve_colspec_argument()
if isinstance(_colspec, str):
parenttable, tablekey, colname = self._resolve_col_tokens()
if self._unresolvable or tablekey not in parenttable.metadata:
if not raiseerr:
return None
raise exc.NoReferencedTableError(
f"Foreign key associated with column "
f"'{self.parent}' could not find "
f"table '{tablekey}' with which to generate a "
f"foreign key to target column '{colname}'",
tablekey,
)
elif parenttable.key not in parenttable.metadata:
if not raiseerr:
return None
raise exc.InvalidRequestError(
f"Table {parenttable} is no longer associated with its "
"parent MetaData"
)
else:
table = parenttable.metadata.tables[tablekey]
return self._link_to_col_by_colstring(
parenttable, table, colname
)
elif hasattr(_colspec, "__clause_element__"):
_column = _colspec.__clause_element__()
return _column
else:
assert isinstance(_colspec, Column)
_column = _colspec
return _column
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, Column)
if self.parent is not None and self.parent is not parent:
raise exc.InvalidRequestError(
"This ForeignKey already has a parent !"
)
self.parent = parent
self.parent.foreign_keys.add(self)
self.parent._on_table_attach(self._set_table)
def _set_remote_table(self, table: Table) -> None:
parenttable, _, colname = self._resolve_col_tokens()
_column = self._link_to_col_by_colstring(parenttable, table, colname)
self._set_target_column(_column)
assert self.constraint is not None
self.constraint._validate_dest_table(table)
def _remove_from_metadata(self, metadata: MetaData) -> None:
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if self in metadata._fk_memos[fk_key]:
# TODO: no test coverage for self not in memos
metadata._fk_memos[fk_key].remove(self)
def _set_table(self, column: Column[Any], table: Table) -> None:
# standalone ForeignKey - create ForeignKeyConstraint
# on the hosting Table when attached to the Table.
assert isinstance(table, Table)
if self.constraint is None:
self.constraint = ForeignKeyConstraint(
[],
[],
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
match=self.match,
comment=self.comment,
**self._unvalidated_dialect_kw,
)
self.constraint._append_element(column, self)
self.constraint._set_parent_with_dispatch(table)
table.foreign_keys.add(self)
# set up remote ".column" attribute, or a note to pick it
# up when the other Table/Column shows up
_colspec, _ = self._resolve_colspec_argument()
if isinstance(_colspec, str):
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if table_key in parenttable.metadata.tables:
table = parenttable.metadata.tables[table_key]
try:
_column = self._link_to_col_by_colstring(
parenttable, table, colname
)
except exc.NoReferencedColumnError:
# this is OK, we'll try later
pass
else:
self._set_target_column(_column)
parenttable.metadata._fk_memos[fk_key].append(self)
elif hasattr(_colspec, "__clause_element__"):
_column = _colspec.__clause_element__()
self._set_target_column(_column)
else:
self._set_target_column(_colspec)
if TYPE_CHECKING:
def default_is_sequence(
obj: Optional[DefaultGenerator],
) -> TypeGuard[Sequence]: ...
def default_is_clause_element(
obj: Optional[DefaultGenerator],
) -> TypeGuard[ColumnElementColumnDefault]: ...
def default_is_scalar(
obj: Optional[DefaultGenerator],
) -> TypeGuard[ScalarElementColumnDefault]: ...
else:
default_is_sequence = operator.attrgetter("is_sequence")
default_is_clause_element = operator.attrgetter("is_clause_element")
default_is_scalar = operator.attrgetter("is_scalar")
|
ForeignKey
|
python
|
getsentry__sentry
|
src/sentry/integrations/issue_alert_image_builder.py
|
{
"start": 1213,
"end": 7544
}
|
class ____:
def __init__(self, group: Group, provider: ExternalProviderEnum) -> None:
self.group = group
self.provider = provider
self.cache_key = f"chartcuterie-image:{self.group.id}"
self.tags = {
"provider": self.provider,
"issue_category": self.group.issue_category,
}
self.lock = locks.get(key=f"lock_{self.cache_key}", duration=10, name="issue_alert_image")
self.issue_type_to_image_builder: dict[type[GroupType], Callable[[], str | None]] = {
PerformanceP95EndpointRegressionGroupType: self._get_endpoint_regression_image_url,
ProfileFunctionRegressionType: self._get_function_regression_image_url,
}
def get_image_url(self) -> str | None:
try:
# We only generate images for supported issue types
if self.group.issue_type not in self.issue_type_to_image_builder:
return None
metrics.incr("chartcuterie.issue_alert.attempt", tags=self.tags)
image_url = cache.get(self.cache_key)
if image_url is None:
self.lock.blocking_acquire(initial_delay=1, timeout=30)
# Checking again in case another thread generated the image while
# this thread was acquiring the lock
image_url = cache.get(self.cache_key)
if image_url is None:
image_url = self.issue_type_to_image_builder[self.group.issue_type]()
self.lock.release()
except UnableToAcquireLock:
# There is a chance that another thread generated the image
image_url = cache.get(self.cache_key)
if not image_url:
logger.warning(
"issue_alert_chartcuterie_image.lock.failed",
extra={"group_id": self.group.id},
)
except Exception as e:
logger.exception(
"issue_alert_chartcuterie_image.failed",
extra={"exception": e, "group_id": self.group.id},
)
sentry_sdk.capture_exception()
if self.lock.locked():
self.lock.release()
if image_url:
metrics.incr("chartcuterie.issue_alert.success", tags=self.tags)
# We don't want to regenerate the image if another type of notification is sending the same one
# For example slack notification and email notification for the same issue
cache.set(self.cache_key, image_url, timeout=60 * 5)
return image_url
# This would only happen if we support the issue type, but chartcuterie failed to generate the image
logger.warning(
"issue_alert_chartcuterie_image.empty_image",
extra={"group_id": self.group.id},
)
return None
def _get_endpoint_regression_image_url(self) -> str | None:
organization = self.group.organization
event = self.group.get_latest_event_for_environments()
if event is None or event.transaction is None or event.occurrence is None:
logger.warning(
"issue_alert_chartcuterie_image.empty_event",
extra={"event": event},
)
return None
transaction_name = utils.escape_transaction(event.transaction)
period = get_relative_time(anchor=get_approx_start_time(self.group), relative_days=14)
resp = client.get(
auth=ApiKey(organization_id=organization.id, scope_list=["org:read"]),
user=None,
path=f"/organizations/{organization.slug}/events-stats/",
data={
"yAxis": ["count()", "p95(transaction.duration)"],
"referrer": Referrer.API_ENDPOINT_REGRESSION_ALERT_CHARTCUTERIE,
"query": f'event.type:transaction transaction:"{transaction_name}"',
"project": self.group.project.id,
"start": period["start"].strftime("%Y-%m-%d %H:%M:%S"),
"end": period["end"].strftime("%Y-%m-%d %H:%M:%S"),
"dataset": "metrics",
},
)
return charts.generate_chart(
ChartType.SLACK_PERFORMANCE_ENDPOINT_REGRESSION,
data={
"evidenceData": event.occurrence.evidence_data,
"percentileData": resp.data["p95(transaction.duration)"]["data"],
},
size=DEFAULT_CHART_SIZE,
)
def _get_function_regression_image_url(self) -> str | None:
organization = self.group.organization
event = self.group.get_latest_event_for_environments()
if event is None or event.occurrence is None:
logger.warning(
"issue_alert_chartcuterie_image.empty_event",
extra={"event": event},
)
return None
period = get_relative_time(anchor=get_approx_start_time(self.group), relative_days=14)
resp = client.get(
auth=ApiKey(organization_id=organization.id, scope_list=["org:read"]),
user=None,
path=f"/organizations/{organization.slug}/events-stats/",
data={
"dataset": "profileFunctions",
"referrer": Referrer.API_FUNCTION_REGRESSION_ALERT_CHARTCUTERIE,
"project": self.group.project.id,
"start": period["start"].strftime("%Y-%m-%d %H:%M:%S"),
"end": period["end"].strftime("%Y-%m-%d %H:%M:%S"),
"yAxis": ["p95()"],
"query": f"fingerprint:{event.occurrence.evidence_data['fingerprint']}",
},
)
# Convert the aggregate range from nanoseconds to milliseconds
evidence_data = {
"aggregate_range_1": event.occurrence.evidence_data["aggregate_range_1"] / 1e6,
"aggregate_range_2": event.occurrence.evidence_data["aggregate_range_2"] / 1e6,
"breakpoint": event.occurrence.evidence_data["breakpoint"],
}
return charts.generate_chart(
ChartType.SLACK_PERFORMANCE_FUNCTION_REGRESSION,
data={
"evidenceData": evidence_data,
"rawResponse": resp.data,
},
size=DEFAULT_CHART_SIZE,
)
|
IssueAlertImageBuilder
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-cloudflare-ai-gateway/llama_index/llms/cloudflare_ai_gateway/base.py
|
{
"start": 1039,
"end": 1176
}
|
class ____(CloudflareAIGatewayError):
"""Raised when AI Gateway authentication fails."""
pass
|
CloudflareAIGatewayUnauthorizedError
|
python
|
kamyu104__LeetCode-Solutions
|
Python/shortest-word-distance-ii.py
|
{
"start": 110,
"end": 910
}
|
class ____(object):
# initialize your data structure here.
# @param {string[]} words
def __init__(self, words):
self.wordIndex = collections.defaultdict(list)
for i in xrange(len(words)):
self.wordIndex[words[i]].append(i)
# @param {string} word1
# @param {string} word2
# @return {integer}
# Adds a word into the data structure.
def shortest(self, word1, word2):
indexes1 = self.wordIndex[word1]
indexes2 = self.wordIndex[word2]
i, j, dist = 0, 0, float("inf")
while i < len(indexes1) and j < len(indexes2):
dist = min(dist, abs(indexes1[i] - indexes2[j]))
if indexes1[i] < indexes2[j]:
i += 1
else:
j += 1
return dist
|
WordDistance
|
python
|
GoogleCloudPlatform__python-docs-samples
|
appengine/standard/blobstore/gcs/main.py
|
{
"start": 969,
"end": 2492
}
|
class ____(webapp2.RequestHandler):
def get(self):
# Get the default Cloud Storage Bucket name and create a file name for
# the object in Cloud Storage.
bucket = app_identity.get_default_gcs_bucket_name()
# Cloud Storage file names are in the format /bucket/object.
filename = "/{}/blobstore_demo".format(bucket)
# Create a file in Google Cloud Storage and write something to it.
with cloudstorage.open(filename, "w") as filehandle:
filehandle.write("abcde\n")
# In order to read the contents of the file using the Blobstore API,
# you must create a blob_key from the Cloud Storage file name.
# Blobstore expects the filename to be in the format of:
# /gs/bucket/object
blobstore_filename = "/gs{}".format(filename)
blob_key = blobstore.create_gs_key(blobstore_filename)
# Read the file's contents using the Blobstore API.
# The last two parameters specify the start and end index of bytes we
# want to read.
data = blobstore.fetch_data(blob_key, 0, 6)
# Write the contents to the response.
self.response.headers["Content-Type"] = "text/plain"
self.response.write(data)
# Delete the file from Google Cloud Storage using the blob_key.
blobstore.delete(blob_key)
# This handler creates a file in Cloud Storage using the cloudstorage
# client library and then serves the file back using the Blobstore API.
|
CreateAndReadFileHandler
|
python
|
huggingface__transformers
|
src/transformers/models/efficientloftr/modeling_efficientloftr.py
|
{
"start": 28783,
"end": 30916
}
|
class ____(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = EfficientLoFTRConfig
base_model_prefix = "efficientloftr"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_can_record_outputs = {
"hidden_states": EfficientLoFTRRepVGGBlock,
"attentions": EfficientLoFTRAttention,
}
@torch.no_grad()
def _init_weights(self, module: nn.Module) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d, nn.Conv1d, nn.BatchNorm2d)):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
# Copied from transformers.models.superpoint.modeling_superpoint.SuperPointPreTrainedModel.extract_one_channel_pixel_values with SuperPoint->EfficientLoFTR
def extract_one_channel_pixel_values(self, pixel_values: torch.FloatTensor) -> torch.FloatTensor:
"""
Assuming pixel_values has shape (batch_size, 3, height, width), and that all channels values are the same,
extract the first channel value to get a tensor of shape (batch_size, 1, height, width) for EfficientLoFTR. This is
a workaround for the issue discussed in :
https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446
Args:
pixel_values: torch.FloatTensor of shape (batch_size, 3, height, width)
Returns:
pixel_values: torch.FloatTensor of shape (batch_size, 1, height, width)
"""
return pixel_values[:, 0, :, :][:, None, :, :]
@auto_docstring(
custom_intro="""
EfficientLoFTR model taking images as inputs and outputting the features of the images.
"""
)
|
EfficientLoFTRPreTrainedModel
|
python
|
ansible__ansible
|
lib/ansible/executor/task_queue_manager.py
|
{
"start": 2560,
"end": 2755
}
|
class ____:
worker_id: int
prompt: str
private: bool = True
seconds: int = None
interrupt_input: t.Iterable[bytes] = None
complete_input: t.Iterable[bytes] = None
|
PromptSend
|
python
|
Textualize__textual
|
tests/test_message_pump.py
|
{
"start": 1071,
"end": 1833
}
|
class ____(Widget):
called_by = None
def key_x(self):
self.called_by = self.key_x
def _key_x(self):
self.called_by = self._key_x
def key_tab(self):
self.called_by = self.key_tab
def key_ctrl_i(self):
self.called_by = self.key_ctrl_i
async def test_dispatch_key_raises_when_conflicting_handler_aliases():
"""If you've got a handler for e.g. ctrl+i and a handler for tab, that's probably a mistake.
In the terminal, they're the same thing, so we fail fast via exception here."""
widget = DuplicateHandlersWidget()
with pytest.raises(DuplicateKeyHandlers):
await dispatch_key(widget, Key(key="tab", character="\t"))
assert widget.called_by == widget.key_tab
|
DuplicateHandlersWidget
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/image_ops_test.py
|
{
"start": 25448,
"end": 27058
}
|
class ____(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for i in range(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustHue_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue("/cpu:0", 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue("/cpu:0", None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
|
AdjustHueBenchmark
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mysql/mariadb.py
|
{
"start": 1111,
"end": 2258
}
|
class ____(UUID[_UUID_RETURN]):
def __init__(self, as_uuid: bool = True, native_uuid: bool = True):
self.as_uuid = as_uuid
# the _MariaDBUUID internal type is only invoked for a Uuid() with
# native_uuid=True. for non-native uuid type, the plain Uuid
# returns itself due to the workings of the Emulated superclass.
assert native_uuid
# for internal type, force string conversion for result_processor() as
# current drivers are returning a string, not a Python UUID object
self.native_uuid = False
@property
def native(self) -> bool: # type: ignore[override]
# override to return True, this is a native type, just turning
# off native_uuid for internal data handling
return True
def bind_processor(self, dialect: MariaDBDialect) -> Optional[_BindProcessorType[_UUID_RETURN]]: # type: ignore[override] # noqa: E501
if not dialect.supports_native_uuid or not dialect._allows_uuid_binds:
return super().bind_processor(dialect) # type: ignore[return-value] # noqa: E501
else:
return None
|
_MariaDBUUID
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.