language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sqlalchemy__sqlalchemy | test/orm/test_relationships.py | {
"start": 182179,
"end": 191750
} | class ____(_fixtures.FixtureTest):
run_inserts = "once"
run_deletes = None
def test_o2m_raiseload_mapper(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(addresses=relationship(Address, lazy="raise")),
)
q = fixture_session().query(User)
result = [None]
def go():
x = q.filter(User.id == 7).all()
assert_raises_message(
sa.exc.InvalidRequestError,
"'User.addresses' is not available due to lazy='raise'",
lambda: x[0].addresses,
)
result[0] = x
self.assert_sql_count(testing.db, go, 1)
self.assert_result(result[0], User, {"id": 7})
def test_o2m_raiseload_option(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User, users, properties=dict(addresses=relationship(Address))
)
q = fixture_session().query(User)
result = [None]
def go():
x = (
q.options(sa.orm.raiseload(User.addresses))
.filter(User.id == 7)
.all()
)
assert_raises_message(
sa.exc.InvalidRequestError,
"'User.addresses' is not available due to lazy='raise'",
lambda: x[0].addresses,
)
result[0] = x
self.assert_sql_count(testing.db, go, 1)
self.assert_result(result[0], User, {"id": 7})
def test_o2m_raiseload_lazyload_option(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(addresses=relationship(Address, lazy="raise")),
)
q = (
fixture_session()
.query(User)
.options(sa.orm.lazyload(User.addresses))
)
result = [None]
def go():
x = q.filter(User.id == 7).all()
x[0].addresses
result[0] = x
self.sql_count_(2, go)
self.assert_result(
result[0], User, {"id": 7, "addresses": (Address, [{"id": 1}])}
)
def test_m2o_raiseload_option(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
Address, addresses, properties={"user": relationship(User)}
)
self.mapper_registry.map_imperatively(User, users)
s = fixture_session()
a1 = (
s.query(Address)
.filter_by(id=1)
.options(sa.orm.raiseload(Address.user))
.first()
)
def go():
assert_raises_message(
sa.exc.InvalidRequestError,
"'Address.user' is not available due to lazy='raise'",
lambda: a1.user,
)
self.sql_count_(0, go)
def test_m2o_raise_on_sql_option(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
Address, addresses, properties={"user": relationship(User)}
)
self.mapper_registry.map_imperatively(User, users)
s = fixture_session()
a1 = (
s.query(Address)
.filter_by(id=1)
.options(sa.orm.raiseload(Address.user, sql_only=True))
.first()
)
def go():
assert_raises_message(
sa.exc.InvalidRequestError,
"'Address.user' is not available due to lazy='raise_on_sql'",
lambda: a1.user,
)
self.sql_count_(0, go)
s.close()
u1 = s.query(User).first()
a1 = (
s.query(Address)
.filter_by(id=1)
.options(sa.orm.raiseload(Address.user, sql_only=True))
.first()
)
assert "user" not in a1.__dict__
is_(a1.user, u1)
def test_m2o_non_use_get_raise_on_sql_option(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(
User,
primaryjoin=sa.and_(
addresses.c.user_id == users.c.id,
users.c.name != None, # noqa
),
)
},
)
self.mapper_registry.map_imperatively(User, users)
s = fixture_session()
u1 = s.query(User).first() # noqa
a1 = (
s.query(Address)
.filter_by(id=1)
.options(sa.orm.raiseload(Address.user, sql_only=True))
.first()
)
def go():
assert_raises_message(
sa.exc.InvalidRequestError,
"'Address.user' is not available due to lazy='raise_on_sql'",
lambda: a1.user,
)
def test_raiseload_from_eager_load(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
Dingaling, dingalings = self.classes.Dingaling, self.tables.dingalings
self.mapper_registry.map_imperatively(Dingaling, dingalings)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties=dict(dingaling=relationship(Dingaling)),
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(addresses=relationship(Address)),
)
q = (
fixture_session()
.query(User)
.options(joinedload(User.addresses).raiseload("*"))
.filter_by(id=7)
)
u1 = q.first()
assert "addresses" in u1.__dict__
with expect_raises_message(
sa.exc.InvalidRequestError,
"'Address.dingaling' is not available due to lazy='raise'",
):
u1.addresses[0].dingaling
def test_raiseload_wildcard_all_classes_option(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(addresses=relationship(Address, backref="user")),
)
q = (
fixture_session()
.query(User, Address)
.join(Address, User.id == Address.user_id)
)
u1, a1 = q.options(sa.orm.raiseload("*")).filter(User.id == 7).first()
assert_raises_message(
sa.exc.InvalidRequestError,
"'User.addresses' is not available due to lazy='raise'",
lambda: u1.addresses,
)
assert_raises_message(
sa.exc.InvalidRequestError,
"'Address.user' is not available due to lazy='raise'",
lambda: a1.user,
)
# columns still work
eq_(u1.id, 7)
eq_(a1.id, 1)
def test_raiseload_wildcard_specific_class_option(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(addresses=relationship(Address, backref="user")),
)
q = (
fixture_session()
.query(User, Address)
.join(Address, User.id == Address.user_id)
)
u1, a1 = (
q.options(sa.orm.Load(Address).raiseload("*"))
.filter(User.id == 7)
.first()
)
# User doesn't raise
def go():
eq_(u1.addresses, [a1])
self.assert_sql_count(testing.db, go, 1)
# Address does
assert_raises_message(
sa.exc.InvalidRequestError,
"'Address.user' is not available due to lazy='raise'",
lambda: a1.user,
)
# columns still work
eq_(u1.id, 7)
eq_(a1.id, 1)
| RaiseLoadTest |
python | django__django | django/db/models/fields/__init__.py | {
"start": 92009,
"end": 92764
} | class ____(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs.setdefault("max_length", 200)
super().__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 200:
del kwargs["max_length"]
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
return super().formfield(
**{
"form_class": forms.URLField,
**kwargs,
}
)
| URLField |
python | kamyu104__LeetCode-Solutions | Python/smallest-string-starting-from-leaf.py | {
"start": 226,
"end": 829
} | class ____(object):
def smallestFromLeaf(self, root):
"""
:type root: TreeNode
:rtype: str
"""
def dfs(node, candidate, result):
if not node:
return
candidate.append(chr(ord('a') + node.val))
if not node.left and not node.right:
result[0] = min(result[0], "".join(reversed(candidate)))
dfs(node.left, candidate, result)
dfs(node.right, candidate, result)
candidate.pop()
result = ["~"]
dfs(root, [], result)
return result[0]
| Solution |
python | huggingface__transformers | tests/models/olmo3/test_modeling_olmo3.py | {
"start": 1417,
"end": 1546
} | class ____(CausalLMModelTester):
if is_torch_available():
base_model_class = Olmo3Model
@require_torch
| Olmo3ModelTester |
python | pytorch__pytorch | test/dynamo/test_regional_inductor.py | {
"start": 2867,
"end": 16138
} | class ____(torch._inductor.test_case.TestCase):
@parametrize("serialize", [False, True])
def test_simple(self, serialize):
def fn(x, y):
sin = torch.sin(x)
with fx_traceback.annotate({"compile_with_inductor": 0}):
mul = sin * y
add = mul + 1
return torch.sin(add)
opt_fn = torch.compile(
fn, backend=aot_eager_regional_inductor(serialize=serialize), fullgraph=True
)
x = torch.randn(10, requires_grad=True)
y = torch.randn(10, requires_grad=True)
# Check that inductor compilation is called twice
_, codes = run_fw_bw_and_get_code(lambda: opt_fn(x, y))
self.assertEqual(len(codes), 2)
@parametrize("serialize", [False, True])
def test_repeated_blocks(self, serialize):
def fn(x, y):
sin = torch.sin(x)
with fx_traceback.annotate({"compile_with_inductor": 0}):
mul = sin * y
add = mul + 1
return torch.sin(add)
class Mod(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
a = fn(x, y)
return fn(a, y)
mod = Mod()
opt_mod = torch.compile(
mod,
backend=aot_eager_regional_inductor(serialize=serialize),
fullgraph=True,
)
x = torch.randn(10, requires_grad=True)
y = torch.randn(10, requires_grad=True)
# Check that inductor compilation is called 4 times
# there will be 2 partitions in the fwd and 2 in the bwd, totalling 4
_, codes = run_fw_bw_and_get_code(lambda: opt_mod(x, y))
self.assertEqual(len(codes), 4)
@parametrize("serialize", [False, True])
def test_invoke_subgraph(self, serialize):
# Checks that get_attr nodes custom metadata is propagated
@torch.compiler.nested_compile_region
def gn(x):
return torch.sin(x)
def fn(x):
x = x + 1
with fx_traceback.annotate({"compile_with_inductor": 0}):
z = gn(x)
return torch.sigmoid(z)
opt_fn = torch.compile(
fn, backend=aot_eager_regional_inductor(serialize=serialize), fullgraph=True
)
x = torch.randn(10, requires_grad=True)
_, codes = run_fw_bw_and_get_code(lambda: opt_fn(x))
self.assertEqual(len(codes), 2)
@parametrize("serialize", [False, True])
def test_invoke_subgraph_inner(self, serialize):
# Checks that the inductor regions are searched recursively.
@torch.compiler.nested_compile_region
def gn(x):
with fx_traceback.annotate({"compile_with_inductor": 0}):
return torch.sin(x)
def fn(x):
x = x + 1
x = gn(x)
x = x + 1
x = gn(x)
return torch.sigmoid(x)
opt_fn = torch.compile(
fn, backend=aot_eager_regional_inductor(serialize=serialize), fullgraph=True
)
x = torch.randn(10, requires_grad=True)
_, codes = run_fw_bw_and_get_code(lambda: opt_fn(x))
# the invoke_subgraph is called twice - but the inside code is compiled
# once - so in total 2 (1 fwd + 1 bwd)
self.assertEqual(len(codes), 2)
@requires_cuda_and_triton
@parametrize("serialize", [False, True])
def test_flex_attention(self, serialize):
def _squared(score, b, h, m, n):
return score * score
def mask_mod(b, h, q, k):
return q >= 0
a = 12
b = 64
block_mask = create_block_mask(mask_mod, None, None, a * b, a * b)
def fn(x):
x = torch.sin(x)
with fx_traceback.annotate({"compile_with_inductor": 0}):
x = flex_attention(x, x, x, block_mask=block_mask, score_mod=_squared)
return torch.cos(x)
x = torch.randn(
1,
1,
a * b,
b,
dtype=torch.bfloat16,
device="cuda",
requires_grad=True,
)
opt_fn = torch.compile(
fn,
backend=aot_eager_regional_inductor(serialize),
fullgraph=True,
)
_, codes = run_fw_bw_and_get_code(lambda: opt_fn(x))
# flex in forward and flex_backward in backward
self.assertEqual(len(codes), 2)
@parametrize("serialize", [False, True])
def test_max_autotune_no_cudagraphs(self, serialize):
"""Test that max-autotune-no-cudagraphs options are properly applied via annotations."""
import torch._inductor.config as inductor_config
def fn(x, y):
sin = torch.sin(x)
# Use annotation API to specify inductor configs
with fx_traceback.annotate(
{
"compile_with_inductor": {
"inductor_configs": {
"max_autotune": True,
"triton.cudagraphs": False,
}
}
}
):
mul = sin * y
add = mul + 1
return torch.sin(add)
# Hook to verify options
original_compile = torch._inductor.standalone_compile
captured_options = []
def verify_options(*args, **kwargs):
options = kwargs.get("options", {})
captured_options.append(options)
# Verify config is set as expected from explicit options
assert inductor_config.max_autotune, "max_autotune should be True"
assert not inductor_config.triton.cudagraphs, (
"triton.cudagraphs should be False"
)
return original_compile(*args, **kwargs)
torch._inductor.standalone_compile = verify_options
try:
# Use backend without options - they come from annotations
backend = aot_eager_regional_inductor(serialize=serialize)
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
x = torch.randn(10, requires_grad=True)
y = torch.randn(10, requires_grad=True)
# Run and check that options were passed
_, codes = run_fw_bw_and_get_code(lambda: opt_fn(x, y))
self.assertEqual(len(codes), 2)
# Verify that compilation happened
self.assertTrue(
len(captured_options) > 0, "Compilation should have occurred"
)
finally:
torch._inductor.standalone_compile = original_compile
def test_annotation_inductor_configs(self):
"""Test that inductor_configs can be passed through annotation API."""
import torch._inductor.config as inductor_config
def fn_with_annotation_configs(x, y):
# New annotation format with inductor_configs
with fx_traceback.annotate(
{
"compile_with_inductor": {
"inductor_configs": {
"max_autotune": True,
"triton.cudagraphs": False,
}
}
}
):
return torch.matmul(x, y) + 1
# Capture config during compilation
config_snapshots = []
original_compile = torch._inductor.standalone_compile
def capture_config(*args, **kwargs):
config_snapshots.append(
{
"max_autotune": inductor_config.max_autotune,
"triton.cudagraphs": inductor_config.triton.cudagraphs,
}
)
return original_compile(*args, **kwargs)
torch._inductor.standalone_compile = capture_config
try:
backend = aot_eager_regional_inductor()
opt_fn = torch.compile(
fn_with_annotation_configs, backend=backend, fullgraph=True
)
x = torch.randn(32, 32, requires_grad=True)
y = torch.randn(32, 32, requires_grad=True)
# Run forward and backward
result = opt_fn(x, y)
result.sum().backward()
self.assertTrue(len(config_snapshots) > 0, "No compilation occurred")
for snapshot in config_snapshots:
self.assertEqual(snapshot["max_autotune"], True)
self.assertEqual(snapshot["triton.cudagraphs"], False)
finally:
torch._inductor.standalone_compile = original_compile
def test_invalid_inductor_config(self):
"""Test that invalid inductor config keys are caught with a clear error."""
def fn(x, y):
with fx_traceback.annotate(
{
"compile_with_inductor": {
"inductor_configs": {
"invalid_config_key": True,
}
}
}
):
return x * y + 1
backend = aot_eager_regional_inductor()
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
x = torch.randn(10, requires_grad=True)
y = torch.randn(10, requires_grad=True)
with self.assertRaisesRegex(
torch._dynamo.exc.BackendCompilerFailed,
"Invalid inductor config key 'invalid_config_key'",
):
opt_fn(x, y)
@requires_cuda_and_triton
@parametrize("serialize", [False, True])
def test_selective_ac_flex(self, serialize):
class FlexAttentionModule(torch.nn.Module):
def __init__(self, hidden_size, num_heads):
super().__init__()
self.hidden_size = hidden_size
self.num_heads = num_heads
self.head_dim = hidden_size // num_heads
# In-projections (query, key, value)
self.q_proj = torch.nn.Linear(hidden_size, hidden_size)
self.k_proj = torch.nn.Linear(hidden_size, hidden_size)
self.v_proj = torch.nn.Linear(hidden_size, hidden_size)
# Out-projection
self.out_proj = torch.nn.Linear(hidden_size, hidden_size)
def forward(self, x):
batch_size, seq_len, _ = x.size()
# Project queries, keys, and values
q = (
self.q_proj(x)
.view(batch_size, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
k = (
self.k_proj(x)
.view(batch_size, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
v = (
self.v_proj(x)
.view(batch_size, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
# Apply flex attention
with torch.fx.traceback.annotate({"compile_with_inductor": 0}):
attn_output = flex_attention(
q,
k,
v,
)
# Reshape output
attn_output = (
attn_output.transpose(1, 2)
.contiguous()
.view(batch_size, seq_len, self.hidden_size)
)
# Out projection
output = self.out_proj(attn_output)
return output
from torch.utils.checkpoint import (
checkpoint,
create_selective_checkpoint_contexts,
)
ops_to_save = [
torch.ops.aten.mm.default,
]
context_fn = functools.partial(
create_selective_checkpoint_contexts, ops_to_save
)
# Define a model that uses FlexAttention with selective activation checkpointing
class SacModule(torch.nn.Module):
def __init__(self, hidden_size, num_heads, context_fn):
super().__init__()
self.flex_attn = FlexAttentionModule(hidden_size, num_heads)
self.context_fn = context_fn
def forward(self, x):
def flex_attn_fn(x):
return self.flex_attn(x)
output = checkpoint(
flex_attn_fn,
x,
use_reentrant=False,
context_fn=self.context_fn,
)
return output
flex_module = SacModule(hidden_size=512, num_heads=8, context_fn=context_fn).to(
"cuda", dtype=torch.bfloat16
)
x = torch.ones(8, 1024, 512, device="cuda", dtype=torch.bfloat16)
compiled_module = torch.compile(
flex_module, backend=aot_eager_regional_inductor(), fullgraph=True
)
_, codes = run_fw_bw_and_get_code(lambda: compiled_module(x))
# flex in forward and flex_backward in backward
self.assertEqual(len(codes), 2)
@skipIfTorchDynamo("Not a suitable dynamo wrapped test")
| RegionalInductorTests |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_resource_slice_list.py | {
"start": 383,
"end": 7109
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1beta1ResourceSlice]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ResourceSliceList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1beta1ResourceSliceList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1ResourceSliceList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1ResourceSliceList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1ResourceSliceList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1beta1ResourceSliceList. # noqa: E501
Items is the list of resource ResourceSlices. # noqa: E501
:return: The items of this V1beta1ResourceSliceList. # noqa: E501
:rtype: list[V1beta1ResourceSlice]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1beta1ResourceSliceList.
Items is the list of resource ResourceSlices. # noqa: E501
:param items: The items of this V1beta1ResourceSliceList. # noqa: E501
:type: list[V1beta1ResourceSlice]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1beta1ResourceSliceList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1ResourceSliceList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1ResourceSliceList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1ResourceSliceList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1ResourceSliceList. # noqa: E501
:return: The metadata of this V1beta1ResourceSliceList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1ResourceSliceList.
:param metadata: The metadata of this V1beta1ResourceSliceList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ResourceSliceList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ResourceSliceList):
return True
return self.to_dict() != other.to_dict()
| V1beta1ResourceSliceList |
python | kamyu104__LeetCode-Solutions | Python/maximum-array-hopping-score-i.py | {
"start": 369,
"end": 674
} | class ____(object):
def maxScore(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
dp = [0]*len(nums)
for i in xrange(1, len(nums)):
for j in xrange(i):
dp[i] = max(dp[i], dp[j]+(i-j)*nums[i])
return dp[-1]
| Solution2 |
python | tensorflow__tensorflow | tensorflow/lite/python/lite_flex_test.py | {
"start": 8804,
"end": 12151
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
def _createGraphWithCustomOp(self, opname='CustomAdd'):
custom_opdefs_str = (
'name: \'' + opname + '\' input_arg: {name: \'Input1\' type: DT_FLOAT} '
'input_arg: {name: \'Input2\' type: DT_FLOAT} output_arg: {name: '
'\'Output\' type: DT_FLOAT}')
# Create a graph that has one add op.
new_graph = graph_pb2.GraphDef()
with ops.Graph().as_default():
with session.Session() as sess:
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='input')
out_tensor = in_tensor + in_tensor
inputs = {'x': in_tensor}
outputs = {'z': out_tensor}
new_graph.CopyFrom(sess.graph_def)
# Rename Add op name to opname.
for node in new_graph.node:
if node.op.startswith('Add'):
node.op = opname
del node.attr['T']
# Register custom op defs to import modified graph def.
register_custom_opdefs([custom_opdefs_str])
return (new_graph, inputs, outputs)
def testFlexWithCustomOp(self):
new_graph, inputs, outputs = self._createGraphWithCustomOp(
opname='CustomAdd4')
# Import to load the custom opdef.
saved_model_dir = os.path.join(self.get_temp_dir(), 'model')
with ops.Graph().as_default():
with session.Session() as sess:
import_graph_def(new_graph, name='')
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
converter.target_spec.experimental_select_user_tf_ops = ['CustomAdd4']
tflite_model = converter.convert()
self.assertIn('FlexCustomAdd4', tflite_test_util.get_ops_list(tflite_model))
def testFlexWithDoubleOp(self):
# Create a graph that has one double op.
saved_model_dir = os.path.join(self.get_temp_dir(), 'model2')
with ops.Graph().as_default():
with session.Session() as sess:
in_tensor = array_ops.placeholder(
shape=[1, 4], dtype=dtypes.int32, name='input')
out_tensor = double_op.double(in_tensor)
inputs = {'x': in_tensor}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.target_spec.supported_ops = set([lite.OpsSet.SELECT_TF_OPS])
converter.target_spec.experimental_select_user_tf_ops = ['Double']
tflite_model = converter.convert()
self.assertTrue(tflite_model)
self.assertIn('FlexDouble', tflite_test_util.get_ops_list(tflite_model))
# Check the model works with TensorFlow ops.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.int32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_details = interpreter.get_output_details()
expected_output = np.array([[2.0, 4.0, 6.0, 8.0]], dtype=np.int32)
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertTrue((expected_output == output_data).all())
| WithCustomOpTest |
python | wandb__wandb | wandb/sdk/launch/builder/noop.py | {
"start": 439,
"end": 1900
} | class ____(AbstractBuilder):
"""NoOp builder."""
type = "noop"
def __init__(
self,
builder_config: Dict[str, Any],
environment: AbstractEnvironment,
registry: AbstractRegistry,
) -> None:
"""Initialize a NoOpBuilder."""
self.environment = environment
self.registry = registry
@classmethod
def from_config(
cls,
config: dict,
environment: AbstractEnvironment,
registry: AbstractRegistry,
verify: bool = True,
) -> "AbstractBuilder":
"""Create a noop builder from a config."""
return cls(config, environment, registry)
async def verify(self) -> None:
"""Verify the builder."""
raise LaunchError("Attempted to verify noop builder.")
async def build_image(
self,
launch_project: LaunchProject,
entrypoint: EntryPoint,
job_tracker: Optional[JobAndRunStatusTracker] = None,
) -> str:
"""Build the image.
For this we raise a launch error since it can't build.
"""
raise LaunchError(
"Attempted build with noop builder. Specify a builder in your launch config at ~/.config/wandb/launch-config.yaml.\n"
"Note: Jobs sourced from git repos and code artifacts require a builder, while jobs sourced from Docker images do not.\n"
"See https://docs.wandb.ai/guides/launch/create-job."
)
| NoOpBuilder |
python | apache__airflow | providers/apache/spark/tests/unit/apache/spark/operators/test_spark_jdbc.py | {
"start": 1038,
"end": 7718
} | class ____:
_config = {
"spark_app_name": "{{ task_instance.task_id }}",
"spark_conf": {"parquet.compression": "SNAPPY"},
"spark_files": "hive-site.xml",
"spark_py_files": "sample_library.py",
"spark_jars": "parquet.jar",
"num_executors": 4,
"executor_cores": 4,
"executor_memory": "22g",
"driver_memory": "3g",
"verbose": True,
"keytab": "privileged_user.keytab",
"principal": "user/spark@airflow.org",
"cmd_type": "spark_to_jdbc",
"jdbc_table": "tableMcTableFace",
"jdbc_driver": "org.postgresql.Driver",
"metastore_table": "hiveMcHiveFace",
"jdbc_truncate": False,
"save_mode": "append",
"save_format": "parquet",
"batch_size": 100,
"fetch_size": 200,
"num_partitions": 10,
"partition_column": "columnMcColumnFace",
"lower_bound": "10",
"upper_bound": "20",
"create_table_column_types": "columnMcColumnFace INTEGER(100), name CHAR(64),comments VARCHAR(1024)",
"use_krb5ccache": True,
}
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
def test_execute(self):
# Given / When
spark_conn_id = "spark-default"
jdbc_conn_id = "jdbc-default"
operator = SparkJDBCOperator(task_id="spark_jdbc_job", dag=self.dag, **self._config)
# Then
expected_dict = {
"spark_app_name": "{{ task_instance.task_id }}",
"spark_conf": {"parquet.compression": "SNAPPY"},
"spark_files": "hive-site.xml",
"spark_py_files": "sample_library.py",
"spark_jars": "parquet.jar",
"num_executors": 4,
"executor_cores": 4,
"executor_memory": "22g",
"driver_memory": "3g",
"verbose": True,
"keytab": "privileged_user.keytab",
"principal": "user/spark@airflow.org",
"cmd_type": "spark_to_jdbc",
"jdbc_table": "tableMcTableFace",
"jdbc_driver": "org.postgresql.Driver",
"metastore_table": "hiveMcHiveFace",
"jdbc_truncate": False,
"save_mode": "append",
"save_format": "parquet",
"batch_size": 100,
"fetch_size": 200,
"num_partitions": 10,
"partition_column": "columnMcColumnFace",
"lower_bound": "10",
"upper_bound": "20",
"create_table_column_types": "columnMcColumnFace INTEGER(100), name CHAR(64),"
"comments VARCHAR(1024)",
"use_krb5ccache": True,
}
assert spark_conn_id == operator._spark_conn_id
assert jdbc_conn_id == operator._jdbc_conn_id
assert expected_dict["spark_app_name"] == operator._spark_app_name
assert expected_dict["spark_conf"] == operator._spark_conf
assert expected_dict["spark_files"] == operator._spark_files
assert expected_dict["spark_py_files"] == operator._spark_py_files
assert expected_dict["spark_jars"] == operator._spark_jars
assert expected_dict["num_executors"] == operator._num_executors
assert expected_dict["executor_cores"] == operator._executor_cores
assert expected_dict["executor_memory"] == operator._executor_memory
assert expected_dict["driver_memory"] == operator._driver_memory
assert expected_dict["verbose"] == operator._verbose
assert expected_dict["keytab"] == operator.keytab
assert expected_dict["principal"] == operator.principal
assert expected_dict["cmd_type"] == operator._cmd_type
assert expected_dict["jdbc_table"] == operator._jdbc_table
assert expected_dict["jdbc_driver"] == operator._jdbc_driver
assert expected_dict["metastore_table"] == operator._metastore_table
assert expected_dict["jdbc_truncate"] == operator._jdbc_truncate
assert expected_dict["save_mode"] == operator._save_mode
assert expected_dict["save_format"] == operator._save_format
assert expected_dict["batch_size"] == operator._batch_size
assert expected_dict["fetch_size"] == operator._fetch_size
assert expected_dict["num_partitions"] == operator._num_partitions
assert expected_dict["partition_column"] == operator._partition_column
assert expected_dict["lower_bound"] == operator._lower_bound
assert expected_dict["upper_bound"] == operator._upper_bound
assert expected_dict["create_table_column_types"] == operator._create_table_column_types
assert expected_dict["use_krb5ccache"] == operator._use_krb5ccache
@pytest.mark.db_test
def test_templating_with_create_task_instance_of_operator(
self, create_task_instance_of_operator, session
):
ti = create_task_instance_of_operator(
SparkJDBCOperator,
# Templated fields
application="{{ 'application' }}",
conf="{{ 'conf' }}",
files="{{ 'files' }}",
py_files="{{ 'py-files' }}",
jars="{{ 'jars' }}",
driver_class_path="{{ 'driver_class_path' }}",
packages="{{ 'packages' }}",
exclude_packages="{{ 'exclude_packages' }}",
keytab="{{ 'keytab' }}",
principal="{{ 'principal' }}",
proxy_user="{{ 'proxy_user' }}",
name="{{ 'name' }}",
application_args="{{ 'application_args' }}",
env_vars="{{ 'env_vars' }}",
properties_file="{{ 'properties_file' }}",
# Other parameters
dag_id="test_template_body_templating_dag",
task_id="test_template_body_templating_task",
)
session.add(ti)
session.commit()
ti.render_templates()
task: SparkJDBCOperator = ti.task
assert task.application == "application"
assert task.conf == "conf"
assert task.files == "files"
assert task.py_files == "py-files"
assert task.jars == "jars"
assert task.driver_class_path == "driver_class_path"
assert task.packages == "packages"
assert task.exclude_packages == "exclude_packages"
assert task.keytab == "keytab"
assert task.principal == "principal"
assert task.proxy_user == "proxy_user"
assert task.name == "name"
assert task.application_args == "application_args"
assert task.env_vars == "env_vars"
assert task.properties_file == "properties_file"
| TestSparkJDBCOperator |
python | getsentry__sentry | src/sentry/statistical_detectors/detector.py | {
"start": 1659,
"end": 1861
} | class ____:
type: TrendType
score: float
payload: DetectorPayload
state: DetectorState | None = None
regression_group: RegressionGroup | None = None
@dataclass(frozen=True)
| TrendBundle |
python | jina-ai__jina | jina/serve/stream/helper.py | {
"start": 535,
"end": 3486
} | class ____:
"""Iterator to allow async iteration of blocking/non-blocking iterator from the Client"""
def __init__(
self,
iterator: Union[Iterator, AsyncIterator],
request_counter: Optional[_RequestsCounter] = None,
prefetch: int = 0,
iterate_sync_in_thread: bool = True,
) -> None:
"""Async request iterator
:param iterator: request iterator
:param request_counter: counter of the numbers of request being handled at a given moment
:param prefetch: The max amount of requests to be handled at a given moment (0 disables feature)
:param iterate_sync_in_thread: if True, blocking iterators will call __next__ in a Thread.
"""
self.iterator = iterator
self._request_counter = request_counter
self._prefetch = prefetch
self._iterate_sync_in_thread = iterate_sync_in_thread
def iterator__next__(self):
"""
Executed inside a `ThreadPoolExecutor` via `loop.run_in_executor` to avoid following exception.
"StopIteration interacts badly with generators and cannot be raised into a Future"
:return: next request or None
"""
try:
return self.iterator.__next__()
except StopIteration:
return None
def __aiter__(self):
return self
async def __anext__(self):
if isinstance(self.iterator, Iterator):
"""
An `Iterator` indicates "blocking" code, which might block all tasks in the event loop.
Hence we iterate in the default executor provided by asyncio.
"""
if not self._iterate_sync_in_thread:
async def _get_next():
try:
req = self.iterator.__next__()
except StopIteration:
req = None
return req
request = await asyncio.create_task(_get_next())
else:
request = await get_or_reuse_loop().run_in_executor(
None, self.iterator__next__
)
"""
`iterator.__next__` can be executed directly and that'd raise `StopIteration` in the executor,
which raises the following exception while chaining states in futures.
"StopIteration interacts badly with generators and cannot be raised into a Future"
To avoid that, we handle the raise by a `return None`
"""
if request is None:
raise StopAsyncIteration
elif isinstance(self.iterator, AsyncIterator):
# we assume that `AsyncIterator` doesn't block the event loop
request = await self.iterator.__anext__()
if self._prefetch > 0:
while self._request_counter.count >= self._prefetch:
await asyncio.sleep(0)
return request
| AsyncRequestsIterator |
python | huggingface__transformers | src/transformers/models/splinter/modeling_splinter.py | {
"start": 17464,
"end": 18078
} | class ____(nn.Module):
def __init__(self, input_dim, output_dim, hidden_act="gelu"):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.dense = nn.Linear(self.input_dim, self.output_dim)
self.act_fn = ACT2FN[hidden_act]
self.LayerNorm = nn.LayerNorm(self.output_dim)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(inputs)
hidden_states = self.act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
| SplinterFullyConnectedLayer |
python | keras-team__keras | keras/src/constraints/constraints.py | {
"start": 2359,
"end": 3865
} | class ____(Constraint):
"""MaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have a norm less than or equal to a desired value.
Also available via the shortcut function `keras.constraints.max_norm`.
Args:
max_value: the maximum norm value for the incoming weights.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, max_value=2, axis=0):
self.max_value = max_value
self.axis = axis
def __call__(self, w):
w = backend.convert_to_tensor(w)
norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))
desired = ops.clip(norms, 0, self.max_value)
return ops.cast(w, norms.dtype) * (
desired / (backend.epsilon() + norms)
)
def get_config(self):
return {"max_value": self.max_value, "axis": self.axis}
@keras_export(["keras.constraints.NonNeg", "keras.constraints.non_neg"])
| MaxNorm |
python | numba__numba | numba/tests/test_alignment.py | {
"start": 147,
"end": 1086
} | class ____(TestCase):
def test_record_alignment(self):
rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')], align=True)
rec = from_dtype(rec_dtype)
@njit((rec[:],))
def foo(a):
for i in range(a.size):
a[i].a = a[i].b
a_recarray = np.recarray(3, dtype=rec_dtype)
for i in range(a_recarray.size):
a_rec = a_recarray[i]
a_rec.a = 0
a_rec.b = (i + 1) * 123
foo(a_recarray)
np.testing.assert_equal(a_recarray.a, a_recarray.b)
def test_record_misaligned(self):
rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')])
rec = from_dtype(rec_dtype)
# Unlike the CUDA target, this will not generate an error
@njit((rec[:],))
def foo(a):
for i in range(a.size):
a[i].a = a[i].b
if __name__ == '__main__':
unittest.main()
| TestAlignment |
python | mamba-org__mamba | micromamba/tests/test_config.py | {
"start": 24064,
"end": 30736
} | class ____:
@staticmethod
def _roundtrip(rc_file_path, rc_contents):
rc_file_path.write_text(rc_contents)
return config("list", "--json", "--no-env", "--rc-file", rc_file_path)
@classmethod
def _roundtrip_attr(cls, rc_file_path, attr, config_expr):
return cls._roundtrip(rc_file_path, f"{attr}: {config_expr}")[attr]
@pytest.mark.parametrize("yaml_quote", ["", '"'])
def test_expandvars_conda(self, monkeypatch, tmpdir_factory, rc_file, yaml_quote):
"""
Environment variables should be expanded in settings that have expandvars=True.
Test copied from Conda.
"""
def _expandvars(attr, config_expr, env_value):
config_expr = config_expr.replace("'", yaml_quote)
monkeypatch.setenv("TEST_VAR", env_value)
return self._roundtrip_attr(rc_file, attr, config_expr)
ssl_verify = _expandvars("ssl_verify", "${TEST_VAR}", "yes")
assert ssl_verify
for attr, env_value in [
# Not supported by Micromamba
# ("client_ssl_cert", "foo"),
# ("client_ssl_cert_key", "foo"),
("channel_alias", "http://foo"),
]:
value = _expandvars(attr, "${TEST_VAR}", env_value)
assert value == env_value
for attr in [
# Not supported by Micromamba
# "migrated_custom_channels",
# "proxy_servers",
]:
value = _expandvars(attr, "{'x': '${TEST_VAR}'}", "foo")
assert value == {"x": "foo"}
for attr in [
"channels",
"default_channels",
]:
value = _expandvars(attr, "['${TEST_VAR}']", "foo")
assert value == ["foo"]
custom_channels = _expandvars("custom_channels", "{'x': '${TEST_VAR}'}", "http://foo")
assert custom_channels["x"] == "http://foo"
custom_multichannels = _expandvars(
"custom_multichannels", "{'x': ['${TEST_VAR}']}", "http://foo"
)
assert len(custom_multichannels["x"]) == 1
assert custom_multichannels["x"][0] == "http://foo"
envs_dirs = _expandvars("envs_dirs", "['${TEST_VAR}']", "/foo")
assert any("foo" in d for d in envs_dirs)
pkgs_dirs = _expandvars("pkgs_dirs", "['${TEST_VAR}']", "/foo")
assert any("foo" in d for d in pkgs_dirs)
@pytest.mark.parametrize(
"inp,outp",
[
# Tests copied from: cpython/Lib/test/test_genericpath.py
("$", "$"),
("$$", "$$"),
("foo", "foo"),
("${foo}bar1", "barbar1"),
("$[foo]bar", "$[foo]bar"),
("$bar bar", "$bar bar"),
("$?bar", "$?bar"),
("$foo}bar", "bar}bar"),
("${foo", "${foo"),
# Not supported by Micromamba
# ("${{foo}}", "baz1"),
# *(
# [
# ("%", "%"),
# ("foo", "foo"),
# ("$foo bar", "bar bar"),
# ("${foo}bar", "barbar"),
# ("$[foo]bar", "$[foo]bar"),
# ("$bar bar", "$bar bar"),
# ("$?bar", "$?bar"),
# ("$foo}bar", "bar}bar"),
# ("${foo", "${foo"),
# ("${{foo}}", "baz1}"),
# ("$foo$foo", "barbar"),
# ("$bar$bar", "$bar$bar"),
# ("%foo% bar", "bar bar"),
# ("%foo%bar", "barbar"),
# ("%foo%%foo%", "barbar"),
# ("%%foo%%foo%foo%", "%foo%foobar"),
# ("%?bar%", "%?bar%"),
# ("%foo%%bar", "bar%bar"),
# ("'%foo%'%bar", "'%foo%'%bar"),
# ("bar'%foo%", "bar'%foo%"),
# ("'$foo'$foo", "'$foo'bar"),
# ("'$foo$foo", "'$foo$foo"),
# ]
# if platform.system() == "Windows"
# else []
# ),
# Our tests:
("$bar$bar", "$bar$bar"),
("$foo$foo", "barbar"),
("$foo$$foo bar", "bar$bar bar"),
("$foo bar", "bar bar"),
],
)
@pytest.mark.parametrize("yaml_quote", ["", '"', "'"])
def test_expandvars_cpython(self, monkeypatch, rc_file, inp, outp, yaml_quote):
monkeypatch.setenv("foo", "bar", True)
monkeypatch.setenv("{foo", "baz1", True)
monkeypatch.setenv("{foo}", "baz2", True)
assert outp == self._roundtrip_attr(rc_file, "channel_alias", yaml_quote + inp + yaml_quote)
@pytest.mark.parametrize(
"inp,outp",
[
(
'x", "y',
[
"${x",
"y}",
],
),
("x\ny", ["${x y}"]),
],
)
def test_envsubst_yaml_mixup(self, monkeypatch, rc_file, inp, outp):
assert self._roundtrip_attr(rc_file, "channels", f'["${{{inp}}}"]') == outp
def test_envsubst_empty_var(self, monkeypatch, rc_file):
monkeypatch.setenv("foo", "", True)
# Windows does not support empty environment variables
expected = "${foo}" if platform.system() == "Windows" else ""
assert self._roundtrip_attr(rc_file, "channel_alias", "'${foo}'") == expected
def test_envsubst_windows_problem(self, monkeypatch, rc_file):
# Real-world problematic .condarc file
condarc = textwrap.dedent(
"""
channel_alias: https://xxxxxxxxxxxxxxxxxxxx.com/t/${CONDA_API_KEY}/get
channels:
- xxxxxxxxxxx
- yyyyyyyyyyyy
- conda-forge
custom_channels:
yyyyyyyyyyyy: https://${CONDA_CHANNEL_UPLOAD_USER}:${CONDA_CHANNEL_UPLOAD_PASSWORD}@xxxxxxxxxxxxxxx.com
custom_multichannels:
conda-forge:
- https://conda.anaconda.org/conda-forge
"""
)
monkeypatch.setenv("CONDA_API_KEY", "kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk", True)
monkeypatch.setenv("CONDA_CHANNEL_UPLOAD_USER", "uuuuuuuuu", True)
monkeypatch.setenv("CONDA_CHANNEL_UPLOAD_PASSWORD", "pppppppppppppppppppp", True)
out = self._roundtrip(rc_file, condarc)
assert (
out["channel_alias"]
== "https://xxxxxxxxxxxxxxxxxxxx.com/t/kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk/get"
)
assert (
out["custom_channels"]["yyyyyyyyyyyy"]
== "https://uuuuuuuuu:pppppppppppppppppppp@xxxxxxxxxxxxxxx.com"
)
| TestConfigExpandVars |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 258426,
"end": 258920
} | class ____(sgqlc.types.Input):
"""Ways in which lists of package files can be ordered upon return."""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(PackageFileOrderField, graphql_name="field")
"""The field in which to order package files by."""
direction = sgqlc.types.Field(OrderDirection, graphql_name="direction")
"""The direction in which to order package files by the specified
field.
"""
| PackageFileOrder |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ranges.py | {
"start": 30578,
"end": 30709
} | class ____(AbstractSingleRange[Decimal]):
"""Represent the PostgreSQL NUMRANGE type."""
__visit_name__ = "NUMRANGE"
| NUMRANGE |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_layout08.py | {
"start": 315,
"end": 1674
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_layout08.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with user defined layout."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [46317568, 46319488]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_title(
{
"name_formula": "=Sheet1!$A$1",
"data": [1],
"overlay": 1,
"layout": {
"x": 0.359652668416448,
"y": 0.16203703703703703,
},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_mfa_removed_email.py | {
"start": 401,
"end": 1254
} | class ____(View):
def get(self, request: HttpRequest) -> HttpResponse:
if isinstance(request.user, AnonymousUser):
return HttpResponse(status=401)
authenticator = Authenticator(id=0, type=3, user_id=request.user.id) # u2f
email = generate_security_email(
account=request.user,
actor=request.user,
type="mfa-removed",
ip_address=request.META["REMOTE_ADDR"],
context={"authenticator": authenticator, "device_name": "Home computer"},
# make this consistent for acceptance tests
current_datetime=datetime.datetime(2017, 1, 20, 21, 39, 23, 30723),
)
return MailPreview(
html_template=email.html_template, text_template=email.template, context=email.context
).render(request)
| DebugMfaRemovedEmailView |
python | apache__airflow | providers/apache/spark/tests/unit/apache/spark/hooks/test_spark_submit.py | {
"start": 1127,
"end": 44580
} | class ____:
_spark_job_file = "test_application.py"
_config = {
"conf": {"parquet.compression": "SNAPPY"},
"conn_id": "default_spark",
"files": "hive-site.xml",
"py_files": "sample_library.py",
"archives": "sample_archive.zip#SAMPLE",
"jars": "parquet.jar",
"packages": "com.databricks:spark-avro_2.11:3.2.0",
"exclude_packages": "org.bad.dependency:1.0.0",
"repositories": "http://myrepo.org",
"total_executor_cores": 4,
"executor_cores": 4,
"executor_memory": "22g",
"keytab": "privileged_user.keytab",
"principal": "user/spark@airflow.org",
"proxy_user": "sample_user",
"name": "spark-job",
"num_executors": 10,
"verbose": True,
"driver_memory": "3g",
"java_class": "com.foo.bar.AppMain",
"application_args": [
"-f",
"foo",
"--bar",
"bar",
"--with-spaces",
"args should keep embedded spaces",
"baz",
],
"use_krb5ccache": True,
}
@staticmethod
def cmd_args_to_dict(list_cmd):
return_dict = {}
for arg1, arg2 in zip(list_cmd, list_cmd[1:]):
if arg1.startswith("--"):
return_dict[arg1] = arg2
return return_dict
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="spark_yarn_cluster",
conn_type="spark",
host="yarn://yarn-master",
extra='{"queue": "root.etl", "deploy-mode": "cluster"}',
)
)
create_connection_without_db(
Connection(
conn_id="spark_k8s_cluster",
conn_type="spark",
host="k8s://https://k8s-master",
extra='{"deploy-mode": "cluster", "namespace": "mynamespace"}',
)
)
create_connection_without_db(
Connection(
conn_id="spark_k8s_client",
conn_type="spark",
host="k8s://https://k8s-master",
extra='{"deploy-mode": "client", "namespace": "mynamespace"}',
)
)
create_connection_without_db(
Connection(conn_id="spark_default_mesos", conn_type="spark", host="mesos://host", port=5050)
)
create_connection_without_db(
Connection(
conn_id="spark_binary_set",
conn_type="spark",
host="yarn",
extra='{"spark-binary": "spark2-submit"}',
)
)
create_connection_without_db(
Connection(
conn_id="spark_binary_set_spark3_submit",
conn_type="spark",
host="yarn",
extra='{"spark-binary": "spark3-submit"}',
)
)
create_connection_without_db(
Connection(
conn_id="spark_custom_binary_set",
conn_type="spark",
host="yarn",
extra='{"spark-binary": "spark-other-submit"}',
)
)
create_connection_without_db(
Connection(
conn_id="spark_home_set",
conn_type="spark",
host="yarn",
extra='{"spark-home": "/custom/spark-home/path"}',
)
)
create_connection_without_db(
Connection(
conn_id="spark_standalone_cluster",
conn_type="spark",
host="spark://spark-standalone-master:6066",
extra='{"deploy-mode": "cluster"}',
)
)
create_connection_without_db(
Connection(
conn_id="spark_standalone_cluster_client_mode",
conn_type="spark",
host="spark://spark-standalone-master:6066",
extra='{"deploy-mode": "client"}',
)
)
create_connection_without_db(
Connection(
conn_id="spark_principal_set",
conn_type="spark",
host="yarn",
extra='{"principal": "user/spark@airflow.org"}',
)
)
create_connection_without_db(
Connection(
conn_id="spark_keytab_set",
conn_type="spark",
host="yarn",
extra='{"keytab": "privileged_user.keytab"}',
)
)
@pytest.mark.db_test
@patch(
"airflow.providers.apache.spark.hooks.spark_submit.os.getenv", return_value="/tmp/airflow_krb5_ccache"
)
def test_build_spark_submit_command(self, mock_get_env, sdk_connection_not_found):
# Given
hook = SparkSubmitHook(**self._config)
# When
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_build_cmd = [
"spark-submit",
"--master",
"yarn",
"--conf",
"parquet.compression=SNAPPY",
"--files",
"hive-site.xml",
"--py-files",
"sample_library.py",
"--archives",
"sample_archive.zip#SAMPLE",
"--jars",
"parquet.jar",
"--packages",
"com.databricks:spark-avro_2.11:3.2.0",
"--exclude-packages",
"org.bad.dependency:1.0.0",
"--repositories",
"http://myrepo.org",
"--num-executors",
"10",
"--total-executor-cores",
"4",
"--executor-cores",
"4",
"--executor-memory",
"22g",
"--driver-memory",
"3g",
"--keytab",
"privileged_user.keytab",
"--principal",
"user/spark@airflow.org",
"--conf",
"spark.kerberos.renewal.credentials=ccache",
"--proxy-user",
"sample_user",
"--name",
"spark-job",
"--class",
"com.foo.bar.AppMain",
"--verbose",
"test_application.py",
"-f",
"foo",
"--bar",
"bar",
"--with-spaces",
"args should keep embedded spaces",
"baz",
]
assert expected_build_cmd == cmd
mock_get_env.assert_called_with("KRB5CCNAME")
@patch("airflow.configuration.conf.get_mandatory_value")
def test_resolve_spark_submit_env_vars_use_krb5ccache_missing_principal(self, mock_get_madantory_value):
mock_principal = "airflow"
mock_get_madantory_value.return_value = mock_principal
hook = SparkSubmitHook(conn_id="spark_yarn_cluster", principal=None, use_krb5ccache=True)
mock_get_madantory_value.assert_called_with("kerberos", "principal")
assert hook._principal == mock_principal
def test_resolve_spark_submit_env_vars_use_krb5ccache_missing_KRB5CCNAME_env(self):
hook = SparkSubmitHook(
conn_id="spark_yarn_cluster", principal="user/spark@airflow.org", use_krb5ccache=True
)
with pytest.raises(
AirflowException,
match="KRB5CCNAME environment variable required to use ticket ccache is missing.",
):
hook._build_spark_submit_command(self._spark_job_file)
def test_build_track_driver_status_command(self):
# note this function is only relevant for spark setup matching below condition
# 'spark://' in self._connection['master'] and self._connection['deploy_mode'] == 'cluster'
# Given
hook_spark_standalone_cluster = SparkSubmitHook(conn_id="spark_standalone_cluster")
hook_spark_standalone_cluster._driver_id = "driver-20171128111416-0001"
hook_spark_yarn_cluster = SparkSubmitHook(conn_id="spark_yarn_cluster")
hook_spark_yarn_cluster._driver_id = "driver-20171128111417-0001"
# When
build_track_driver_status_spark_standalone_cluster = (
hook_spark_standalone_cluster._build_track_driver_status_command()
)
build_track_driver_status_spark_yarn_cluster = (
hook_spark_yarn_cluster._build_track_driver_status_command()
)
# Then
expected_spark_standalone_cluster = [
"/usr/bin/curl",
"--max-time",
"30",
"http://spark-standalone-master:6066/v1/submissions/status/driver-20171128111416-0001",
]
expected_spark_yarn_cluster = [
"spark-submit",
"--master",
"yarn://yarn-master",
"--status",
"driver-20171128111417-0001",
]
assert expected_spark_standalone_cluster == build_track_driver_status_spark_standalone_cluster
assert expected_spark_yarn_cluster == build_track_driver_status_spark_yarn_cluster
@pytest.mark.db_test
@patch("airflow.providers.apache.spark.hooks.spark_submit.subprocess.Popen")
def test_spark_process_runcmd(self, mock_popen, sdk_connection_not_found):
# Given
mock_popen.return_value.stdout = StringIO("stdout")
mock_popen.return_value.stderr = StringIO("stderr")
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSubmitHook(conn_id="")
hook.submit()
# Then
assert mock_popen.mock_calls[0] == call(
["spark-submit", "--master", "yarn", "--name", "default-name", ""],
stderr=-2,
stdout=-1,
universal_newlines=True,
bufsize=-1,
)
@pytest.mark.db_test
def test_resolve_should_track_driver_status(self, sdk_connection_not_found):
# Given
hook_default = SparkSubmitHook(conn_id="")
hook_spark_yarn_cluster = SparkSubmitHook(conn_id="spark_yarn_cluster")
hook_spark_k8s_cluster = SparkSubmitHook(conn_id="spark_k8s_cluster")
hook_spark_default_mesos = SparkSubmitHook(conn_id="spark_default_mesos")
hook_spark_binary_set = SparkSubmitHook(conn_id="spark_binary_set")
hook_spark_standalone_cluster = SparkSubmitHook(conn_id="spark_standalone_cluster")
# When
should_track_driver_status_default = hook_default._resolve_should_track_driver_status()
should_track_driver_status_spark_yarn_cluster = (
hook_spark_yarn_cluster._resolve_should_track_driver_status()
)
should_track_driver_status_spark_k8s_cluster = (
hook_spark_k8s_cluster._resolve_should_track_driver_status()
)
should_track_driver_status_spark_default_mesos = (
hook_spark_default_mesos._resolve_should_track_driver_status()
)
should_track_driver_status_spark_binary_set = (
hook_spark_binary_set._resolve_should_track_driver_status()
)
should_track_driver_status_spark_standalone_cluster = (
hook_spark_standalone_cluster._resolve_should_track_driver_status()
)
# Then
assert should_track_driver_status_default is False
assert should_track_driver_status_spark_yarn_cluster is False
assert should_track_driver_status_spark_k8s_cluster is False
assert should_track_driver_status_spark_default_mesos is False
assert should_track_driver_status_spark_binary_set is False
assert should_track_driver_status_spark_standalone_cluster is True
@pytest.mark.db_test
def test_resolve_connection_yarn_default(self, sdk_connection_not_found):
# Given
hook = SparkSubmitHook(conn_id="")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"namespace": None,
"principal": None,
"keytab": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "yarn"
@pytest.mark.db_test
def test_resolve_connection_yarn_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id="spark_default")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": "root.default",
"namespace": None,
"principal": None,
"keytab": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "yarn"
assert dict_cmd["--queue"] == "root.default"
def test_resolve_connection_mesos_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id="spark_default_mesos")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"master": "mesos://host:5050",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"namespace": None,
"principal": None,
"keytab": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "mesos://host:5050"
def test_resolve_connection_spark_yarn_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id="spark_yarn_cluster")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": "root.etl",
"namespace": None,
"principal": None,
"keytab": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "yarn://yarn-master"
assert dict_cmd["--queue"] == "root.etl"
assert dict_cmd["--deploy-mode"] == "cluster"
def test_resolve_connection_spark_k8s_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id="spark_k8s_cluster")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"queue": None,
"spark_binary": "spark-submit",
"master": "k8s://https://k8s-master",
"deploy_mode": "cluster",
"namespace": "mynamespace",
"principal": None,
"keytab": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "k8s://https://k8s-master"
assert dict_cmd["--deploy-mode"] == "cluster"
def test_resolve_connection_spark_k8s_cluster_ns_conf(self):
# Given we specify the config option directly
conf = {
"spark.kubernetes.namespace": "airflow",
}
hook = SparkSubmitHook(conn_id="spark_k8s_cluster", conf=conf)
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"queue": None,
"spark_binary": "spark-submit",
"master": "k8s://https://k8s-master",
"deploy_mode": "cluster",
"namespace": "airflow",
"principal": None,
"keytab": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "k8s://https://k8s-master"
assert dict_cmd["--deploy-mode"] == "cluster"
assert dict_cmd["--conf"] == "spark.kubernetes.namespace=airflow"
def test_resolve_connection_spark_binary_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id="spark_binary_set")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "yarn",
"spark_binary": "spark2-submit",
"deploy_mode": None,
"queue": None,
"namespace": None,
"principal": None,
"keytab": None,
}
assert connection == expected_spark_connection
assert cmd[0] == "spark2-submit"
def test_resolve_connection_spark_binary_spark3_submit_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id="spark_binary_set_spark3_submit")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "yarn",
"spark_binary": "spark3-submit",
"deploy_mode": None,
"queue": None,
"namespace": None,
"principal": None,
"keytab": None,
}
assert connection == expected_spark_connection
assert cmd[0] == "spark3-submit"
def test_resolve_connection_custom_spark_binary_allowed_in_hook(self):
SparkSubmitHook(conn_id="spark_binary_set", spark_binary="another-custom-spark-submit")
def test_resolve_connection_spark_binary_extra_not_allowed_runtime_error(self):
with pytest.raises(
ValueError,
match="Please make sure your spark binary is one of the allowed ones and that it is available on the PATH",
):
SparkSubmitHook(conn_id="spark_custom_binary_set")
def test_resolve_connection_spark_home_not_allowed_runtime_error(self):
with pytest.raises(ValueError, match="The `spark-home` extra is not allowed any more"):
SparkSubmitHook(conn_id="spark_home_set")
def test_resolve_connection_spark_binary_default_value_override(self):
# Given
hook = SparkSubmitHook(conn_id="spark_binary_set", spark_binary="spark3-submit")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "yarn",
"spark_binary": "spark3-submit",
"deploy_mode": None,
"queue": None,
"namespace": None,
"principal": None,
"keytab": None,
}
assert connection == expected_spark_connection
assert cmd[0] == "spark3-submit"
@pytest.mark.db_test
def test_resolve_connection_spark_binary_default_value(self):
# Given
hook = SparkSubmitHook(conn_id="spark_default")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": "root.default",
"namespace": None,
"principal": None,
"keytab": None,
}
assert connection == expected_spark_connection
assert cmd[0] == "spark-submit"
def test_resolve_connection_spark_standalone_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id="spark_standalone_cluster")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "spark://spark-standalone-master:6066",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": None,
"namespace": None,
"principal": None,
"keytab": None,
}
assert connection == expected_spark_connection
assert cmd[0] == "spark-submit"
def test_resolve_connection_principal_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id="spark_principal_set")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"namespace": None,
"principal": "user/spark@airflow.org",
"keytab": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--principal"] == "user/spark@airflow.org"
def test_resolve_connection_principal_value_override(self):
# Given
hook = SparkSubmitHook(conn_id="spark_principal_set", principal="will-override")
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"namespace": None,
"principal": "will-override",
"keytab": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--principal"] == "will-override"
@patch(
    "airflow.providers.apache.spark.hooks.spark_submit.SparkSubmitHook._create_keytab_path_from_base64_keytab",
    return_value="privileged_user.keytab",
)
def test_resolve_connection_keytab_set_connection(self, mock_create_keytab_path_from_base64_keytab):
    """A keytab stored on the connection is materialized to a path and used as ``--keytab``.

    The base64-to-file helper is patched to return a fixed path, so this only
    verifies that its return value is wired through to the resolved connection
    and the command line.
    """
    # Given
    hook = SparkSubmitHook(conn_id="spark_keytab_set")
    # When
    connection = hook._resolve_connection()
    cmd = hook._build_spark_submit_command(self._spark_job_file)
    # Then
    dict_cmd = self.cmd_args_to_dict(cmd)
    expected_spark_connection = {
        "master": "yarn",
        "spark_binary": "spark-submit",
        "deploy_mode": None,
        "queue": None,
        "namespace": None,
        "principal": None,
        "keytab": "privileged_user.keytab",
    }
    assert connection == expected_spark_connection
    assert dict_cmd["--keytab"] == "privileged_user.keytab"
@patch(
    "airflow.providers.apache.spark.hooks.spark_submit.SparkSubmitHook._create_keytab_path_from_base64_keytab"
)
def test_resolve_connection_keytab_value_override(self, mock_create_keytab_path_from_base64_keytab):
    """A ``keytab`` constructor argument overrides the connection's base64 keytab."""
    # Given
    hook = SparkSubmitHook(conn_id="spark_keytab_set", keytab="will-override")
    # When
    connection = hook._resolve_connection()
    cmd = hook._build_spark_submit_command(self._spark_job_file)
    # Then
    dict_cmd = self.cmd_args_to_dict(cmd)
    expected_spark_connection = {
        "master": "yarn",
        "spark_binary": "spark-submit",
        "deploy_mode": None,
        "queue": None,
        "namespace": None,
        "principal": None,
        "keytab": "will-override",
    }
    assert connection == expected_spark_connection
    assert dict_cmd["--keytab"] == "will-override"
    # When an explicit keytab path is given, the base64 helper must not run.
    assert not mock_create_keytab_path_from_base64_keytab.called, (
        "Should not call _create_keytab_path_from_base64_keytab"
    )
def test_resolve_spark_submit_env_vars_standalone_client_mode(self):
    """In standalone client mode, env vars are kept for the subprocess environment."""
    # Given
    client_hook = SparkSubmitHook(
        conn_id="spark_standalone_cluster_client_mode", env_vars={"bar": "foo"}
    )
    # When
    client_hook._build_spark_submit_command(self._spark_job_file)
    # Then: the hook records the env vars verbatim
    assert client_hook._env == {"bar": "foo"}
def test_resolve_spark_submit_env_vars_standalone_cluster_mode(self):
    """Env vars are rejected when submitting to a standalone cluster in cluster mode."""
    # Given / When / Then: building the command must raise AirflowException
    with pytest.raises(AirflowException):
        cluster_hook = SparkSubmitHook(
            conn_id="spark_standalone_cluster", env_vars={"bar": "foo"}
        )
        cluster_hook._build_spark_submit_command(self._spark_job_file)
def test_resolve_spark_submit_env_vars_yarn(self):
    """On YARN, env vars become ``spark.yarn.appMasterEnv.*`` conf entries."""
    # Given
    hook = SparkSubmitHook(conn_id="spark_yarn_cluster", env_vars={"bar": "foo"})
    # When
    cmd = hook._build_spark_submit_command(self._spark_job_file)
    # Then
    # Index 4 holds the conf value for the env var (presumably following a
    # --conf flag earlier in the command — TODO confirm against the hook).
    assert cmd[4] == "spark.yarn.appMasterEnv.bar=foo"
    # The raw env vars are also kept for the subprocess environment.
    assert hook._env == {"bar": "foo"}
def test_resolve_spark_submit_env_vars_k8s(self):
    """On Kubernetes, env vars become ``spark.kubernetes.driverEnv.*`` conf entries."""
    # Given
    hook = SparkSubmitHook(conn_id="spark_k8s_cluster", env_vars={"bar": "foo"})
    # When
    cmd = hook._build_spark_submit_command(self._spark_job_file)
    # Then
    # Index 4 holds the conf value for the env var (presumably following a
    # --conf flag earlier in the command — TODO confirm against the hook).
    assert cmd[4] == "spark.kubernetes.driverEnv.bar=foo"
def test_process_spark_submit_log_yarn(self):
    """The YARN application id is parsed out of spark-submit log output."""
    # Given
    hook = SparkSubmitHook(conn_id="spark_yarn_cluster")
    log_lines = [
        "SPARK_MAJOR_VERSION is set to 2, using Spark2",
        "WARN NativeCodeLoader: Unable to load native-hadoop library for your "
        "platform... using builtin-java classes where applicable",
        "WARN DomainSocketFactory: The short-circuit local reads feature cannot "
        "be used because libhadoop cannot be loaded.",
        "INFO Client: Requesting a new application from cluster with 10 NodeManagers",
        "INFO Client: Submitting application application_1486558679801_1820 to ResourceManager",
    ]
    # When
    hook._process_spark_submit_log(log_lines)
    # Then: the id from the "Submitting application ..." line is captured
    assert hook._yarn_application_id == "application_1486558679801_1820"
@pytest.mark.parametrize(
    "pod_name",
    [
        "spark-pi-edf2ace37be7353a958b38733a12f8e6-driver",
        "spark-pi-driver-edf2ace37be7353a958b38733a12f8e6-driver",
    ],
)
def test_process_spark_submit_log_k8s(self, pod_name):
    """Driver pod name, application id and exit code are parsed from k8s submit logs.

    Parametrized over both observed driver pod naming schemes.
    """
    # Given
    hook = SparkSubmitHook(conn_id="spark_k8s_cluster")
    log_lines = [
        "INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:",
        f"pod name: {pod_name}",
        "namespace: default",
        "labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd, spark-role -> driver",
        "pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42",
        "creation time: 2018-03-05T10:26:55Z",
        "service account name: spark",
        "volumes: spark-init-properties, download-jars-volume,download-files-volume, spark-token-2vmlm",
        "node name: N/A",
        "start time: N/A",
        "container images: N/A",
        "phase: Pending",
        "status: []",
        "2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:",
        f"pod name: {pod_name}",
        "namespace: default",
        "Exit code: 999",
    ]
    # When
    hook._process_spark_submit_log(log_lines)
    # Then
    assert hook._kubernetes_driver_pod == pod_name
    # The application id comes from the spark-app-selector label line.
    assert hook._kubernetes_application_id == "spark-465b868ada474bda82ccb84ab2747fcd"
    assert hook._spark_exit_code == 999
def test_process_spark_submit_log_k8s_spark_3(self):
    """The lowercase ``exit code:`` format (Spark 3 on k8s) is also parsed."""
    # Given
    hook = SparkSubmitHook(conn_id="spark_k8s_cluster")
    log_lines = ["exit code: 999"]
    # When
    hook._process_spark_submit_log(log_lines)
    # Then
    assert hook._spark_exit_code == 999
def test_process_spark_client_mode_submit_log_k8s(self):
    """An executor-level exit code must not be mistaken for the driver's exit code.

    The log mentions an executor exiting with 137 (OOM kill), yet the run
    itself completes; the hook's exit code ends up 0 — presumably derived
    from the successful shutdown lines rather than the executor message
    (TODO confirm against the hook's log parsing).
    """
    # Given
    hook = SparkSubmitHook(conn_id="spark_k8s_client")
    log_lines = [
        "INFO - The executor with id 2 exited with exit code 137(SIGKILL, possible container OOM).",
        "...",
        "Pi is roughly 3.141640",
        "SparkContext: Successfully stopped SparkContext",
    ]
    # When
    hook._process_spark_submit_log(log_lines)
    # Then
    assert hook._spark_exit_code == 0
def test_process_spark_submit_log_standalone_cluster(self):
    """The driver id is parsed from REST submission log output."""
    # Given
    hook = SparkSubmitHook(conn_id="spark_standalone_cluster")
    log_lines = [
        "Running Spark using the REST application submission protocol.",
        "17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request "
        "to launch an application in spark://spark-standalone-master:6066",
        "17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully "
        "created as driver-20171128111415-0001. Polling submission state...",
    ]
    # When
    hook._process_spark_submit_log(log_lines)
    # Then: the id from the "Submission successfully created" line is captured
    assert hook._driver_id == "driver-20171128111415-0001"
def test_process_spark_driver_status_log(self):
    """``driverState`` is extracted from a SubmissionStatusResponse log dump."""
    # Given: the JSON response is spread across individual log lines
    hook = SparkSubmitHook(conn_id="spark_standalone_cluster")
    log_lines = [
        "Submitting a request for the status of submission "
        "driver-20171128111415-0001 in spark://spark-standalone-master:6066",
        "17/11/28 11:15:37 INFO RestSubmissionClient: Server responded with SubmissionStatusResponse:",
        "{",
        '"action" : "SubmissionStatusResponse",',
        '"driverState" : "RUNNING",',
        '"serverSparkVersion" : "1.6.0",',
        '"submissionId" : "driver-20171128111415-0001",',
        '"success" : true,',
        '"workerHostPort" : "172.18.0.7:38561",',
        '"workerId" : "worker-20171128110741-172.18.0.7-38561"',
        "}",
    ]
    # When
    hook._process_spark_status_log(log_lines)
    # Then
    assert hook._driver_status == "RUNNING"
def test_process_spark_driver_status_log_bad_response(self):
    """An unparseable status response leaves the driver status unset."""
    # Given: output that does not contain a driverState line
    hook = SparkSubmitHook(conn_id="spark_standalone_cluster")
    log_lines = [
        "curl: Failed to connect to http://spark-standalone-master:6066This is an invalid Spark response",
        "Timed out",
    ]
    # When
    hook._process_spark_status_log(log_lines)
    # Then
    assert hook._driver_status is None
@patch("airflow.providers.apache.spark.hooks.spark_submit.renew_from_kt")
@patch("airflow.providers.apache.spark.hooks.spark_submit.subprocess.Popen")
def test_yarn_process_on_kill(self, mock_popen, mock_renew_from_kt):
    """``on_kill`` issues ``yarn application -kill`` for the tracked app id.

    Covers two scenarios: a plain hook (the kill subprocess inherits the
    hook's env vars merged over os.environ) and a kerberized hook with
    keytab/principal (the kill subprocess env gains KRB5CCNAME).
    """
    # Given: Popen is fully mocked so submit() and the kill call never spawn
    # a real process.
    mock_popen.return_value.stdout = StringIO("stdout")
    mock_popen.return_value.stderr = StringIO("stderr")
    mock_popen.return_value.poll.return_value = None
    mock_popen.return_value.wait.return_value = 0
    log_lines = [
        "SPARK_MAJOR_VERSION is set to 2, using Spark2",
        "WARN NativeCodeLoader: Unable to load native-hadoop library for your "
        "platform... using builtin-java classes where applicable",
        "WARN DomainSocketFactory: The short-circuit local reads feature cannot "
        "be used because libhadoop cannot be loaded.",
        "INFO Client: Requesting a new application from cluster with 10 "
        "NodeManagerapplication_1486558679801_1820s",
        "INFO Client: Submitting application application_1486558679801_1820 to ResourceManager",
    ]
    env = {"PATH": "hadoop/bin"}
    hook = SparkSubmitHook(conn_id="spark_yarn_cluster", env_vars=env)
    hook._process_spark_submit_log(log_lines)
    hook.submit()
    # When
    hook.on_kill()
    # Then: stderr/stdout of -1 are subprocess.PIPE.
    assert (
        call(
            ["yarn", "application", "-kill", "application_1486558679801_1820"],
            env={**os.environ, **env},
            stderr=-1,
            stdout=-1,
        )
        in mock_popen.mock_calls
    )
    # resetting the mock to test kill with keytab & principal
    mock_popen.reset_mock()
    # Given
    hook = SparkSubmitHook(
        conn_id="spark_yarn_cluster", keytab="privileged_user.keytab", principal="user/spark@airflow.org"
    )
    hook._process_spark_submit_log(log_lines)
    hook.submit()
    # When
    hook.on_kill()
    # Then: with kerberos credentials the kill command runs with the
    # Kerberos credential cache path exported.
    expected_env = os.environ.copy()
    expected_env["KRB5CCNAME"] = "/tmp/airflow_krb5_ccache"
    assert (
        call(
            ["yarn", "application", "-kill", "application_1486558679801_1820"],
            env=expected_env,
            stderr=-1,
            stdout=-1,
        )
        in mock_popen.mock_calls
    )
def test_standalone_cluster_process_on_kill(self):
    """The driver-kill command targets the driver id parsed from submit logs."""
    # Given: submit output announcing a successfully created driver
    submission_log = [
        "Running Spark using the REST application submission protocol.",
        "17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request "
        "to launch an application in spark://spark-standalone-master:6066",
        "17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully "
        "created as driver-20171128111415-0001. Polling submission state...",
    ]
    standalone_hook = SparkSubmitHook(conn_id="spark_standalone_cluster")
    standalone_hook._process_spark_submit_log(submission_log)

    # When
    kill_cmd = standalone_hook._build_spark_driver_kill_command()

    # Then: spark-submit --master <rest-url> --kill <driver-id>
    expected_prefix = [
        "spark-submit",
        "--master",
        "spark://spark-standalone-master:6066",
        "--kill",
        "driver-20171128111415-0001",
    ]
    for position, token in enumerate(expected_prefix):
        assert kill_cmd[position] == token
@patch("airflow.providers.cncf.kubernetes.kube_client.get_kube_client")
@patch("airflow.providers.apache.spark.hooks.spark_submit.subprocess.Popen")
def test_k8s_process_on_kill(self, mock_popen, mock_client_method):
    """``on_kill`` deletes the tracked driver pod via the kubernetes client."""
    # Given: Popen is mocked so submit() never spawns a real process, and the
    # kube client is mocked so pod deletion can be asserted.
    mock_popen.return_value.stdout = StringIO("stdout")
    mock_popen.return_value.stderr = StringIO("stderr")
    mock_popen.return_value.poll.return_value = None
    mock_popen.return_value.wait.return_value = 0
    client = mock_client_method.return_value
    hook = SparkSubmitHook(conn_id="spark_k8s_cluster")
    log_lines = [
        "INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:",
        "pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver",
        "namespace: default",
        "labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd, spark-role -> driver",
        "pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42",
        "creation time: 2018-03-05T10:26:55Z",
        "service account name: spark",
        "volumes: spark-init-properties, download-jars-volume,download-files-volume, spark-token-2vmlm",
        "node name: N/A",
        "start time: N/A",
        "container images: N/A",
        "phase: Pending",
        "status: []",
        "2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:",
        "pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver",
        "namespace: default",
        "Exit code: 0",
    ]
    hook._process_spark_submit_log(log_lines)
    hook.submit()
    # When
    hook.on_kill()
    # Then
    import kubernetes
    # "mynamespace" presumably comes from the spark_k8s_cluster test
    # connection config, not from the log above — TODO confirm.
    kwargs = {"pretty": True, "body": kubernetes.client.V1DeleteOptions()}
    client.delete_namespaced_pod.assert_called_once_with(
        "spark-pi-edf2ace37be7353a958b38733a12f8e6-driver", "mynamespace", **kwargs
    )
@pytest.mark.parametrize(
    ("command", "expected"),
    [
        (
            ("spark-submit", "foo", "--bar", "baz", "--password='secret'", "--foo", "bar"),
            "spark-submit foo --bar baz --password='******' --foo bar",
        ),
        (
            ("spark-submit", "foo", "--bar", "baz", "--password='secret'"),
            "spark-submit foo --bar baz --password='******'",
        ),
        (
            ("spark-submit", "foo", "--bar", "baz", '--password="secret"'),
            'spark-submit foo --bar baz --password="******"',
        ),
        (
            ("spark-submit", "foo", "--bar", "baz", "--password=secret"),
            "spark-submit foo --bar baz --password=******",
        ),
        (
            ("spark-submit", "foo", "--bar", "baz", "--password 'secret'"),
            "spark-submit foo --bar baz --password '******'",
        ),
        (
            ("spark-submit", "foo", "--bar", "baz", "--password='sec\"ret'"),
            "spark-submit foo --bar baz --password='******'",
        ),
        (
            ("spark-submit", "foo", "--bar", "baz", '--password="sec\'ret"'),
            'spark-submit foo --bar baz --password="******"',
        ),
        (
            ("spark-submit",),
            "spark-submit",
        ),
    ],
)
@pytest.mark.db_test
def test_masks_passwords(self, command: tuple[str, ...], expected: str) -> None:
    """``_mask_cmd`` redacts ``--password`` values in all quoting styles.

    Note: ``command`` is a tuple of argv tokens (annotation fixed; the
    parametrize cases above all pass tuples, not strings).
    """
    # Given
    hook = SparkSubmitHook()
    # When
    command_masked = hook._mask_cmd(command)
    # Then
    assert command_masked == expected
@pytest.mark.db_test
def test_create_keytab_path_from_base64_keytab_with_decode_exception(self):
    """A payload that is not valid base64 surfaces as an AirflowException."""
    # Given
    keytab_hook = SparkSubmitHook()
    not_base64 = "invalid_base64"
    # When / Then
    with pytest.raises(AirflowException, match="Failed to decode base64 keytab"):
        keytab_hook._create_keytab_path_from_base64_keytab(not_base64, None)
@pytest.mark.db_test
@patch("pathlib.Path.exists")
@patch("builtins.open", new_callable=mock_open)
def test_create_keytab_path_from_base64_keytab_with_write_exception(
    self,
    mock_open,  # NOTE: shadows the imported mock_open factory inside this test
    mock_exists,
):
    """A failure while writing the decoded keytab raises AirflowException."""
    # Given
    hook = SparkSubmitHook()
    keytab_value = b"abcd"
    base64_keytab = base64.b64encode(keytab_value).decode("UTF-8")
    _mock_open = mock_open()
    _mock_open.write.side_effect = Exception("Write failed")
    # No pre-existing keytab file, so the hook attempts a fresh write.
    mock_exists.return_value = False
    # When
    with pytest.raises(AirflowException, match="Failed to save keytab"):
        hook._create_keytab_path_from_base64_keytab(base64_keytab, None)
    # Then
    assert mock_exists.call_count == 2  # called twice (before write, after write)
@pytest.mark.db_test
@patch("airflow.providers.apache.spark.hooks.spark_submit.shutil.move")
@patch("pathlib.Path.exists")
@patch("builtins.open", new_callable=mock_open)
def test_create_keytab_path_from_base64_keytab_with_move_exception(
    self,
    mock_open,  # NOTE: shadows the imported mock_open factory inside this test
    mock_exists,
    mock_move,
):
    """A failure moving the temp keytab into place raises AirflowException."""
    # Given
    hook = SparkSubmitHook()
    keytab_value = b"abcd"
    base64_keytab = base64.b64encode(keytab_value).decode("UTF-8")
    mock_exists.return_value = False
    mock_move.side_effect = Exception("Move failed")
    # When
    with pytest.raises(AirflowException, match="Failed to save keytab"):
        hook._create_keytab_path_from_base64_keytab(base64_keytab, None)
    # Then: the decoded bytes were written once before the move failed.
    mock_open().write.assert_called_once_with(keytab_value)
    mock_move.assert_called_once()
    assert mock_exists.call_count == 2  # called twice (before write, after write)
@pytest.mark.db_test
@patch("airflow.providers.apache.spark.hooks.spark_submit.uuid.uuid4")
@patch("pathlib.Path.resolve")
@patch("airflow.providers.apache.spark.hooks.spark_submit.shutil.move")
@patch("pathlib.Path.exists")
@patch("builtins.open", new_callable=mock_open)
def test_create_keytab_path_from_base64_keytab_with_new_keytab(
    self,
    mock_open,  # NOTE: shadows the imported mock_open factory inside this test
    mock_exists,
    mock_move,
    mock_resolve,
    mock_uuid4,
):
    """Without a principal, a new keytab file is named with a uuid suffix."""
    # Given
    hook = SparkSubmitHook()
    keytab_value = b"abcd"
    base64_keytab = base64.b64encode(keytab_value).decode("UTF-8")
    mock_uuid4.return_value = "uuid"
    mock_resolve.return_value = Path("resolved_path")
    # No existing keytab on disk -> write-then-move path is taken.
    mock_exists.return_value = False
    # When
    keytab = hook._create_keytab_path_from_base64_keytab(base64_keytab, None)
    # Then
    assert keytab == "resolved_path/airflow_keytab-uuid"
    mock_open().write.assert_called_once_with(keytab_value)
    mock_move.assert_called_once()
@pytest.mark.db_test
@patch("pathlib.Path.resolve")
@patch("airflow.providers.apache.spark.hooks.spark_submit.shutil.move")
@patch("pathlib.Path.exists")
@patch("builtins.open", new_callable=mock_open)
def test_create_keytab_path_from_base64_keytab_with_new_keytab_with_principal(
    self,
    mock_open,  # NOTE: shadows the imported mock_open factory inside this test
    mock_exists,
    mock_move,
    mock_resolve,
):
    """With a principal, the new keytab file is named after the principal."""
    # Given
    hook = SparkSubmitHook()
    principal = "user/spark@airflow.org"
    keytab_value = b"abcd"
    base64_keytab = base64.b64encode(keytab_value).decode("UTF-8")
    mock_resolve.return_value = Path("resolved_path")
    # No existing keytab on disk -> write-then-move path is taken.
    mock_exists.return_value = False
    # When
    keytab = hook._create_keytab_path_from_base64_keytab(base64_keytab, principal)
    # Then
    assert keytab == f"resolved_path/airflow_keytab-{principal}"
    mock_open().write.assert_called_once_with(keytab_value)
    mock_move.assert_called_once()
@pytest.mark.db_test
@patch("pathlib.Path.resolve")
@patch("pathlib.Path.exists")
@patch("builtins.open", new_callable=mock_open)
def test_create_keytab_path_from_base64_keytab_with_existing_keytab(
    self,
    mock_open,  # NOTE: shadows the imported mock_open factory inside this test
    mock_exists,
    mock_resolve,
):
    """An existing keytab with identical content is reused, not rewritten."""
    # Given: the target file exists and its bytes match the decoded payload.
    hook = SparkSubmitHook()
    principal = "user/spark@airflow.org"
    keytab_value = b"abcd"
    base64_keytab = base64.b64encode(keytab_value)
    mock_resolve.return_value = Path("resolved_path")
    mock_exists.return_value = True
    _mock_open = mock_open()
    _mock_open.read.return_value = keytab_value
    # When
    keytab = hook._create_keytab_path_from_base64_keytab(base64_keytab.decode("UTF-8"), principal)
    # Then: the file was only opened for reading ("rb") to compare contents.
    assert keytab == f"resolved_path/airflow_keytab-{principal}"
    mock_open.assert_called_with(Path(f"resolved_path/airflow_keytab-{principal}"), "rb")
    _mock_open.read.assert_called_once()
    assert not _mock_open.write.called, "Keytab file should not be written"
| TestSparkSubmitHook |
python | apache__airflow | devel-common/src/tests_common/test_utils/mock_operators.py | {
"start": 1925,
"end": 2216
} | class ____(BaseOperator):
"""
Empty test operator with extra link.
Example of an Operator that has an extra operator link
and will be overridden by the one defined in tests/plugins/test_plugin.py.
"""
operator_extra_links = (AirflowLink(),)
| EmptyExtraLinkTestOperator |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_variables.py | {
"start": 16917,
"end": 20660
} | class ____:
async def test_update_variable(
self,
client: AsyncClient,
variable,
):
update = VariableUpdate(
name="updated_variable", value="updated-value", tags=["updated-tag"]
)
res = await client.patch(
f"/variables/name/{variable.name}",
json=update.model_dump(mode="json"),
)
assert res.status_code == 204
res = await client.get(
f"/variables/{variable.id}",
)
assert res.status_code == 200
res = parse_obj_as(core.Variable, res.json())
assert res.id == variable.id
assert res.name == update.name
assert res.value == update.value
assert res.tags == update.tags
@pytest.mark.parametrize(
"value",
[
"string-value",
'"string-value"',
123,
12.3,
True,
False,
None,
{"key": "value"},
["value1", "value2"],
{"key": ["value1", "value2"]},
],
)
async def test_update_variable_json_types(
self,
client: AsyncClient,
variable,
value: Any,
):
response = await client.patch(
f"/variables/name/{variable.name}",
json={"value": value},
)
assert response.status_code == 204
response = await client.get(
f"/variables/{variable.id}",
)
assert response.status_code == 200
res = response.json()
assert res["value"] == value
async def test_does_not_exist(
self,
client: AsyncClient,
):
update = VariableUpdate(
name="updated_variable", value="updated-value", tags=["updated-tag"]
)
res = await client.patch(
"/variables/name/doesnotexist",
json=update.model_dump(mode="json"),
)
assert res.status_code == 404
async def test_name_unique(
self,
client: AsyncClient,
variable,
):
same_name_update = VariableUpdate(name=variable.name)
res = await client.patch(
f"/variables/name/{variable.name}",
json=same_name_update.model_dump(mode="json"),
)
assert res.status_code == 409
async def test_name_max_length(
self,
client: AsyncClient,
variable,
):
max_length = MAX_VARIABLE_NAME_LENGTH
res = await client.patch(
f"/variables/name/{variable.name}", json={"name": "v" * max_length}
)
assert res
assert res.status_code == 204
max_length_plus1 = max_length + 1
res = await client.patch(
f"/variables/name/{variable.name}", json={"name": "v" * max_length_plus1}
)
assert res
assert res.status_code == 422
assert "Value should have at most" in res.json()["exception_detail"][0]["msg"]
async def test_value_max_length(
self,
client: AsyncClient,
variable,
):
max_length = MAX_VARIABLE_VALUE_LENGTH - 2 # 2 characters for quotes
res = await client.patch(
f"/variables/name/{variable.name}", json={"value": "v" * max_length}
)
assert res
assert res.status_code == 204
max_length_plus1 = MAX_VARIABLE_VALUE_LENGTH + 1
res = await client.patch(
f"/variables/name/{variable.name}", json={"value": "v" * max_length_plus1}
)
assert res
assert res.status_code == 422
assert (
"Variable value must be less than"
in res.json()["exception_detail"][0]["msg"]
)
| TestUpdateVariableByName |
python | psf__black | tests/data/cases/type_params.py | {
"start": 85,
"end": 773
} | class ____[ T ] : pass
def all_in[T : int,U : (bytes, str),* Ts,**P](): pass
def really_long[WhatIsTheLongestTypeVarNameYouCanThinkOfEnoughToMakeBlackSplitThisLine](): pass
def even_longer[WhatIsTheLongestTypeVarNameYouCanThinkOfEnoughToMakeBlackSplitThisLine: WhatIfItHadABound](): pass
def it_gets_worse[WhatIsTheLongestTypeVarNameYouCanThinkOfEnoughToMakeBlackSplitThisLine, ItCouldBeGenericOverMultipleTypeVars](): pass
def magic[Trailing, Comma,](): pass
def weird_syntax[T: lambda: 42, U: a or b](): pass
def name_3[name_0: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa if aaaaaaaaaaa else name_3](): pass
# output
def func[T]():
pass
async def func[T]():
pass
| C |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 6154,
"end": 10271
} | class ____(Generic[ReceiveMsgType, SendMsgType]):
"""Handle communication between the task in this process and the supervisor parent process."""
log: Logger = attrs.field(repr=False, factory=structlog.get_logger)
socket: socket = attrs.field(factory=lambda: socket(fileno=0))
resp_decoder: msgspec.msgpack.Decoder[_ResponseFrame] = attrs.field(
factory=lambda: msgspec.msgpack.Decoder(_ResponseFrame), repr=False
)
id_counter: Iterator[int] = attrs.field(factory=itertools.count)
# We could be "clever" here and set the default to this based type parameters and a custom
# `__class_getitem__`, but that's a lot of code the one subclass we've got currently. So we'll just use a
# "sort of wrong default"
body_decoder: TypeAdapter[ReceiveMsgType] = attrs.field(factory=lambda: TypeAdapter(ToTask), repr=False)
err_decoder: TypeAdapter[ErrorResponse] = attrs.field(factory=lambda: TypeAdapter(ToTask), repr=False)
def send(self, msg: SendMsgType) -> ReceiveMsgType | None:
"""Send a request to the parent and block until the response is received."""
frame = _RequestFrame(id=next(self.id_counter), body=msg.model_dump())
frame_bytes = frame.as_bytes()
self.socket.sendall(frame_bytes)
if isinstance(msg, ResendLoggingFD):
if recv_fds is None:
return None
# We need special handling here! The server can't send us the fd number, as the number on the
# supervisor will be different to in this process, so we have to mutate the message ourselves here.
frame, fds = self._read_frame(maxfds=1)
resp = self._from_frame(frame)
if TYPE_CHECKING:
assert isinstance(resp, SentFDs)
resp.fds = fds
# Since we know this is an expliclt SendFDs, and since this class is generic SendFDs might not
# always be in the return type union
return resp # type: ignore[return-value]
return self._get_response()
async def asend(self, msg: SendMsgType) -> ReceiveMsgType | None:
"""Send a request to the parent without blocking."""
raise NotImplementedError
@overload
def _read_frame(self, maxfds: None = None) -> _ResponseFrame: ...
@overload
def _read_frame(self, maxfds: int) -> tuple[_ResponseFrame, list[int]]: ...
def _read_frame(self, maxfds: int | None = None) -> tuple[_ResponseFrame, list[int]] | _ResponseFrame:
"""
Get a message from the parent.
This will block until the message has been received.
"""
if self.socket:
self.socket.setblocking(True)
fds = None
if maxfds:
len_bytes, fds, flag, address = recv_fds(self.socket, 4, maxfds)
else:
len_bytes = self.socket.recv(4)
if len_bytes == b"":
raise EOFError("Request socket closed before length")
length = int.from_bytes(len_bytes, byteorder="big")
buffer = bytearray(length)
mv = memoryview(buffer)
pos = 0
while pos < length:
nread = self.socket.recv_into(mv[pos:])
if nread == 0:
raise EOFError(f"Request socket closed before response was complete ({self.id_counter=})")
pos += nread
resp = self.resp_decoder.decode(mv)
if maxfds:
return resp, fds or []
return resp
def _from_frame(self, frame) -> ReceiveMsgType | None:
from airflow.sdk.exceptions import AirflowRuntimeError
if frame.error is not None:
err = self.err_decoder.validate_python(frame.error)
raise AirflowRuntimeError(error=err)
if frame.body is None:
return None
try:
return self.body_decoder.validate_python(frame.body)
except Exception:
self.log.exception("Unable to decode message")
raise
def _get_response(self) -> ReceiveMsgType | None:
frame = self._read_frame()
return self._from_frame(frame)
| CommsDecoder |
python | donnemartin__system-design-primer | solutions/object_oriented_design/call_center/call_center.py | {
"start": 168,
"end": 1006
} | class ____(metaclass=ABCMeta):
def __init__(self, employee_id, name, rank, call_center):
self.employee_id = employee_id
self.name = name
self.rank = rank
self.call = None
self.call_center = call_center
def take_call(self, call):
"""Assume the employee will always successfully take the call."""
self.call = call
self.call.employee = self
self.call.state = CallState.IN_PROGRESS
def complete_call(self):
self.call.state = CallState.COMPLETE
self.call_center.notify_call_completed(self.call)
@abstractmethod
def escalate_call(self):
pass
def _escalate_call(self):
self.call.state = CallState.READY
call = self.call
self.call = None
self.call_center.notify_call_escalated(call)
| Employee |
python | pypa__setuptools | setuptools/_vendor/typing_extensions.py | {
"start": 53727,
"end": 53919
} | class ____:
"""Mixin for TypeVarLike defaults."""
__slots__ = ()
__init__ = _set_default
# Classes using this metaclass must provide a _backported_typevarlike ClassVar
| _DefaultMixin |
python | sphinx-doc__sphinx | sphinx/cmd/make_mode.py | {
"start": 2279,
"end": 8482
} | class ____:
def __init__(
self,
*,
source_dir: str | os.PathLike[str],
build_dir: str | os.PathLike[str],
opts: Sequence[str],
) -> None:
self.source_dir = _StrPath(source_dir)
self.build_dir = _StrPath(build_dir)
self.opts = [*opts]
def build_dir_join(self, *comps: str | os.PathLike[str]) -> _StrPath:
return self.build_dir.joinpath(*comps)
def build_clean(self) -> int:
source_dir = self.source_dir.resolve()
build_dir = self.build_dir.resolve()
if not self.build_dir.exists():
return 0
elif not self.build_dir.is_dir():
print("Error: '%s' is not a directory!" % self.build_dir)
return 1
elif source_dir == build_dir:
print("Error: '%s' is same as source directory!" % self.build_dir)
return 1
elif source_dir.is_relative_to(build_dir):
print("Error: '%s' directory contains source directory!" % self.build_dir)
return 1
print("Removing everything under '%s'..." % self.build_dir)
for item in self.build_dir.iterdir():
rmtree(item)
return 0
def build_help(self) -> None:
if not terminal_supports_colour():
disable_colour()
print(bold('Sphinx v%s' % sphinx.__display_version__))
print("Please use `make %s' where %s is one of" % ((blue('target'),) * 2))
for osname, bname, description in BUILDERS:
if not osname or os.name == osname:
print(f' {blue(bname.ljust(10))} {description}')
def build_latexpdf(self) -> int:
if self.run_generic_build('latex') > 0:
return 1
# Use $MAKE to determine the make command
make_fallback = 'make.bat' if sys.platform == 'win32' else 'make'
makecmd = os.environ.get('MAKE', make_fallback)
if not makecmd.lower().startswith('make'):
raise RuntimeError('Invalid $MAKE command: %r' % makecmd)
try:
with chdir(self.build_dir_join('latex')):
if '-Q' in self.opts:
with open('__LATEXSTDOUT__', 'w', encoding='utf-8') as outfile:
returncode = subprocess.call(
[
makecmd,
'all-pdf',
'LATEXOPTS=-halt-on-error',
],
stdout=outfile,
stderr=subprocess.STDOUT,
)
if returncode:
print(
'Latex error: check %s'
% self.build_dir_join('latex', '__LATEXSTDOUT__')
)
elif '-q' in self.opts:
returncode = subprocess.call(
[
makecmd,
'all-pdf',
'LATEXOPTS=-halt-on-error',
'LATEXMKOPTS=-silent',
],
)
if returncode:
print(
'Latex error: check .log file in %s'
% self.build_dir_join('latex')
)
else:
returncode = subprocess.call([makecmd, 'all-pdf'])
return returncode
except OSError:
print('Error: Failed to run: %s' % makecmd)
return 1
def build_latexpdfja(self) -> int:
if self.run_generic_build('latex') > 0:
return 1
# Use $MAKE to determine the make command
make_fallback = 'make.bat' if sys.platform == 'win32' else 'make'
makecmd = os.environ.get('MAKE', make_fallback)
if not makecmd.lower().startswith('make'):
raise RuntimeError('Invalid $MAKE command: %r' % makecmd)
try:
with chdir(self.build_dir_join('latex')):
return subprocess.call([makecmd, 'all-pdf'])
except OSError:
print('Error: Failed to run: %s' % makecmd)
return 1
def build_info(self) -> int:
if self.run_generic_build('texinfo') > 0:
return 1
# Use $MAKE to determine the make command
makecmd = os.environ.get('MAKE', 'make')
if not makecmd.lower().startswith('make'):
raise RuntimeError('Invalid $MAKE command: %r' % makecmd)
try:
with chdir(self.build_dir_join('texinfo')):
return subprocess.call([makecmd, 'info'])
except OSError:
print('Error: Failed to run: %s' % makecmd)
return 1
def build_gettext(self) -> int:
dtdir = self.build_dir_join('gettext', '.doctrees')
if self.run_generic_build('gettext', doctreedir=dtdir) > 0:
return 1
return 0
def run_generic_build(
self, builder: str, doctreedir: str | os.PathLike[str] | None = None
) -> int:
# compatibility with old Makefile
paper_size = os.getenv('PAPER', '')
if paper_size in {'a4', 'letter'}:
self.opts.extend(['-D', f'latex_elements.papersize={paper_size}paper'])
if doctreedir is None:
doctreedir = self.build_dir_join('doctrees')
args = [
'--builder',
builder,
'--doctree-dir',
str(doctreedir),
str(self.source_dir),
str(self.build_dir_join(builder)),
]
return build_main(args + self.opts)
def run_make_mode(args: Sequence[str]) -> int:
if len(args) < 3:
print(
'Error: at least 3 arguments (builder, source '
'dir, build dir) are required.',
file=sys.stderr,
)
return 1
builder_name = args[0]
make = Make(source_dir=args[1], build_dir=args[2], opts=args[3:])
run_method = f'build_{builder_name}'
if hasattr(make, run_method):
return getattr(make, run_method)()
return make.run_generic_build(builder_name)
| Make |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_dialect.py | {
"start": 2935,
"end": 12631
} | class ____(fixtures.TestBase):
"""python-side dialect tests."""
@testing.combinations(
(
"FOREIGN KEY (tid) REFERENCES some_table(id)",
_fk_expected("tid", "some_table", "id"),
),
(
'FOREIGN KEY (tid) REFERENCES "(2)"(id)',
_fk_expected("tid", '"(2)"', "id"),
),
(
'FOREIGN KEY (tid) REFERENCES some_table("(2)")',
_fk_expected("tid", "some_table", '"(2)"'),
),
(
'FOREIGN KEY (tid1, tid2) REFERENCES some_table("(2)", "(3)")',
_fk_expected("tid1, tid2", "some_table", '"(2)", "(3)"'),
),
(
"FOREIGN KEY (tid) REFERENCES some_table(id) "
"DEFERRABLE INITIALLY DEFERRED",
_fk_expected(
"tid",
"some_table",
"id",
deferrable="DEFERRABLE",
initially="DEFERRED",
),
),
(
"FOREIGN KEY (tid1, tid2) "
"REFERENCES some_schema.some_table(id1, id2)",
_fk_expected(
"tid1, tid2",
"some_table",
"id1, id2",
referred_schema="some_schema",
),
),
(
"FOREIGN KEY (tid1, tid2) "
"REFERENCES some_schema.some_table(id1, id2) "
"MATCH FULL "
"ON UPDATE CASCADE "
"ON DELETE CASCADE "
"DEFERRABLE INITIALLY DEFERRED",
_fk_expected(
"tid1, tid2",
"some_table",
"id1, id2",
referred_schema="some_schema",
onupdate="CASCADE",
ondelete="CASCADE",
match="FULL",
deferrable="DEFERRABLE",
initially="DEFERRED",
),
),
)
def test_fk_parsing(self, condef, expected):
FK_REGEX = postgresql.dialect()._fk_regex_pattern
groups = re.search(FK_REGEX, condef).groups()
eq_(groups, expected)
def test_range_constructor(self):
"""test kwonly argments in the range constructor, as we had
to do dataclasses backwards compat operations"""
r1 = Range(None, 5)
eq_(dataclasses.astuple(r1), (None, 5, "[)", False))
r1 = Range(10, 5, bounds="()")
eq_(dataclasses.astuple(r1), (10, 5, "()", False))
with expect_raises(TypeError):
Range(10, 5, "()") # type: ignore
with expect_raises(TypeError):
Range(None, None, "()", True) # type: ignore
def test_range_frozen(self):
r1 = Range(None, 5)
eq_(dataclasses.astuple(r1), (None, 5, "[)", False))
with expect_raises(dataclasses.FrozenInstanceError):
r1.lower = 8 # type: ignore
@testing.only_on("postgresql+asyncpg")
def test_asyncpg_terminate_catch(self):
"""test for #11005"""
with testing.db.connect() as connection:
emulated_dbapi_connection = connection.connection.dbapi_connection
async def boom():
raise OSError("boom")
with mock.patch.object(
emulated_dbapi_connection,
"_connection",
mock.Mock(close=mock.Mock(return_value=boom())),
) as mock_asyncpg_connection:
emulated_dbapi_connection.terminate()
eq_(
mock_asyncpg_connection.mock_calls,
[mock.call.close(timeout=2), mock.call.terminate()],
)
def test_version_parsing(self):
def mock_conn(res):
return mock.Mock(
exec_driver_sql=mock.Mock(
return_value=mock.Mock(scalar=mock.Mock(return_value=res))
)
)
dialect = postgresql.dialect()
for string, version in [
(
"PostgreSQL 8.3.8 on i686-redhat-linux-gnu, compiled by "
"GCC gcc (GCC) 4.1.2 20070925 (Red Hat 4.1.2-33)",
(8, 3, 8),
),
(
"PostgreSQL 8.5devel on x86_64-unknown-linux-gnu, "
"compiled by GCC gcc (GCC) 4.4.2, 64-bit",
(8, 5),
),
(
"EnterpriseDB 9.1.2.2 on x86_64-unknown-linux-gnu, "
"compiled by gcc (GCC) 4.1.2 20080704 (Red Hat 4.1.2-50), "
"64-bit",
(9, 1, 2),
),
(
"[PostgreSQL 9.2.4 ] VMware vFabric Postgres 9.2.4.0 "
"release build 1080137",
(9, 2, 4),
),
(
"PostgreSQL 10devel on x86_64-pc-linux-gnu"
"compiled by gcc (GCC) 6.3.1 20170306, 64-bit",
(10,),
),
(
"PostgreSQL 10beta1 on x86_64-pc-linux-gnu, "
"compiled by gcc (GCC) 4.8.5 20150623 "
"(Red Hat 4.8.5-11), 64-bit",
(10,),
),
(
"PostgreSQL 8.0.2 on i686-pc-linux-gnu, compiled by GCC gcc "
"(GCC) 3.4.2 20041017 (Red Hat 3.4.2-6.fc3), "
"Redshift 1.0.12103",
(8, 0, 2),
),
]:
eq_(dialect._get_server_version_info(mock_conn(string)), version)
@testing.only_on("postgresql")
def test_ensure_version_is_qualified(
self, future_connection, testing_engine, metadata
):
default_schema_name = future_connection.dialect.default_schema_name
event.listen(
metadata,
"after_create",
DDL(
"""
CREATE OR REPLACE FUNCTION %s.version() RETURNS integer AS $$
BEGIN
return 0;
END;
$$ LANGUAGE plpgsql;"""
% (default_schema_name,)
),
)
event.listen(
metadata,
"before_drop",
DDL("DROP FUNCTION %s.version" % (default_schema_name,)),
)
metadata.create_all(future_connection)
future_connection.commit()
e = testing_engine()
@event.listens_for(e, "do_connect")
def receive_do_connect(dialect, conn_rec, cargs, cparams):
conn = dialect.dbapi.connect(*cargs, **cparams)
cursor = conn.cursor()
cursor.execute(
"set search_path = %s,pg_catalog" % (default_schema_name,)
)
cursor.close()
return conn
with e.connect():
pass
eq_(
e.dialect.server_version_info,
future_connection.dialect.server_version_info,
)
def test_psycopg2_empty_connection_string(self):
dialect = psycopg2_dialect.dialect()
u = url.make_url("postgresql+psycopg2://")
cargs, cparams = dialect.create_connect_args(u)
eq_(cargs, [""])
eq_(cparams, {})
def test_psycopg2_nonempty_connection_string(self):
dialect = psycopg2_dialect.dialect()
u = url.make_url("postgresql+psycopg2://host")
cargs, cparams = dialect.create_connect_args(u)
eq_(cargs, [])
eq_(cparams, {"host": "host"})
def test_psycopg2_empty_connection_string_w_query_one(self):
dialect = psycopg2_dialect.dialect()
u = url.make_url("postgresql+psycopg2:///?service=swh-log")
cargs, cparams = dialect.create_connect_args(u)
eq_(cargs, [])
eq_(cparams, {"service": "swh-log"})
def test_psycopg2_empty_connection_string_w_query_two(self):
dialect = psycopg2_dialect.dialect()
u = url.make_url("postgresql+psycopg2:///?any_random_thing=yes")
cargs, cparams = dialect.create_connect_args(u)
eq_(cargs, [])
eq_(cparams, {"any_random_thing": "yes"})
def test_psycopg2_nonempty_connection_string_w_query(self):
dialect = psycopg2_dialect.dialect()
u = url.make_url(
"postgresql+psycopg2://somehost/?any_random_thing=yes"
)
cargs, cparams = dialect.create_connect_args(u)
eq_(cargs, [])
eq_(cparams, {"host": "somehost", "any_random_thing": "yes"})
def test_psycopg2_disconnect(self):
class Error(Exception):
pass
dbapi = mock.Mock()
dbapi.Error = Error
dialect = psycopg2_dialect.dialect(dbapi=dbapi)
for error in [
# these error messages from libpq: interfaces/libpq/fe-misc.c
# and interfaces/libpq/fe-secure.c.
"terminating connection",
"closed the connection",
"connection not open",
"could not receive data from server",
"could not send data to server",
# psycopg2 client errors, psycopg2/connection.h,
# psycopg2/cursor.h
"connection already closed",
"cursor already closed",
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
"losed the connection unexpectedly",
# these can occur in newer SSL
"connection has been closed unexpectedly",
"SSL error: decryption failed or bad record mac",
"SSL SYSCALL error: Bad file descriptor",
"SSL SYSCALL error: EOF detected",
"SSL SYSCALL error: Operation timed out",
"SSL SYSCALL error: Bad address",
"SSL SYSCALL error: Success",
]:
eq_(dialect.is_disconnect(Error(error), None, None), True)
eq_(dialect.is_disconnect("not an error", None, None), False)
| DialectTest |
python | sphinx-doc__sphinx | sphinx/domains/std/__init__.py | {
"start": 3817,
"end": 6687
} | class ____(ObjectDescription[str]):
index_template: str = _('%s; configuration value')
option_spec: ClassVar[OptionSpec] = {
'no-index': directives.flag,
'no-index-entry': directives.flag,
'no-contents-entry': directives.flag,
'no-typesetting': directives.flag,
'type': directives.unchanged_required,
'default': directives.unchanged_required,
}
def handle_signature(self, sig: str, sig_node: desc_signature) -> str:
sig_node.clear()
sig_node += addnodes.desc_name(sig, sig)
name = ws_re.sub(' ', sig)
sig_node['fullname'] = name
return name
def _object_hierarchy_parts(self, sig_node: desc_signature) -> tuple[str, ...]:
return (sig_node['fullname'],)
def _toc_entry_name(self, sig_node: desc_signature) -> str:
if not sig_node.get('_toc_parts'):
return ''
(name,) = sig_node['_toc_parts']
return name
def add_target_and_index(
self, name: str, sig: str, signode: desc_signature
) -> None:
node_id = make_id(self.env, self.state.document, self.objtype, name)
signode['ids'].append(node_id)
self.state.document.note_explicit_target(signode)
index_entry = self.index_template % name
self.indexnode['entries'].append(('pair', index_entry, node_id, '', None))
domain = self.env.domains.standard_domain
domain.note_object(self.objtype, name, node_id, location=signode)
def transform_content(self, content_node: addnodes.desc_content) -> None:
"""Insert *type* and *default* as a field list."""
field_list = nodes.field_list()
if 'type' in self.options:
field, msgs = self.format_type(self.options['type'])
field_list.append(field)
field_list += msgs
if 'default' in self.options:
field, msgs = self.format_default(self.options['default'])
field_list.append(field)
field_list += msgs
if len(field_list.children) > 0:
content_node.insert(0, field_list)
def format_type(self, type_: str) -> tuple[nodes.field, list[system_message]]:
"""Formats the ``:type:`` option."""
parsed, msgs = self.parse_inline(type_, lineno=self.lineno)
field = nodes.field(
'',
nodes.field_name('', _('Type')),
nodes.field_body('', *parsed),
)
return field, msgs
def format_default(self, default: str) -> tuple[nodes.field, list[system_message]]:
"""Formats the ``:default:`` option."""
parsed, msgs = self.parse_inline(default, lineno=self.lineno)
field = nodes.field(
'',
nodes.field_name('', _('Default')),
nodes.field_body('', *parsed),
)
return field, msgs
| ConfigurationValue |
python | pikepdf__pikepdf | src/pikepdf/models/image.py | {
"start": 1509,
"end": 3079
} | class ____(Exception):
"""This image is not valid according to the PDF 1.7 specification."""
def _array_str(value: Object | str | list):
"""Simplify pikepdf objects to array of str. Keep streams, dictionaries intact."""
def _convert(item):
if isinstance(item, list | Array):
return [_convert(subitem) for subitem in item]
if isinstance(item, Stream | Dictionary | bytes | int):
return item
if isinstance(item, Name | str):
return str(item)
if isinstance(item, (String)):
return bytes(item)
raise NotImplementedError(value)
result = _convert(value)
if not isinstance(result, list):
result = [result]
return result
def _ensure_list(value: list[Object] | Dictionary | Array | Object) -> list[Object]:
"""Ensure value is a list of pikepdf.Object, if it was not already.
To support DecodeParms which can be present as either an array of dicts or a single
dict. It's easier to convert to an array of one dict.
"""
if isinstance(value, list):
return value
return list(value.wrap_in_array().as_list())
def _metadata_from_obj(
obj: Dictionary | Stream, name: str, type_: Callable[[Any], T], default: T
) -> T | None:
"""Retrieve metadata from a dictionary or stream and wrangle types."""
val = getattr(obj, name, default)
try:
return type_(val)
except TypeError:
if val is None:
return None
raise NotImplementedError('Metadata access for ' + name)
| InvalidPdfImageError |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_environment_schema.py | {
"start": 3331,
"end": 7395
} | class ____(NonLaunchableGraphQLContextTestMatrix):
def test_successful_run_config_schema(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "required_resource_job")
result = execute_dagster_graphql(
graphql_context,
RUN_CONFIG_SCHEMA_QUERY,
variables={
"selector": selector,
"mode": "default",
},
)
assert result.data["runConfigSchemaOrError"]["__typename"] == "RunConfigSchema"
def test_run_config_schema_pipeline_not_found(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "jkdjfkdjfd")
result = execute_dagster_graphql(
graphql_context,
RUN_CONFIG_SCHEMA_QUERY,
variables={"selector": selector, "mode": "default"},
)
assert result.data["runConfigSchemaOrError"]["__typename"] == "PipelineNotFoundError"
def test_run_config_schema_op_not_found(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "required_resource_job", ["kdjfkdj"])
result = execute_dagster_graphql(
graphql_context,
RUN_CONFIG_SCHEMA_QUERY,
variables={
"selector": selector,
"mode": "default",
},
)
assert result.data["runConfigSchemaOrError"]["__typename"] == "InvalidSubsetError"
def test_run_config_schema_mode_not_found(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "required_resource_job")
result = execute_dagster_graphql(
graphql_context,
RUN_CONFIG_SCHEMA_QUERY,
variables={"selector": selector, "mode": "kdjfdk"},
)
assert result.data["runConfigSchemaOrError"]["__typename"] == "ModeNotFoundError"
def test_basic_valid_config_on_run_config_schema(
self, graphql_context: WorkspaceRequestContext, snapshot
):
selector = infer_job_selector(graphql_context, "csv_hello_world")
result = execute_dagster_graphql(
graphql_context,
RUN_CONFIG_SCHEMA_CONFIG_VALIDATION_QUERY,
variables={
"selector": selector,
"mode": "default",
"runConfigData": csv_hello_world_ops_config(),
},
)
assert not result.errors
assert result.data
assert (
result.data["runConfigSchemaOrError"]["isRunConfigValid"]["__typename"]
== "PipelineConfigValidationValid"
)
snapshot.assert_match(result.data)
def test_full_yaml(self, graphql_context, snapshot):
selector = infer_job_selector(graphql_context, "csv_hello_world")
result = execute_dagster_graphql(
graphql_context,
RUN_CONFIG_SCHEMA_ROOT_DEFAULT_YAML_QUERY,
variables={
"selector": selector,
"mode": "default",
"runConfigData": csv_hello_world_ops_config(),
},
)
assert result
assert not result.errors
assert result.data
snapshot.assert_match(result.data)
def test_basic_invalid_config_on_run_config_schema(
self, graphql_context: WorkspaceRequestContext, snapshot
):
selector = infer_job_selector(graphql_context, "csv_hello_world")
result = execute_dagster_graphql(
graphql_context,
RUN_CONFIG_SCHEMA_CONFIG_VALIDATION_QUERY,
variables={
"selector": selector,
"mode": "default",
"runConfigData": {"nope": "kdjfd"},
},
)
assert not result.errors
assert result.data
assert (
result.data["runConfigSchemaOrError"]["isRunConfigValid"]["__typename"]
== "RunConfigValidationInvalid"
)
snapshot.assert_match(result.data)
| TestEnvironmentSchema |
python | huggingface__transformers | tests/quantization/bnb/test_4bit.py | {
"start": 17309,
"end": 19571
} | class ____(Base4bitTest):
def setUp(self):
super().setUp()
# model_name
self.model_name = "bigscience/bloom-560m"
self.seq_to_seq_name = "google-t5/t5-small"
# Different types of model
self.base_model = AutoModel.from_pretrained(
self.model_name, quantization_config=BitsAndBytesConfig(load_in_4bit=True), device_map="auto"
)
# Sequence classification model
self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
self.model_name, quantization_config=BitsAndBytesConfig(load_in_4bit=True), device_map="auto"
)
# CausalLM model
self.model_4bit = AutoModelForCausalLM.from_pretrained(
self.model_name, quantization_config=BitsAndBytesConfig(load_in_4bit=True), device_map="auto"
)
# Seq2seq model
self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
self.seq_to_seq_name, quantization_config=BitsAndBytesConfig(load_in_4bit=True), device_map="auto"
)
def tearDown(self):
r"""
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
del self.base_model
del self.sequence_model
del self.model_4bit
del self.seq_to_seq_model
gc.collect()
backend_empty_cache(torch_device)
def test_correct_head_class(self):
r"""
A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification)
are kept in their native class.
"""
from bitsandbytes.nn import Params4bit
self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
@apply_skip_if_not_implemented
| Classes4BitModelTest |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/deep_learning/layers.py | {
"start": 22052,
"end": 23031
} | class ____(Layer):
"""A layer that randomly sets a fraction p of the output units of the previous layer
to zero.
Parameters:
-----------
p: float
The probability that unit x is set to zero.
"""
def __init__(self, p=0.2):
self.p = p
self._mask = None
self.input_shape = None
self.n_units = None
self.pass_through = True
self.trainable = True
def forward_pass(self, X, training=True):
c = (1 - self.p)
if training:
self._mask = np.random.uniform(size=X.shape) > self.p
c = self._mask
return X * c
def backward_pass(self, accum_grad):
return accum_grad * self._mask
def output_shape(self):
return self.input_shape
activation_functions = {
'relu': ReLU,
'sigmoid': Sigmoid,
'selu': SELU,
'elu': ELU,
'softmax': Softmax,
'leaky_relu': LeakyReLU,
'tanh': TanH,
'softplus': SoftPlus
}
| Dropout |
python | huggingface__transformers | src/transformers/models/sam2/modular_sam2.py | {
"start": 15866,
"end": 17002
} | class ____(nn.Module):
r"""
Turns pixel values into patch embeddings for transformer consumption.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`Sam2ImageProcessorFast.__call__`] for details.
Returns:
embeddings (`torch.FloatTensor`):
Patch embeddings depend on image_size, patch_kernel_size, patch_stride and patch_padding
"""
def __init__(self, config: Sam2HieraDetConfig):
super().__init__()
num_channels = config.num_channels
hidden_size = config.hidden_size
self.projection = nn.Conv2d(
num_channels,
hidden_size,
kernel_size=config.patch_kernel_size,
stride=config.patch_stride,
padding=config.patch_padding,
)
def forward(self, pixel_values):
_, num_channels, height, width = pixel_values.shape
embeddings = self.projection(pixel_values).permute(0, 2, 3, 1)
return embeddings
| Sam2PatchEmbeddings |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/decl_base.py | {
"start": 12588,
"end": 29540
} | class ____(_ORMClassConfigurator):
"""Abstract base for a configurator that configures a class for a
declarative mapping, or an unmapped ORM dataclass.
Defines scanning of pep-484 annotations as well as ORM dataclass
applicators
"""
__slots__ = ()
clsdict_view: _ClassDict
collected_annotations: Dict[str, _CollectedAnnotation]
collected_attributes: Dict[str, Any]
is_dataclass_prior_to_mapping: bool
allow_unmapped_annotations: bool
dataclass_setup_arguments: Optional[_DataclassArguments]
"""if the class has SQLAlchemy native dataclass parameters, where
we will turn the class into a dataclass within the declarative mapping
process.
"""
allow_dataclass_fields: bool
"""if true, look for dataclass-processed Field objects on the target
class as well as superclasses and extract ORM mapping directives from
the "metadata" attribute of each Field.
if False, dataclass fields can still be used, however they won't be
mapped.
"""
_include_dunders = {
"__table__",
"__mapper_args__",
"__tablename__",
"__table_args__",
}
_match_exclude_dunders = re.compile(r"^(?:_sa_|__)")
def _scan_attributes(self) -> None:
raise NotImplementedError()
def _setup_dataclasses_transforms(
self, *, enable_descriptor_defaults: bool, revert: bool = False
) -> None:
dataclass_setup_arguments = self.dataclass_setup_arguments
if not dataclass_setup_arguments:
return
# can't use is_dataclass since it uses hasattr
if "__dataclass_fields__" in self.cls.__dict__:
raise exc.InvalidRequestError(
f"Class {self.cls} is already a dataclass; ensure that "
"base classes / decorator styles of establishing dataclasses "
"are not being mixed. "
"This can happen if a class that inherits from "
"'MappedAsDataclass', even indirectly, is been mapped with "
"'@registry.mapped_as_dataclass'"
)
# can't create a dataclass if __table__ is already there. This would
# fail an assertion when calling _get_arguments_for_make_dataclass:
# assert False, "Mapped[] received without a mapping declaration"
if "__table__" in self.cls.__dict__:
raise exc.InvalidRequestError(
f"Class {self.cls} already defines a '__table__'. "
"ORM Annotated Dataclasses do not support a pre-existing "
"'__table__' element"
)
raise_for_non_dc_attrs = collections.defaultdict(list)
def _allow_dataclass_field(
key: str, originating_class: Type[Any]
) -> bool:
if (
originating_class is not self.cls
and "__dataclass_fields__" not in originating_class.__dict__
):
raise_for_non_dc_attrs[originating_class].append(key)
return True
field_list = [
_AttributeOptions._get_arguments_for_make_dataclass(
self,
key,
anno,
mapped_container,
self.collected_attributes.get(key, _NoArg.NO_ARG),
dataclass_setup_arguments,
enable_descriptor_defaults,
)
for key, anno, mapped_container in (
(
key,
mapped_anno if mapped_anno else raw_anno,
mapped_container,
)
for key, (
raw_anno,
mapped_container,
mapped_anno,
is_dc,
attr_value,
originating_module,
originating_class,
) in self.collected_annotations.items()
if _allow_dataclass_field(key, originating_class)
and (
key not in self.collected_attributes
# issue #9226; check for attributes that we've collected
# which are already instrumented, which we would assume
# mean we are in an ORM inheritance mapping and this
# attribute is already mapped on the superclass. Under
# no circumstance should any QueryableAttribute be sent to
# the dataclass() function; anything that's mapped should
# be Field and that's it
or not isinstance(
self.collected_attributes[key], QueryableAttribute
)
)
)
]
if raise_for_non_dc_attrs:
for (
originating_class,
non_dc_attrs,
) in raise_for_non_dc_attrs.items():
raise exc.InvalidRequestError(
f"When transforming {self.cls} to a dataclass, "
f"attribute(s) "
f"{', '.join(repr(key) for key in non_dc_attrs)} "
f"originates from superclass "
f"{originating_class}, which is not a dataclass. When "
f"declaring SQLAlchemy Declarative "
f"Dataclasses, ensure that all mixin classes and other "
f"superclasses which include attributes are also a "
f"subclass of MappedAsDataclass or make use of the "
f"@unmapped_dataclass decorator.",
code="dcmx",
)
annotations = {}
defaults = {}
for item in field_list:
if len(item) == 2:
name, tp = item
elif len(item) == 3:
name, tp, spec = item
defaults[name] = spec
else:
assert False
annotations[name] = tp
revert_dict = {}
for k, v in defaults.items():
if k in self.cls.__dict__:
revert_dict[k] = self.cls.__dict__[k]
setattr(self.cls, k, v)
self._apply_dataclasses_to_any_class(
dataclass_setup_arguments, self.cls, annotations
)
if revert:
# used for mixin dataclasses; we have to restore the
# mapped_column(), relationship() etc. to the class so these
# take place for a mapped class scan
for k, v in revert_dict.items():
setattr(self.cls, k, v)
def _collect_annotation(
self,
name: str,
raw_annotation: _AnnotationScanType,
originating_class: Type[Any],
expect_mapped: Optional[bool],
attr_value: Any,
) -> Optional[_CollectedAnnotation]:
if name in self.collected_annotations:
return self.collected_annotations[name]
if raw_annotation is None:
return None
is_dataclass = self.is_dataclass_prior_to_mapping
allow_unmapped = self.allow_unmapped_annotations
if expect_mapped is None:
is_dataclass_field = isinstance(attr_value, dataclasses.Field)
expect_mapped = (
not is_dataclass_field
and not allow_unmapped
and (
attr_value is None
or isinstance(attr_value, _MappedAttribute)
)
)
is_dataclass_field = False
extracted = _extract_mapped_subtype(
raw_annotation,
self.cls,
originating_class.__module__,
name,
type(attr_value),
required=False,
is_dataclass_field=is_dataclass_field,
expect_mapped=expect_mapped and not is_dataclass,
)
if extracted is None:
# ClassVar can come out here
return None
extracted_mapped_annotation, mapped_container = extracted
if attr_value is None and not is_literal(extracted_mapped_annotation):
for elem in get_args(extracted_mapped_annotation):
if is_fwd_ref(
elem, check_generic=True, check_for_plain_string=True
):
elem = de_stringify_annotation(
self.cls,
elem,
originating_class.__module__,
include_generic=True,
)
# look in Annotated[...] for an ORM construct,
# such as Annotated[int, mapped_column(primary_key=True)]
if isinstance(elem, _IntrospectsAnnotations):
attr_value = elem.found_in_pep593_annotated()
self.collected_annotations[name] = ca = _CollectedAnnotation(
raw_annotation,
mapped_container,
extracted_mapped_annotation,
is_dataclass,
attr_value,
originating_class.__module__,
originating_class,
)
return ca
@classmethod
def _apply_dataclasses_to_any_class(
cls,
dataclass_setup_arguments: _DataclassArguments,
klass: Type[_O],
use_annotations: Mapping[str, _AnnotationScanType],
) -> None:
cls._assert_dc_arguments(dataclass_setup_arguments)
dataclass_callable = dataclass_setup_arguments["dataclass_callable"]
if dataclass_callable is _NoArg.NO_ARG:
dataclass_callable = dataclasses.dataclass
restored: Optional[Any]
if use_annotations:
# apply constructed annotations that should look "normal" to a
# dataclasses callable, based on the fields present. This
# means remove the Mapped[] container and ensure all Field
# entries have an annotation
restored = util.get_annotations(klass)
klass.__annotations__ = cast("Dict[str, Any]", use_annotations)
else:
restored = None
try:
dataclass_callable( # type: ignore[call-overload]
klass,
**{ # type: ignore[call-overload,unused-ignore]
k: v
for k, v in dataclass_setup_arguments.items()
if v is not _NoArg.NO_ARG
and k not in ("dataclass_callable",)
},
)
except (TypeError, ValueError) as ex:
raise exc.InvalidRequestError(
f"Python dataclasses error encountered when creating "
f"dataclass for {klass.__name__!r}: "
f"{ex!r}. Please refer to Python dataclasses "
"documentation for additional information.",
code="dcte",
) from ex
finally:
# restore original annotations outside of the dataclasses
# process; for mixins and __abstract__ superclasses, SQLAlchemy
# Declarative will need to see the Mapped[] container inside the
# annotations in order to map subclasses
if use_annotations:
if restored is None:
del klass.__annotations__
else:
klass.__annotations__ = restored # type: ignore[assignment] # noqa: E501
@classmethod
def _assert_dc_arguments(cls, arguments: _DataclassArguments) -> None:
allowed = {
"init",
"repr",
"order",
"eq",
"unsafe_hash",
"kw_only",
"match_args",
"dataclass_callable",
}
disallowed_args = set(arguments).difference(allowed)
if disallowed_args:
msg = ", ".join(f"{arg!r}" for arg in sorted(disallowed_args))
raise exc.ArgumentError(
f"Dataclass argument(s) {msg} are not accepted"
)
def _cls_attr_override_checker(
self, cls: Type[_O]
) -> Callable[[str, Any], bool]:
"""Produce a function that checks if a class has overridden an
attribute, taking SQLAlchemy-enabled dataclass fields into account.
"""
if self.allow_dataclass_fields:
sa_dataclass_metadata_key = _get_immediate_cls_attr(
cls, "__sa_dataclass_metadata_key__"
)
else:
sa_dataclass_metadata_key = None
if not sa_dataclass_metadata_key:
def attribute_is_overridden(key: str, obj: Any) -> bool:
return getattr(cls, key, obj) is not obj
else:
all_datacls_fields = {
f.name: f.metadata[sa_dataclass_metadata_key]
for f in util.dataclass_fields(cls)
if sa_dataclass_metadata_key in f.metadata
}
local_datacls_fields = {
f.name: f.metadata[sa_dataclass_metadata_key]
for f in util.local_dataclass_fields(cls)
if sa_dataclass_metadata_key in f.metadata
}
absent = object()
def attribute_is_overridden(key: str, obj: Any) -> bool:
if _is_declarative_props(obj):
obj = obj.fget
# this function likely has some failure modes still if
# someone is doing a deep mixing of the same attribute
# name as plain Python attribute vs. dataclass field.
ret = local_datacls_fields.get(key, absent)
if _is_declarative_props(ret):
ret = ret.fget
if ret is obj:
return False
elif ret is not absent:
return True
all_field = all_datacls_fields.get(key, absent)
ret = getattr(cls, key, obj)
if ret is obj:
return False
# for dataclasses, this could be the
# 'default' of the field. so filter more specifically
# for an already-mapped InstrumentedAttribute
if ret is not absent and isinstance(
ret, InstrumentedAttribute
):
return True
if all_field is obj:
return False
elif all_field is not absent:
return True
# can't find another attribute
return False
return attribute_is_overridden
def _cls_attr_resolver(
self, cls: Type[Any]
) -> Callable[[], Iterable[Tuple[str, Any, Any, bool]]]:
"""produce a function to iterate the "attributes" of a class
which we want to consider for mapping, adjusting for SQLAlchemy fields
embedded in dataclass fields.
"""
cls_annotations = util.get_annotations(cls)
cls_vars = vars(cls)
_include_dunders = self._include_dunders
_match_exclude_dunders = self._match_exclude_dunders
names = [
n
for n in util.merge_lists_w_ordering(
list(cls_vars), list(cls_annotations)
)
if not _match_exclude_dunders.match(n) or n in _include_dunders
]
if self.allow_dataclass_fields:
sa_dataclass_metadata_key: Optional[str] = _get_immediate_cls_attr(
cls, "__sa_dataclass_metadata_key__"
)
else:
sa_dataclass_metadata_key = None
if not sa_dataclass_metadata_key:
def local_attributes_for_class() -> (
Iterable[Tuple[str, Any, Any, bool]]
):
return (
(
name,
cls_vars.get(name),
cls_annotations.get(name),
False,
)
for name in names
)
else:
dataclass_fields = {
field.name: field for field in util.local_dataclass_fields(cls)
}
fixed_sa_dataclass_metadata_key = sa_dataclass_metadata_key
def local_attributes_for_class() -> (
Iterable[Tuple[str, Any, Any, bool]]
):
for name in names:
field = dataclass_fields.get(name, None)
if field and sa_dataclass_metadata_key in field.metadata:
yield field.name, _as_dc_declaredattr(
field.metadata, fixed_sa_dataclass_metadata_key
), cls_annotations.get(field.name), True
else:
yield name, cls_vars.get(name), cls_annotations.get(
name
), False
return local_attributes_for_class
| _ClassScanAbstractConfig |
python | Textualize__textual | src/textual/css/model.py | {
"start": 6363,
"end": 8750
} | class ____:
selector_set: list[SelectorSet] = field(default_factory=list)
styles: Styles = field(default_factory=Styles)
errors: list[tuple[Token, str | HelpText]] = field(default_factory=list)
is_default_rules: bool = False
tie_breaker: int = 0
selector_names: set[str] = field(default_factory=set)
pseudo_classes: set[str] = field(default_factory=set)
def __hash__(self):
return id(self)
@classmethod
def _selector_to_css(cls, selectors: list[Selector]) -> str:
tokens: list[str] = []
for selector in selectors:
if selector.combinator == CombinatorType.DESCENDENT:
tokens.append(" ")
elif selector.combinator == CombinatorType.CHILD:
tokens.append(" > ")
tokens.append(selector.css)
return "".join(tokens).strip()
@property
def selectors(self):
return ", ".join(
self._selector_to_css(selector_set.selectors)
for selector_set in self.selector_set
)
@property
def css(self) -> str:
"""Generate the CSS this RuleSet
Returns:
A string containing CSS code.
"""
declarations = "\n".join(f" {line}" for line in self.styles.css_lines)
css = f"{self.selectors} {{\n{declarations}\n}}"
return css
def _post_parse(self) -> None:
"""Called after the RuleSet is parsed."""
# Build a set of the class names that have been updated
class_type = SelectorType.CLASS
id_type = SelectorType.ID
type_type = SelectorType.TYPE
universal_type = SelectorType.UNIVERSAL
add_selector = self.selector_names.add
add_pseudo_classes = self.pseudo_classes.update
for selector_set in self.selector_set:
for selector in selector_set.selectors:
add_pseudo_classes(selector.pseudo_classes)
selector = selector_set.selectors[-1]
selector_type = selector.type
if selector_type == universal_type:
add_selector("*")
elif selector_type == type_type:
add_selector(selector.name)
elif selector_type == class_type:
add_selector(f".{selector.name}")
elif selector_type == id_type:
add_selector(f"#{selector.name}")
| RuleSet |
python | pallets__flask | src/flask/json/tag.py | {
"start": 5308,
"end": 5599
} | class ____(JSONTag):
__slots__ = ()
key = " u"
def check(self, value: t.Any) -> bool:
return isinstance(value, UUID)
def to_json(self, value: t.Any) -> t.Any:
return value.hex
def to_python(self, value: t.Any) -> t.Any:
return UUID(value)
| TagUUID |
python | celery__celery | t/unit/utils/test_functional.py | {
"start": 10884,
"end": 12175
} | class ____:
def test_starkwargs(self):
assert fun_takes_argument('foo', lambda **kw: 1)
def test_named(self):
assert fun_takes_argument('foo', lambda a, foo, bar: 1)
def fun(a, b, c, d):
return 1
assert fun_takes_argument('foo', fun, position=4)
def test_starargs(self):
assert fun_takes_argument('foo', lambda a, *args: 1)
def test_does_not(self):
assert not fun_takes_argument('foo', lambda a, bar, baz: 1)
assert not fun_takes_argument('foo', lambda: 1)
def fun(a, b, foo):
return 1
assert not fun_takes_argument('foo', fun, position=4)
@pytest.mark.parametrize('a,b,expected', [
((1, 2, 3), [4, 5], (1, 2, 3, 4, 5)),
((1, 2), [3, 4, 5], [1, 2, 3, 4, 5]),
([1, 2, 3], (4, 5), [1, 2, 3, 4, 5]),
([1, 2], (3, 4, 5), (1, 2, 3, 4, 5)),
])
def test_seq_concat_seq(a, b, expected):
res = seq_concat_seq(a, b)
assert type(res) is type(expected)
assert res == expected
@pytest.mark.parametrize('a,b,expected', [
((1, 2, 3), 4, (1, 2, 3, 4)),
([1, 2, 3], 4, [1, 2, 3, 4]),
])
def test_seq_concat_item(a, b, expected):
res = seq_concat_item(a, b)
assert type(res) is type(expected)
assert res == expected
| test_fun_takes_argument |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chartsheet07.py | {
"start": 315,
"end": 1869
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chartsheet07.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/chartsheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/chartsheets/sheet1.xml": ["<pageSetup", "<drawing"],
}
def test_create_file(self):
"""Test the worksheet properties of an XlsxWriter chartsheet file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chartsheet = workbook.add_chartsheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [61296640, 61298176]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chartsheet.set_paper(9)
chartsheet.set_portrait()
chartsheet.set_chart(chart)
chartsheet.horizontal_dpi = 200
chartsheet.vertical_dpi = 200
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | test/test_testing.py | {
"start": 93156,
"end": 99204
} | class ____(TestCase):
@classmethod
def _check_python_output(cls, program) -> str:
return subprocess.check_output(
[sys.executable, "-W", "always", "-c", program],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),).decode("utf-8")
# The test is flaky on ROCm/XPU and has been open and close multiple times
# https://github.com/pytorch/pytorch/issues/110040
@skipIfRocm
def test_circular_dependencies(self) -> None:
""" Checks that all modules inside torch can be imported
Prevents regression reported in https://github.com/pytorch/pytorch/issues/77441 """
ignored_modules = ["torch.utils.tensorboard", # deps on tensorboard
"torch.distributed.elastic.rendezvous", # depps on etcd
"torch.backends._coreml", # depends on pycoreml
"torch.contrib.", # something weird
"torch.testing._internal.distributed.", # just fails
"torch.ao.pruning._experimental.", # depends on pytorch_lightning, not user-facing
"torch.onnx._internal", # depends on onnx-script
"torch._inductor.runtime.triton_helpers", # depends on triton
"torch._inductor.codegen.cuda", # depends on cutlass
"torch._inductor.codegen.cutedsl", # depends on cutlass
"torch.distributed.benchmarks", # depends on RPC and DDP Optim
"torch.distributed.examples", # requires CUDA and torchvision
"torch.distributed.tensor.examples", # example scripts
"torch.distributed._tools.sac_ilp", # depends on pulp
"torch.csrc", # files here are devtools, not part of torch
"torch.include", # torch include files after install
]
if IS_WINDOWS or IS_MACOS or IS_JETSON:
# Distributed should be importable on Windows(except nn.api.), but not on Mac
if IS_MACOS or IS_JETSON:
ignored_modules.append("torch.distributed.")
else:
ignored_modules.append("torch.distributed.nn.api.")
ignored_modules.append("torch.distributed.optim.")
ignored_modules.append("torch.distributed.rpc.")
ignored_modules.append("torch.testing._internal.dist_utils")
# And these both end up with transitive dependencies on distributed
ignored_modules.append("torch.nn.parallel._replicated_tensor_ddp_interop")
ignored_modules.append("torch.testing._internal.common_fsdp")
ignored_modules.append("torch.testing._internal.common_distributed")
torch_dir = os.path.dirname(torch.__file__)
for base, _, files in os.walk(torch_dir):
prefix = os.path.relpath(base, os.path.dirname(torch_dir)).replace(os.path.sep, ".")
for f in files:
if not f.endswith(".py"):
continue
mod_name = f"{prefix}.{f[:-3]}" if f != "__init__.py" else prefix
# Do not attempt to import executable modules
if f == "__main__.py":
continue
if any(mod_name.startswith(x) for x in ignored_modules):
continue
try:
mod = importlib.import_module(mod_name)
except Exception as e:
raise RuntimeError(f"Failed to import {mod_name}: {e}") from e
self.assertTrue(inspect.ismodule(mod))
def test_lazy_imports_are_lazy(self) -> None:
out = self._check_python_output("import sys;import torch;print(all(x not in sys.modules for x in torch._lazy_modules))")
self.assertEqual(out.strip(), "True")
def test_no_warning_on_import(self) -> None:
out = self._check_python_output("import torch")
self.assertEqual(out, "")
def test_not_import_sympy(self) -> None:
out = self._check_python_output("import torch;import sys;print('sympy' not in sys.modules)")
self.assertEqual(out.strip(), "True",
"PyTorch should not depend on SymPy at import time as importing SymPy is *very* slow.\n"
"See the beginning of the following blog post for how to profile and find which file is importing sympy:\n"
"https://dev-discuss.pytorch.org/t/delving-into-what-happens-when-you-import-torch/1589\n\n"
"If you hit this error, you may want to:\n"
" - Refactor your code to avoid depending on sympy files you may not need to depend\n"
" - Use TYPE_CHECKING if you are using sympy + strings if you are using sympy on type annotations\n"
" - Import things that depend on SymPy locally")
@parametrize('path', ['torch', 'functorch'])
def test_no_mutate_global_logging_on_import(self, path) -> None:
# Calling logging.basicConfig, among other things, modifies the global
# logging state. It is not OK to modify the global logging state on
# `import torch` (or other submodules we own) because users do not expect it.
expected = string.ascii_lowercase
commands = [
'import logging',
f'import {path}',
'_logger = logging.getLogger("torch_test_testing")',
'logging.root.addHandler(logging.StreamHandler())',
'logging.root.setLevel(logging.INFO)',
f'_logger.info("{expected}")'
]
out = self._check_python_output("; ".join(commands))
self.assertEqual(out.strip(), expected)
| TestImports |
python | jpadilla__pyjwt | jwt/exceptions.py | {
"start": 2099,
"end": 2261
} | class ____(InvalidTokenError):
"""Raised when a token's ``sub`` claim is not a string or doesn't match the expected ``subject``"""
pass
| InvalidSubjectError |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 43790,
"end": 44593
} | class ____(BiffRecord):
"""
This record contains a list of all strings used anywhere in the
workbook. Each string occurs only once. The workbook uses indexes into
the list to reference the strings.
Record SST, BIFF8:
Offset Size Contents
0 4 Total number of strings in the workbook (see below)
4 4 Number of following strings (nm)
8 var. List of nm Unicode strings, 16-bit string length
The first field of the SST record counts the total occurrence
of strings in the workbook. For instance, the string AAA is used
3 times and the string BBB is used 2 times. The first field contains
5 and the second field contains 2, followed by the two strings.
"""
_REC_ID = 0x00FC
| SSTRecord |
python | django__django | tests/custom_methods/tests.py | {
"start": 91,
"end": 1200
} | class ____(TestCase):
def test_custom_methods(self):
a = Article.objects.create(
headline="Parrot programs in Python", pub_date=date(2005, 7, 27)
)
b = Article.objects.create(
headline="Beatles reunite", pub_date=date(2005, 7, 27)
)
self.assertFalse(a.was_published_today())
self.assertQuerySetEqual(
a.articles_from_same_day_1(),
[
"Beatles reunite",
],
lambda a: a.headline,
)
self.assertQuerySetEqual(
a.articles_from_same_day_2(),
[
"Beatles reunite",
],
lambda a: a.headline,
)
self.assertQuerySetEqual(
b.articles_from_same_day_1(),
[
"Parrot programs in Python",
],
lambda a: a.headline,
)
self.assertQuerySetEqual(
b.articles_from_same_day_2(),
[
"Parrot programs in Python",
],
lambda a: a.headline,
)
| MethodsTests |
python | ipython__ipython | IPython/core/profileapp.py | {
"start": 4172,
"end": 4535
} | class ____(BaseIPythonApplication):
description = """print the path to an IPython profile dir"""
def parse_command_line(self, argv=None):
super(ProfileLocate, self).parse_command_line(argv)
if self.extra_args:
self.profile = self.extra_args[0]
def start(self):
print(self.profile_dir.location)
| ProfileLocate |
python | Pylons__pyramid | src/pyramid/security.py | {
"start": 6694,
"end": 7046
} | class ____(PermitsResult):
"""
An instance of ``Allowed`` is returned when a security-related
API or other :app:`Pyramid` code allows an action unrelated to
an ACL check. It evaluates equal to all boolean true types. It
has an attribute named ``msg`` describing the circumstances for
the allow.
"""
boolval = 1
| Allowed |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 465170,
"end": 481101
} | class ____(VegaLiteSchema):
"""
HeaderConfig schema wrapper.
Parameters
----------
format : str, dict, :class:`Dict`, :class:`Format`, :class:`TimeFormatSpecifier`
The text format specifier for formatting number and date/time in labels of guides
(axes, legends, headers) and text marks.
If the format type is ``"number"`` (e.g., for quantitative fields), this is a D3's
`number format pattern string <https://github.com/d3/d3-format#locale_format>`__.
If the format type is ``"time"`` (e.g., for temporal fields), this is either: a)
D3's `time format pattern <https://d3js.org/d3-time-format#locale_format>`__ if you
desire to set a static time format.
b) `dynamic time format specifier object
<https://vega.github.io/vega-lite/docs/format.html#dynamic-time-format>`__ if you
desire to set a dynamic time format that uses different formats depending on the
granularity of the input date (e.g., if the date lies on a year, month, date, hour,
etc. boundary).
When used with a `custom formatType
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__, this
value will be passed as ``format`` alongside ``datum.value`` to the registered
function.
**Default value:** Derived from `numberFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for number
format and from `timeFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for time
format.
formatType : str
The format type for labels. One of ``"number"``, ``"time"``, or a `registered custom
format type
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__.
**Default value:**
* ``"time"`` for temporal fields and ordinal and nominal fields with ``timeUnit``.
* ``"number"`` for quantitative fields as well as ordinal and nominal fields without
``timeUnit``.
labelAlign : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
Horizontal text alignment of header labels. One of ``"left"``, ``"center"``, or
``"right"``.
labelAnchor : :class:`TitleAnchor`, Literal[None, 'start', 'middle', 'end']
The anchor position for placing the labels. One of ``"start"``, ``"middle"``, or
``"end"``. For example, with a label orientation of top these anchor positions map
to a left-, center-, or right-aligned label.
labelAngle : float
The rotation angle of the header labels.
**Default value:** ``0`` for column header, ``-90`` for row header.
labelBaseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
The vertical text baseline for the header labels. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, or ``"line-bottom"``. The
``"line-top"`` and ``"line-bottom"`` values operate similarly to ``"top"`` and
``"bottom"``, but are calculated relative to the ``titleLineHeight`` rather than
``titleFontSize`` alone.
labelColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple']
The color of the header label, can be in hex color code or regular color name.
labelExpr : str
`Vega expression <https://vega.github.io/vega/docs/expressions/>`__ for customizing
labels.
**Note:** The label text and value can be assessed via the ``label`` and ``value``
properties of the header's backing ``datum`` object.
labelFont : str, dict, :class:`ExprRef`
The font of the header label.
labelFontSize : dict, float, :class:`ExprRef`
The font size of the header label, in pixels.
labelFontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style of the header label.
labelFontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
The font weight of the header label.
labelLimit : dict, float, :class:`ExprRef`
The maximum length of the header label in pixels. The text value will be
automatically truncated if the rendered size exceeds the limit.
**Default value:** ``0``, indicating no limit
labelLineHeight : dict, float, :class:`ExprRef`
Line height in pixels for multi-line header labels or title text with ``"line-top"``
or ``"line-bottom"`` baseline.
labelOrient : :class:`Orient`, Literal['left', 'right', 'top', 'bottom']
The orientation of the header label. One of ``"top"``, ``"bottom"``, ``"left"`` or
``"right"``.
labelPadding : dict, float, :class:`ExprRef`
The padding, in pixel, between facet header's label and the plot.
**Default value:** ``10``
labels : bool
A boolean flag indicating if labels should be included as part of the header.
**Default value:** ``true``.
orient : :class:`Orient`, Literal['left', 'right', 'top', 'bottom']
Shortcut for setting both labelOrient and titleOrient.
title : None
Set to null to disable title for the axis, legend, or header.
titleAlign : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
Horizontal text alignment (to the anchor) of header titles.
titleAnchor : :class:`TitleAnchor`, Literal[None, 'start', 'middle', 'end']
The anchor position for placing the title. One of ``"start"``, ``"middle"``, or
``"end"``. For example, with an orientation of top these anchor positions map to a
left-, center-, or right-aligned title.
titleAngle : float
The rotation angle of the header title.
**Default value:** ``0``.
titleBaseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
The vertical text baseline for the header title. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, or ``"line-bottom"``. The
``"line-top"`` and ``"line-bottom"`` values operate similarly to ``"top"`` and
``"bottom"``, but are calculated relative to the ``titleLineHeight`` rather than
``titleFontSize`` alone.
**Default value:** ``"middle"``
titleColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple']
Color of the header title, can be in hex color code or regular color name.
titleFont : str, dict, :class:`ExprRef`
Font of the header title. (e.g., ``"Helvetica Neue"``).
titleFontSize : dict, float, :class:`ExprRef`
Font size of the header title.
titleFontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style of the header title.
titleFontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
Font weight of the header title. This can be either a string (e.g ``"bold"``,
``"normal"``) or a number (``100``, ``200``, ``300``, ..., ``900`` where
``"normal"`` = ``400`` and ``"bold"`` = ``700``).
titleLimit : dict, float, :class:`ExprRef`
The maximum length of the header title in pixels. The text value will be
automatically truncated if the rendered size exceeds the limit.
**Default value:** ``0``, indicating no limit
titleLineHeight : dict, float, :class:`ExprRef`
Line height in pixels for multi-line header title text or title text with
``"line-top"`` or ``"line-bottom"`` baseline.
titleOrient : :class:`Orient`, Literal['left', 'right', 'top', 'bottom']
The orientation of the header title. One of ``"top"``, ``"bottom"``, ``"left"`` or
``"right"``.
titlePadding : dict, float, :class:`ExprRef`
The padding, in pixel, between facet header's title and the label.
**Default value:** ``10``
"""
_schema = {"$ref": "#/definitions/HeaderConfig"}
def __init__(
self,
format: Optional[str | SchemaBase | Map] = Undefined,
formatType: Optional[str] = Undefined,
labelAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
labelAnchor: Optional[SchemaBase | TitleAnchor_T] = Undefined,
labelAngle: Optional[float] = Undefined,
labelBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
labelColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T
] = Undefined,
labelExpr: Optional[str] = Undefined,
labelFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
labelLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOrient: Optional[SchemaBase | Orient_T] = Undefined,
labelPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labels: Optional[bool] = Undefined,
orient: Optional[SchemaBase | Orient_T] = Undefined,
title: Optional[None] = Undefined,
titleAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
titleAnchor: Optional[SchemaBase | TitleAnchor_T] = Undefined,
titleAngle: Optional[float] = Undefined,
titleBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
titleColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T
] = Undefined,
titleFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
titleLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOrient: Optional[SchemaBase | Orient_T] = Undefined,
titlePadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(
format=format,
formatType=formatType,
labelAlign=labelAlign,
labelAnchor=labelAnchor,
labelAngle=labelAngle,
labelBaseline=labelBaseline,
labelColor=labelColor,
labelExpr=labelExpr,
labelFont=labelFont,
labelFontSize=labelFontSize,
labelFontStyle=labelFontStyle,
labelFontWeight=labelFontWeight,
labelLimit=labelLimit,
labelLineHeight=labelLineHeight,
labelOrient=labelOrient,
labelPadding=labelPadding,
labels=labels,
orient=orient,
title=title,
titleAlign=titleAlign,
titleAnchor=titleAnchor,
titleAngle=titleAngle,
titleBaseline=titleBaseline,
titleColor=titleColor,
titleFont=titleFont,
titleFontSize=titleFontSize,
titleFontStyle=titleFontStyle,
titleFontWeight=titleFontWeight,
titleLimit=titleLimit,
titleLineHeight=titleLineHeight,
titleOrient=titleOrient,
titlePadding=titlePadding,
**kwds,
)
| HeaderConfig |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 236953,
"end": 243093
} | class ____(test_util.TensorFlowTestCase):
"""Tests for MS-SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_msssim = np.asarray([[1.000000, 0.091016, 0.091025],
[0.091016, 1.000000, 0.999567],
[0.091025, 0.999567, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session() as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against MS-SSIM computed with Matlab implementation.
For color images, MS-SSIM scores are averaged over color channels.
"""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3)]
def ssim_func(x):
return image_ops.ssim_multiscale(
*x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
scores = [
self.evaluate(ssim_func(t))
for t in itertools.combinations_with_replacement(img, 2)
]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testUnweightedIsDifferentiable(self):
img = self._LoadTestImages()
@def_function.function
def msssim_func(x1, x2, scalar):
return image_ops.ssim_multiscale(
x1 * scalar,
x2 * scalar,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
with backprop.GradientTape() as tape:
tape.watch(scalar)
y = msssim_func(img[0], img[1], scalar)
grad = tape.gradient(y, scalar)
np_grads = self.evaluate(grad)
self.assertTrue(np.isfinite(np_grads).all())
def testUnweightedIsDifferentiableEager(self):
if not context.executing_eagerly():
self.skipTest("Eager mode only")
img = self._LoadTestImages()
def msssim_func(x1, x2, scalar):
return image_ops.ssim_multiscale(
x1 * scalar,
x2 * scalar,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
with backprop.GradientTape() as tape:
tape.watch(scalar)
y = msssim_func(img[0], img[1], scalar)
grad = tape.gradient(y, scalar)
np_grads = self.evaluate(grad)
self.assertTrue(np.isfinite(np_grads).all())
def testBatch(self):
"""Tests MS-SSIM computed in batch."""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
msssim = image_ops.ssim_multiscale(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(msssim), 1e-4)
def testBroadcast(self):
"""Tests MS-SSIM broadcasting."""
img = self._LoadTestImages()[:2]
expected = self._msssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
score_tensor = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)
def testRange(self):
"""Tests against low MS-SSIM score.
MS-SSIM is a geometric mean of SSIM and CS scores of various scales.
If any of the value is negative so that the geometric mean is not
well-defined, then treat the MS-SSIM score as zero.
"""
with self.cached_session() as sess:
img1 = self._LoadTestImage(sess, "checkerboard1.png")
img2 = self._LoadTestImage(sess, "checkerboard3.png")
images = [img1, img2, np.zeros_like(img1),
np.full_like(img1, fill_value=255)]
images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]
msssim_ops = [
image_ops.ssim_multiscale(
x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
for x, y in itertools.combinations(images, 2)
]
msssim = self.evaluate(msssim_ops)
msssim = np.squeeze(msssim)
self.assertTrue(np.all(msssim >= 0.0))
self.assertTrue(np.all(msssim <= 1.0))
def testInt(self):
img1 = self._RandomImage((1, 180, 240, 3), 255)
img2 = self._RandomImage((1, 180, 240, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim_multiscale(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session():
self.assertAllClose(
self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
def testNumpyInput(self):
"""Test case for GitHub issue 28241."""
image = np.random.random([512, 512, 1])
score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0)
with self.cached_session():
_ = self.evaluate(score_tensor)
| MultiscaleSSIMTest |
python | pandas-dev__pandas | pandas/tests/frame/test_query_eval.py | {
"start": 822,
"end": 2645
} | class ____:
@pytest.fixture
def df(self):
return DataFrame({"A": [1, 2, 3]})
@pytest.fixture
def expected1(self, df):
return df[df.A > 0]
@pytest.fixture
def expected2(self, df):
return df.A + 1
def test_query_default(self, df, expected1, expected2):
# GH 12749
# this should always work, whether NUMEXPR_INSTALLED or not
result = df.query("A>0")
tm.assert_frame_equal(result, expected1)
result = df.eval("A+1")
tm.assert_series_equal(result, expected2)
def test_query_None(self, df, expected1, expected2):
result = df.query("A>0", engine=None)
tm.assert_frame_equal(result, expected1)
result = df.eval("A+1", engine=None)
tm.assert_series_equal(result, expected2)
def test_query_python(self, df, expected1, expected2):
result = df.query("A>0", engine="python")
tm.assert_frame_equal(result, expected1)
result = df.eval("A+1", engine="python")
tm.assert_series_equal(result, expected2)
def test_query_numexpr(self, df, expected1, expected2):
if NUMEXPR_INSTALLED:
result = df.query("A>0", engine="numexpr")
tm.assert_frame_equal(result, expected1)
result = df.eval("A+1", engine="numexpr")
tm.assert_series_equal(result, expected2)
else:
msg = (
r"'numexpr' is not installed or an unsupported version. "
r"Cannot use engine='numexpr' for query/eval if 'numexpr' is "
r"not installed"
)
with pytest.raises(ImportError, match=msg):
df.query("A>0", engine="numexpr")
with pytest.raises(ImportError, match=msg):
df.eval("A+1", engine="numexpr")
| TestCompat |
python | getsentry__sentry | src/sentry/sentry_apps/api/serializers/request.py | {
"start": 461,
"end": 3169
} | class ____(Serializer):
def __init__(self, sentry_app: SentryApp) -> None:
self.sentry_app = sentry_app
def get_attrs(
self, item_list: Sequence[Any], user: Any, **kwargs: Any
) -> MutableMapping[Any, Any]:
project_ids = {item.data.get("project_id") for item in item_list}
projects = Project.objects.filter(id__in=project_ids)
projects_by_id = {project.id: project for project in projects}
organization_ids = {item.data.get("organization_id") for item in item_list}
organizations = Organization.objects.filter(id__in=organization_ids)
organizations_by_id = {organization.id: organization for organization in organizations}
return {
item: {
"organization": organizations_by_id.get(item.data.get("organization_id")),
"project": projects_by_id.get(item.data.get("project_id")),
}
for item in item_list
}
def serialize(
self, obj: Any, attrs: Mapping[Any, Any], user: Any, **kwargs: Any
) -> Mapping[str, Any]:
organization = attrs.get("organization")
project = attrs.get("project")
response_code = obj.data.get("response_code")
data = {
"webhookUrl": obj.data.get("webhook_url"),
"sentryAppSlug": self.sentry_app.slug,
"eventType": obj.data.get("event_type"),
"date": obj.data.get("date"),
"responseCode": response_code,
}
if response_code >= 400 or response_code == TIMEOUT_STATUS_CODE:
# add error data to display in Sentry app dashboard
data.update(
{
"requestBody": obj.data.get("request_body"),
"requestHeaders": obj.data.get("request_headers"),
"responseBody": obj.data.get("response_body"),
}
)
if project and "error_id" in obj.data:
# Make sure the project actually belongs to the org that owns the Sentry App
if project.organization_id == self.sentry_app.owner_id:
# Make sure the event actually exists
event = eventstore.backend.get_event_by_id(project.id, obj.data["error_id"])
if event is not None and event.group_id is not None:
data["errorUrl"] = reverse(
"sentry-organization-event-detail",
args=[project.organization.slug, event.group_id, event.event_id],
)
if organization:
data["organization"] = {"name": organization.name, "slug": organization.slug}
return data
| RequestSerializer |
python | huggingface__transformers | tests/models/efficientnet/test_modeling_efficientnet.py | {
"start": 1368,
"end": 4307
} | class ____:
def __init__(
self,
parent,
batch_size=13,
image_size=32,
num_channels=3,
kernel_sizes=[3, 3, 5],
in_channels=[32, 16, 24],
out_channels=[16, 24, 20],
strides=[1, 1, 2],
num_block_repeats=[1, 1, 2],
expand_ratios=[1, 6, 6],
is_training=True,
use_labels=True,
intermediate_size=37,
hidden_act="gelu",
num_labels=10,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.kernel_sizes = kernel_sizes
self.in_channels = in_channels
self.out_channels = out_channels
self.strides = strides
self.num_block_repeats = num_block_repeats
self.expand_ratios = expand_ratios
self.is_training = is_training
self.hidden_act = hidden_act
self.num_labels = num_labels
self.use_labels = use_labels
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return EfficientNetConfig(
image_size=self.image_size,
num_channels=self.num_channels,
kernel_sizes=self.kernel_sizes,
in_channels=self.in_channels,
out_channels=self.out_channels,
strides=self.strides,
num_block_repeats=self.num_block_repeats,
expand_ratios=self.expand_ratios,
hidden_act=self.hidden_act,
num_labels=self.num_labels,
)
def create_and_check_model(self, config, pixel_values, labels):
model = EfficientNetModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# expected last hidden states: B, C, H // 4, W // 4
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, config.hidden_dim, self.image_size // 4, self.image_size // 4),
)
def create_and_check_for_image_classification(self, config, pixel_values, labels):
model = EfficientNetForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| EfficientNetModelTester |
python | doocs__leetcode | solution/2100-2199/2185.Counting Words With a Given Prefix/Solution2.py | {
"start": 0,
"end": 572
} | class ____:
    def __init__(self):
        # One child slot per lowercase letter; ``cnt`` counts how many
        # inserted words pass through this node.
        self.children = [None] * 26
        self.cnt = 0

    def insert(self, w):
        """Insert word *w*, bumping the pass-through count of every node on its path."""
        node = self
        for c in w:
            i = ord(c) - ord('a')
            if node.children[i] is None:
                node.children[i] = Trie()
            node = node.children[i]
            # Incremented for every character, so each prefix node tracks
            # how many words share that prefix.
            node.cnt += 1

    def search(self, pref):
        """Return how many inserted words start with *pref* (0 if the path is absent)."""
        node = self
        for c in pref:
            i = ord(c) - ord('a')
            if node.children[i] is None:
                return 0
            node = node.children[i]
        return node.cnt
| Trie |
python | xlwings__xlwings | tests/test_range.py | {
"start": 27450,
"end": 29072
} | class ____(TestBase):
# 2d Range
def test_slice1(self):
r = self.wb1.sheets[0].range("B2:D4")
self.assertEqual(r[0:, 1:].address, "$C$2:$D$4")
def test_slice2(self):
r = self.wb1.sheets[0].range("B2:D4")
self.assertEqual(r[1:2, 1:2].address, "$C$3")
def test_slice3(self):
r = self.wb1.sheets[0].range("B2:D4")
self.assertEqual(r[:1, :2].address, "$B$2:$C$2")
def test_slice4(self):
r = self.wb1.sheets[0].range("B2:D4")
self.assertEqual(r[:, :].address, "$B$2:$D$4")
# Row
def test_slice1row(self):
r = self.wb1.sheets[0].range("B2:D2")
self.assertEqual(r[1:].address, "$C$2:$D$2")
def test_slice2row(self):
r = self.wb1.sheets[0].range("B2:D2")
self.assertEqual(r[1:2].address, "$C$2")
def test_slice3row(self):
r = self.wb1.sheets[0].range("B2:D2")
self.assertEqual(r[:2].address, "$B$2:$C$2")
def test_slice4row(self):
r = self.wb1.sheets[0].range("B2:D2")
self.assertEqual(r[:].address, "$B$2:$D$2")
# Column
def test_slice1col(self):
r = self.wb1.sheets[0].range("B2:B4")
self.assertEqual(r[1:].address, "$B$3:$B$4")
def test_slice2col(self):
r = self.wb1.sheets[0].range("B2:B4")
self.assertEqual(r[1:2].address, "$B$3")
def test_slice3col(self):
r = self.wb1.sheets[0].range("B2:B4")
self.assertEqual(r[:2].address, "$B$2:$B$3")
def test_slice4col(self):
r = self.wb1.sheets[0].range("B2:B4")
self.assertEqual(r[:].address, "$B$2:$B$4")
| TestRangeSlicing |
python | tox-dev__tox | src/tox/config/loader/replacer.py | {
"start": 911,
"end": 1228
} | class ____(ValueError):
"""Could not stabilize on replacement value."""
    @staticmethod
    def check(depth: int, value: Any) -> None:
        """Raise :class:`MatchRecursionError` once *depth* exceeds MAX_REPLACE_DEPTH.

        Guards the replacement-expansion loop against substitutions that
        never stabilize (e.g. self-referencing values).
        """
        if depth > MAX_REPLACE_DEPTH:
            msg = f"Could not expand {value} after recursing {depth} frames"
            raise MatchRecursionError(msg)
| MatchRecursionError |
python | google__pytype | pytype/tests/test_overriding.py | {
"start": 27883,
"end": 30382
} | class ____(test_base.BaseTest):
"""Tests for @typing.override."""
def test_valid_override(self):
self.Check("""
from typing_extensions import override
class A:
def f(self):
pass
class B(A):
@override
def f(self):
pass
""")
def test_invalid_override(self):
errors = self.CheckWithErrors("""
from typing_extensions import override
class A:
def f(self):
pass
class B(A):
@override
def g(self): # override-error[e]
pass
""")
self.assertErrorSequences(
errors, {"e": ["Attribute 'g' not found on any parent class"]}
)
def test_multiple_inheritance(self):
self.CheckWithErrors("""
from typing_extensions import override
class A:
def f(self):
pass
class B:
def g(self):
pass
class C(A, B):
@override
def f(self):
pass
@override
def g(self):
pass
@override
def h(self): # override-error
pass
""")
def test_nested_class(self):
self.CheckWithErrors("""
from typing_extensions import override
class A:
class B:
pass
class C(A):
@override
class B:
pass
@override
class B2: # override-error
pass
""")
def test_grandparent(self):
self.CheckWithErrors("""
from typing_extensions import override
class Base:
def f(self) -> int:
return 0
class Parent(Base):
pass
class Child(Parent):
@override
def f(self) -> int:
return 5
@override
def g(self) -> int: # override-error
return 5
""")
def test_strict_mode(self):
errors = self.CheckWithErrors("""
# pytype: features=require-override-decorator
from typing_extensions import override
class A:
def f(self):
pass
def g(self):
pass
class B(A):
@override
def f(self):
pass
def g(self): # override-error[e]
pass
def h(self):
pass
""")
self.assertErrorSequences(
errors,
{
"e": [
"Missing @typing.override decorator for 'g', which overrides"
" 'A.g'"
]
},
)
if __name__ == "__main__":
test_base.main()
| TypingOverrideTest |
python | gevent__gevent | src/gevent/_ffi/watcher.py | {
"start": 1878,
"end": 2757
} | class ____(int):
def __repr__(self):
return "<NoWatcher>"
_NoWatcherResult = _NoWatcherResult(0)
def events_to_str(event_field, all_events):
    """Render an event bitmask as a ``'NAME1|NAME2'`` string.

    ``all_events`` is a sequence of ``(flag, name)`` pairs; any bits left
    over after all known flags are consumed are appended as a hex literal.
    """
    remaining = event_field
    names = []
    for mask, label in all_events:
        if not remaining:
            # every bit accounted for — no need to scan further flags
            break
        if remaining & mask:
            names.append(label)
            remaining &= ~mask
    if remaining:
        names.append(hex(remaining))
    return '|'.join(names)
def not_while_active(func):
    """Decorator: forbid calling *func* while ``self.active`` is truthy.

    Raises ``ValueError`` instead of invoking the wrapped method; the
    wrapped method's return value is intentionally discarded (wrapper
    returns None), matching setter-style usage.
    """
    @functools.wraps(func)
    def guarded(self, *args, **kwargs):
        if not self.active:
            func(self, *args, **kwargs)
            return
        raise ValueError("not while active")
    return guarded
def only_if_watcher(func):
    """Decorator: run *func* only when ``self._watcher`` is set.

    When no native watcher exists, the shared ``_NoWatcherResult``
    sentinel (a falsy int) is returned instead of calling *func*.
    """
    @functools.wraps(func)
    def dispatch(self):
        return func(self) if self._watcher else _NoWatcherResult
    return dispatch
| _NoWatcherResult |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/utils.py | {
"start": 5401,
"end": 8166
} | class ____(FuturesAwareThreadPoolExecutor):
"""A ThreadPoolExecutor that copies over contextvars at submit time."""
    def submit(self, fn, *args, **kwargs):
        """Submit *fn*, running it under a copy of the caller's contextvars.

        ``copy_context`` snapshots the submitting thread's context;
        ``ctx.run`` replays *fn* inside that snapshot on the worker thread.
        """
        ctx = copy_context()
        return super().submit(ctx.run, fn, *args, **kwargs)
def is_valid_email(email: str) -> bool:
    """Return True if *email* looks like a plausible address.

    This is a lightweight sanity check, not full RFC 5322 validation.
    """
    # Bug fix: the TLD class was previously [A-Z|a-z]. Inside [...] the
    # '|' has no alternation meaning, so it was accepted as a literal TLD
    # character (e.g. "user@example.c|m" validated). [A-Za-z] is intended.
    regex = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,7}\b"
    return bool(re.fullmatch(regex, email))
T = TypeVar("T")
P = TypeVar("P")
def imap(
    executor: ThreadPoolExecutor,
    iterable: Iterator[T],
    func: Callable[[T], P],
) -> Iterator[P]:
    """A version of `concurrent.futures.ThreadpoolExecutor.map` which tails the input iterator in
    a separate thread. This means that the map function can begin processing and yielding results from
    the first elements of the iterator before the iterator is fully consumed.

    Args:
        executor: The ThreadPoolExecutor to use for parallel execution.
        iterable: The iterator to apply the function to.
        func: The function to apply to each element of the iterator.
    """
    # Futures for in-flight func(arg) calls, in input order; only this
    # generator pops, so results are yielded in the original order.
    work_queue: deque[Future] = deque([])

    # create a small task which waits on the iterator
    # and enqueues work items as they become available
    def _apply_func_to_iterator_results(iterable: Iterator) -> None:
        for arg in iterable:
            work_queue.append(executor.submit(func, arg))

    enqueuing_task = executor.submit(
        _apply_func_to_iterator_results,
        iterable,
    )

    while True:
        # Done only when the feeder has finished AND no futures remain.
        if (not enqueuing_task or enqueuing_task.done()) and len(work_queue) == 0:
            break
        if len(work_queue) > 0:
            current_work_item = work_queue[0]
            try:
                # Short timeout keeps the loop responsive to new enqueues;
                # popleft happens only after a successful result so the
                # head item is retried on timeout.
                # NOTE(review): on Python < 3.11, Future.result raises
                # concurrent.futures.TimeoutError, which is NOT builtin
                # TimeoutError — presumably an alias import exists at
                # module top; verify.
                yield current_work_item.result(timeout=0.1)
                work_queue.popleft()
            except TimeoutError:
                pass

    # Ensure any exceptions from the enqueuing task processing the iterator are raised,
    # after all work items have been processed.
    exc = enqueuing_task.exception()
    if exc:
        raise exc
def exhaust_iterator_and_yield_results_with_exception(iterable: Iterator[T]) -> Iterator[T]:
    """Consume *iterable* fully, then yield everything it produced.

    If the iterator raises midway, the items produced before the failure
    are still yielded, and the original exception is re-raised afterwards
    — so partial output is not lost.
    """
    collected = []
    pending_error = None
    try:
        for item in iterable:
            collected.append(item)
    except Exception as exc:
        # Defer: replay the buffered items first, raise at the end.
        pending_error = exc
    yield from collected
    if pending_error:
        raise pending_error
| InheritContextThreadPoolExecutor |
python | django__django | tests/model_options/models/default_related_name.py | {
"start": 559,
"end": 783
} | class ____(models.Model):
    # Shared fields for concrete store models.
    name = models.CharField(max_length=128)
    address = models.CharField(max_length=128)

    class Meta:
        abstract = True
        # Reverse accessors on related models expand to e.g.
        # "<app_label>_<model_name>s" per Django's default_related_name
        # placeholder substitution.
        default_related_name = "%(app_label)s_%(model_name)ss"
| Store |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 6686,
"end": 6871
} | class ____:
def __aiter__(self) -> AsyncIterable[str]:
... # Y045 "__aiter__" methods should return an AsyncIterator, not an AsyncIterable
| AsyncIteratorReturningAsyncIterable |
python | pydata__xarray | xarray/tests/test_treenode.py | {
"start": 11522,
"end": 14316
} | class ____:
def test_parents(self) -> None:
_, leaf_f = create_test_tree()
expected = ["e", "b", "a"]
assert [node.name for node in leaf_f.parents] == expected
def test_lineage(self) -> None:
_, leaf_f = create_test_tree()
expected = ["f", "e", "b", "a"]
with pytest.warns(DeprecationWarning):
assert [node.name for node in leaf_f.lineage] == expected
def test_ancestors(self) -> None:
_, leaf_f = create_test_tree()
with pytest.warns(DeprecationWarning):
ancestors = leaf_f.ancestors
expected = ["a", "b", "e", "f"]
for node, expected_name in zip(ancestors, expected, strict=True):
assert node.name == expected_name
def test_subtree(self) -> None:
root, _ = create_test_tree()
expected = [
"a",
"b",
"c",
"d",
"e",
"h",
"f",
"g",
"i",
]
actual = [node.name for node in root.subtree]
assert expected == actual
def test_subtree_with_keys(self) -> None:
root, _ = create_test_tree()
expected_names = [
"a",
"b",
"c",
"d",
"e",
"h",
"f",
"g",
"i",
]
expected_paths = [
".",
"b",
"c",
"b/d",
"b/e",
"c/h",
"b/e/f",
"b/e/g",
"c/h/i",
]
result_paths, result_names = zip(
*[(path, node.name) for path, node in root.subtree_with_keys], strict=False
)
assert list(result_names) == expected_names
assert list(result_paths) == expected_paths
def test_descendants(self) -> None:
root, _ = create_test_tree()
descendants = root.descendants
expected = [
"b",
"c",
"d",
"e",
"h",
"f",
"g",
"i",
]
for node, expected_name in zip(descendants, expected, strict=True):
assert node.name == expected_name
def test_leaves(self) -> None:
tree, _ = create_test_tree()
leaves = tree.leaves
expected = [
"d",
"f",
"g",
"i",
]
for node, expected_name in zip(leaves, expected, strict=True):
assert node.name == expected_name
def test_levels(self) -> None:
a, f = create_test_tree()
assert a.level == 0
assert f.level == 3
assert a.depth == 3
assert f.depth == 3
assert a.width == 1
assert f.width == 3
| TestAncestry |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-moorcheh/llama_index/vector_stores/moorcheh/utils.py | {
"start": 815,
"end": 2402
} | class ____(BaseSparseEmbedding):
"""Default Moorcheh sparse embedding."""
tokenizer: Callable = Field(
default_factory=get_default_tokenizer,
description="A callable that returns token input ids.",
)
def build_sparse_embeddings(
self, input_batch: List[List[int]]
) -> List[SparseEmbedding]:
# store a batch of sparse embeddings
sparse_emb_list = []
# iterate through input batch
for token_ids in input_batch:
sparse_emb = {}
# convert the input_ids list to a dictionary of key to frequency values
d = dict(Counter(token_ids))
for idx in d:
sparse_emb[idx] = float(d[idx])
sparse_emb_list.append(sparse_emb)
# return sparse_emb list
return sparse_emb_list
def _get_query_embedding(self, query: str) -> SparseEmbedding:
"""Embed the input query synchronously."""
token_ids = self.tokenizer([query])[0]
return self.build_sparse_embeddings([token_ids])[0]
async def _aget_query_embedding(self, query: str) -> SparseEmbedding:
"""Embed the input query asynchronously."""
return self._get_query_embedding(query)
def _get_text_embedding(self, text: str) -> SparseEmbedding:
"""Embed the input text synchronously."""
return self._get_query_embedding(text)
async def _aget_text_embedding(self, text: str) -> SparseEmbedding:
"""Embed the input text asynchronously."""
return self._get_query_embedding(text)
| DefaultMoorchehSparseEmbedding |
python | pypa__warehouse | tests/common/db/oidc.py | {
"start": 3005,
"end": 3371
} | class ____(WarehouseFactory):
class Meta:
model = ActiveStatePublisher
id = factory.Faker("uuid4", cast_to=None)
organization = factory.Faker("pystr", max_chars=12)
activestate_project_name = factory.Faker("pystr", max_chars=12)
actor = factory.Faker("pystr", max_chars=12)
actor_id = factory.Faker("uuid4")
| ActiveStatePublisherFactory |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/declarative_automation/operators/newly_true_operator.py | {
"start": 688,
"end": 3463
} | class ____(BuiltinAutomationCondition[T_EntityKey]):
operand: AutomationCondition[T_EntityKey]
@property
def name(self) -> str:
return "NEWLY_TRUE"
@property
def children(self) -> Sequence[AutomationCondition[T_EntityKey]]:
return [self.operand]
def _get_previous_child_true_subset(
self, context: AutomationContext[T_EntityKey]
) -> Optional[EntitySubset[T_EntityKey]]:
"""Returns the true subset of the child from the previous tick, which is stored in the
extra state field of the cursor.
"""
true_subset = context.get_structured_cursor(as_type=SerializableEntitySubset)
if not true_subset:
return None
return context.asset_graph_view.get_subset_from_serializable_subset(true_subset)
def get_node_unique_id(
self,
*,
parent_unique_id: Optional[str],
index: Optional[int],
target_key: Optional[EntityKey],
) -> str:
# newly true conditions should have stable cursoring logic regardless of where they
# exist in the broader condition tree, as they're always evaluated over the entire
# subset
return self._get_stable_unique_id(target_key)
def get_backcompat_node_unique_ids(
self,
*,
parent_unique_id: Optional[str] = None,
index: Optional[int] = None,
target_key: Optional[EntityKey] = None,
) -> Sequence[str]:
return [
# get the standard globally-aware unique id for backcompat purposes
super().get_node_unique_id(
parent_unique_id=parent_unique_id, index=index, target_key=target_key
)
]
async def evaluate(self, context: AutomationContext) -> AutomationResult: # pyright: ignore[reportIncompatibleMethodOverride]
# evaluate child condition
child_result = await context.for_child_condition(
self.operand,
child_indices=[0],
# must evaluate child condition over the entire subset to avoid missing state transitions
candidate_subset=context.asset_graph_view.get_full_subset(key=context.key),
).evaluate_async()
# get the set of asset partitions of the child which newly became true
newly_true_child_subset = child_result.true_subset.compute_difference(
self._get_previous_child_true_subset(context) or context.get_empty_subset()
)
return AutomationResult(
context=context,
true_subset=context.candidate_subset.compute_intersection(newly_true_child_subset),
child_results=[child_result],
structured_cursor=child_result.true_subset.convert_to_serializable_subset(),
)
| NewlyTrueCondition |
python | pypa__setuptools | setuptools/_scripts.py | {
"start": 3157,
"end": 6624
} | class ____:
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent(
r"""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = %(spec)r
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point(%(spec)r, %(group)r, %(name)r)())
"""
).lstrip()
command_spec_class = CommandSpec
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
# If distribution is not an importlib.metadata.Distribution, assume
# it's a pkg_resources.Distribution and transform it.
if not hasattr(dist, 'entry_points'):
SetuptoolsWarning.emit("Unsupported distribution encountered.")
dist = metadata.Distribution.at(dist.egg_info)
if header is None:
header = cls.get_header()
spec = f'{dist.name}=={dist.version}'
for type_ in 'console', 'gui':
group = f'{type_}_scripts'
for ep in dist.entry_points.select(group=group):
name = ep.name
cls._ensure_safe_name(ep.name)
script_text = cls.template % locals()
args = cls._get_script_args(type_, ep.name, header, script_text)
yield from args
@staticmethod
def _ensure_safe_name(name):
"""
Prevent paths in *_scripts entry point names.
"""
has_path_sep = re.search(r'[\\/]', name)
if has_path_sep:
raise ValueError("Path separators not allowed in script names")
@classmethod
def best(cls):
"""
Select the best ScriptWriter for this environment.
"""
if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
return WindowsScriptWriter.best()
else:
return cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header + script_text)
@classmethod
def get_header(
cls,
script_text: str = "",
executable: str | CommandSpec | Iterable[str] | None = None,
) -> str:
"""Create a #! line, getting options (if any) from script_text"""
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
| ScriptWriter |
python | matplotlib__matplotlib | lib/matplotlib/axis.py | {
"start": 15037,
"end": 17119
} | class ____(Tick):
"""
Contains all the Artists needed to make a Y tick - the tick line,
the label text and the grid line
"""
__name__ = 'ytick'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# x in axes coords, y in data coords
ax = self.axes
self.tick1line.set(
data=([0], [0]), transform=ax.get_yaxis_transform("tick1"))
self.tick2line.set(
data=([1], [0]), transform=ax.get_yaxis_transform("tick2"))
self.gridline.set(
data=([0, 1], [0, 0]), transform=ax.get_yaxis_transform("grid"))
# the y loc is 3 points below the min of y axis
trans, va, ha = self._get_text1_transform()
self.label1.set(
x=0, y=0,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
trans, va, ha = self._get_text2_transform()
self.label2.set(
x=1, y=0,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
def _get_text1_transform(self):
return self.axes.get_yaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_yaxis_text2_transform(self._pad)
def _apply_tickdir(self, tickdir):
# docstring inherited
super()._apply_tickdir(tickdir)
mark1, mark2 = {
'out': (mlines.TICKLEFT, mlines.TICKRIGHT),
'in': (mlines.TICKRIGHT, mlines.TICKLEFT),
'inout': ('_', '_'),
}[self._tickdir]
self.tick1line.set_marker(mark1)
self.tick2line.set_marker(mark2)
def update_position(self, loc):
"""Set the location of tick in data coords with scalar *loc*."""
self.tick1line.set_ydata((loc,))
self.tick2line.set_ydata((loc,))
self.gridline.set_ydata((loc,))
self.label1.set_y(loc)
self.label2.set_y(loc)
self._loc = loc
self.stale = True
def get_view_interval(self):
# docstring inherited
return self.axes.viewLim.intervaly
| YTick |
python | Textualize__textual | src/textual/_widget_navigation.py | {
"start": 464,
"end": 5640
} | class ____(Protocol):
"""Non-widgets that have an enabled/disabled status."""
disabled: bool
Direction: TypeAlias = Literal[-1, 1]
"""Valid values to determine navigation direction.
In a vertical setting, 1 points down and -1 points up.
In a horizontal setting, 1 points right and -1 points left.
"""
def get_directed_distance(
    index: int, start: int, direction: Direction, wrap_at: int
) -> int:
    """Steps needed to reach *index* from *start*, moving in *direction*.

    Positions wrap modulo *wrap_at*, so this is the smallest non-negative
    ``d`` with ``(start + d * direction) % wrap_at == index``.

    For example, with 10 positions: going forward (direction=1) from 8 to
    2 takes 4 steps (8→9→0→1→2); going backward (direction=-1) from 8 to
    2 takes 6 steps.

    Args:
        index: The index that we want to reach.
        start: The starting point to consider when computing the distance.
        direction: The direction in which we want to compute the distance.
        wrap_at: Controls at what point wrapping around takes place.

    Returns:
        The computed distance.
    """
    offset = index - start
    # Multiplying by direction flips the sign for backward travel; Python's
    # % always returns a value in [0, wrap_at) for positive wrap_at.
    return (offset * direction) % wrap_at
def find_first_enabled(
    candidates: Sequence[Disableable],
) -> int | None:
    """Find the first enabled candidate in a sequence of possibly-disabled objects.

    Args:
        candidates: The sequence of candidates to consider.

    Returns:
        The index of the first candidate whose ``disabled`` flag is false,
        or ``None`` if every candidate is disabled (or the sequence is empty).
    """
    for position, candidate in enumerate(candidates):
        if not candidate.disabled:
            return position
    return None
def find_last_enabled(candidates: Sequence[Disableable]) -> int | None:
    """Find the last enabled candidate in a sequence of possibly-disabled objects.

    Args:
        candidates: The sequence of candidates to consider.

    Returns:
        The index of the last candidate whose ``disabled`` flag is false,
        or ``None`` if every candidate is disabled (or the sequence is empty).
    """
    # Walk the indices from the tail toward the head.
    for position in range(len(candidates) - 1, -1, -1):
        if not candidates[position].disabled:
            return position
    return None
def find_next_enabled(
    candidates: Sequence[Disableable],
    anchor: int | None,
    direction: Direction,
) -> int | None:
    """Find the next enabled object if we're currently at the given anchor.

    "Next" follows *direction* and wraps around both ends of the sequence.

    Args:
        candidates: The sequence of object candidates to consider.
        anchor: The point of the sequence from which we'll start looking for the next
            enabled object.
        direction: The direction in which to traverse the candidates when looking for
            the next enabled candidate.

    Returns:
        The index of the next enabled object; the anchor itself when no other
        candidate is enabled; ``None`` when there is no anchor and no candidates.
    """
    if anchor is None:
        # No current position: fall back to the first/last enabled candidate.
        if not candidates:
            return None
        return (
            find_first_enabled(candidates)
            if direction == 1
            else find_last_enabled(candidates)
        )
    for index, candidate in loop_from_index(candidates, anchor, direction, wrap=True):
        if not candidate.disabled:
            return index
    return anchor
def find_next_enabled_no_wrap(
    candidates: Sequence[Disableable],
    anchor: int | None,
    direction: Direction,
    with_anchor: bool = False,
) -> int | None:
    """Find the next enabled object starting from the given anchor (without wrapping).

    The meaning of "next" depends on *direction*; the search stops at the
    end of the sequence instead of wrapping around.

    Args:
        candidates: The sequence of object candidates to consider.
        anchor: The point of the sequence from which we'll start looking for the next
            enabled object.
        direction: The direction in which to traverse the candidates when looking for
            the next enabled candidate.
        with_anchor: Whether the anchor position itself is a valid result.

    Returns:
        The index of the next enabled object, or ``None`` if none is available.
    """
    if anchor is None:
        if not candidates:
            return None
        return (
            find_first_enabled(candidates)
            if direction == 1
            else find_last_enabled(candidates)
        )
    start = anchor if with_anchor else anchor + direction
    # Pair each remaining candidate with the index the original counter
    # would have produced (start, start+direction, ...).
    if direction == 1:
        pool = candidates[start:]
        positions = range(start, start + len(pool))
    else:
        pool = list(reversed(candidates[: start + 1]))
        positions = range(start, start - len(pool), -1)
    for position, candidate in zip(positions, pool):
        if not candidate.disabled:
            return position
    return None
| Disableable |
python | google__jax | tests/scipy_optimize_test.py | {
"start": 1484,
"end": 4184
} | class ____(jtu.JaxTestCase):
@jtu.sample_product(
maxiter=[None],
func_and_init=[(rosenbrock, np.zeros(2, dtype='float32')),
(himmelblau, np.ones(2, dtype='float32')),
(matyas, np.ones(2) * 6.),
(eggholder, np.ones(2) * 100.)],
)
def test_minimize(self, maxiter, func_and_init):
# Note, cannot compare step for step with scipy BFGS because our line search is _slightly_ different.
func, x0 = func_and_init
@jit
def min_op(x0):
result = jax.scipy.optimize.minimize(
func(jnp),
x0,
method='BFGS',
options=dict(maxiter=maxiter, gtol=1e-6),
)
return result.x
jax_res = min_op(x0)
# Newer scipy versions perform poorly in float32. See
# https://github.com/scipy/scipy/issues/19024.
x0_f64 = x0.astype('float64')
scipy_res = scipy.optimize.minimize(func(np), x0_f64, method='BFGS').x
self.assertAllClose(scipy_res, jax_res, atol=2e-4, rtol=2e-4,
check_dtypes=False)
def test_fixes4594(self):
n = 2
A = jnp.eye(n) * 1e4
def f(x):
return jnp.mean((A @ x) ** 2)
results = jax.scipy.optimize.minimize(f, jnp.ones(n), method='BFGS')
self.assertAllClose(results.x, jnp.zeros(n), atol=1e-6, rtol=1e-6)
@jtu.skip_on_flag('jax_enable_x64', False)
def test_zakharov(self):
def zakharov_fn(x):
ii = jnp.arange(1, len(x) + 1, step=1, dtype=x.dtype)
answer = zakharovFromIndices(x=x, ii=ii)
return answer
x0 = jnp.array([600.0, 700.0, 200.0, 100.0, 90.0, 1e4])
eval_func = jax.jit(zakharov_fn)
jax_res = jax.scipy.optimize.minimize(fun=eval_func, x0=x0, method='BFGS')
self.assertLess(jax_res.fun, 1e-6)
@jtu.ignore_warning(category=RuntimeWarning, message='divide by zero')
def test_minimize_bad_initial_values(self):
# This test runs deliberately "bad" initial values to test that handling
# of failed line search, etc. is the same across implementations
initial_value = jnp.array([92, 0.001])
opt_fn = himmelblau(jnp)
jax_res = jax.scipy.optimize.minimize(
fun=opt_fn,
x0=initial_value,
method='BFGS',
).x
scipy_res = scipy.optimize.minimize(
fun=opt_fn,
jac=jax.grad(opt_fn),
method='BFGS',
x0=initial_value
).x
self.assertAllClose(scipy_res, jax_res, atol=2e-5, check_dtypes=False)
def test_args_must_be_tuple(self):
A = jnp.eye(2) * 1e4
def f(x):
return jnp.mean((A @ x) ** 2)
with self.assertRaisesRegex(TypeError, "args .* must be a tuple"):
jax.scipy.optimize.minimize(f, jnp.ones(2), args=45, method='BFGS')
| TestBFGS |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table39.py | {
"start": 306,
"end": 1214
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table39.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write(1, 0, 1)
worksheet.write(2, 0, 2)
worksheet.write(3, 0, 3)
worksheet.write(4, 0, 4)
worksheet.write(5, 0, 5)
worksheet.write(1, 1, 10)
worksheet.write(2, 1, 15)
worksheet.write(3, 1, 20)
worksheet.write(4, 1, 10)
worksheet.write(5, 1, 15)
worksheet.set_column("A:B", 10.288)
worksheet.add_table("A1:B6", {"description": "Alt text"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | numpy__numpy | numpy/lib/_index_tricks_impl.py | {
"start": 6989,
"end": 8755
} | class ____(nd_grid):
"""
An instance which returns a dense multi-dimensional "meshgrid".
An instance which returns a dense (or fleshed out) mesh-grid
when indexed, so that each returned argument has the same shape.
The dimensions and number of the output arrays are equal to the
number of indexing dimensions. If the step length is not a complex
number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
-------
mesh-grid : ndarray
A single array, containing a set of `ndarray`\\ s all of the same
dimensions. stacked along the first axis.
See Also
--------
ogrid : like `mgrid` but returns open (not fleshed out) mesh grids
meshgrid: return coordinate matrices from coordinate vectors
r_ : array concatenator
:ref:`how-to-partition`
Examples
--------
>>> import numpy as np
>>> np.mgrid[0:5, 0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> np.mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> np.mgrid[0:4].shape
(4,)
>>> np.mgrid[0:4, 0:5].shape
(2, 4, 5)
>>> np.mgrid[0:4, 0:5, 0:6].shape
(3, 4, 5, 6)
"""
__slots__ = ()
def __init__(self):
super().__init__(sparse=False)
mgrid = MGridClass()
| MGridClass |
python | fluentpython__example-code-2e | 13-protocol-abc/frenchdeck2.py | {
"start": 86,
"end": 778
} | class ____(abc.MutableSequence):
    # Card ranks '2'..'10' plus face cards; suits in fixed display order.
    ranks = [str(n) for n in range(2, 11)] + list('JQKA')
    suits = 'spades diamonds clubs hearts'.split()

    def __init__(self):
        # Build all 52 cards, grouped by suit then rank.
        self._cards = [Card(rank, suit) for suit in self.suits
                                        for rank in self.ranks]

    def __len__(self):
        return len(self._cards)

    def __getitem__(self, position):
        return self._cards[position]

    def __setitem__(self, position, value):  # <1> required by MutableSequence (enables shuffle etc.)
        self._cards[position] = value

    def __delitem__(self, position):  # <2> required by the MutableSequence ABC
        del self._cards[position]

    def insert(self, position, value):  # <3> third abstract method of MutableSequence
        self._cards.insert(position, value)
| FrenchDeck2 |
python | numpy__numpy | numpy/_core/tests/test_dtype.py | {
"start": 40071,
"end": 47623
} | class ____:
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(str(dt),
"[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])]")
# If the sticky aligned flag is set to True, it makes the
# str() function use a dict representation with an 'aligned' flag
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))],
(3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])],
align=True)
assert_equal(str(dt),
"{'names': ['top', 'bottom'],"
" 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"[('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))]],"
" 'offsets': [0, 76800],"
" 'itemsize': 80000,"
" 'aligned': True}")
with np.printoptions(legacy='1.21'):
assert_equal(str(dt),
"{'names':['top','bottom'], "
"'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)),"
"[('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))]], "
"'offsets':[0,76800], "
"'itemsize':80000, "
"'aligned':True}")
assert_equal(np.dtype(eval(str(dt))), dt)
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"[(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')]")
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names': ['rgba', 'r', 'g', 'b'],"
" 'formats': ['<u4', 'u1', 'u1', 'u1'],"
" 'offsets': [0, 0, 1, 2],"
" 'titles': ['Color', 'Red pixel', "
"'Green pixel', 'Blue pixel'],"
" 'itemsize': 4}")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names': ['r', 'b'],"
" 'formats': ['u1', 'u1'],"
" 'offsets': [0, 2],"
" 'titles': ['Red pixel', 'Blue pixel'],"
" 'itemsize': 3}")
dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
assert_equal(str(dt),
"[('a', '<m8[D]'), ('b', '<M8[us]')]")
def test_repr_structured(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(repr(dt),
"dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])])")
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
align=True)
assert_equal(repr(dt),
"dtype([(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')], align=True)")
def test_repr_structured_not_packed(self):
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']}, align=True)
assert_equal(repr(dt),
"dtype({'names': ['rgba', 'r', 'g', 'b'],"
" 'formats': ['<u4', 'u1', 'u1', 'u1'],"
" 'offsets': [0, 0, 1, 2],"
" 'titles': ['Color', 'Red pixel', "
"'Green pixel', 'Blue pixel'],"
" 'itemsize': 4}, align=True)")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel'],
'itemsize': 4})
assert_equal(repr(dt),
"dtype({'names': ['r', 'b'], "
"'formats': ['u1', 'u1'], "
"'offsets': [0, 2], "
"'titles': ['Red pixel', 'Blue pixel'], "
"'itemsize': 4})")
def test_repr_structured_datetime(self):
dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
assert_equal(repr(dt),
"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
def test_repr_str_subarray(self):
dt = np.dtype(('<i2', (1,)))
assert_equal(repr(dt), "dtype(('<i2', (1,)))")
assert_equal(str(dt), "('<i2', (1,))")
def test_base_dtype_with_object_type(self):
# Issue gh-2798, should not error.
np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
def test_empty_string_to_object(self):
# Pull request #4722
np.array(["", ""]).astype(object)
def test_void_subclass_unsized(self):
dt = np.dtype(np.record)
assert_equal(repr(dt), "dtype('V')")
assert_equal(str(dt), '|V0')
assert_equal(dt.name, 'record')
def test_void_subclass_sized(self):
dt = np.dtype((np.record, 2))
assert_equal(repr(dt), "dtype('V2')")
assert_equal(str(dt), '|V2')
assert_equal(dt.name, 'record16')
def test_void_subclass_fields(self):
dt = np.dtype((np.record, [('a', '<u2')]))
assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
assert_equal(dt.name, 'record16')
def test_custom_dtype_str(self):
dt = np.dtypes.StringDType()
assert_equal(dt.str, "StringDType()")
| TestString |
python | scrapy__scrapy | tests/test_downloadermiddleware_retry.py | {
"start": 8511,
"end": 21511
} | class ____:
def get_spider(self, settings=None):
crawler = get_crawler(Spider, settings or {})
return crawler._create_spider("foo")
def test_basic_usage(self):
request = Request("https://example.com")
spider = self.get_spider()
with LogCapture() as log:
new_request = get_retry_request(
request,
spider=spider,
)
assert isinstance(new_request, Request)
assert new_request != request
assert new_request.dont_filter
expected_retry_times = 1
assert new_request.meta["retry_times"] == expected_retry_times
assert new_request.priority == -1
expected_reason = "unspecified"
for stat in ("retry/count", f"retry/reason_count/{expected_reason}"):
assert spider.crawler.stats.get_value(stat) == 1
log.check_present(
(
"scrapy.downloadermiddlewares.retry",
"DEBUG",
f"Retrying {request} (failed {expected_retry_times} times): "
f"{expected_reason}",
)
)
def test_max_retries_reached(self):
request = Request("https://example.com")
spider = self.get_spider()
max_retry_times = 0
with LogCapture() as log:
new_request = get_retry_request(
request,
spider=spider,
max_retry_times=max_retry_times,
)
assert new_request is None
assert spider.crawler.stats.get_value("retry/max_reached") == 1
failure_count = max_retry_times + 1
expected_reason = "unspecified"
log.check_present(
(
"scrapy.downloadermiddlewares.retry",
"ERROR",
f"Gave up retrying {request} (failed {failure_count} times): "
f"{expected_reason}",
)
)
def test_one_retry(self):
request = Request("https://example.com")
spider = self.get_spider()
with LogCapture() as log:
new_request = get_retry_request(
request,
spider=spider,
max_retry_times=1,
)
assert isinstance(new_request, Request)
assert new_request != request
assert new_request.dont_filter
expected_retry_times = 1
assert new_request.meta["retry_times"] == expected_retry_times
assert new_request.priority == -1
expected_reason = "unspecified"
for stat in ("retry/count", f"retry/reason_count/{expected_reason}"):
assert spider.crawler.stats.get_value(stat) == 1
log.check_present(
(
"scrapy.downloadermiddlewares.retry",
"DEBUG",
f"Retrying {request} (failed {expected_retry_times} times): "
f"{expected_reason}",
)
)
def test_two_retries(self):
spider = self.get_spider()
request = Request("https://example.com")
new_request = request
max_retry_times = 2
for index in range(max_retry_times):
with LogCapture() as log:
new_request = get_retry_request(
new_request,
spider=spider,
max_retry_times=max_retry_times,
)
assert isinstance(new_request, Request)
assert new_request != request
assert new_request.dont_filter
expected_retry_times = index + 1
assert new_request.meta["retry_times"] == expected_retry_times
assert new_request.priority == -expected_retry_times
expected_reason = "unspecified"
for stat in ("retry/count", f"retry/reason_count/{expected_reason}"):
value = spider.crawler.stats.get_value(stat)
assert value == expected_retry_times
log.check_present(
(
"scrapy.downloadermiddlewares.retry",
"DEBUG",
f"Retrying {request} (failed {expected_retry_times} times): "
f"{expected_reason}",
)
)
with LogCapture() as log:
new_request = get_retry_request(
new_request,
spider=spider,
max_retry_times=max_retry_times,
)
assert new_request is None
assert spider.crawler.stats.get_value("retry/max_reached") == 1
failure_count = max_retry_times + 1
expected_reason = "unspecified"
log.check_present(
(
"scrapy.downloadermiddlewares.retry",
"ERROR",
f"Gave up retrying {request} (failed {failure_count} times): "
f"{expected_reason}",
)
)
def test_no_spider(self):
request = Request("https://example.com")
with pytest.raises(TypeError):
get_retry_request(request) # pylint: disable=missing-kwoa
def test_max_retry_times_setting(self):
max_retry_times = 0
spider = self.get_spider({"RETRY_TIMES": max_retry_times})
request = Request("https://example.com")
new_request = get_retry_request(
request,
spider=spider,
)
assert new_request is None
def test_max_retry_times_meta(self):
max_retry_times = 0
spider = self.get_spider({"RETRY_TIMES": max_retry_times + 1})
meta = {"max_retry_times": max_retry_times}
request = Request("https://example.com", meta=meta)
new_request = get_retry_request(
request,
spider=spider,
)
assert new_request is None
def test_max_retry_times_argument(self):
max_retry_times = 0
spider = self.get_spider({"RETRY_TIMES": max_retry_times + 1})
meta = {"max_retry_times": max_retry_times + 1}
request = Request("https://example.com", meta=meta)
new_request = get_retry_request(
request,
spider=spider,
max_retry_times=max_retry_times,
)
assert new_request is None
def test_priority_adjust_setting(self):
priority_adjust = 1
spider = self.get_spider({"RETRY_PRIORITY_ADJUST": priority_adjust})
request = Request("https://example.com")
new_request = get_retry_request(
request,
spider=spider,
)
assert new_request.priority == priority_adjust
def test_priority_adjust_argument(self):
priority_adjust = 1
spider = self.get_spider({"RETRY_PRIORITY_ADJUST": priority_adjust + 1})
request = Request("https://example.com")
new_request = get_retry_request(
request,
spider=spider,
priority_adjust=priority_adjust,
)
assert new_request.priority == priority_adjust
def test_log_extra_retry_success(self):
request = Request("https://example.com")
spider = self.get_spider()
with LogCapture(attributes=("spider",)) as log:
get_retry_request(
request,
spider=spider,
)
log.check_present(spider)
def test_log_extra_retries_exceeded(self):
request = Request("https://example.com")
spider = self.get_spider()
with LogCapture(attributes=("spider",)) as log:
get_retry_request(
request,
spider=spider,
max_retry_times=0,
)
log.check_present(spider)
def test_reason_string(self):
request = Request("https://example.com")
spider = self.get_spider()
expected_reason = "because"
with LogCapture() as log:
get_retry_request(
request,
spider=spider,
reason=expected_reason,
)
expected_retry_times = 1
for stat in ("retry/count", f"retry/reason_count/{expected_reason}"):
assert spider.crawler.stats.get_value(stat) == 1
log.check_present(
(
"scrapy.downloadermiddlewares.retry",
"DEBUG",
f"Retrying {request} (failed {expected_retry_times} times): "
f"{expected_reason}",
)
)
def test_reason_builtin_exception(self):
request = Request("https://example.com")
spider = self.get_spider()
expected_reason = NotImplementedError()
expected_reason_string = "builtins.NotImplementedError"
with LogCapture() as log:
get_retry_request(
request,
spider=spider,
reason=expected_reason,
)
expected_retry_times = 1
stat = spider.crawler.stats.get_value(
f"retry/reason_count/{expected_reason_string}"
)
assert stat == 1
log.check_present(
(
"scrapy.downloadermiddlewares.retry",
"DEBUG",
f"Retrying {request} (failed {expected_retry_times} times): "
f"{expected_reason}",
)
)
def test_reason_builtin_exception_class(self):
request = Request("https://example.com")
spider = self.get_spider()
expected_reason = NotImplementedError
expected_reason_string = "builtins.NotImplementedError"
with LogCapture() as log:
get_retry_request(
request,
spider=spider,
reason=expected_reason,
)
expected_retry_times = 1
stat = spider.crawler.stats.get_value(
f"retry/reason_count/{expected_reason_string}"
)
assert stat == 1
log.check_present(
(
"scrapy.downloadermiddlewares.retry",
"DEBUG",
f"Retrying {request} (failed {expected_retry_times} times): "
f"{expected_reason}",
)
)
def test_reason_custom_exception(self):
request = Request("https://example.com")
spider = self.get_spider()
expected_reason = IgnoreRequest()
expected_reason_string = "scrapy.exceptions.IgnoreRequest"
with LogCapture() as log:
get_retry_request(
request,
spider=spider,
reason=expected_reason,
)
expected_retry_times = 1
stat = spider.crawler.stats.get_value(
f"retry/reason_count/{expected_reason_string}"
)
assert stat == 1
log.check_present(
(
"scrapy.downloadermiddlewares.retry",
"DEBUG",
f"Retrying {request} (failed {expected_retry_times} times): "
f"{expected_reason}",
)
)
def test_reason_custom_exception_class(self):
request = Request("https://example.com")
spider = self.get_spider()
expected_reason = IgnoreRequest
expected_reason_string = "scrapy.exceptions.IgnoreRequest"
with LogCapture() as log:
get_retry_request(
request,
spider=spider,
reason=expected_reason,
)
expected_retry_times = 1
stat = spider.crawler.stats.get_value(
f"retry/reason_count/{expected_reason_string}"
)
assert stat == 1
log.check_present(
(
"scrapy.downloadermiddlewares.retry",
"DEBUG",
f"Retrying {request} (failed {expected_retry_times} times): "
f"{expected_reason}",
)
)
def test_custom_logger(self):
logger = logging.getLogger("custom-logger")
request = Request("https://example.com")
spider = self.get_spider()
expected_reason = "because"
with LogCapture() as log:
get_retry_request(
request,
spider=spider,
reason=expected_reason,
logger=logger,
)
log.check_present(
(
"custom-logger",
"DEBUG",
f"Retrying {request} (failed 1 times): {expected_reason}",
)
)
def test_custom_stats_key(self):
request = Request("https://example.com")
spider = self.get_spider()
expected_reason = "because"
stats_key = "custom_retry"
get_retry_request(
request,
spider=spider,
reason=expected_reason,
stats_base_key=stats_key,
)
for stat in (
f"{stats_key}/count",
f"{stats_key}/reason_count/{expected_reason}",
):
assert spider.crawler.stats.get_value(stat) == 1
| TestGetRetryRequest |
python | kamyu104__LeetCode-Solutions | Python/sum-of-square-numbers.py | {
"start": 56,
"end": 347
} | class ____(object):
def judgeSquareSum(self, c):
"""
:type c: int
:rtype: bool
"""
for a in xrange(int(math.sqrt(c))+1):
b = int(math.sqrt(c-a**2))
if a**2 + b**2 == c:
return True
return False
| Solution |
python | langchain-ai__langchain | libs/core/langchain_core/tools/base.py | {
"start": 52107,
"end": 52537
} | class ____(BaseModel, ABC):
"""Base class for toolkits containing related tools.
A toolkit is a collection of related tools that can be used together
to accomplish a specific task or work with a particular system.
"""
@abstractmethod
def get_tools(self) -> list[BaseTool]:
"""Get all tools in the toolkit.
Returns:
List of tools contained in this toolkit.
"""
| BaseToolkit |
python | PyCQA__pylint | pylint/extensions/no_self_use.py | {
"start": 627,
"end": 3694
} | class ____(BaseChecker):
name = "no_self_use"
msgs = {
"R6301": (
"Method could be a function",
"no-self-use",
"Used when a method doesn't use its bound instance, and so could "
"be written as a function.",
{"old_names": [("R0201", "old-no-self-use")]},
),
}
def __init__(self, linter: PyLinter) -> None:
super().__init__(linter)
self._first_attrs: list[str | None] = []
self._meth_could_be_func: bool | None = None
def visit_name(self, node: nodes.Name) -> None:
"""Check if the name handle an access to a class member
if so, register it.
"""
if self._first_attrs and (
node.name == self._first_attrs[-1] or not self._first_attrs[-1]
):
self._meth_could_be_func = False
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
if not node.is_method():
return
self._meth_could_be_func = True
self._check_first_arg_for_type(node)
visit_asyncfunctiondef = visit_functiondef
def _check_first_arg_for_type(self, node: nodes.FunctionDef) -> None:
"""Check the name of first argument."""
# pylint: disable=duplicate-code
if node.args.posonlyargs:
first_arg = node.args.posonlyargs[0].name
elif node.args.args:
first_arg = node.argnames()[0]
else:
first_arg = None
self._first_attrs.append(first_arg)
# static method
if node.type == "staticmethod":
self._first_attrs[-1] = None
def leave_functiondef(self, node: nodes.FunctionDef) -> None:
"""On method node, check if this method couldn't be a function.
ignore class, static and abstract methods, initializer,
methods overridden from a parent class.
"""
if node.is_method():
first = self._first_attrs.pop()
if first is None:
return
class_node = node.parent.frame()
if (
self._meth_could_be_func
and node.type == "method"
and node.name not in PYMETHODS
and not (
node.is_abstract()
or overrides_a_method(class_node, node.name)
or decorated_with_property(node)
or _has_bare_super_call(node)
or is_protocol_class(class_node)
or is_overload_stub(node)
)
):
self.add_message("no-self-use", node=node, confidence=INFERENCE)
leave_asyncfunctiondef = leave_functiondef
def _has_bare_super_call(fundef_node: nodes.FunctionDef) -> bool:
for call in fundef_node.nodes_of_class(nodes.Call):
match call:
case nodes.Call(func=nodes.Name(name="super"), args=[]):
return True
return False
def register(linter: PyLinter) -> None:
linter.register_checker(NoSelfUseChecker(linter))
| NoSelfUseChecker |
python | catalyst-team__catalyst | tests/pipelines/test_multihead_classification.py | {
"start": 956,
"end": 10390
} | class ____(nn.Module):
def __init__(self, in_features: int, out_features1: int, out_features2: int):
super().__init__()
self.shared = nn.Linear(in_features, 128)
self.head1 = nn.Linear(128, out_features1)
self.head2 = nn.Linear(128, out_features2)
def forward(self, x):
x = self.shared(x)
y1 = self.head1(x)
y2 = self.head2(x)
return y1, y2
def train_experiment(engine=None):
with TemporaryDirectory() as logdir:
# sample data
num_samples, num_features, num_classes1, num_classes2 = int(1e4), int(1e1), 4, 10
X = torch.rand(num_samples, num_features)
y1 = (torch.rand(num_samples) * num_classes1).to(torch.int64)
y2 = (torch.rand(num_samples) * num_classes2).to(torch.int64)
# pytorch loaders
dataset = TensorDataset(X, y1, y2)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = CustomModule(num_features, num_classes1, num_classes2)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [2])
callbacks = [
dl.CriterionCallback(
metric_key="loss1", input_key="logits1", target_key="targets1"
),
dl.CriterionCallback(
metric_key="loss2", input_key="logits2", target_key="targets2"
),
dl.MetricAggregationCallback(
metric_key="loss", metrics=["loss1", "loss2"], mode="mean"
),
dl.BackwardCallback(metric_key="loss"),
dl.OptimizerCallback(metric_key="loss"),
dl.SchedulerCallback(),
dl.AccuracyCallback(
input_key="logits1",
target_key="targets1",
num_classes=num_classes1,
prefix="one_",
),
dl.AccuracyCallback(
input_key="logits2",
target_key="targets2",
num_classes=num_classes2,
prefix="two_",
),
dl.CheckpointCallback(
"./logs/one",
loader_key="valid",
metric_key="one_accuracy01",
minimize=False,
topk=1,
),
dl.CheckpointCallback(
"./logs/two",
loader_key="valid",
metric_key="two_accuracy03",
minimize=False,
topk=3,
),
]
if SETTINGS.ml_required:
# catalyst[ml] required
callbacks.append(
dl.ConfusionMatrixCallback(
input_key="logits1",
target_key="targets1",
num_classes=num_classes1,
prefix="one_cm",
)
)
# catalyst[ml] required
callbacks.append(
dl.ConfusionMatrixCallback(
input_key="logits2",
target_key="targets2",
num_classes=num_classes2,
prefix="two_cm",
)
)
# model training
runner = CustomRunner()
runner.train(
engine=engine,
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
num_epochs=1,
verbose=False,
callbacks=callbacks,
loggers={
"console": dl.ConsoleLogger(),
"tb": dl.TensorboardLogger("./logs/tb"),
},
)
def train_experiment_from_configs(*auxiliary_configs: str):
configs_dir = Path(__file__).parent / "configs"
main_config = f"{Path(__file__).stem}.yml"
d = utils.load_config(str(configs_dir / main_config), ordered=True)["shared"]
X = torch.rand(d["num_samples"], d["num_features"])
y1 = (torch.rand(d["num_samples"]) * d["num_classes1"]).to(torch.int64)
y2 = (torch.rand(d["num_samples"]) * d["num_classes2"]).to(torch.int64)
torch.save(X, Path("tests") / "X.pt")
torch.save(y1, Path("tests") / "y1.pt")
torch.save(y2, Path("tests") / "y2.pt")
run_experiment_from_configs(configs_dir, main_config, *auxiliary_configs)
# Device
@mark.skipif(not IS_CPU_REQUIRED, reason="CUDA device is not available")
def test_run_on_cpu():
train_experiment(dl.CPUEngine())
@mark.skipif(
not IS_CONFIGS_REQUIRED or not IS_CPU_REQUIRED, reason="CPU device is not available"
)
def test_config_run_on_cpu():
train_experiment_from_configs("engine_cpu.yml")
@mark.skipif(
not all([IS_GPU_REQUIRED, IS_CUDA_AVAILABLE]), reason="CUDA device is not available"
)
def test_run_on_torch_cuda0():
train_experiment(dl.GPUEngine())
@mark.skipif(
not IS_CONFIGS_REQUIRED or not all([IS_GPU_REQUIRED, IS_CUDA_AVAILABLE]),
reason="CUDA device is not available",
)
def test_config_run_on_torch_cuda0():
train_experiment_from_configs("engine_gpu.yml")
@mark.skipif(
not all([IS_GPU_AMP_REQUIRED, IS_CUDA_AVAILABLE, SETTINGS.amp_required]),
reason="No CUDA or AMP found",
)
def test_run_on_amp():
train_experiment(dl.GPUEngine(fp16=True))
@mark.skipif(
not IS_CONFIGS_REQUIRED
or not all([IS_GPU_AMP_REQUIRED, IS_CUDA_AVAILABLE, SETTINGS.amp_required]),
reason="No CUDA or AMP found",
)
def test_config_run_on_amp():
train_experiment_from_configs("engine_gpu_amp.yml")
# DP
@mark.skipif(
not all([IS_DP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
reason="No CUDA>=2 found",
)
def test_run_on_torch_dp():
train_experiment(dl.DataParallelEngine())
@mark.skipif(
not IS_CONFIGS_REQUIRED
or not all([IS_DP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
reason="No CUDA>=2 found",
)
def test_config_run_on_torch_dp():
train_experiment_from_configs("engine_dp.yml")
@mark.skipif(
not all(
[
IS_DP_AMP_REQUIRED,
IS_CUDA_AVAILABLE,
NUM_CUDA_DEVICES >= 2,
SETTINGS.amp_required,
]
),
reason="No CUDA>=2 or AMP found",
)
def test_run_on_amp_dp():
train_experiment(dl.DataParallelEngine(fp16=True))
@mark.skipif(
not IS_CONFIGS_REQUIRED
or not all(
[
IS_DP_AMP_REQUIRED,
IS_CUDA_AVAILABLE,
NUM_CUDA_DEVICES >= 2,
SETTINGS.amp_required,
]
),
reason="No CUDA>=2 or AMP found",
)
def test_config_run_on_amp_dp():
train_experiment_from_configs("engine_dp_amp.yml")
# DDP
@mark.skipif(
not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
reason="No CUDA>=2 found",
)
def test_run_on_torch_ddp():
train_experiment(dl.DistributedDataParallelEngine())
@mark.skipif(
not IS_CONFIGS_REQUIRED
or not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
reason="No CUDA>=2 found",
)
def test_config_run_on_torch_ddp():
train_experiment_from_configs("engine_ddp.yml")
@mark.skipif(
not all(
[
IS_DDP_AMP_REQUIRED,
IS_CUDA_AVAILABLE,
NUM_CUDA_DEVICES >= 2,
SETTINGS.amp_required,
]
),
reason="No CUDA>=2 or AMP found",
)
def test_run_on_amp_ddp():
train_experiment(dl.DistributedDataParallelEngine(fp16=True))
@mark.skipif(
not IS_CONFIGS_REQUIRED
or not all(
[
IS_DDP_AMP_REQUIRED,
IS_CUDA_AVAILABLE,
NUM_CUDA_DEVICES >= 2,
SETTINGS.amp_required,
]
),
reason="No CUDA>=2 or AMP found",
)
def test_config_run_on_amp_ddp():
train_experiment_from_configs("engine_ddp_amp.yml")
def _train_fn(local_rank, world_size):
process_group_kwargs = {
"backend": "nccl",
"world_size": world_size,
}
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["RANK"] = str(local_rank)
os.environ["LOCAL_RANK"] = str(local_rank)
dist.init_process_group(**process_group_kwargs)
train_experiment(dl.Engine())
dist.destroy_process_group()
@mark.skipif(
not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
reason="No CUDA>=2 found",
)
def test_run_on_torch_ddp_spawn():
world_size: int = torch.cuda.device_count()
mp.spawn(
_train_fn,
args=(world_size,),
nprocs=world_size,
join=True,
)
def _train_fn_amp(local_rank, world_size):
process_group_kwargs = {
"backend": "nccl",
"world_size": world_size,
}
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["RANK"] = str(local_rank)
os.environ["LOCAL_RANK"] = str(local_rank)
dist.init_process_group(**process_group_kwargs)
train_experiment(dl.Engine(fp16=True))
dist.destroy_process_group()
@mark.skipif(
not all(
[
IS_DDP_AMP_REQUIRED,
IS_CUDA_AVAILABLE,
NUM_CUDA_DEVICES >= 2,
SETTINGS.amp_required,
]
),
reason="No CUDA>=2 or AMP found",
)
def test_run_on_torch_ddp_amp_spawn():
world_size: int = torch.cuda.device_count()
mp.spawn(
_train_fn_amp,
args=(world_size,),
nprocs=world_size,
join=True,
)
| CustomModule |
python | weaviate__weaviate-python-client | weaviate/collections/batch/base.py | {
"start": 5327,
"end": 5630
} | class ____:
results: BatchResult = field(default_factory=BatchResult)
failed_objects: List[ErrorObject] = field(default_factory=list)
failed_references: List[ErrorReference] = field(default_factory=list)
imported_shards: Set[Shard] = field(default_factory=set)
@dataclass
| _BatchDataWrapper |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 4348,
"end": 4528
} | class ____(ShowFieldTypeAndContent, Model2A):
objects = PolymorphicManager()
my_objects = MyManager()
field4 = models.CharField(max_length=30)
| ModelWithMyManagerNoDefault |
python | apache__thrift | lib/py/src/protocol/TCompactProtocol.py | {
"start": 2209,
"end": 3006
} | class ____(object):
STOP = 0x00
TRUE = 0x01
FALSE = 0x02
BYTE = 0x03
I16 = 0x04
I32 = 0x05
I64 = 0x06
DOUBLE = 0x07
BINARY = 0x08
LIST = 0x09
SET = 0x0A
MAP = 0x0B
STRUCT = 0x0C
CTYPES = {
TType.STOP: CompactType.STOP,
TType.BOOL: CompactType.TRUE, # used for collection
TType.BYTE: CompactType.BYTE,
TType.I16: CompactType.I16,
TType.I32: CompactType.I32,
TType.I64: CompactType.I64,
TType.DOUBLE: CompactType.DOUBLE,
TType.STRING: CompactType.BINARY,
TType.STRUCT: CompactType.STRUCT,
TType.LIST: CompactType.LIST,
TType.SET: CompactType.SET,
TType.MAP: CompactType.MAP,
}
TTYPES = {}
for k, v in CTYPES.items():
TTYPES[v] = k
TTYPES[CompactType.FALSE] = TType.BOOL
del k
del v
| CompactType |
python | getsentry__sentry | src/sentry/api/endpoints/organization_access_request_details.py | {
"start": 1446,
"end": 1569
} | class ____(serializers.Serializer):
isApproved = serializers.BooleanField()
@region_silo_endpoint
| AccessRequestSerializer |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 8683,
"end": 8856
} | class ____(_ConfigUpdateModel):
factor: Optional[int]
asyncEnabled: Optional[bool]
deletionStrategy: Optional[ReplicationDeletionStrategy]
| _ReplicationConfigUpdate |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_project_forms.py | {
"start": 1440,
"end": 5997
} | class ____(TestCase):
def test_import_repo_url(self):
"""Validate different type of repository URLs on importing a Project."""
common_urls = [
# Invalid
("./path/to/relative/folder", False),
("../../path/to/relative/folder", False),
("../../path/to/@/folder", False),
("/path/to/local/folder", False),
("/path/to/@/folder", False),
("file:///path/to/local/folder", False),
("file:///path/to/@/folder", False),
("github.com/humitos/foo", False),
("https://github.com/|/foo", False),
("git://github.com/&&/foo", False),
# Valid
("git://github.com/humitos/foo", True),
("http://github.com/humitos/foo", True),
("https://github.com/humitos/foo", True),
("http://gitlab.com/humitos/foo", True),
("http://bitbucket.com/humitos/foo", True),
("ftp://ftpserver.com/humitos/foo", True),
("ftps://ftpserver.com/humitos/foo", True),
("lp:zaraza", True),
]
public_urls = [
("git@github.com:humitos/foo", False),
("ssh://git@github.com/humitos/foo", False),
("ssh+git://github.com/humitos/foo", False),
("strangeuser@bitbucket.org:strangeuser/readthedocs.git", False),
("user@one-ssh.domain.com:22/_ssh/docs", False),
] + common_urls
private_urls = [
("git@github.com:humitos/foo", True),
("ssh://git@github.com/humitos/foo", True),
("ssh+git://github.com/humitos/foo", True),
("strangeuser@bitbucket.org:strangeuser/readthedocs.git", True),
("user@one-ssh.domain.com:22/_ssh/docs", True),
] + common_urls
with override_settings(ALLOW_PRIVATE_REPOS=False):
for url, valid in public_urls:
initial = {
"name": "foo",
"repo_type": "git",
"repo": url,
"language": "en",
}
form = ProjectBasicsForm(initial)
self.assertEqual(form.is_valid(), valid, msg=url)
with override_settings(ALLOW_PRIVATE_REPOS=True):
for url, valid in private_urls:
initial = {
"name": "foo",
"repo_type": "git",
"repo": url,
"language": "en",
}
form = ProjectBasicsForm(initial)
self.assertEqual(form.is_valid(), valid, msg=url)
def test_empty_slug(self):
initial = {
"name": "''",
"repo_type": "git",
"repo": "https://github.com/user/repository",
"language": "en",
}
form = ProjectBasicsForm(initial)
self.assertFalse(form.is_valid())
self.assertIn("name", form.errors)
@override_settings(ALLOW_PRIVATE_REPOS=False)
def test_length_of_tags(self):
project = get(Project)
data = {
"name": "Project",
"repo": "https://github.com/readthedocs/readthedocs.org/",
"repo_type": project.repo_type,
"default_version": LATEST,
"versioning_scheme": project.versioning_scheme,
"documentation_type": "sphinx",
"language": "en",
}
data["tags"] = "{},{}".format("a" * 50, "b" * 99)
form = UpdateProjectForm(data, instance=project)
self.assertTrue(form.is_valid())
data["tags"] = "{},{}".format("a" * 90, "b" * 100)
form = UpdateProjectForm(data, instance=project)
self.assertTrue(form.is_valid())
data["tags"] = "{},{}".format("a" * 99, "b" * 101)
form = UpdateProjectForm(data, instance=project)
self.assertFalse(form.is_valid())
self.assertTrue(form.has_error("tags"))
error_msg = "Length of each tag must be less than or equal to 100 characters."
self.assertDictEqual(form.errors, {"tags": [error_msg]})
def test_strip_repo_url(self):
form = ProjectBasicsForm(
{
"name": "foo",
"repo_type": "git",
"repo": "https://github.com/rtfd/readthedocs.org/",
"language": "en",
}
)
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data["repo"], "https://github.com/rtfd/readthedocs.org"
)
| TestProjectForms |
python | aio-libs__aiohttp | aiohttp/tracing.py | {
"start": 9043,
"end": 9193
} | class ____:
"""Parameters sent by the `on_dns_resolvehost_start` signal"""
host: str
@frozen_dataclass_decorator
| TraceDnsResolveHostStartParams |
python | realpython__materials | python-magic-methods/rectangle.py | {
"start": 0,
"end": 621
} | class ____:
def __init__(self, height, width):
self.height = height
self.width = width
def area(self):
return self.height * self.width
def __eq__(self, other):
return self.area() == other.area()
def __lt__(self, other):
return self.area() < other.area()
def __gt__(self, other):
return self.area() > other.area()
def __repr__(self):
return (
f"{type(self).__name__}(height={self.height}, width={self.width})"
)
def __dir__(self):
print("__dir__ called")
return sorted(self.__dict__.keys())
| Rectangle |
python | openai__openai-python | src/openai/types/chat/chat_completion_stream_options_param.py | {
"start": 213,
"end": 1311
} | class ____(TypedDict, total=False):
include_obfuscation: bool
"""When true, stream obfuscation will be enabled.
Stream obfuscation adds random characters to an `obfuscation` field on streaming
delta events to normalize payload sizes as a mitigation to certain side-channel
attacks. These obfuscation fields are included by default, but add a small
amount of overhead to the data stream. You can set `include_obfuscation` to
false to optimize for bandwidth if you trust the network links between your
application and the OpenAI API.
"""
include_usage: bool
"""If set, an additional chunk will be streamed before the `data: [DONE]` message.
The `usage` field on this chunk shows the token usage statistics for the entire
request, and the `choices` field will always be an empty array.
All other chunks will also include a `usage` field, but with a null value.
**NOTE:** If the stream is interrupted, you may not receive the final usage
chunk which contains the total token usage for the request.
"""
| ChatCompletionStreamOptionsParam |
python | Netflix__metaflow | metaflow/plugins/cards/card_creator.py | {
"start": 172,
"end": 1097
} | class ____:
"""
This class is responsible for managing the card creation processes.
"""
async_card_processes = {
# "carduuid": {
# "proc": subprocess.Popen,
# "started": time.time()
# }
}
@classmethod
def _register_card_process(cls, carduuid, proc):
cls.async_card_processes[carduuid] = {
"proc": proc,
"started": time.time(),
}
@classmethod
def _get_card_process(cls, carduuid):
proc_dict = cls.async_card_processes.get(carduuid, None)
if proc_dict is not None:
return proc_dict["proc"], proc_dict["started"]
return None, None
@classmethod
def _remove_card_process(cls, carduuid):
if carduuid in cls.async_card_processes:
cls.async_card_processes[carduuid]["proc"].kill()
del cls.async_card_processes[carduuid]
| CardProcessManager |
python | ray-project__ray | java/serve/src/main/resources/test_python_deployment.py | {
"start": 170,
"end": 427
} | class ____(object):
def __init__(self, value):
self.value = int(value)
def increase(self, delta):
self.value += int(delta)
return str(self.value)
def reconfigure(self, value_str):
self.value = int(value_str)
| Counter |
python | huggingface__transformers | src/transformers/models/albert/modeling_albert.py | {
"start": 9924,
"end": 10561
} | class ____(nn.Module):
def __init__(self, config: AlbertConfig):
super().__init__()
self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]:
for layer_index, albert_layer in enumerate(self.albert_layers):
hidden_states = albert_layer(hidden_states, attention_mask, **kwargs)
return hidden_states
| AlbertLayerGroup |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_glue.py | {
"start": 4529,
"end": 4751
} | class ____:
@pytest.fixture(autouse=True)
def mock_conn(self, monkeypatch):
self.client = boto3.client("glue")
monkeypatch.setattr(GlueJobHook, "conn", self.client)
| TestGlueJobCompleteCustomWaiterBase |
python | google__jax | tests/source_mapper_test.py | {
"start": 1516,
"end": 4508
} | class ____(jtu.JaxTestCase):
def setUp(self):
if sys.platform == "win32":
self.skipTest("Only works on non-Windows platforms")
def test_jaxpr_pass(self):
def jax_fn(x, y):
return x + y
test_x = jnp.array([1, 2, 3])
test_y = jnp.array([4, 5, 6])
source_maps = source_mapper.generate_sourcemaps(
jax_fn,
passes=source_mapper.filter_passes("jaxpr"))(test_x, test_y)
self.assertLen(source_maps, 1)
dump = source_maps[0]
self.assertEqual(dump.pass_name, "jaxpr")
self.assertIn("add a b", dump.generated_code)
source_map = dump.source_map
self.assertLen(source_map.sources, 1)
self.assertEqual(source_map.sources[0],
source_mapper.canonicalize_filename(__file__))
mappings = source_map.mappings
self.assertLen(mappings, len(dump.generated_code.split("\n")) + 1)
gen_col, file_idx, src_line, _ = mappings[0][0]
# It's hard to guarantee at what column the add instruction will be
# generated in the dump. We just sanity-check that it's greater than 0.
self.assertGreater(gen_col, 0)
# There is only one file, so we should map to that
self.assertEqual(file_idx, 0)
# These should line up with the function definition of jax_fn above.
self.assertEqual(src_line, jax_fn.__code__.co_firstlineno)
# TODO(justinfu): This fails on external but not internal builds.
# self.assertEqual(src_col, 13)
@parameterized.parameters(
("hlo:stable-hlo", "stablehlo.add", 13),
("hlo:original", "add", 0),
# TODO(justinfu): Make the hlo:optimized test less strict.
# ("hlo:optimized", "add", 0),
)
def test_hlo_passes(self, pass_name, expected_hlo_op, expected_col):
del expected_col
def jax_fn(x, y):
return x + y
test_x = jnp.array([1, 2, 3])
test_y = jnp.array([4, 5, 6])
source_maps = source_mapper.generate_sourcemaps(
jax_fn,
passes=source_mapper.filter_passes(pass_name))(test_x, test_y)
self.assertLen(source_maps, 1)
dump = source_maps[0]
self.assertEqual(dump.pass_name, pass_name)
self.assertIn(expected_hlo_op, dump.generated_code)
source_map = dump.source_map
self.assertLen(source_map.sources, 1)
self.assertEqual(source_map.sources[0],
source_mapper.canonicalize_filename(__file__))
mappings = source_map.mappings
self.assertLen(mappings, len(dump.generated_code.split("\n")) + 1)
nonempty_mappings = [m for m in mappings if m]
self.assertLen(nonempty_mappings, 1)
gen_col, file_idx, src_line, _ = nonempty_mappings[0][0]
self.assertGreater(gen_col, 0)
# There is only one file, so we should map to that
self.assertEqual(file_idx, 0)
# These should line up with the function definition of jax_fn above.
self.assertEqual(src_line, jax_fn.__code__.co_firstlineno)
# TODO(justinfu): This fails on external but not internal builds.
# self.assertEqual(src_col, expected_col)
| SourceMapperTest |
python | prabhupant__python-ds | data_structures/graphs/iterative_dfs.py | {
"start": 127,
"end": 1050
} | class ____:
def __init__(self, vertices):
self.vertices = vertices
self.graph = defaultdict(list)
def add_edge(self, u, v):
self.graph[u].append(v)
def dfs(self):
visited = [False] * self.vertices
stack = []
for s in range(self.vertices):
if visited[s] == False:
visited[s] = True
stack.append(s)
while stack:
s = stack.pop()
print(s, end=' ')
for i in self.graph[s]:
if visited[i] == False:
stack.append(i)
visited[i] = True
# g = Graph(4)
# g.add_edge(0, 1)
# g.add_edge(0, 2)
# g.add_edge(1, 2)
# g.add_edge(2, 0)
# g.add_edge(2, 3)
# g.add_edge(3, 3)
g = Graph(5)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(3, 4)
g.dfs()
| Graph |
python | kamyu104__LeetCode-Solutions | Python/range-sum-query-2d-mutable.py | {
"start": 109,
"end": 2390
} | class ____(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
if not matrix:
return
self.__matrix = matrix
self.__bit = [[0] * (len(self.__matrix[0]) + 1) \
for _ in xrange(len(self.__matrix) + 1)]
for i in xrange(1, len(self.__bit)):
for j in xrange(1, len(self.__bit[0])):
self.__bit[i][j] = matrix[i-1][j-1] + self.__bit[i-1][j] + \
self.__bit[i][j-1] - self.__bit[i-1][j-1]
for i in reversed(xrange(1, len(self.__bit))):
for j in reversed(xrange(1, len(self.__bit[0]))):
last_i, last_j = i - (i & -i), j - (j & -j)
self.__bit[i][j] = self.__bit[i][j] - self.__bit[i][last_j] - \
self.__bit[last_i][j] + self.__bit[last_i][last_j]
def update(self, row, col, val):
"""
update the element at matrix[row,col] to val.
:type row: int
:type col: int
:type val: int
:rtype: void
"""
if val - self.__matrix[row][col]:
self.__add(row, col, val - self.__matrix[row][col])
self.__matrix[row][col] = val
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
return self.__sum(row2, col2) - self.__sum(row2, col1 - 1) - \
self.__sum(row1 - 1, col2) + self.__sum(row1 - 1, col1 - 1)
def __sum(self, row, col):
row += 1
col += 1
ret = 0
i = row
while i > 0:
j = col
while j > 0:
ret += self.__bit[i][j]
j -= (j & -j)
i -= (i & -i)
return ret
def __add(self, row, col, val):
row += 1
col += 1
i = row
while i <= len(self.__matrix):
j = col
while j <= len(self.__matrix[0]):
self.__bit[i][j] += val
j += (j & -j)
i += (i & -i)
| NumMatrix |
python | django__django | tests/lookup/test_lookups.py | {
"start": 243,
"end": 1779
} | class ____(SimpleTestCase):
def test_equality(self):
lookup = Lookup(Value(1), Value(2))
self.assertEqual(lookup, lookup)
self.assertEqual(lookup, Lookup(lookup.lhs, lookup.rhs))
self.assertEqual(lookup, mock.ANY)
self.assertNotEqual(lookup, Lookup(lookup.lhs, Value(3)))
self.assertNotEqual(lookup, Lookup(Value(3), lookup.rhs))
self.assertNotEqual(lookup, CustomLookup(lookup.lhs, lookup.rhs))
def test_repr(self):
tests = [
(Lookup(Value(1), Value("a")), "Lookup(Value(1), Value('a'))"),
(
YearLookup(
Value(datetime(2010, 1, 1, 0, 0, 0)),
Value(datetime(2010, 1, 1, 23, 59, 59)),
),
"YearLookup("
"Value(datetime.datetime(2010, 1, 1, 0, 0)), "
"Value(datetime.datetime(2010, 1, 1, 23, 59, 59)))",
),
]
for lookup, expected in tests:
with self.subTest(lookup=lookup):
self.assertEqual(repr(lookup), expected)
def test_hash(self):
lookup = Lookup(Value(1), Value(2))
self.assertEqual(hash(lookup), hash(lookup))
self.assertEqual(hash(lookup), hash(Lookup(lookup.lhs, lookup.rhs)))
self.assertNotEqual(hash(lookup), hash(Lookup(lookup.lhs, Value(3))))
self.assertNotEqual(hash(lookup), hash(Lookup(Value(3), lookup.rhs)))
self.assertNotEqual(hash(lookup), hash(CustomLookup(lookup.lhs, lookup.rhs)))
| LookupTests |
python | cython__cython | Cython/Compiler/TreeFragment.py | {
"start": 432,
"end": 3360
} | class ____(Main.Context):
def __init__(self, name, include_directories=None, compiler_directives=None, cpp=False):
if include_directories is None:
include_directories = []
if compiler_directives is None:
compiler_directives = {}
Main.Context.__init__(self, include_directories, compiler_directives, cpp=cpp, language_level='3')
self.module_name = name
def find_module(self, module_name, from_module=None, pos=None, need_pxd=1, absolute_fallback=True, relative_import=False):
if module_name not in (self.module_name, 'cython'):
raise AssertionError("Not yet supporting any cimports/includes from string code snippets")
return ModuleScope(module_name, parent_module=None, context=self)
def parse_from_strings(name, code, pxds=None, level=None, initial_pos=None,
context=None, allow_struct_enum_decorator=False,
in_utility_code=False):
"""
Utility method to parse a (unicode) string of code. This is mostly
used for internal Cython compiler purposes (creating code snippets
that transforms should emit, as well as unit testing).
code - a unicode string containing Cython (module-level) code
name - a descriptive name for the code source (to use in error messages etc.)
in_utility_code - used to suppress some messages from utility code. False by default
because some generated code snippets like properties and dataclasses
probably want to see those messages.
RETURNS
The tree, i.e. a ModuleNode. The ModuleNode's scope attribute is
set to the scope used when parsing.
"""
if context is None:
context = StringParseContext(name)
# Since source files carry an encoding, it makes sense in this context
# to use a unicode string so that code fragments don't have to bother
# with encoding. This means that test code passed in should not have an
# encoding header.
assert isinstance(code, str), "unicode code snippets only please"
encoding = "UTF-8"
module_name = name
if initial_pos is None:
initial_pos = (name, 1, 0)
code_source = StringSourceDescriptor(name, code)
if in_utility_code:
code_source.in_utility_code = True
scope = context.find_module(module_name, pos=initial_pos, need_pxd=False)
buf = StringIO(code)
scanner = PyrexScanner(buf, code_source, source_encoding = encoding,
scope = scope, context = context, initial_pos = initial_pos)
ctx = Parsing.Ctx(allow_struct_enum_decorator=allow_struct_enum_decorator)
if level is None:
tree = Parsing.p_module(scanner, 0, module_name, ctx=ctx)
tree.scope = scope
tree.is_pxd = False
else:
tree = Parsing.p_code(scanner, level=level, ctx=ctx)
tree.scope = scope
return tree
| StringParseContext |
python | python-markdown__markdown | markdown/util.py | {
"start": 7073,
"end": 8928
} | class ____:
"""
This class is used for stashing HTML objects that we extract
in the beginning and replace with place-holders.
"""
def __init__(self):
""" Create an `HtmlStash`. """
self.html_counter = 0 # for counting inline html segments
self.rawHtmlBlocks: list[str | etree.Element] = []
self.tag_counter = 0
self.tag_data: list[TagData] = [] # list of dictionaries in the order tags appear
def store(self, html: str | etree.Element) -> str:
"""
Saves an HTML segment for later reinsertion. Returns a
placeholder string that needs to be inserted into the
document.
Keyword arguments:
html: An html segment.
Returns:
A placeholder string.
"""
self.rawHtmlBlocks.append(html)
placeholder = self.get_placeholder(self.html_counter)
self.html_counter += 1
return placeholder
def reset(self) -> None:
""" Clear the stash. """
self.html_counter = 0
self.rawHtmlBlocks = []
def get_placeholder(self, key: int) -> str:
return HTML_PLACEHOLDER % key
def store_tag(self, tag: str, attrs: dict[str, str], left_index: int, right_index: int) -> str:
"""Store tag data and return a placeholder."""
self.tag_data.append({'tag': tag, 'attrs': attrs,
'left_index': left_index,
'right_index': right_index})
placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
self.tag_counter += 1 # equal to the tag's index in `self.tag_data`
return placeholder
# Used internally by `Registry` for each item in its sorted list.
# Provides an easier to read API when editing the code later.
# For example, `item.name` is more clear than `item[0]`.
| HtmlStash |
python | PyCQA__pylint | tests/functional/g/generic_class_syntax.py | {
"start": 508,
"end": 616
} | class ____(Entity[int]):
def __init__(self, data: int) -> None:
Entity.__init__(self, data)
| Switch |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 12711,
"end": 14098
} | class ____(Request):
"""
Adds a task entry to the queue.
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
"""
_service = "queues"
_action = "add_task"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"queue": {"description": "Queue id", "type": "string"},
"task": {"description": "Task id", "type": "string"},
},
"required": ["queue", "task"],
"type": "object",
}
def __init__(self, queue: str, task: str, **kwargs: Any) -> None:
super(AddTaskRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| AddTaskRequest |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 468441,
"end": 468980
} | class ____(UnopNode):
# unary '~' operator
def analyse_c_operation(self, env):
if self.operand.type.is_int:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
def py_operation_function(self, code):
return "PyNumber_Invert"
def calculate_result_code(self):
return "(~%s)" % self.operand.result()
| TildeNode |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_orm.py | {
"start": 9679,
"end": 11553
} | class ____(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("x", String(5)),
Column("y", String(5)),
Column("z", String(5)),
Column("q", String(5)),
Column("p", String(5)),
Column("r", String(5)),
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
A = cls.classes.A
a = cls.tables.a
cls.mapper_registry.map_imperatively(A, a)
@classmethod
def insert_data(cls, connection):
A = cls.classes.A
s = Session(connection)
s.add_all(
[
A(
id=i,
**{
letter: "%s%d" % (letter, i)
for letter in ["x", "y", "z", "p", "q", "r"]
},
)
for i in range(1, 1001)
]
)
s.commit()
@profiling.function_call_count(variance=0.10)
def test_baseline(self):
# as of [ticket:2778], this is at 39025
A = self.classes.A
s = fixture_session()
s.query(A).all()
@profiling.function_call_count(variance=0.10)
def test_defer_many_cols(self):
# with [ticket:2778], this goes from 50805 to 32817,
# as it should be fewer function calls than the baseline
A = self.classes.A
s = fixture_session()
s.query(A).options(
*[
defer(getattr(A, letter))
for letter in ["x", "y", "z", "p", "q", "r"]
]
).all()
| DeferOptionsTest |
python | getsentry__sentry | tests/sentry/integrations/github_enterprise/test_webhooks.py | {
"start": 21503,
"end": 26977
} | class ____(APITestCase):
def setUp(self) -> None:
self.url = "/extensions/github-enterprise/webhook/"
self.metadata = {
"url": "35.232.149.196",
"id": "2",
"name": "test-app",
"webhook_secret": "b3002c3e321d4b7880360d397db2ccfd",
"private_key": "private_key",
"verify_ssl": True,
}
self.create_integration(
external_id="35.232.149.196:234",
organization=self.project.organization,
provider="github_enterprise",
name="octocat",
metadata={
"domain_name": "35.232.149.196/baxterthehacker",
"installation": {
"id": "2",
"private_key": "private_key",
"verify_ssl": True,
},
},
)
self.repo = Repository.objects.create(
organization_id=self.project.organization.id,
external_id="35129377",
provider="integrations:github_enterprise",
name="baxterthehacker/public-repo",
)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_opened(
self, mock_record: MagicMock, mock_get_installation_metadata: MagicMock
) -> None:
mock_get_installation_metadata.return_value = self.metadata
response = self.client.post(
path=self.url,
data=PULL_REQUEST_OPENED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=aa5b11bc52b9fac082cb59f9ee8667cb222c3aff",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
prs = PullRequest.objects.filter(
repository_id=self.repo.id, organization_id=self.project.organization.id
)
assert len(prs) == 1
pr = prs[0]
assert pr.key == "1"
assert pr.message == "This is a pretty simple change that we need to pull into master."
assert pr.title == "Update the README with new information"
assert pr.author is not None
assert pr.author.name == "baxterthehacker"
assert_success_metric(mock_record)
@patch("sentry.integrations.github.webhook.PullRequestEventWebhook.__call__")
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_webhook_error_metric(
self,
mock_record: MagicMock,
mock_event: MagicMock,
mock_get_installation_metadata: MagicMock,
) -> None:
mock_get_installation_metadata.return_value = self.metadata
error = Exception("error")
mock_event.side_effect = error
response = self.client.post(
path=self.url,
data=PULL_REQUEST_OPENED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=aa5b11bc52b9fac082cb59f9ee8667cb222c3aff",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 500
assert_failure_metric(mock_record, error)
def test_edited(self, mock_get_installation_metadata: MagicMock) -> None:
mock_get_installation_metadata.return_value = self.metadata
pr = PullRequest.objects.create(
key="1",
repository_id=self.repo.id,
organization_id=self.project.organization.id,
)
response = self.client.post(
path=self.url,
data=PULL_REQUEST_EDITED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=b50a13afd33b514e8e62e603827ea62530f0690e",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
pr = PullRequest.objects.get(id=pr.id)
assert pr.key == "1"
assert pr.message == "new edited body"
assert pr.title == "new edited title"
assert pr.author is not None
assert pr.author.name == "baxterthehacker"
def test_closed(self, mock_get_installation_metadata: MagicMock) -> None:
mock_get_installation_metadata.return_value = self.metadata
response = self.client.post(
path=self.url,
data=PULL_REQUEST_CLOSED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=dff1c803cf1e48c1b9aefe4a17952ea132758806",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
prs = PullRequest.objects.filter(
repository_id=self.repo.id, organization_id=self.project.organization.id
)
assert len(prs) == 1
pr = prs[0]
assert pr.key == "1"
assert pr.message == "new closed body"
assert pr.title == "new closed title"
assert pr.author is not None
assert pr.author.name == "baxterthehacker"
assert pr.merge_commit_sha == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c"
| PullRequestEventWebhook |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.