language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/grouping/component.py | {
"start": 15568,
"end": 16098
} | class ____(
BaseGroupingComponent[SaltGroupingComponent | ViolationGroupingComponent | URIGroupingComponent]
):
id: str = "csp"
@property
def key(self) -> str:
key = "csp"
local_script_violation = self.get_subcomponent("violation")
url = self.get_subcomponent("uri")
if local_script_violation and local_script_violation.contributes:
key += "_local_script_violation"
elif url and url.contributes:
key += "_url"
return key
| CSPGroupingComponent |
python | pdm-project__pdm | src/pdm/cli/commands/export.py | {
"start": 570,
"end": 5351
} | class ____(BaseCommand):
"""Export the locked packages set to other formats"""
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
lockfile_option.add_to_parser(parser)
parser.add_argument(
"-f",
"--format",
choices=["requirements", "pylock"],
default="requirements",
help="Export to requirements.txt format or pylock.toml format",
)
groups_group.add_to_parser(parser)
parser.add_argument(
"--no-hashes",
"--without-hashes",
dest="hashes",
action="store_false",
default=True,
help="Don't include artifact hashes",
)
parser.add_argument(
"--no-markers",
action="store_false",
default=True,
dest="markers",
help="(DEPRECATED)Don't include platform markers",
)
parser.add_argument(
"--no-extras", action="store_false", default=True, dest="extras", help="Strip extras from the requirements"
)
parser.add_argument(
"-o",
"--output",
help="Write output to the given file, or print to stdout if not given",
)
parser.add_argument(
"--pyproject",
action="store_true",
help="Read the list of packages from pyproject.toml",
)
parser.add_argument("--expandvars", action="store_true", help="Expand environment variables in requirements")
group = parser.add_mutually_exclusive_group()
group.add_argument("--self", action="store_true", help="Include the project itself")
group.add_argument(
"--editable-self", action="store_true", help="Include the project itself as an editable dependency"
)
def handle(self, project: Project, options: argparse.Namespace) -> None:
from pdm.models.repositories.lock import Package
if options.format == "pylock":
locked_repository = project.get_locked_repository()
if options.self or options.editable_self:
locked_repository.add_package(
Package(project.make_self_candidate(editable=options.editable_self), [], "")
)
doc = tomlkit.dumps(PyLockConverter(project, locked_repository).convert())
if options.output:
Path(options.output).write_text(doc, encoding="utf-8")
else:
print(doc)
return
if options.pyproject:
options.hashes = False
selection = GroupSelection.from_options(project, options)
if options.markers is False:
project.core.ui.warn(
"The --no-markers option is on, the exported requirements can only work on the current platform"
)
packages: Iterable[Requirement] | Iterable[Candidate]
if options.pyproject:
all_deps = project._resolve_dependencies(list(selection))
packages = [r for group in selection for r in project.get_dependencies(group, all_deps)]
else:
if not project.lockfile.exists():
raise PdmUsageError("No lockfile found, please run `pdm lock` first.")
if FLAG_INHERIT_METADATA not in project.lockfile.strategy:
raise PdmUsageError(
"Can't export a lock file without environment markers, please re-generate the lock file with `inherit_metadata` strategy."
)
groups = set(selection)
candidates = sorted(
(
entry.candidate
for entry in project.get_locked_repository().evaluate_candidates(groups, not options.markers)
),
key=lambda c: not c.req.extras,
)
packages = []
seen_extras: set[str] = set()
for candidate in candidates:
if options.extras:
key = candidate.req.key or ""
if candidate.req.extras:
seen_extras.add(key)
elif key in seen_extras:
continue
elif candidate.req.extras:
continue
if not options.markers and candidate.req.marker:
candidate.req.marker = None
packages.append(candidate) # type: ignore[arg-type]
content = FORMATS[options.format].export(project, packages, options)
if options.output:
Path(options.output).write_text(content, encoding="utf-8")
else:
# Use a regular print to avoid any formatting / wrapping.
print(content)
| Command |
python | sympy__sympy | sympy/physics/quantum/grover.py | {
"start": 1633,
"end": 2091
} | class ____(Atom):
"""Wrapper for python functions used in `OracleGate`s"""
def __new__(cls, function):
if not callable(function):
raise TypeError('Callable expected, got: %r' % function)
obj = Atom.__new__(cls)
obj.function = function
return obj
def _hashable_content(self):
return type(self), self.function
def __call__(self, *args):
return self.function(*args)
| OracleGateFunction |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0025_migrate_private_versions.py | {
"start": 541,
"end": 757
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0024_status_code_choices"),
]
operations = [
migrations.RunPython(forwards_func),
]
| Migration |
python | huggingface__transformers | src/transformers/models/squeezebert/modeling_squeezebert.py | {
"start": 1323,
"end": 3177
} | class ____(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
| SqueezeBertEmbeddings |
python | django__django | tests/template_tests/test_extends_relative.py | {
"start": 2336,
"end": 3517
} | class ____(SimpleTestCase):
def test_normal_include(self):
engine = Engine(dirs=[RELATIVE])
template = engine.get_template("dir1/dir2/inc2.html")
output = template.render(Context({}))
self.assertEqual(output.strip(), "dir2 include")
def test_normal_include_variable(self):
engine = Engine(dirs=[RELATIVE])
template = engine.get_template("dir1/dir2/inc3.html")
output = template.render(Context({"tmpl": "./include_content.html"}))
self.assertEqual(output.strip(), "dir2 include")
def test_dir2_include(self):
engine = Engine(dirs=[RELATIVE])
template = engine.get_template("dir1/dir2/inc1.html")
output = template.render(Context({}))
self.assertEqual(output.strip(), "three")
def test_include_error(self):
engine = Engine(dirs=[RELATIVE])
msg = (
"The relative path '\"./../three.html\"' points outside the file "
"hierarchy that template 'error_include.html' is in."
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
engine.render_to_string("error_include.html")
| IncludeRelativeBehaviorTests |
python | pydantic__pydantic | pydantic/warnings.py | {
"start": 4585,
"end": 4802
} | class ____(CoreSchemaGenerationWarning):
"""A warning raised when the [`extra`][pydantic.ConfigDict.extra] configuration is incompatible with the `closed` or `extra_items` specification."""
| TypedDictExtraConfigWarning |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 25682,
"end": 27927
} | class ____:
@pytest.mark.parametrize("cumprod", [np.cumprod, np.cumulative_prod])
def test_basic(self, cumprod):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, cumprod, a)
assert_raises(ArithmeticError, cumprod, a2, 1)
assert_raises(ArithmeticError, cumprod, a)
else:
assert_array_equal(cumprod(a, axis=-1),
np.array([1, 2, 20, 220,
1320, 6600, 26400], ctype))
assert_array_equal(cumprod(a2, axis=0),
np.array([[1, 2, 3, 4],
[5, 12, 21, 36],
[50, 36, 84, 180]], ctype))
assert_array_equal(cumprod(a2, axis=-1),
np.array([[1, 2, 6, 24],
[5, 30, 210, 1890],
[10, 30, 120, 600]], ctype))
def test_cumulative_include_initial():
arr = np.arange(8).reshape((2, 2, 2))
expected = np.array([
[[0, 0], [0, 1], [2, 4]], [[0, 0], [4, 5], [10, 12]]
])
assert_array_equal(
np.cumulative_sum(arr, axis=1, include_initial=True), expected
)
expected = np.array([
[[1, 0, 0], [1, 2, 6]], [[1, 4, 20], [1, 6, 42]]
])
assert_array_equal(
np.cumulative_prod(arr, axis=2, include_initial=True), expected
)
out = np.zeros((3, 2), dtype=np.float64)
expected = np.array([[0, 0], [1, 2], [4, 6]], dtype=np.float64)
arr = np.arange(1, 5).reshape((2, 2))
np.cumulative_sum(arr, axis=0, out=out, include_initial=True)
assert_array_equal(out, expected)
expected = np.array([1, 2, 4])
assert_array_equal(
np.cumulative_prod(np.array([2, 2]), include_initial=True), expected
)
| TestCumprod |
python | astropy__astropy | astropy/nddata/mixins/tests/test_ndslicing.py | {
"start": 657,
"end": 6024
} | class ____(NDUncertainty):
@property
def uncertainty_type(self):
return "fake"
def _propagate_add(self, data, final_data):
pass
def _propagate_subtract(self, data, final_data):
pass
def _propagate_multiply(self, data, final_data):
pass
def _propagate_divide(self, data, final_data):
pass
def test_slicing_only_data():
data = np.arange(10)
nd = NDDataSliceable(data)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
def test_slicing_data_scalar_fail():
data = np.array(10)
nd = NDDataSliceable(data)
with pytest.raises(TypeError): # as exc
nd[:]
# assert exc.value.args[0] == 'Scalars cannot be sliced.'
def test_slicing_1ddata_ndslice():
data = np.array([10, 20])
nd = NDDataSliceable(data)
# Standard numpy warning here:
with pytest.raises(IndexError):
nd[:, :]
@pytest.mark.parametrize("prop_name", ["mask", "uncertainty"])
def test_slicing_1dmask_ndslice(prop_name):
# Data is 2d but mask/uncertainty only 1d so this should let the IndexError when
# slicing the mask rise to the user.
data = np.ones((3, 3))
kwarg = {prop_name: np.ones(3)}
nd = NDDataSliceable(data, **kwarg)
# Standard numpy warning here:
with pytest.raises(IndexError):
nd[:, :]
def test_slicing_all_npndarray_1d():
data = np.arange(10)
mask = data > 3
uncertainty = StdDevUncertainty(np.linspace(10, 20, 10))
naxis = 1
wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[3] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
# Just to have them too
unit = u.s
meta = {"observer": "Brian"}
nd = NDDataSliceable(
data, mask=mask, uncertainty=uncertainty, wcs=wcs, unit=unit, meta=meta
)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5].array, nd2.uncertainty.array)
assert nd2.wcs.pixel_to_world(1) == nd.wcs.pixel_to_world(3)
assert unit is nd2.unit
assert meta == nd.meta
def test_slicing_all_npndarray_nd():
# See what happens for multidimensional properties
data = np.arange(1000).reshape(10, 10, 10)
mask = data > 3
uncertainty = np.linspace(10, 20, 1000).reshape(10, 10, 10)
naxis = 3
wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[3] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
# Slice only 1D
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5], nd2.uncertainty.array)
# Slice 3D
nd2 = nd[2:5, :, 4:7]
assert_array_equal(data[2:5, :, 4:7], nd2.data)
assert_array_equal(mask[2:5, :, 4:7], nd2.mask)
assert_array_equal(uncertainty[2:5, :, 4:7], nd2.uncertainty.array)
assert nd2.wcs.pixel_to_world(1, 5, 1) == nd.wcs.pixel_to_world(5, 5, 3)
def test_slicing_all_npndarray_shape_diff():
data = np.arange(10)
mask = (data > 3)[0:9]
uncertainty = np.linspace(10, 20, 15)
naxis = 1
wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[3] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
# All are sliced even if the shapes differ (no Info)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5], nd2.uncertainty.array)
assert nd2.wcs.pixel_to_world(1) == nd.wcs.pixel_to_world(3)
def test_slicing_all_something_wrong():
data = np.arange(10)
mask = [False] * 10
uncertainty = UnknownUncertainty({"rdnoise": 2.9, "gain": 1.4})
naxis = 1
wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[3] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[2:5]
# Sliced properties:
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
# Not sliced attributes (they will raise a Info nevertheless)
assert uncertainty.array == nd2.uncertainty.array
assert uncertainty.uncertainty_type == nd2.uncertainty.uncertainty_type
assert uncertainty.unit == nd2.uncertainty.unit
assert nd2.wcs.pixel_to_world(1) == nd.wcs.pixel_to_world(3)
def test_boolean_slicing():
data = np.arange(10)
mask = data.copy()
uncertainty = StdDevUncertainty(data.copy())
naxis = 1
wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[3] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
with pytest.raises(ValueError):
nd2 = nd[(nd.data >= 3) & (nd.data < 8)]
nd.wcs = None
nd2 = nd[(nd.data >= 3) & (nd.data < 8)]
assert_array_equal(data[3:8], nd2.data)
assert_array_equal(mask[3:8], nd2.mask)
| SomeUncertainty |
python | celery__celery | celery/concurrency/solo.py | {
"start": 145,
"end": 754
} | class ____(BasePool):
"""Solo task pool (blocking, inline, fast)."""
body_can_be_buffer = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.on_apply = apply_target
self.limit = 1
signals.worker_process_init.send(sender=None)
def _get_info(self):
info = super()._get_info()
info.update({
'max-concurrency': 1,
'processes': [os.getpid()],
'max-tasks-per-child': None,
'put-guarded-by-semaphore': True,
'timeouts': (),
})
return info
| TaskPool |
python | pypa__warehouse | warehouse/packaging/models.py | {
"start": 36346,
"end": 38990
} | class ____(db.ModelBase):
__tablename__ = "journals"
@declared_attr
def __table_args__(cls): # noqa
return (
Index("journals_changelog", "submitted_date", "name", "version", "action"),
Index("journals_name_idx", "name"),
Index("journals_version_idx", "version"),
Index("journals_submitted_by_idx", "submitted_by"),
Index("journals_submitted_date_id_idx", cls.submitted_date, cls.id),
# Composite index for journals to be able to sort by
# `submitted_by`, and `submitted_date` in descending order.
Index(
"journals_submitted_by_and_reverse_date_idx",
cls._submitted_by,
cls.submitted_date.desc(),
),
# Reverse index on ID, most recent project's journal entry for triggers
Index(
"journals_name_id_idx",
cls.name,
cls.id.desc(),
),
)
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str | None] = mapped_column(Text)
version: Mapped[str | None]
action: Mapped[str | None]
submitted_date: Mapped[datetime_now] = mapped_column()
_submitted_by: Mapped[str | None] = mapped_column(
"submitted_by",
CITEXT,
ForeignKey("users.username", onupdate="CASCADE"),
nullable=True,
)
submitted_by: Mapped[User] = orm.relationship(lazy="raise_on_sql")
@db.listens_for(db.Session, "before_flush")
def ensure_monotonic_journals(config, session, flush_context, instances):
# We rely on `journals.id` to be a monotonically increasing integer,
# however the way that SERIAL is implemented, it does not guarentee
# that is the case.
#
# Ultimately SERIAL fetches the next integer regardless of what happens
# inside of the transaction. So journals.id will get filled in, in order
# of when the `INSERT` statements were executed, but not in the order
# that transactions were committed.
#
# The way this works, not even the SERIALIZABLE transaction types give
# us this property. Instead we have to implement our own locking that
# ensures that each new journal entry will be serialized.
for obj in session.new:
if isinstance(obj, JournalEntry):
session.execute(
select(
func.pg_advisory_xact_lock(
cast(cast(JournalEntry.__tablename__, REGCLASS), Integer),
_MONOTONIC_SEQUENCE,
)
)
)
return
| JournalEntry |
python | openai__openai-python | src/openai/types/evals/create_eval_completions_run_data_source_param.py | {
"start": 4974,
"end": 5475
} | class ____(TypedDict, total=False):
item_reference: Required[str]
"""A reference to a variable in the `item` namespace. Ie, "item.input_trajectory" """
type: Required[Literal["item_reference"]]
"""The type of input messages. Always `item_reference`."""
InputMessages: TypeAlias = Union[InputMessagesTemplate, InputMessagesItemReference]
SamplingParamsResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject]
| InputMessagesItemReference |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/serializers/test_data_condition_group_serializer.py | {
"start": 443,
"end": 2634
} | class ____(TestCase):
def test_serialize_simple(self) -> None:
condition_group = self.create_data_condition_group(
organization_id=self.organization.id,
logic_type=DataConditionGroup.Type.ANY,
)
result = serialize(condition_group)
assert result == {
"id": str(condition_group.id),
"organizationId": str(self.organization.id),
"logicType": DataConditionGroup.Type.ANY,
"conditions": [],
"actions": [],
}
def test_serialize_full(self) -> None:
condition_group = self.create_data_condition_group(
organization_id=self.organization.id,
logic_type=DataConditionGroup.Type.ANY,
)
condition = self.create_data_condition(
condition_group=condition_group,
type=Condition.GREATER,
comparison=100,
condition_result=DetectorPriorityLevel.HIGH,
)
action = self.create_action(
type=Action.Type.EMAIL,
data={},
config={
"target_identifier": "123",
"target_type": ActionTarget.USER.value,
},
)
self.create_data_condition_group_action(condition_group=condition_group, action=action)
result = serialize(condition_group)
assert result == {
"id": str(condition_group.id),
"organizationId": str(self.organization.id),
"logicType": DataConditionGroup.Type.ANY,
"conditions": [
{
"id": str(condition.id),
"type": "gt",
"comparison": 100,
"conditionResult": DetectorPriorityLevel.HIGH,
}
],
"actions": [
{
"id": str(action.id),
"type": "email",
"data": {},
"integrationId": None,
"config": {"targetType": "user", "targetIdentifier": "123"},
"status": "active",
}
],
}
| TestDataConditionGroupSerializer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 493082,
"end": 493462
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field(CheckStep, graphql_name="node")
"""The item at the end of the edge."""
| CheckStepEdge |
python | kamyu104__LeetCode-Solutions | Python/range-xor-queries-with-subarray-reversals.py | {
"start": 153,
"end": 2055
} | class ____(object):
__slots__ = ('value', 'prior', 'cnt', 'xor_sum', 'rev', 'l', 'r')
def __init__(self, value):
self.value = value
self.prior = random.randint(1, 1 << 30)
self.cnt = 1
self.xor_sum = value
self.rev = False
self.l = None
self.r = None
def cnt(t):
return t.cnt if t else 0
def xor_sum(t):
return t.xor_sum if t else 0
def upd_cnt(t):
if t:
t.cnt = 1 + cnt(t.l) + cnt(t.r)
t.xor_sum = t.value ^ xor_sum(t.l) ^ xor_sum(t.r)
def push(t):
if t and t.rev:
t.rev = False
t.l, t.r = t.r, t.l
if t.l:
t.l.rev ^= True
if t.r:
t.r.rev ^= True
def merge(l, r):
push(l)
push(r)
if not l or not r:
return l or r
if l.prior > r.prior:
l.r = merge(l.r, r)
upd_cnt(l)
return l
else:
r.l = merge(l, r.l)
upd_cnt(r)
return r
def split(t, key, add=0):
if not t:
return (None, None)
push(t)
cur_key = add + cnt(t.l)
if key <= cur_key:
l, t.l = split(t.l, key, add)
upd_cnt(t)
return (l, t)
else:
t.r, r = split(t.r, key, add + 1 + cnt(t.l))
upd_cnt(t)
return (t, r)
def reverse(t, l, r):
t1, t2 = split(t, l)
t2, t3 = split(t2, r - l + 1)
if t2:
t2.rev ^= True
return merge(merge(t1, t2), t3)
def heapify(t):
if not t:
return
mx = t
if t.l and t.l.prior > mx.prior:
mx = t.l
if t.r and t.r.prior > mx.prior:
mx = t.r
if mx != t:
t.prior, mx.prior = mx.prior, t.prior
heapify(mx)
def build(a, i, n):
if not n:
return None
mid = n // 2
t = TreapNode(a[i + mid])
t.l = build(a, i, mid)
t.r = build(a, i + mid + 1, n - mid - 1)
heapify(t)
upd_cnt(t)
return t
# treap
| TreapNode |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 636280,
"end": 636731
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node", "starred_at")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field(sgqlc.types.non_null("User"), graphql_name="node")
starred_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="starredAt"
)
| StargazerEdge |
python | great-expectations__great_expectations | great_expectations/render/renderer_configuration.py | {
"start": 4351,
"end": 4499
} | class ____(str, Enum):
"""Possible formats that can be rendered via MetaNotes."""
STRING = "string"
MARKDOWN = "markdown"
| MetaNotesFormat |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/dynamic_input_shapes_test.py | {
"start": 1162,
"end": 3991
} | class ____(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, x):
conv_filter1 = constant_op.constant(
np.ones([3, 3, 1, 8]), name="weights1", dtype=dtypes.float32)
bias1 = constant_op.constant(np.random.randn(8), dtype=dtypes.float32)
x = nn.conv2d(
input=x,
filter=conv_filter1,
strides=[1, 1, 1, 1],
padding="SAME",
name="conv")
x = nn.bias_add(x, bias1)
x = nn.relu(x)
conv_filter2 = constant_op.constant(
np.ones([3, 3, 8, 1]), name="weights2", dtype=dtypes.float32)
bias2 = constant_op.constant(np.random.randn(1), dtype=dtypes.float32)
x = nn.conv2d(
input=x,
filter=conv_filter2,
strides=[1, 1, 1, 1],
padding="SAME",
name="conv")
x = nn.bias_add(x, bias2)
return array_ops.identity(x, name="output")
def GetParams(self):
# TODO(laigd): we should test the following cases:
# - batch size is not changed, other dims are changing
# - batch size is decreasing, other dims are identical
# - batch size is decreasing, other dims are changing
# - batch size is increasing, other dims are identical
# - batch size is increasing, other dims are changing
input_dims = [[[1, 5, 5, 1]], [[10, 5, 5, 1]], [[3, 5, 5, 1]],
[[1, 5, 5, 1]], [[1, 3, 1, 1]], [[2, 9, 9, 1]],
[[1, 224, 224, 1]], [[1, 128, 224, 1]]]
expected_output_dims = input_dims
return trt_test.TfTrtIntegrationTestParams(
graph_fn=self.GraphFn,
input_specs=[
tensor_spec.TensorSpec([None, None, None, 1], dtypes.float32,
"input")
],
output_specs=[
tensor_spec.TensorSpec([None, None, None, 1], dtypes.float32,
"output")
],
input_dims=input_dims,
expected_output_dims=expected_output_dims)
def setUp(self):
super().setUp()
# Disable layout optimizer, since it will convert BiasAdd with NHWC
# format to NCHW format under four dimensional input.
self.DisableNonTrtOptimizers()
def ExpectedEnginesToBuild(self, run_params):
return ["TRTEngineOp_000"]
def ShouldRunTest(self, run_params):
return (run_params.dynamic_engine and not trt_test.IsQuantizationMode(
run_params.precision_mode)), "test dynamic engine and non-INT8"
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-03 if run_params.precision_mode == "FP32" else 1.e-01
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-03 if run_params.precision_mode == "FP32" else 1.e-01
if __name__ == "__main__":
test.main()
| DynamicInputShapesTest |
python | numba__numba | numba/core/types/containers.py | {
"start": 25731,
"end": 27633
} | class ____(Type):
"""A mutable struct.
"""
def __init__(self, fields):
"""
Parameters
----------
fields : Sequence
A sequence of field descriptions, which is a 2-tuple-like object
containing `(name, type)`, where `name` is a `str` for the field
name, and `type` is a numba type for the field type.
"""
def check_field_pair(fieldpair):
name, typ = fieldpair
if not isinstance(name, str):
msg = "expecting a str for field name"
raise ValueError(msg)
if not isinstance(typ, Type):
msg = "expecting a Numba Type for field type"
raise ValueError(msg)
return name, typ
fields = tuple(map(check_field_pair, fields))
self._fields = tuple(map(check_field_pair,
self.preprocess_fields(fields)))
self._typename = self.__class__.__qualname__
name = f"numba.{self._typename}{self._fields}"
super().__init__(name=name)
def preprocess_fields(self, fields):
"""Subclasses can override this to do additional clean up on fields.
The default is an identity function.
Parameters:
-----------
fields : Sequence[Tuple[str, Type]]
"""
return fields
@property
def field_dict(self):
"""Return an immutable mapping for the field names and their
corresponding types.
"""
return MappingProxyType(dict(self._fields))
def get_data_type(self):
"""Get the payload type for the actual underlying structure referred
to by this struct reference.
See also: `ClassInstanceType.get_data_type`
"""
return StructRefPayload(
typename=self.__class__.__name__, fields=self._fields,
)
| StructRef |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/common.py | {
"start": 6138,
"end": 8165
} | class ____(
gym.Wrapper[ObsType, ActType, ObsType, ActType], gym.utils.RecordConstructorArgs
):
"""The wrapped environment is automatically reset when a terminated or truncated state is reached.
This follows the vector autoreset api where on the step after an episode terminates or truncated then the environment is reset.
Change logs:
* v0.24.0 - Initially added as `AutoResetWrapper`
* v1.0.0 - renamed to `Autoreset` and autoreset order was changed to reset on the step after the environment terminates or truncates. As a result, `"final_observation"` and `"final_info"` is removed.
"""
def __init__(self, env: gym.Env):
"""A class for providing an automatic reset functionality for gymnasium environments when calling :meth:`self.step`.
Args:
env (gym.Env): The environment to apply the wrapper
"""
gym.utils.RecordConstructorArgs.__init__(self)
gym.Wrapper.__init__(self, env)
self.autoreset = False
def reset(
self, *, seed: int | None = None, options: dict[str, Any] | None = None
) -> tuple[WrapperObsType, dict[str, Any]]:
"""Resets the environment and sets autoreset to False preventing."""
self.autoreset = False
return super().reset(seed=seed, options=options)
def step(
self, action: ActType
) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:
"""Steps through the environment with action and resets the environment if a terminated or truncated signal is encountered.
Args:
action: The action to take
Returns:
The autoreset environment :meth:`step`
"""
if self.autoreset:
obs, info = self.env.reset()
reward, terminated, truncated = 0.0, False, False
else:
obs, reward, terminated, truncated, info = self.env.step(action)
self.autoreset = terminated or truncated
return obs, reward, terminated, truncated, info
| Autoreset |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 757615,
"end": 759635
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"digest_method",
"enterprise",
"external_identities",
"idp_certificate",
"issuer",
"recovery_codes",
"signature_method",
"sso_url",
)
digest_method = sgqlc.types.Field(SamlDigestAlgorithm, graphql_name="digestMethod")
enterprise = sgqlc.types.Field(Enterprise, graphql_name="enterprise")
external_identities = sgqlc.types.Field(
sgqlc.types.non_null(ExternalIdentityConnection),
graphql_name="externalIdentities",
args=sgqlc.types.ArgDict(
(
(
"members_only",
sgqlc.types.Arg(Boolean, graphql_name="membersOnly", default=None),
),
("login", sgqlc.types.Arg(String, graphql_name="login", default=None)),
(
"user_name",
sgqlc.types.Arg(String, graphql_name="userName", default=None),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
idp_certificate = sgqlc.types.Field(X509Certificate, graphql_name="idpCertificate")
issuer = sgqlc.types.Field(String, graphql_name="issuer")
recovery_codes = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="recoveryCodes"
)
signature_method = sgqlc.types.Field(
SamlSignatureAlgorithm, graphql_name="signatureMethod"
)
sso_url = sgqlc.types.Field(URI, graphql_name="ssoUrl")
| EnterpriseIdentityProvider |
python | aio-libs__aiohttp | aiohttp/streams.py | {
"start": 18337,
"end": 20378
} | class ____(Generic[_T]):
"""DataQueue is a general-purpose blocking queue with one reader."""
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._eof = False
self._waiter: asyncio.Future[None] | None = None
self._exception: type[BaseException] | BaseException | None = None
self._buffer: collections.deque[_T] = collections.deque()
def __len__(self) -> int:
return len(self._buffer)
def is_eof(self) -> bool:
return self._eof
def at_eof(self) -> bool:
return self._eof and not self._buffer
def exception(self) -> type[BaseException] | BaseException | None:
return self._exception
def set_exception(
self,
exc: type[BaseException] | BaseException,
exc_cause: BaseException = _EXC_SENTINEL,
) -> None:
self._eof = True
self._exception = exc
if (waiter := self._waiter) is not None:
self._waiter = None
set_exception(waiter, exc, exc_cause)
def feed_data(self, data: _T) -> None:
self._buffer.append(data)
if (waiter := self._waiter) is not None:
self._waiter = None
set_result(waiter, None)
def feed_eof(self) -> None:
self._eof = True
if (waiter := self._waiter) is not None:
self._waiter = None
set_result(waiter, None)
async def read(self) -> _T:
if not self._buffer and not self._eof:
assert not self._waiter
self._waiter = self._loop.create_future()
try:
await self._waiter
except (asyncio.CancelledError, asyncio.TimeoutError):
self._waiter = None
raise
if self._buffer:
return self._buffer.popleft()
if self._exception is not None:
raise self._exception
raise EofStream
def __aiter__(self) -> AsyncStreamIterator[_T]:
return AsyncStreamIterator(self.read)
| DataQueue |
python | huggingface__transformers | src/transformers/models/aria/modeling_aria.py | {
"start": 12256,
"end": 14266
} | class ____(nn.Module):
def __init__(self, config: AriaTextConfig) -> None:
super().__init__()
self.config = config
self.fc1 = AriaGroupedExpertsGemm(config.hidden_size, config.intermediate_size * 2, config.moe_num_experts)
self.fc2 = AriaGroupedExpertsGemm(config.intermediate_size, config.hidden_size, config.moe_num_experts)
def route_tokens_to_experts(self, router_logits):
top_logits, top_indices = torch.topk(router_logits, k=self.config.moe_topk, dim=1)
scores = nn.functional.softmax(top_logits, dim=-1)
return top_indices, scores
def forward(self, hidden_states, router_logits) -> torch.Tensor:
top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
original_dtype = top_k_index.dtype
tokens_per_expert = torch.histc(
top_k_index.flatten().to(torch.float32),
bins=self.config.moe_num_experts,
min=0,
max=self.config.moe_num_experts - 1,
).to(original_dtype)
indices = top_k_index
flatten_indices = indices.view(-1)
sorted_indices = torch.argsort(flatten_indices)
permuted_tokens = hidden_states.index_select(0, sorted_indices // self.config.moe_topk)
fc1_output = self.fc1(permuted_tokens, tokens_per_expert)
projection, gate = torch.chunk(fc1_output, 2, dim=-1)
fc1_output = nn.functional.silu(projection) * gate
expert_output = self.fc2(fc1_output, tokens_per_expert)
unpermuted_tokens = torch.zeros(
(top_k_weights.shape[0] * self.config.moe_topk, expert_output.size(1)),
dtype=expert_output.dtype,
device=expert_output.device,
)
unpermuted_tokens.index_copy_(0, sorted_indices, expert_output)
unpermuted_tokens = unpermuted_tokens.view(-1, self.config.moe_topk, expert_output.size(1))
output = (unpermuted_tokens * top_k_weights.unsqueeze(-1)).sum(dim=1)
return output
| AriaExperts |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 5818,
"end": 5881
} | class ____(BinOp):
pass
@infer_global(operator.iadd)
| BinOpAdd |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/dataproc.py | {
"start": 10106,
"end": 18083
} | class ____(DataprocBaseTrigger):
"""
DataprocClusterTrigger run on the trigger worker to perform create Build operation.
:param cluster_name: The name of the cluster.
:param project_id: Google Cloud Project where the job is running
:param region: The Cloud Dataproc region in which to handle the request.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param polling_interval_seconds: polling period in seconds to check for the status
"""
def __init__(self, cluster_name: str, **kwargs):
super().__init__(**kwargs)
self.cluster_name = cluster_name
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
"airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger",
{
"cluster_name": self.cluster_name,
"project_id": self.project_id,
"region": self.region,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"polling_interval_seconds": self.polling_interval_seconds,
"delete_on_error": self.delete_on_error,
},
)
if not AIRFLOW_V_3_0_PLUS:
@provide_session
def get_task_instance(self, session: Session) -> TaskInstance:
query = session.query(TaskInstance).filter(
TaskInstance.dag_id == self.task_instance.dag_id,
TaskInstance.task_id == self.task_instance.task_id,
TaskInstance.run_id == self.task_instance.run_id,
TaskInstance.map_index == self.task_instance.map_index,
)
task_instance = query.one_or_none()
if task_instance is None:
raise AirflowException(
"TaskInstance with dag_id: %s,task_id: %s, run_id: %s and map_index: %s is not found.",
self.task_instance.dag_id,
self.task_instance.task_id,
self.task_instance.run_id,
self.task_instance.map_index,
)
return task_instance
async def get_task_state(self):
from airflow.sdk.execution_time.task_runner import RuntimeTaskInstance
task_states_response = await sync_to_async(RuntimeTaskInstance.get_task_states)(
dag_id=self.task_instance.dag_id,
task_ids=[self.task_instance.task_id],
run_ids=[self.task_instance.run_id],
map_index=self.task_instance.map_index,
)
try:
task_state = task_states_response[self.task_instance.run_id][self.task_instance.task_id]
except Exception:
raise AirflowException(
"TaskInstance with dag_id: %s, task_id: %s, run_id: %s and map_index: %s is not found",
self.task_instance.dag_id,
self.task_instance.task_id,
self.task_instance.run_id,
self.task_instance.map_index,
)
return task_state
async def safe_to_cancel(self) -> bool:
"""
Whether it is safe to cancel the external job which is being executed by this trigger.
This is to avoid the case that `asyncio.CancelledError` is called because the trigger itself is stopped.
Because in those cases, we should NOT cancel the external job.
"""
if AIRFLOW_V_3_0_PLUS:
task_state = await self.get_task_state()
else:
# Database query is needed to get the latest state of the task instance.
task_instance = self.get_task_instance() # type: ignore[call-arg]
task_state = task_instance.state
return task_state != TaskInstanceState.DEFERRED
async def run(self) -> AsyncIterator[TriggerEvent]:
try:
while True:
cluster = await self.fetch_cluster()
state = cluster.status.state
if state == ClusterStatus.State.ERROR:
await self.delete_when_error_occurred(cluster)
yield TriggerEvent(
{
"cluster_name": self.cluster_name,
"cluster_state": ClusterStatus.State.DELETING.name, # type: ignore
"cluster": Cluster.to_dict(cluster),
}
)
return
elif state == ClusterStatus.State.RUNNING:
yield TriggerEvent(
{
"cluster_name": self.cluster_name,
"cluster_state": ClusterStatus.State(state).name,
"cluster": Cluster.to_dict(cluster),
}
)
return
else:
self.log.info("Current state is %s", state)
self.log.info("Sleeping for %s seconds.", self.polling_interval_seconds)
await asyncio.sleep(self.polling_interval_seconds)
except asyncio.CancelledError:
try:
if self.delete_on_error and await self.safe_to_cancel():
self.log.info(
"Deleting the cluster as it is safe to delete as the airflow TaskInstance is not in "
"deferred state."
)
self.log.info("Deleting cluster %s.", self.cluster_name)
# The synchronous hook is utilized to delete the cluster when a task is cancelled.
# This is because the asynchronous hook deletion is not awaited when the trigger task
# is cancelled. The call for deleting the cluster through the sync hook is not a blocking
# call, which means it does not wait until the cluster is deleted.
self.get_sync_hook().delete_cluster(
region=self.region, cluster_name=self.cluster_name, project_id=self.project_id
)
self.log.info("Deleted cluster %s during cancellation.", self.cluster_name)
except Exception as e:
self.log.error("Error during cancellation handling: %s", e)
raise AirflowException("Error during cancellation handling: %s", e)
async def fetch_cluster(self) -> Cluster:
"""Fetch the cluster status."""
return await self.get_async_hook().get_cluster(
project_id=self.project_id, region=self.region, cluster_name=self.cluster_name
)
async def delete_when_error_occurred(self, cluster: Cluster) -> None:
"""
Delete the cluster on error.
:param cluster: The cluster to delete.
"""
if self.delete_on_error:
self.log.info("Deleting cluster %s.", self.cluster_name)
await self.get_async_hook().delete_cluster(
region=self.region, cluster_name=self.cluster_name, project_id=self.project_id
)
self.log.info("Cluster %s has been deleted.", self.cluster_name)
else:
self.log.info("Cluster %s is not deleted as delete_on_error is set to False.", self.cluster_name)
| DataprocClusterTrigger |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 232625,
"end": 239657
} | class ____(TestCase):
def test_basic(self):
dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128]
for dt in dts:
c = np.ones(53, dtype=bool)
assert_equal(np.where(c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
# assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
# assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
# assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
@skip(reason="object arrays")
def test_exotic_2(self):
# object cast
d = np.array(
[
-1.34,
-0.16,
-0.54,
-0.31,
-0.08,
-0.95,
0.000,
0.313,
0.547,
-0.18,
0.876,
0.236,
1.969,
0.310,
0.699,
1.013,
1.267,
0.229,
-1.39,
0.487,
]
)
nan = float("NaN")
e = np.array(
[
"5z",
"0l",
nan,
"Wz",
nan,
nan,
"Xq",
"cs",
nan,
nan,
"QN",
nan,
nan,
"Fd",
nan,
nan,
"kp",
nan,
"36",
"i1",
],
dtype=object,
)
m = np.array(
[0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool
)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
# minimal dtype result with NaN scalar (e.g required by pandas)
d = np.array([1.0, 2.0], dtype=np.float32)
e = float("NaN")
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float("Infinity")
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float("-Infinity")
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = 1e150
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:, np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:, 0], a[:, 0])
assert_array_equal(r[:, 1], b[:, 0])
def test_dtype_mix(self):
c = np.array(
[
False,
True,
False,
False,
False,
False,
True,
False,
False,
False,
True,
False,
]
)
a = np.uint8(1)
b = np.array(
[5.0, 0.0, 3.0, 2.0, -1.0, -4.0, 0.0, -10.0, 10.0, 1.0, 0.0, 3.0],
dtype=np.float64,
)
r = np.array(
[5.0, 1.0, 3.0, 2.0, -1.0, -4.0, 1.0, -10.0, 10.0, 1.0, 1.0, 3.0],
dtype=np.float64,
)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
@skip(reason="endianness")
def test_foreign(self):
c = np.array(
[
False,
True,
False,
False,
False,
False,
True,
False,
False,
False,
True,
False,
]
)
r = np.array(
[5.0, 1.0, 3.0, 2.0, -1.0, -4.0, 1.0, -10.0, 10.0, 1.0, 1.0, 3.0],
dtype=np.float64,
)
a = np.ones(1, dtype=">i4")
b = np.array(
[5.0, 0.0, 3.0, 2.0, -1.0, -4.0, 0.0, -10.0, 10.0, 1.0, 0.0, 3.0],
dtype=np.float64,
)
assert_equal(np.where(c, a, b), r)
b = b.astype(">f8")
assert_equal(np.where(c, a, b), r)
a = a.astype("<i4")
assert_equal(np.where(c, a, b), r)
c = c.astype(">i4")
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises((RuntimeError, ValueError), np.where, c, a, a)
assert_raises((RuntimeError, ValueError), np.where, c[0], a, b)
def test_empty_result(self):
# pass empty where result through an assignment which reads the data of
# empty arrays, error detectable with valgrind, see gh-8922
x = np.zeros((1, 1))
ibad = np.vstack(np.where(x == 99.0))
assert_array_equal(ibad, np.atleast_2d(np.array([[], []], dtype=np.intp)))
def test_largedim(self):
# invalid read regression gh-9304
shape = [10, 2, 3, 4, 5, 6]
np.random.seed(2)
array = np.random.rand(*shape)
for _ in range(10):
benchmark = array.nonzero()
result = array.nonzero()
assert_array_equal(benchmark, result)
def test_kwargs(self):
a = np.zeros(1)
with assert_raises(TypeError):
np.where(a, x=a, y=a)
| TestWhere |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 858037,
"end": 858875
} | class ____(sgqlc.types.Type):
"""Iteration field iteration settings for a project."""
__schema__ = github_schema
__field_names__ = ("duration", "id", "start_date", "title", "title_html")
duration = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="duration")
"""The iteration's duration in days"""
id = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="id")
"""The iteration's ID."""
start_date = sgqlc.types.Field(sgqlc.types.non_null(Date), graphql_name="startDate")
"""The iteration's start date"""
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
"""The iteration's title."""
title_html = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="titleHTML")
"""The iteration's html title."""
| ProjectV2IterationFieldIteration |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/choice.py | {
"start": 2141,
"end": 23651
} | class ____:
type: ChoiceTypeT
value: ChoiceT
constraints: ChoiceConstraintsT
was_forced: bool
index: int | None = None
def copy(
self,
*,
with_value: ChoiceT | None = None,
with_constraints: ChoiceConstraintsT | None = None,
) -> "ChoiceNode":
# we may want to allow this combination in the future, but for now it's
# a footgun.
if self.was_forced:
assert with_value is None, "modifying a forced node doesn't make sense"
# explicitly not copying index. node indices are only assigned via
# ExampleRecord. This prevents footguns with relying on stale indices
# after copying.
return ChoiceNode(
type=self.type,
value=self.value if with_value is None else with_value,
constraints=(
self.constraints if with_constraints is None else with_constraints
),
was_forced=self.was_forced,
)
@property
def trivial(self) -> bool:
"""
A node is trivial if it cannot be simplified any further. This does not
mean that modifying a trivial node can't produce simpler test cases when
viewing the tree as a whole. Just that when viewing this node in
isolation, this is the simplest the node can get.
"""
if self.was_forced:
return True
if self.type != "float":
zero_value = choice_from_index(0, self.type, self.constraints)
return choice_equal(self.value, zero_value)
else:
constraints = cast(FloatConstraints, self.constraints)
min_value = constraints["min_value"]
max_value = constraints["max_value"]
shrink_towards = 0.0
if min_value == -math.inf and max_value == math.inf:
return choice_equal(self.value, shrink_towards)
if (
not math.isinf(min_value)
and not math.isinf(max_value)
and math.ceil(min_value) <= math.floor(max_value)
):
# the interval contains an integer. the simplest integer is the
# one closest to shrink_towards
shrink_towards = max(math.ceil(min_value), shrink_towards)
shrink_towards = min(math.floor(max_value), shrink_towards)
return choice_equal(self.value, float(shrink_towards))
# the real answer here is "the value in [min_value, max_value] with
# the lowest denominator when represented as a fraction".
# It would be good to compute this correctly in the future, but it's
# also not incorrect to be conservative here.
return False
def __eq__(self, other: object) -> bool:
if not isinstance(other, ChoiceNode):
return NotImplemented
return (
self.type == other.type
and choice_equal(self.value, other.value)
and choice_constraints_equal(self.type, self.constraints, other.constraints)
and self.was_forced == other.was_forced
)
def __hash__(self) -> int:
return hash(
(
self.type,
choice_key(self.value),
choice_constraints_key(self.type, self.constraints),
self.was_forced,
)
)
def __repr__(self) -> str:
forced_marker = " [forced]" if self.was_forced else ""
return f"{self.type} {self.value!r}{forced_marker} {self.constraints!r}"
def _size_to_index(size: int, *, alphabet_size: int) -> int:
# this is the closed form of this geometric series:
# for i in range(size):
# index += alphabet_size**i
if alphabet_size <= 0:
assert size == 0
return 0
if alphabet_size == 1:
return size
v = (alphabet_size**size - 1) // (alphabet_size - 1)
# mypy thinks (m: int) // (n: int) -> Any. assert it back to int.
return cast(int, v)
def _index_to_size(index: int, alphabet_size: int) -> int:
if alphabet_size == 0:
return 0
elif alphabet_size == 1:
# there is only one string of each size, so the size is equal to its
# ordering.
return index
# the closed-form inverse of _size_to_index is
# size = math.floor(math.log(index * (alphabet_size - 1) + 1, alphabet_size))
# which is fast, but suffers from float precision errors. As performance is
# relatively critical here, we'll use this formula by default, but fall back to
# a much slower integer-only logarithm when the calculation is too close for
# comfort.
total = index * (alphabet_size - 1) + 1
size = math.log(total, alphabet_size)
# if this computation is close enough that it could have been affected by
# floating point errors, use a much slower integer-only logarithm instead,
# which is guaranteed to be precise.
if 0 < math.ceil(size) - size < 1e-7:
s = 0
while total >= alphabet_size:
total //= alphabet_size
s += 1
return s
return math.floor(size)
def collection_index(
choice: Sequence[T],
*,
min_size: int,
alphabet_size: int,
to_order: Callable[[T], int],
) -> int:
# Collections are ordered by counting the number of values of each size,
# starting with min_size. alphabet_size indicates how many options there
# are for a single element. to_order orders an element by returning an n ≥ 0.
# we start by adding the size to the index, relative to min_size.
index = _size_to_index(len(choice), alphabet_size=alphabet_size) - _size_to_index(
min_size, alphabet_size=alphabet_size
)
# We then add each element c to the index, starting from the end (so "ab" is
# simpler than "ba"). Each loop takes c at position i in the sequence and
# computes the number of sequences of size i which come before it in the ordering.
# this running_exp computation is equivalent to doing
# index += (alphabet_size**i) * n
# but reuses intermediate exponentiation steps for efficiency.
running_exp = 1
for c in reversed(choice):
index += running_exp * to_order(c)
running_exp *= alphabet_size
return index
def collection_value(
index: int,
*,
min_size: int,
alphabet_size: int,
from_order: Callable[[int], T],
) -> list[T]:
from hypothesis.internal.conjecture.engine import BUFFER_SIZE
# this function is probably easiest to make sense of as an inverse of
# collection_index, tracking ~corresponding lines of code between the two.
index += _size_to_index(min_size, alphabet_size=alphabet_size)
size = _index_to_size(index, alphabet_size=alphabet_size)
# index -> value computation can be arbitrarily expensive for arbitrarily
# large min_size collections. short-circuit if the resulting size would be
# obviously-too-large. callers will generally turn this into a .mark_overrun().
if size >= BUFFER_SIZE:
raise ChoiceTooLarge
# subtract out the amount responsible for the size
index -= _size_to_index(size, alphabet_size=alphabet_size)
vals: list[T] = []
for i in reversed(range(size)):
# optimization for common case when we hit index 0. Exponentiation
# on large integers is expensive!
if index == 0:
n = 0
else:
n = index // (alphabet_size**i)
# subtract out the nearest multiple of alphabet_size**i
index -= n * (alphabet_size**i)
vals.append(from_order(n))
return vals
def zigzag_index(value: int, *, shrink_towards: int) -> int:
# value | 0 1 -1 2 -2 3 -3 4
# index | 0 1 2 3 4 5 6 7
index = 2 * abs(shrink_towards - value)
if value > shrink_towards:
index -= 1
return index
def zigzag_value(index: int, *, shrink_towards: int) -> int:
assert index >= 0
# count how many "steps" away from shrink_towards we are.
n = (index + 1) // 2
# now check if we're stepping up or down from shrink_towards.
if (index % 2) == 0:
n *= -1
return shrink_towards + n
def choice_to_index(choice: ChoiceT, constraints: ChoiceConstraintsT) -> int:
# This function takes a choice in the choice sequence and returns the
# complexity index of that choice from among its possible values, where 0
# is the simplest.
#
# Note that the index of a choice depends on its constraints. The simplest value
# (at index 0) for {"min_value": None, "max_value": None} is 0, while for
# {"min_value": 1, "max_value": None} the simplest value is 1.
#
# choice_from_index inverts this function. An invariant on both functions is
# that they must be injective. Unfortunately, floats do not currently respect
# this. That's not *good*, but nothing has blown up - yet. And ordering
# floats in a sane manner is quite hard, so I've left it for another day.
if isinstance(choice, int) and not isinstance(choice, bool):
# Let a = shrink_towards.
# * Unbounded: Ordered by (|a - x|, sgn(a - x)). Think of a zigzag.
# [a, a + 1, a - 1, a + 2, a - 2, ...]
# * Semi-bounded: Same as unbounded, except stop on one side when you hit
# {min, max}_value. so min_value=-1 a=0 has order
# [0, 1, -1, 2, 3, 4, ...]
# * Bounded: Same as unbounded and semibounded, except stop on each side
# when you hit {min, max}_value.
#
# To simplify and gain intuition about this ordering, you can think about
# the most common case where 0 is first (a = 0). We deviate from this only
# rarely, e.g. for datetimes, where we generally want year 2000 to be
# simpler than year 0.
constraints = cast(IntegerConstraints, constraints)
shrink_towards = constraints["shrink_towards"]
min_value = constraints["min_value"]
max_value = constraints["max_value"]
if min_value is not None:
shrink_towards = max(min_value, shrink_towards)
if max_value is not None:
shrink_towards = min(max_value, shrink_towards)
if min_value is None and max_value is None:
# case: unbounded
return zigzag_index(choice, shrink_towards=shrink_towards)
elif min_value is not None and max_value is None:
# case: semibounded below
# min_value = -2
# index | 0 1 2 3 4 5 6 7
# v | 0 1 -1 2 -2 3 4 5
if abs(choice - shrink_towards) <= (shrink_towards - min_value):
return zigzag_index(choice, shrink_towards=shrink_towards)
return choice - min_value
elif max_value is not None and min_value is None:
# case: semibounded above
if abs(choice - shrink_towards) <= (max_value - shrink_towards):
return zigzag_index(choice, shrink_towards=shrink_towards)
return max_value - choice
else:
# case: bounded
# range = [-2, 5]
# shrink_towards = 2
# index | 0 1 2 3 4 5 6 7
# v | 2 3 1 4 0 5 -1 -2
#
# ^ with zero weights at index = [0, 2, 6]
# index | 0 1 2 3 4
# v | 3 4 0 5 -2
assert min_value is not None
assert max_value is not None
assert constraints["weights"] is None or all(
w > 0 for w in constraints["weights"].values()
), "technically possible but really annoying to support zero weights"
# check which side gets exhausted first
if (shrink_towards - min_value) < (max_value - shrink_towards):
# Below shrink_towards gets exhausted first. Equivalent to
# semibounded below
if abs(choice - shrink_towards) <= (shrink_towards - min_value):
return zigzag_index(choice, shrink_towards=shrink_towards)
return choice - min_value
else:
# Above shrink_towards gets exhausted first. Equivalent to semibounded
# above
if abs(choice - shrink_towards) <= (max_value - shrink_towards):
return zigzag_index(choice, shrink_towards=shrink_towards)
return max_value - choice
elif isinstance(choice, bool):
constraints = cast(BooleanConstraints, constraints)
# Ordered by [False, True].
p = constraints["p"]
if not (2 ** (-64) < p < (1 - 2 ** (-64))):
# only one option is possible, so whatever it is is first.
return 0
return int(choice)
elif isinstance(choice, bytes):
constraints = cast(BytesConstraints, constraints)
return collection_index(
list(choice),
min_size=constraints["min_size"],
alphabet_size=2**8,
to_order=identity,
)
elif isinstance(choice, str):
constraints = cast(StringConstraints, constraints)
intervals = constraints["intervals"]
return collection_index(
choice,
min_size=constraints["min_size"],
alphabet_size=len(intervals),
to_order=intervals.index_from_char_in_shrink_order,
)
elif isinstance(choice, float):
sign = int(math.copysign(1.0, choice) < 0)
return (sign << 64) | float_to_lex(abs(choice))
else:
raise NotImplementedError
def choice_from_index(
index: int, choice_type: ChoiceTypeT, constraints: ChoiceConstraintsT
) -> ChoiceT:
assert index >= 0
if choice_type == "integer":
constraints = cast(IntegerConstraints, constraints)
shrink_towards = constraints["shrink_towards"]
min_value = constraints["min_value"]
max_value = constraints["max_value"]
if min_value is not None:
shrink_towards = max(min_value, shrink_towards)
if max_value is not None:
shrink_towards = min(max_value, shrink_towards)
if min_value is None and max_value is None:
# case: unbounded
return zigzag_value(index, shrink_towards=shrink_towards)
elif min_value is not None and max_value is None:
# case: semibounded below
if index <= zigzag_index(min_value, shrink_towards=shrink_towards):
return zigzag_value(index, shrink_towards=shrink_towards)
return index + min_value
elif max_value is not None and min_value is None:
# case: semibounded above
if index <= zigzag_index(max_value, shrink_towards=shrink_towards):
return zigzag_value(index, shrink_towards=shrink_towards)
return max_value - index
else:
# case: bounded
assert min_value is not None
assert max_value is not None
assert constraints["weights"] is None or all(
w > 0 for w in constraints["weights"].values()
), "possible but really annoying to support zero weights"
if (shrink_towards - min_value) < (max_value - shrink_towards):
# equivalent to semibounded below case
if index <= zigzag_index(min_value, shrink_towards=shrink_towards):
return zigzag_value(index, shrink_towards=shrink_towards)
return index + min_value
else:
# equivalent to semibounded above case
if index <= zigzag_index(max_value, shrink_towards=shrink_towards):
return zigzag_value(index, shrink_towards=shrink_towards)
return max_value - index
elif choice_type == "boolean":
constraints = cast(BooleanConstraints, constraints)
# Ordered by [False, True].
p = constraints["p"]
only = None
if p <= 2 ** (-64):
only = False
elif p >= (1 - 2 ** (-64)):
only = True
assert index in {0, 1}
if only is not None:
# only one choice
assert index == 0
return only
return bool(index)
elif choice_type == "bytes":
constraints = cast(BytesConstraints, constraints)
value_b = collection_value(
index,
min_size=constraints["min_size"],
alphabet_size=2**8,
from_order=identity,
)
return bytes(value_b)
elif choice_type == "string":
constraints = cast(StringConstraints, constraints)
intervals = constraints["intervals"]
# _s because mypy is unhappy with reusing different-typed names in branches,
# even if the branches are disjoint.
value_s = collection_value(
index,
min_size=constraints["min_size"],
alphabet_size=len(intervals),
from_order=intervals.char_in_shrink_order,
)
return "".join(value_s)
elif choice_type == "float":
constraints = cast(FloatConstraints, constraints)
sign = -1 if index >> 64 else 1
result = sign * lex_to_float(index & ((1 << 64) - 1))
clamper = make_float_clamper(
min_value=constraints["min_value"],
max_value=constraints["max_value"],
smallest_nonzero_magnitude=constraints["smallest_nonzero_magnitude"],
allow_nan=constraints["allow_nan"],
)
return clamper(result)
else:
raise NotImplementedError
def choice_permitted(choice: ChoiceT, constraints: ChoiceConstraintsT) -> bool:
if isinstance(choice, int) and not isinstance(choice, bool):
constraints = cast(IntegerConstraints, constraints)
min_value = constraints["min_value"]
max_value = constraints["max_value"]
if min_value is not None and choice < min_value:
return False
return not (max_value is not None and choice > max_value)
elif isinstance(choice, float):
constraints = cast(FloatConstraints, constraints)
if math.isnan(choice):
return constraints["allow_nan"]
if 0 < abs(choice) < constraints["smallest_nonzero_magnitude"]:
return False
return sign_aware_lte(constraints["min_value"], choice) and sign_aware_lte(
choice, constraints["max_value"]
)
elif isinstance(choice, str):
constraints = cast(StringConstraints, constraints)
if len(choice) < constraints["min_size"]:
return False
if (
constraints["max_size"] is not None
and len(choice) > constraints["max_size"]
):
return False
return all(ord(c) in constraints["intervals"] for c in choice)
elif isinstance(choice, bytes):
constraints = cast(BytesConstraints, constraints)
if len(choice) < constraints["min_size"]:
return False
return constraints["max_size"] is None or len(choice) <= constraints["max_size"]
elif isinstance(choice, bool):
constraints = cast(BooleanConstraints, constraints)
if constraints["p"] <= 0:
return choice is False
if constraints["p"] >= 1:
return choice is True
return True
else:
raise NotImplementedError(f"unhandled type {type(choice)} with value {choice}")
def choices_key(choices: Sequence[ChoiceT]) -> tuple[ChoiceKeyT, ...]:
return tuple(choice_key(choice) for choice in choices)
def choice_key(choice: ChoiceT) -> ChoiceKeyT:
if isinstance(choice, float):
# float_to_int to distinguish -0.0/0.0, signaling/nonsignaling nans, etc,
# and then add a "float" key to avoid colliding with actual integers.
return ("float", float_to_int(choice))
if isinstance(choice, bool):
# avoid choice_key(0) == choice_key(False)
return ("bool", choice)
return choice
def choice_equal(choice1: ChoiceT, choice2: ChoiceT) -> bool:
assert type(choice1) is type(choice2), (choice1, choice2)
return choice_key(choice1) == choice_key(choice2)
def choice_constraints_equal(
choice_type: ChoiceTypeT,
constraints1: ChoiceConstraintsT,
constraints2: ChoiceConstraintsT,
) -> bool:
return choice_constraints_key(choice_type, constraints1) == choice_constraints_key(
choice_type, constraints2
)
def choice_constraints_key(
choice_type: ChoiceTypeT, constraints: ChoiceConstraintsT
) -> tuple[Hashable, ...]:
if choice_type == "float":
constraints = cast(FloatConstraints, constraints)
return (
float_to_int(constraints["min_value"]),
float_to_int(constraints["max_value"]),
constraints["allow_nan"],
constraints["smallest_nonzero_magnitude"],
)
if choice_type == "integer":
constraints = cast(IntegerConstraints, constraints)
return (
constraints["min_value"],
constraints["max_value"],
None if constraints["weights"] is None else tuple(constraints["weights"]),
constraints["shrink_towards"],
)
return tuple(constraints[key] for key in sorted(constraints)) # type: ignore
def choices_size(choices: Iterable[ChoiceT]) -> int:
from hypothesis.database import choices_to_bytes
return len(choices_to_bytes(choices))
| ChoiceNode |
python | doocs__leetcode | solution/0600-0699/0679.24 Game/Solution.py | {
"start": 0,
"end": 1302
} | class ____:
def judgePoint24(self, cards: List[int]) -> bool:
def dfs(nums: List[float]):
n = len(nums)
if n == 1:
if abs(nums[0] - 24) < 1e-6:
return True
return False
ok = False
for i in range(n):
for j in range(n):
if i != j:
nxt = [nums[k] for k in range(n) if k != i and k != j]
for op in ops:
match op:
case "/":
if nums[j] == 0:
continue
ok |= dfs(nxt + [nums[i] / nums[j]])
case "*":
ok |= dfs(nxt + [nums[i] * nums[j]])
case "+":
ok |= dfs(nxt + [nums[i] + nums[j]])
case "-":
ok |= dfs(nxt + [nums[i] - nums[j]])
if ok:
return True
return ok
ops = ("+", "-", "*", "/")
nums = [float(x) for x in cards]
return dfs(nums)
| Solution |
python | apache__airflow | providers/standard/tests/unit/standard/utils/test_python_virtualenv.py | {
"start": 1158,
"end": 9773
} | class ____:
@mock.patch("shutil.which")
def test_use_uv(self, mock_shutil_which):
with conf_vars({("standard", "venv_install_method"): "auto"}):
mock_shutil_which.side_effect = [True]
assert _use_uv() is True
mock_shutil_which.side_effect = [False]
assert _use_uv() is False
with conf_vars({("standard", "venv_install_method"): "uv"}):
assert _use_uv() is True
with conf_vars({("standard", "venv_install_method"): "pip"}):
assert _use_uv() is False
@pytest.mark.parametrize(
("index_urls", "expected_pip_conf_content", "unexpected_pip_conf_content"),
[
[[], ["[global]", "no-index ="], ["index-url", "extra", "http", "pypi"]],
[["http://mysite"], ["[global]", "index-url", "http://mysite"], ["no-index", "extra", "pypi"]],
[
["http://mysite", "https://othersite"],
["[global]", "index-url", "http://mysite", "extra", "https://othersite"],
["no-index", "pypi"],
],
[
["http://mysite", "https://othersite", "http://site"],
["[global]", "index-url", "http://mysite", "extra", "https://othersite http://site"],
["no-index", "pypi"],
],
],
)
def test_generate_pip_conf(
self,
index_urls: list[str],
expected_pip_conf_content: list[str],
unexpected_pip_conf_content: list[str],
tmp_path: Path,
):
tmp_file = tmp_path / "pip.conf"
_generate_pip_conf(tmp_file, index_urls)
generated_conf = tmp_file.read_text()
for term in expected_pip_conf_content:
assert term in generated_conf
for term in unexpected_pip_conf_content:
assert term not in generated_conf
@mock.patch("airflow.providers.standard.utils.python_virtualenv._execute_in_subprocess")
@conf_vars({("standard", "venv_install_method"): "pip"})
def test_should_create_virtualenv_pip(self, mock_execute_in_subprocess):
python_bin = prepare_virtualenv(
venv_directory="/VENV", python_bin="pythonVER", system_site_packages=False, requirements=[]
)
assert python_bin == "/VENV/bin/python"
mock_execute_in_subprocess.assert_called_once_with(["pythonVER", "-m", "venv", "/VENV"])
@mock.patch("airflow.providers.standard.utils.python_virtualenv._execute_in_subprocess")
@conf_vars({("standard", "venv_install_method"): "uv"})
def test_should_create_virtualenv_uv(self, mock_execute_in_subprocess):
python_bin = prepare_virtualenv(
venv_directory="/VENV", python_bin="pythonVER", system_site_packages=False, requirements=[]
)
assert python_bin == "/VENV/bin/python"
mock_execute_in_subprocess.assert_called_once_with(
["uv", "venv", "--allow-existing", "--seed", "--python", "pythonVER", "/VENV"]
)
@mock.patch("airflow.providers.standard.utils.python_virtualenv._execute_in_subprocess")
@conf_vars({("standard", "venv_install_method"): "pip"})
def test_should_create_virtualenv_with_system_packages_pip(self, mock_execute_in_subprocess):
python_bin = prepare_virtualenv(
venv_directory="/VENV", python_bin="pythonVER", system_site_packages=True, requirements=[]
)
assert python_bin == "/VENV/bin/python"
mock_execute_in_subprocess.assert_called_once_with(
["pythonVER", "-m", "venv", "/VENV", "--system-site-packages"]
)
@mock.patch("airflow.providers.standard.utils.python_virtualenv._execute_in_subprocess")
@conf_vars({("standard", "venv_install_method"): "uv"})
def test_should_create_virtualenv_with_system_packages_uv(self, mock_execute_in_subprocess):
python_bin = prepare_virtualenv(
venv_directory="/VENV", python_bin="pythonVER", system_site_packages=True, requirements=[]
)
assert python_bin == "/VENV/bin/python"
mock_execute_in_subprocess.assert_called_once_with(
[
"uv",
"venv",
"--allow-existing",
"--seed",
"--python",
"pythonVER",
"--system-site-packages",
"/VENV",
]
)
@mock.patch("airflow.providers.standard.utils.python_virtualenv._execute_in_subprocess")
@conf_vars({("standard", "venv_install_method"): "pip"})
def test_pip_install_options_pip(self, mock_execute_in_subprocess):
pip_install_options = ["--no-deps"]
python_bin = prepare_virtualenv(
venv_directory="/VENV",
python_bin="pythonVER",
system_site_packages=True,
requirements=["apache-beam[gcp]"],
pip_install_options=pip_install_options,
)
assert python_bin == "/VENV/bin/python"
mock_execute_in_subprocess.assert_called_with(
["/VENV/bin/pip", "install", *pip_install_options, "apache-beam[gcp]"],
env=mock.ANY,
)
@mock.patch("airflow.providers.standard.utils.python_virtualenv._execute_in_subprocess")
@conf_vars({("standard", "venv_install_method"): "uv"})
def test_pip_install_options_uv(self, mock_execute_in_subprocess):
pip_install_options = ["--no-deps"]
python_bin = prepare_virtualenv(
venv_directory="/VENV",
python_bin="pythonVER",
system_site_packages=True,
requirements=["apache-beam[gcp]"],
pip_install_options=pip_install_options,
)
assert python_bin == "/VENV/bin/python"
mock_execute_in_subprocess.assert_called_with(
[
"uv",
"pip",
"install",
"--python",
"/VENV/bin/python",
*pip_install_options,
"apache-beam[gcp]",
],
env=mock.ANY,
)
@mock.patch("airflow.providers.standard.utils.python_virtualenv._execute_in_subprocess")
@conf_vars({("standard", "venv_install_method"): "pip"})
def test_should_create_virtualenv_with_extra_packages_pip(self, mock_execute_in_subprocess):
python_bin = prepare_virtualenv(
venv_directory="/VENV",
python_bin="pythonVER",
system_site_packages=False,
requirements=["apache-beam[gcp]"],
)
assert python_bin == "/VENV/bin/python"
mock_execute_in_subprocess.assert_any_call(["pythonVER", "-m", "venv", "/VENV"])
mock_execute_in_subprocess.assert_called_with(
["/VENV/bin/pip", "install", "apache-beam[gcp]"], env=mock.ANY
)
@mock.patch("airflow.providers.standard.utils.python_virtualenv._execute_in_subprocess")
@conf_vars({("standard", "venv_install_method"): "uv"})
def test_should_create_virtualenv_with_extra_packages_uv(self, mock_execute_in_subprocess):
python_bin = prepare_virtualenv(
venv_directory="/VENV",
python_bin="pythonVER",
system_site_packages=False,
requirements=["apache-beam[gcp]"],
)
assert python_bin == "/VENV/bin/python"
mock_execute_in_subprocess.assert_called_with(
["uv", "pip", "install", "--python", "/VENV/bin/python", "apache-beam[gcp]"],
env=mock.ANY,
)
@pytest.mark.parametrize(
("decorators", "expected_decorators"),
[
(["@task.virtualenv"], []),
(["@task.virtualenv()"], []),
(['@task.virtualenv(serializer="dill")'], []),
(["@foo", "@task.virtualenv", "@bar"], ["@foo", "@bar"]),
(["@foo", "@task.virtualenv()", "@bar"], ["@foo", "@bar"]),
],
ids=["without_parens", "parens", "with_args", "nested_without_parens", "nested_with_parens"],
)
def test_remove_task_decorator(self, decorators: list[str], expected_decorators: list[str]):
concated_decorators = "\n".join(decorators)
expected_decorator = "\n".join(expected_decorators)
SCRIPT = dedent(
"""
def f():
# @task.virtualenv
import funcsigs
"""
)
py_source = concated_decorators + SCRIPT
expected_source = expected_decorator + SCRIPT if expected_decorator else SCRIPT.lstrip()
res = remove_task_decorator(python_source=py_source, task_decorator_name="@task.virtualenv")
assert res == expected_source
| TestPrepareVirtualenv |
python | kamyu104__LeetCode-Solutions | Python/invalid-transactions.py | {
"start": 55,
"end": 1248
} | class ____:
def invalidTransactions(self, transactions):
AMOUNT, MINUTES = 1000, 60
trans = map(lambda x: (x[0], int(x[1]), int(x[2]), x[3]),
(transaction.split(',') for transaction in transactions))
trans.sort(key=lambda t: t[1])
trans_indexes = collections.defaultdict(list)
for i, t in enumerate(trans):
trans_indexes[t[0]].append(i)
result = []
for name, indexes in trans_indexes.iteritems():
left, right = 0, 0
for i, t_index in enumerate(indexes):
t = trans[t_index]
if (t[2] > AMOUNT):
result.append("{},{},{},{}".format(*t))
continue
while left+1 < len(indexes) and trans[indexes[left]][1] < t[1]-MINUTES:
left += 1
while right+1 < len(indexes) and trans[indexes[right+1]][1] <= t[1]+MINUTES:
right += 1
for i in xrange(left, right+1):
if trans[indexes[i]][3] != t[3]:
result.append("{},{},{},{}".format(*t))
break
return result
| Solution |
python | openai__openai-python | src/openai/types/eval_create_params.py | {
"start": 6087,
"end": 6242
} | class ____(TextSimilarityGraderParam, total=False):
pass_threshold: Required[float]
"""The threshold for the score."""
| TestingCriterionTextSimilarity |
python | jazzband__django-simple-history | simple_history/tests/tests/test_manager.py | {
"start": 15577,
"end": 16987
} | class ____(TestCase):
def setUp(self):
d = datetime(3021, 1, 1, 10, 0)
self.poll1 = Poll.objects.create(question="why?", pub_date=d)
self.poll2 = Poll.objects.create(question="how?", pub_date=d)
self.choice1 = Choice.objects.create(poll=self.poll1, votes=1)
self.choice2 = Choice.objects.create(poll=self.poll1, votes=2)
self.choice3 = Choice.objects.create(poll=self.poll2, votes=3)
def test__select_related_history_tracked_objs__prefetches_expected_objects(self):
num_choices = Choice.objects.count()
self.assertEqual(num_choices, 3)
def access_related_objs(records):
for record in records:
self.assertIsInstance(record.poll, Poll)
# Without prefetching:
with self.assertNumQueries(1):
historical_records = Choice.history.all()
self.assertEqual(len(historical_records), num_choices)
with self.assertNumQueries(num_choices):
access_related_objs(historical_records)
# With prefetching:
with self.assertNumQueries(1):
historical_records = (
Choice.history.all()._select_related_history_tracked_objs()
)
self.assertEqual(len(historical_records), num_choices)
with self.assertNumQueries(0):
access_related_objs(historical_records)
| PrefetchingMethodsTestCase |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 9686,
"end": 10007
} | class ____(SignedIntegerType):
"""
128-bit signed integer type.
.. warning::
This functionality is considered **unstable**.
It is a work-in-progress feature and may not always work as expected.
It may be changed at any point without it being considered a breaking change.
"""
| Int128 |
python | kamyu104__LeetCode-Solutions | Python/factor-combinations.py | {
"start": 36,
"end": 645
} | class ____(object):
# @param {integer} n
# @return {integer[][]}
def getFactors(self, n):
result = []
factors = []
self.getResult(n, result, factors)
return result
def getResult(self, n, result, factors):
i = 2 if not factors else factors[-1]
while i <= n / i:
if n % i == 0:
factors.append(i)
factors.append(n / i)
result.append(list(factors))
factors.pop()
self.getResult(n / i, result, factors)
factors.pop()
i += 1
| Solution |
python | fastapi__sqlmodel | docs_src/tutorial/offset_and_limit/tutorial004.py | {
"start": 100,
"end": 1649
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.age > 32).offset(1).limit(2)
results = session.exec(statement)
heroes = results.all()
print(heroes)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | keras-team__keras | keras/src/layers/reshaping/flatten_test.py | {
"start": 256,
"end": 4815
} | class ____(testing.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
@pytest.mark.requires_trainable_backend
def test_flatten(self, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors.")
inputs = np.random.random((10, 3, 5, 5)).astype("float32")
# Make the ndarray relatively sparse
inputs = np.multiply(inputs, inputs >= 0.8)
expected_output_channels_last = ops.convert_to_tensor(
np.reshape(inputs, (-1, 5 * 5 * 3))
)
expected_output_channels_first = ops.convert_to_tensor(
np.reshape(np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3))
)
if sparse:
if backend.backend() == "tensorflow":
import tensorflow as tf
dense_to_sparse = tf.sparse.from_dense
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
dense_to_sparse = jax_sparse.BCOO.fromdense
else:
self.fail(
f"Sparse is unsupported with backend {backend.backend()}"
)
inputs = dense_to_sparse(inputs)
expected_output_channels_last = dense_to_sparse(
expected_output_channels_last
)
expected_output_channels_first = dense_to_sparse(
expected_output_channels_first
)
# Test default data_format and channels_last
self.run_layer_test(
layers.Flatten,
init_kwargs={},
input_data=inputs,
input_sparse=True,
expected_output=(
expected_output_channels_last
if backend.config.image_data_format() == "channels_last"
else expected_output_channels_first
),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Flatten,
init_kwargs={"data_format": "channels_last"},
input_data=inputs,
input_sparse=True,
expected_output=expected_output_channels_last,
expected_output_sparse=sparse,
run_training_check=not sparse,
)
# Test channels_first
self.run_layer_test(
layers.Flatten,
init_kwargs={"data_format": "channels_first"},
input_data=inputs,
input_sparse=True,
expected_output=expected_output_channels_first,
expected_output_sparse=sparse,
run_training_check=not sparse,
)
@pytest.mark.requires_trainable_backend
def test_flatten_with_scalar_channels(self):
inputs = np.random.random((10,)).astype("float32")
expected_output = ops.convert_to_tensor(np.expand_dims(inputs, -1))
# Test default data_format and channels_last
self.run_layer_test(
layers.Flatten,
init_kwargs={},
input_data=inputs,
expected_output=expected_output,
)
self.run_layer_test(
layers.Flatten,
init_kwargs={"data_format": "channels_last"},
input_data=inputs,
expected_output=expected_output,
)
# Test channels_first
self.run_layer_test(
layers.Flatten,
init_kwargs={"data_format": "channels_first"},
input_data=inputs,
expected_output=expected_output,
)
def test_flatten_symbolic_with_dynamic_batch_size(self):
input_layer = layers.Input(batch_shape=(None, 2, 3))
flattened = layers.Flatten()(input_layer)
self.assertEqual(flattened.shape, (None, 2 * 3))
def test_flatten_symbolic_with_dynamic_dimension(self):
input_layer = layers.Input(batch_shape=(5, 2, None))
flattened = layers.Flatten()(input_layer)
self.assertEqual(flattened.shape, (5, None))
@skip_if_backend("openvino", "Dynamic dimensions not supported by OpenVino")
def test_flatten_with_dynamic_batch_size_and_dynamic_dimenstions(self):
def generator():
yield (np.ones((3, 5, 7), dtype="float32"),)
yield (np.ones((2, 7, 5), dtype="float32"),)
model = models.Sequential([layers.Flatten()])
model.predict(generator())
| FlattenTest |
python | PyCQA__pydocstyle | src/tests/test_integration.py | {
"start": 299,
"end": 45901
} | class ____:
"""An isolated environment where pydocstyle can be run.
Since running pydocstyle as a script is affected by local config files,
it's important that tests will run in an isolated environment. This class
should be used as a context manager and offers utility methods for adding
files to the environment and changing the environment's configuration.
"""
Result = namedtuple('Result', ('out', 'err', 'code'))
def __init__(
self,
script_name='pydocstyle',
section_name='pydocstyle',
config_name='tox.ini',
):
"""Initialize the object."""
self.tempdir = None
self.script_name = script_name
self.section_name = section_name
self.config_name = config_name
def write_config(self, prefix='', name=None, **kwargs):
"""Change an environment config file.
Applies changes to `tox.ini` relative to `tempdir/prefix`.
If the given path prefix does not exist it is created.
"""
base = os.path.join(self.tempdir, prefix) if prefix else self.tempdir
if not os.path.isdir(base):
self.makedirs(base)
name = self.config_name if name is None else name
if name.endswith('.toml'):
def convert_value(val):
return (
repr(val).lower()
if isinstance(val, bool)
else repr(val)
)
else:
def convert_value(val):
return val
with open(os.path.join(base, name), 'wt') as conf:
conf.write(f"[{self.section_name}]\n")
for k, v in kwargs.items():
conf.write("{} = {}\n".format(
k.replace('_', '-'), convert_value(v)
))
def open(self, path, *args, **kwargs):
"""Open a file in the environment.
The file path should be relative to the base of the environment.
"""
return open(os.path.join(self.tempdir, path), *args, **kwargs)
def get_path(self, name, prefix=''):
return os.path.join(self.tempdir, prefix, name)
def makedirs(self, path, *args, **kwargs):
"""Create a directory in a path relative to the environment base."""
os.makedirs(os.path.join(self.tempdir, path), *args, **kwargs)
def invoke(self, args="", target=None):
"""Run pydocstyle on the environment base folder with the given args.
If `target` is not None, will run pydocstyle on `target` instead of
the environment base folder.
"""
run_target = self.tempdir if target is None else \
os.path.join(self.tempdir, target)
cmd = shlex.split("{} {} {}"
.format(self.script_name, run_target, args),
posix=False)
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
return self.Result(out=out.decode('utf-8'),
err=err.decode('utf-8'),
code=p.returncode)
def __enter__(self):
self.tempdir = tempfile.mkdtemp()
# Make sure we won't be affected by other config files
self.write_config()
return self
def __exit__(self, *args, **kwargs):
shutil.rmtree(self.tempdir)
pass
@pytest.fixture(scope="module")
def install_package(request):
"""Install the package in development mode for the tests.
This is so we can run the integration tests on the installed console
script.
"""
cwd = os.path.join(os.path.dirname(__file__), '..', '..')
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-e", "."], cwd=cwd
)
yield
subprocess.check_call(
[sys.executable, "-m", "pip", "uninstall", "-y", "pydocstyle"], cwd=cwd
)
@pytest.fixture(scope="function", params=['ini', 'toml'])
def env(request):
"""Add a testing environment to a test method."""
sandbox_settings = {
'ini': {
'section_name': 'pydocstyle',
'config_name': 'tox.ini',
},
'toml': {
'section_name': 'tool.pydocstyle',
'config_name': 'pyproject.toml',
},
}[request.param]
with SandboxEnv(**sandbox_settings) as test_env:
yield test_env
pytestmark = pytest.mark.usefixtures("install_package")
def parse_errors(err):
"""Parse `err` to a dictionary of {filename: error_codes}.
This is for test purposes only. All file names should be different.
"""
result = {}
py_ext = '.py'
lines = err.split('\n')
while lines:
curr_line = lines.pop(0)
filename = curr_line[:curr_line.find(py_ext) + len(py_ext)]
if lines:
err_line = lines.pop(0).strip()
err_code = err_line.split(':')[0]
basename = os.path.basename(filename)
result.setdefault(basename, set()).add(err_code)
return result
def test_pep257_conformance():
"""Test that we conform to PEP 257."""
base_dir = (pathlib.Path(__file__).parent / '..').resolve()
excluded = base_dir / 'tests' / 'test_cases'
src_files = (str(path) for path in base_dir.glob('**/*.py')
if excluded not in path.parents)
ignored = {'D104', 'D105'}
select = violations.conventions.pep257 - ignored
errors = list(checker.check(src_files, select=select))
assert errors == [], errors
def test_ignore_list():
"""Test that `ignore`d errors are not reported in the API."""
function_to_check = textwrap.dedent('''
def function_with_bad_docstring(foo):
""" does spacinwithout a period in the end
no blank line after one-liner is bad. Also this - """
return foo
''')
expected_error_codes = {'D100', 'D400', 'D401', 'D205', 'D209', 'D210',
'D403', 'D415', 'D213'}
mock_open = mock.mock_open(read_data=function_to_check)
from pydocstyle import checker
with mock.patch.object(
checker.tk, 'open', mock_open, create=True):
# Passing a blank ignore here explicitly otherwise
# checkers takes the pep257 ignores by default.
errors = tuple(checker.check(['filepath'], ignore={}))
error_codes = {error.code for error in errors}
assert error_codes == expected_error_codes
# We need to recreate the mock, otherwise the read file is empty
mock_open = mock.mock_open(read_data=function_to_check)
with mock.patch.object(
checker.tk, 'open', mock_open, create=True):
ignored = {'D100', 'D202', 'D213'}
errors = tuple(checker.check(['filepath'], ignore=ignored))
error_codes = {error.code for error in errors}
assert error_codes == expected_error_codes - ignored
def test_skip_errors():
"""Test that `ignore`d errors are not reported in the API."""
function_to_check = textwrap.dedent('''
def function_with_bad_docstring(foo): # noqa: D400, D401, D403, D415
""" does spacinwithout a period in the end
no blank line after one-liner is bad. Also this - """
return foo
''')
expected_error_codes = {'D100', 'D205', 'D209', 'D210', 'D213'}
mock_open = mock.mock_open(read_data=function_to_check)
from pydocstyle import checker
with mock.patch.object(
checker.tk, 'open', mock_open, create=True):
# Passing a blank ignore here explicitly otherwise
# checkers takes the pep257 ignores by default.
errors = tuple(checker.check(['filepath'], ignore={}))
error_codes = {error.code for error in errors}
assert error_codes == expected_error_codes
skipped_error_codes = {'D400', 'D401', 'D403', 'D415'}
# We need to recreate the mock, otherwise the read file is empty
mock_open = mock.mock_open(read_data=function_to_check)
with mock.patch.object(
checker.tk, 'open', mock_open, create=True):
errors = tuple(checker.check(['filepath'], ignore={},
ignore_inline_noqa=True))
error_codes = {error.code for error in errors}
assert error_codes == expected_error_codes | skipped_error_codes
def test_run_as_named_module():
"""Test that pydocstyle can be run as a "named module".
This means that the following should run pydocstyle:
python -m pydocstyle
"""
# Add --match='' so that no files are actually checked (to make sure that
# the return code is 0 and to reduce execution time).
cmd = [sys.executable, "-m", "pydocstyle", "--match=''"]
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
assert p.returncode == 0, out.decode('utf-8') + err.decode('utf-8')
def test_config_file(env):
"""Test that options are correctly loaded from a config file.
This test create a temporary directory and creates two files in it: a
Python file that has two violations (D100 and D103) and a config
file (tox.ini). This test alternates settings in the config file and checks
that we give the correct output.
"""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
def foo():
pass
"""))
env.write_config(ignore='D100')
out, err, code = env.invoke()
assert code == 1
assert 'D100' not in out
assert 'D103' in out
env.write_config(ignore='')
out, err, code = env.invoke()
assert code == 1
assert 'D100' in out
assert 'D103' in out
env.write_config(ignore='D100,D103')
out, err, code = env.invoke()
assert code == 0
assert 'D100' not in out
assert 'D103' not in out
env.write_config(ignore='D10')
_, err, code = env.invoke()
assert code == 0
assert 'D100' not in err
assert 'D103' not in err
def test_sectionless_config_file(env):
"""Test that config files without a valid section name issue a warning."""
with env.open('config.ini', 'wt') as conf:
conf.write('[pdcstl]')
config_path = conf.name
_, err, code = env.invoke(f'--config={config_path}')
assert code == 0
assert 'Configuration file does not contain a pydocstyle section' in err
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
def foo():
pass
"""))
with env.open('tox.ini', 'wt') as conf:
conf.write('[pdcstl]\n')
conf.write('ignore = D100')
out, err, code = env.invoke()
assert code == 1
assert 'D100' in out
assert 'file does not contain a pydocstyle section' not in err
@pytest.mark.parametrize(
# Don't parametrize over 'pyproject.toml'
# since this test applies only to '.ini' files
'env', ['ini'], indirect=True
)
def test_multiple_lined_config_file(env):
"""Test that .ini files with multi-lined entries are parsed correctly."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
class Foo(object):
"Doc string"
def foo():
pass
"""))
select_string = ('D100,\n'
' #D103,\n'
' D204, D300 # Just remember - don\'t check D103!')
env.write_config(select=select_string)
out, err, code = env.invoke()
assert code == 1
assert 'D100' in out
assert 'D204' in out
assert 'D300' in out
assert 'D103' not in out
@pytest.mark.parametrize(
# Don't parametrize over 'tox.ini' since
# this test applies only to '.toml' files
'env', ['toml'], indirect=True
)
def test_accepts_select_error_code_list(env):
"""Test that .ini files with multi-lined entries are parsed correctly."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
class Foo(object):
"Doc string"
def foo():
pass
"""))
env.write_config(select=['D100', 'D204', 'D300'])
out, err, code = env.invoke()
assert code == 1
assert 'D100' in out
assert 'D204' in out
assert 'D300' in out
assert 'D103' not in out
def test_config_path(env):
"""Test that options are correctly loaded from a specific config file.
Make sure that a config file passed via --config is actually used and that
normal config file discovery is disabled.
"""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
def foo():
pass
"""))
# either my_config.ini or my_config.toml
config_ext = env.config_name.split('.')[-1]
config_name = 'my_config.' + config_ext
env.write_config(ignore='D100')
env.write_config(name=config_name, ignore='D103')
out, err, code = env.invoke()
assert code == 1
assert 'D100' not in out
assert 'D103' in out
out, err, code = env.invoke('--config={} -d'
.format(env.get_path(config_name)))
assert code == 1, out + err
assert 'D100' in out
assert 'D103' not in out
def test_non_existent_config(env):
out, err, code = env.invoke('--config=does_not_exist')
assert code == 2
def test_verbose(env):
"""Test that passing --verbose prints more information."""
with env.open('example.py', 'wt') as example:
example.write('"""Module docstring."""\n')
out, _, code = env.invoke()
assert code == 0
assert 'example.py' not in out
out, _, code = env.invoke(args="--verbose")
assert code == 0
assert 'example.py' in out
def test_count(env):
"""Test that passing --count correctly prints the error num."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
def foo():
pass
"""))
out, err, code = env.invoke(args='--count')
assert code == 1
assert '2' in out
# The error count should be in the last line of the output.
# -2 since there is a newline at the end of the output.
assert '2' == out.split('\n')[-2].strip()
def test_select_cli(env):
"""Test choosing error codes with `--select` in the CLI."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
def foo():
pass
"""))
out, err, code = env.invoke(args="--select=D100")
assert code == 1
assert 'D100' in out
assert 'D103' not in out
def test_select_config(env):
"""Test choosing error codes with `select` in the config file."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
class Foo(object):
"Doc string"
def foo():
pass
"""))
env.write_config(select="D100,D3")
out, err, code = env.invoke()
assert code == 1
assert 'D100' in out
assert 'D300' in out
assert 'D103' not in out
def test_add_select_cli(env):
"""Test choosing error codes with --add-select in the CLI."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
class Foo(object):
"Doc string"
def foo():
pass
"""))
env.write_config(select="D100")
out, err, code = env.invoke(args="--add-select=D204,D3")
assert code == 1
assert 'D100' in out
assert 'D204' in out
assert 'D300' in out
assert 'D103' not in out
def test_add_ignore_cli(env):
"""Test choosing error codes with --add-ignore in the CLI."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
class Foo(object):
def foo():
pass
"""))
env.write_config(select="D100,D101")
out, err, code = env.invoke(args="--add-ignore=D101")
assert code == 1
assert 'D100' in out
assert 'D101' not in out
assert 'D103' not in out
def test_wildcard_add_ignore_cli(env):
"""Test choosing error codes with --add-ignore in the CLI."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
class Foo(object):
"Doc string"
def foo():
pass
"""))
env.write_config(select="D203,D300")
out, err, code = env.invoke(args="--add-ignore=D30")
assert code == 1
assert 'D203' in out
assert 'D300' not in out
@pytest.mark.parametrize(
# Don't parametrize over 'pyproject.toml'
# since this test applies only to '.ini' files
'env', ['ini'], indirect=True
)
def test_ignores_whitespace_in_fixed_option_set(env):
with env.open('example.py', 'wt') as example:
example.write("class Foo(object):\n 'Doc string'")
env.write_config(ignore="D100,\n # comment\n D300")
out, err, code = env.invoke()
assert code == 1
assert 'D300' not in out
assert err == ''
@pytest.mark.parametrize(
# Don't parametrize over 'tox.ini' since
# this test applies only to '.toml' files
'env', ['toml'], indirect=True
)
def test_accepts_ignore_error_code_list(env):
with env.open('example.py', 'wt') as example:
example.write("class Foo(object):\n 'Doc string'")
env.write_config(ignore=['D100', 'D300'])
out, err, code = env.invoke()
assert code == 1
assert 'D300' not in out
assert err == ''
def test_bad_wildcard_add_ignore_cli(env):
"""Test adding a non-existent error codes with --add-ignore."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
class Foo(object):
"Doc string"
def foo():
pass
"""))
env.write_config(select="D203,D300")
out, err, code = env.invoke(args="--add-ignore=D3004")
assert code == 1
assert 'D203' in out
assert 'D300' in out
assert 'D3004' not in out
assert ('Error code passed is not a prefix of any known errors: D3004'
in err)
def test_overload_function(env):
"""Functions decorated with @overload trigger D418 error."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent('''\
from typing import overload
@overload
def overloaded_func(a: int) -> str:
...
@overload
def overloaded_func(a: str) -> str:
"""Foo bar documentation."""
...
def overloaded_func(a):
"""Foo bar documentation."""
return str(a)
'''))
env.write_config(ignore="D100")
out, err, code = env.invoke()
assert code == 1
assert 'D418' in out
assert 'D103' not in out
def test_overload_async_function(env):
"""Async functions decorated with @overload trigger D418 error."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent('''\
from typing import overload
@overload
async def overloaded_func(a: int) -> str:
...
@overload
async def overloaded_func(a: str) -> str:
"""Foo bar documentation."""
...
async def overloaded_func(a):
"""Foo bar documentation."""
return str(a)
'''))
env.write_config(ignore="D100")
out, err, code = env.invoke()
assert code == 1
assert 'D418' in out
assert 'D103' not in out
def test_overload_method(env):
"""Methods decorated with @overload trigger D418 error."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent('''\
from typing import overload
class ClassWithMethods:
@overload
def overloaded_method(a: int) -> str:
...
@overload
def overloaded_method(a: str) -> str:
"""Foo bar documentation."""
...
def overloaded_method(a):
"""Foo bar documentation."""
return str(a)
'''))
env.write_config(ignore="D100")
out, err, code = env.invoke()
assert code == 1
assert 'D418' in out
assert 'D102' not in out
assert 'D103' not in out
def test_overload_method_valid(env):
"""Valid case for overload decorated Methods.
This shouldn't throw any errors.
"""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent('''\
from typing import overload
class ClassWithMethods:
"""Valid docstring in public Class."""
@overload
def overloaded_method(a: int) -> str:
...
@overload
def overloaded_method(a: str) -> str:
...
def overloaded_method(a):
"""Foo bar documentation."""
return str(a)
'''))
env.write_config(ignore="D100, D203")
out, err, code = env.invoke()
assert code == 0
def test_overload_function_valid(env):
"""Valid case for overload decorated functions.
This shouldn't throw any errors.
"""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent('''\
from typing import overload
@overload
def overloaded_func(a: int) -> str:
...
@overload
def overloaded_func(a: str) -> str:
...
def overloaded_func(a):
"""Foo bar documentation."""
return str(a)
'''))
env.write_config(ignore="D100")
out, err, code = env.invoke()
assert code == 0
def test_overload_async_function_valid(env):
"""Valid case for overload decorated async functions.
This shouldn't throw any errors.
"""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent('''\
from typing import overload
@overload
async def overloaded_func(a: int) -> str:
...
@overload
async def overloaded_func(a: str) -> str:
...
async def overloaded_func(a):
"""Foo bar documentation."""
return str(a)
'''))
env.write_config(ignore="D100")
out, err, code = env.invoke()
assert code == 0
def test_overload_nested_function(env):
"""Nested functions decorated with @overload trigger D418 error."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent('''\
from typing import overload
def function_with_nesting():
"""Valid docstring in public function."""
@overload
def overloaded_func(a: int) -> str:
...
@overload
def overloaded_func(a: str) -> str:
"""Foo bar documentation."""
...
def overloaded_func(a):
"""Foo bar documentation."""
return str(a)
'''))
env.write_config(ignore="D100")
out, err, code = env.invoke()
assert code == 1
assert 'D418' in out
assert 'D103' not in out
def test_overload_nested_function_valid(env):
"""Valid case for overload decorated nested functions.
This shouldn't throw any errors.
"""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent('''\
from typing import overload
def function_with_nesting():
"""Adding a docstring to a function."""
@overload
def overloaded_func(a: int) -> str:
...
@overload
def overloaded_func(a: str) -> str:
...
def overloaded_func(a):
"""Foo bar documentation."""
return str(a)
'''))
env.write_config(ignore="D100")
out, err, code = env.invoke()
assert code == 0
def test_conflicting_select_ignore_config(env):
"""Test that select and ignore are mutually exclusive."""
env.write_config(select="D100", ignore="D101")
_, err, code = env.invoke()
assert code == 2
assert 'mutually exclusive' in err
def test_conflicting_select_convention_config(env):
"""Test that select and convention are mutually exclusive."""
env.write_config(select="D100", convention="pep257")
_, err, code = env.invoke()
assert code == 2
assert 'mutually exclusive' in err
def test_conflicting_ignore_convention_config(env):
"""Test that select and convention are mutually exclusive."""
env.write_config(ignore="D100", convention="pep257")
_, err, code = env.invoke()
assert code == 2
assert 'mutually exclusive' in err
def test_missing_docstring_in_package(env):
"""Make sure __init__.py files are treated as packages."""
with env.open('__init__.py', 'wt') as init:
pass # an empty package file
out, err, code = env.invoke()
assert code == 1
assert 'D100' not in out # shouldn't be treated as a module
assert 'D104' in out # missing docstring in package
def test_illegal_convention(env):
"""Test that illegal convention names are dealt with properly."""
_, err, code = env.invoke('--convention=illegal_conv')
assert code == 2, err
assert "Illegal convention 'illegal_conv'." in err
assert 'Possible conventions' in err
assert 'pep257' in err
assert 'numpy' in err
def test_empty_select_cli(env):
"""Test excluding all error codes with `--select=` in the CLI."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
def foo():
pass
"""))
_, _, code = env.invoke(args="--select=")
assert code == 0
def test_empty_select_config(env):
"""Test excluding all error codes with `select=` in the config file."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
def foo():
pass
"""))
env.write_config(select="")
_, _, code = env.invoke()
assert code == 0
def test_empty_select_with_added_error(env):
"""Test excluding all errors but one."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
def foo():
pass
"""))
env.write_config(select="")
out, err, code = env.invoke(args="--add-select=D100")
assert code == 1
assert 'D100' in out
assert 'D101' not in out
assert 'D103' not in out
def test_pep257_convention(env):
"""Test that the 'pep257' convention options has the correct errors."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent('''
class Foo(object):
"""Docstring for this class"""
def foo():
pass
# Original PEP-257 example from -
# https://www.python.org/dev/peps/pep-0257/
def complex(real=0.0, imag=0.0):
"""Form a complex number.
Keyword arguments:
real -- the real part (default 0.0)
imag -- the imaginary part (default 0.0)
"""
if imag == 0.0 and real == 0.0:
return complex_zero
'''))
env.write_config(convention="pep257")
out, err, code = env.invoke()
assert code == 1
assert 'D100' in out
assert 'D211' in out
assert 'D203' not in out
assert 'D212' not in out
assert 'D213' not in out
assert 'D413' not in out
def test_numpy_convention(env):
"""Test that the 'numpy' convention options has the correct errors."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent('''
class Foo(object):
"""Docstring for this class.
returns
------
"""
def __init__(self):
pass
'''))
env.write_config(convention="numpy")
out, err, code = env.invoke()
assert code == 1
assert 'D107' not in out
assert 'D213' not in out
assert 'D215' in out
assert 'D405' in out
assert 'D409' in out
assert 'D414' in out
assert 'D410' not in out
assert 'D413' not in out
def test_google_convention(env):
"""Test that the 'google' convention options has the correct errors."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent('''
def func(num1, num2, num_three=0):
"""Docstring for this function.
Args:
num1 (int): Number 1.
num2: Number 2.
"""
class Foo(object):
"""Docstring for this class.
Attributes:
test: Test
returns:
"""
def __init__(self):
pass
'''))
env.write_config(convention="google")
out, err, code = env.invoke()
assert code == 1
assert 'D107' in out
assert 'D213' not in out
assert 'D215' not in out
assert 'D405' in out
assert 'D409' not in out
assert 'D410' not in out
assert 'D412' in out
assert 'D413' not in out
assert 'D414' in out
assert 'D417' in out
def test_config_file_inheritance(env):
"""Test configuration files inheritance.
The test creates 2 configuration files:
env_base
+-- tox.ini
| This configuration will set `select=`.
+-- A
+-- tox.ini
| This configuration will set `inherit=false`.
+-- test.py
The file will contain code that violates D100,D103.
When invoking pydocstyle, the first config file found in the base directory
will set `select=`, so no error codes should be checked.
The `A/tox.ini` configuration file sets `inherit=false` but has an empty
configuration, therefore the default convention will be checked.
We expect pydocstyle to ignore the `select=` configuration and raise all
the errors stated above.
"""
env.write_config(select='')
env.write_config(prefix='A', inherit=False)
with env.open(os.path.join('A', 'test.py'), 'wt') as test:
test.write(textwrap.dedent("""\
def bar():
pass
"""))
out, err, code = env.invoke()
assert code == 1
assert 'D100' in out
assert 'D103' in out
def test_config_file_cumulative_add_ignores(env):
"""Test that add-ignore is cumulative.
env_base
+-- tox.ini
| This configuration will set `select=D100,D103` and `add-ignore=D100`.
+-- base.py
| Will violate D100,D103
+-- A
+-- tox.ini
| This configuration will set `add-ignore=D103`.
+-- a.py
Will violate D100,D103.
The desired result is that `base.py` will fail with D103 and
`a.py` will pass.
"""
env.write_config(select='D100,D103', add_ignore='D100')
env.write_config(prefix='A', add_ignore='D103')
test_content = textwrap.dedent("""\
def foo():
pass
""")
with env.open('base.py', 'wt') as test:
test.write(test_content)
with env.open(os.path.join('A', 'a.py'), 'wt') as test:
test.write(test_content)
out, err, code = env.invoke()
err = parse_errors(out)
assert code == 1
assert 'base.py' in err, err
assert 'a.py' not in err, err
assert 'D100' not in err['base.py'], err
assert 'D103' in err['base.py'], err
def test_config_file_cumulative_add_select(env):
"""Test that add-select is cumulative.
env_base
+-- tox.ini
| This configuration will set `select=` and `add-select=D100`.
+-- base.py
| Will violate D100,D103
+-- A
+-- tox.ini
| This configuration will set `add-select=D103`.
+-- a.py
Will violate D100,D103.
The desired result is that `base.py` will fail with D100 and
`a.py` will fail with D100,D103.
"""
env.write_config(select='', add_select='D100')
env.write_config(prefix='A', add_select='D103')
test_content = textwrap.dedent("""\
def foo():
pass
""")
with env.open('base.py', 'wt') as test:
test.write(test_content)
with env.open(os.path.join('A', 'a.py'), 'wt') as test:
test.write(test_content)
out, err, code = env.invoke()
err = parse_errors(out)
assert code == 1
assert 'base.py' in err, err
assert 'a.py' in err, err
assert err['base.py'] == {'D100'}, err
assert err['a.py'] == {'D100', 'D103'}, err
def test_config_file_convention_overrides_select(env):
"""Test that conventions override selected errors.
env_base
+-- tox.ini
| This configuration will set `select=D103`.
+-- base.py
| Will violate D100.
+-- A
+-- tox.ini
| This configuration will set `convention=pep257`.
+-- a.py
Will violate D100.
The expected result is that `base.py` will be clear of errors and
`a.py` will violate D100.
"""
env.write_config(select='D103')
env.write_config(prefix='A', convention='pep257')
test_content = ""
with env.open('base.py', 'wt') as test:
test.write(test_content)
with env.open(os.path.join('A', 'a.py'), 'wt') as test:
test.write(test_content)
out, err, code = env.invoke()
assert code == 1
assert 'D100' in out, out
assert 'base.py' not in out, out
assert 'a.py' in out, out
def test_cli_overrides_config_file(env):
"""Test that the CLI overrides error codes selected in the config file.
env_base
+-- tox.ini
| This configuration will set `select=D103` and `match-dir=foo`.
+-- base.py
| Will violate D100.
+-- A
+-- a.py
Will violate D100,D103.
We shall run with `--convention=pep257`.
We expect `base.py` to be checked and violate `D100` and that `A/a.py` will
not be checked because of `match-dir=foo` in the config file.
"""
env.write_config(select='D103', match_dir='foo')
with env.open('base.py', 'wt') as test:
test.write("")
env.makedirs('A')
with env.open(os.path.join('A', 'a.py'), 'wt') as test:
test.write(textwrap.dedent("""\
def foo():
pass
"""))
out, err, code = env.invoke(args="--convention=pep257")
assert code == 1
assert 'D100' in out, out
assert 'D103' not in out, out
assert 'base.py' in out, out
assert 'a.py' not in out, out
def test_cli_match_overrides_config_file(env):
"""Test that the CLI overrides the match clauses in the config file.
env_base
+-- tox.ini
| This configuration will set `match-dir=foo`.
+-- base.py
| Will violate D100,D103.
+-- A
+-- tox.ini
| This configuration will set `match=bar.py`.
+-- a.py
Will violate D100.
We shall run with `--match=a.py` and `--match-dir=A`.
We expect `base.py` will not be checked and that `A/a.py` will be checked.
"""
env.write_config(match_dir='foo')
env.write_config(prefix='A', match='bar.py')
with env.open('base.py', 'wt') as test:
test.write(textwrap.dedent("""\
def foo():
pass
"""))
with env.open(os.path.join('A', 'a.py'), 'wt') as test:
test.write("")
out, err, code = env.invoke(args="--match=a.py --match-dir=A")
assert code == 1
assert 'D100' in out, out
assert 'D103' not in out, out
assert 'base.py' not in out, out
assert 'a.py' in out, out
def test_config_file_convention_overrides_ignore(env):
"""Test that conventions override ignored errors.
env_base
+-- tox.ini
| This configuration will set `ignore=D100,D103`.
+-- base.py
| Will violate D100,D103.
+-- A
+-- tox.ini
| This configuration will set `convention=pep257`.
+-- a.py
Will violate D100,D103.
The expected result is that `base.py` will be clear of errors and
`a.py` will violate D103.
"""
env.write_config(ignore='D100,D103')
env.write_config(prefix='A', convention='pep257')
test_content = textwrap.dedent("""\
def foo():
pass
""")
with env.open('base.py', 'wt') as test:
test.write(test_content)
with env.open(os.path.join('A', 'a.py'), 'wt') as test:
test.write(test_content)
out, err, code = env.invoke()
assert code == 1
assert 'D100' in out, out
assert 'D103' in out, out
assert 'base.py' not in out, out
assert 'a.py' in out, out
def test_config_file_ignore_overrides_select(env):
"""Test that ignoring any error overrides selecting errors.
env_base
+-- tox.ini
| This configuration will set `select=D100`.
+-- base.py
| Will violate D100,D101,D102.
+-- A
+-- tox.ini
| This configuration will set `ignore=D102`.
+-- a.py
Will violate D100,D101,D102.
The expected result is that `base.py` will violate D100 and
`a.py` will violate D100,D101.
"""
env.write_config(select='D100')
env.write_config(prefix='A', ignore='D102')
test_content = textwrap.dedent("""\
class Foo(object):
def bar():
pass
""")
with env.open('base.py', 'wt') as test:
test.write(test_content)
with env.open(os.path.join('A', 'a.py'), 'wt') as test:
test.write(test_content)
out, err, code = env.invoke()
err = parse_errors(out)
assert code == 1
assert 'base.py' in err, err
assert 'a.py' in err, err
assert err['base.py'] == {'D100'}, err
assert err['a.py'] == {'D100', 'D101'}, err
def test_config_file_nearest_to_checked_file(env):
"""Test that the configuration to each file is the nearest one.
In this test there will be 2 identical files in 2 branches in the directory
tree. Both of them will violate the same error codes, but their config
files will contain different ignores.
env_base
+-- tox.ini
| This configuration will set `convention=pep257` and `add-ignore=D100`
+-- base.py
| Will violate D100,D101,D102.
+-- A
| +-- a.py
| Will violate D100,D101,D102.
+-- B
+-- tox.ini
| Will set `add-ignore=D101`
+-- b.py
Will violate D100,D101,D102.
We should see that `a.py` and `base.py` act the same and violate
D101,D102 (since they are both configured by `tox.ini`) and that
`b.py` violates D102, since it's configured by `B/tox.ini` as well.
"""
env.write_config(convention='pep257', add_ignore='D100')
env.write_config(prefix='B', add_ignore='D101')
test_content = textwrap.dedent("""\
class Foo(object):
def bar():
pass
""")
with env.open('base.py', 'wt') as test:
test.write(test_content)
env.makedirs('A')
with env.open(os.path.join('A', 'a.py'), 'wt') as test:
test.write(test_content)
with env.open(os.path.join('B', 'b.py'), 'wt') as test:
test.write(test_content)
out, err, code = env.invoke()
err = parse_errors(out)
assert code == 1
assert 'base.py' in err, err
assert 'a.py' in err, err
assert 'b.py' in err, err
assert err['base.py'] == {'D101', 'D102'}, err
assert err['a.py'] == {'D101', 'D102'}, err
assert err['b.py'] == {'D102'}, err
def test_config_file_nearest_match_re(env):
"""Test that the `match` and `match-dir` options are handled correctly.
env_base
+-- tox.ini
| This configuration will set `convention=pep257` and `add-ignore=D100`.
+-- A
+-- tox.ini
| Will set `match-dir=C`.
+-- B
| +-- b.py
| Will violate D100,D103.
+-- C
+-- tox.ini
| Will set `match=bla.py`.
+-- c.py
| Will violate D100,D103.
+-- bla.py
Will violate D100.
We expect the call to pydocstyle to be successful, since `b.py` and
`c.py` are not supposed to be found by the re.
"""
env.write_config(convention='pep257', add_ignore='D100')
env.write_config(prefix='A', match_dir='C')
env.write_config(prefix=os.path.join('A', 'C'), match='bla.py')
content = textwrap.dedent("""\
def foo():
pass
""")
env.makedirs(os.path.join('A', 'B'))
with env.open(os.path.join('A', 'B', 'b.py'), 'wt') as test:
test.write(content)
with env.open(os.path.join('A', 'C', 'c.py'), 'wt') as test:
test.write(content)
with env.open(os.path.join('A', 'C', 'bla.py'), 'wt') as test:
test.write('')
_, _, code = env.invoke()
assert code == 0
def test_syntax_error_multiple_files(env):
"""Test that a syntax error in a file doesn't prevent further checking."""
for filename in ('first.py', 'second.py'):
with env.open(filename, 'wt') as fobj:
fobj.write("[")
out, err, code = env.invoke(args="-v")
assert code == 1
assert 'first.py: Cannot parse file' in err
assert 'second.py: Cannot parse file' in err
def test_indented_function(env):
"""Test that nested functions do not cause IndentationError."""
env.write_config(ignore='D')
with env.open("test.py", 'wt') as fobj:
fobj.write(textwrap.dedent('''\
def foo():
def bar(a):
"""A docstring
Args:
a : An argument.
"""
pass
'''))
out, err, code = env.invoke(args="-v")
assert code == 0
assert "IndentationError: unexpected indent" not in err
def test_only_comment_file(env):
"""Test that file with only comments does only cause D100."""
with env.open('comments.py', 'wt') as comments:
comments.write(
'#!/usr/bin/env python3\n'
'# -*- coding: utf-8 -*-\n'
'# Useless comment\n'
'# Just another useless comment\n'
)
out, _, code = env.invoke()
assert 'D100' in out
out = out.replace('D100', '')
for err in {'D1', 'D2', 'D3', 'D4'}:
assert err not in out
assert code == 1
def test_comment_plus_docstring_file(env):
"""Test that file with comments and docstring does not cause errors."""
with env.open('comments_plus.py', 'wt') as comments_plus:
comments_plus.write(
'#!/usr/bin/env python3\n'
'# -*- coding: utf-8 -*-\n'
'# Useless comment\n'
'# Just another useless comment\n'
'"""Module docstring."""\n'
)
out, _, code = env.invoke()
assert '' == out
assert code == 0
def test_only_comment_with_noqa_file(env):
"""Test that file with noqa and only comments does not cause errors."""
with env.open('comments.py', 'wt') as comments:
comments.write(
'#!/usr/bin/env python3\n'
'# -*- coding: utf-8 -*-\n'
'# Useless comment\n'
'# Just another useless comment\n'
'# noqa: D100\n'
)
out, _, code = env.invoke()
assert 'D100' not in out
assert code == 0
def test_comment_with_noqa_plus_docstring_file(env):
"""Test that file with comments, noqa, docstring does not cause errors."""
with env.open('comments_plus.py', 'wt') as comments_plus:
comments_plus.write(
'#!/usr/bin/env python3\n'
'# -*- coding: utf-8 -*-\n'
'# Useless comment\n'
'# Just another useless comment\n'
'# noqa: D400\n'
'"""Module docstring without period"""\n'
)
out, _, code = env.invoke()
assert '' == out
assert code == 0
def test_ignore_self_only_init(env):
"""Test that ignore_self_only_init works ignores __init__ with only self."""
with env.open('example.py', 'wt') as example:
example.write(textwrap.dedent("""\
class Foo:
def __init__(self):
pass
"""))
env.write_config(ignore_self_only_init=True, select="D107")
out, err, code = env.invoke()
assert '' == out
assert code == 0
def test_match_considers_basenames_for_path_args(env):
"""Test that `match` option only considers basenames for path arguments.
The test environment consists of a single empty module `test_a.py`. The
match option is set to a pattern that ignores test_ prefixed .py filenames.
When pydocstyle is invoked with full path to `test_a.py`, we expect it to
succeed since match option will match against just the file name and not
full path.
"""
# Ignore .py files prefixed with 'test_'
env.write_config(select='D100', match='(?!test_).+.py')
# Create an empty module (violates D100)
with env.open('test_a.py', 'wt') as test:
test.write('')
# env.invoke calls pydocstyle with full path to test_a.py
out, _, code = env.invoke(target='test_a.py')
assert '' == out
assert code == 0 | SandboxEnv |
python | readthedocs__readthedocs.org | readthedocs/organizations/forms.py | {
"start": 992,
"end": 4246
} | class ____(SimpleHistoryModelForm):
"""
Base organization form.
:param user: User instance, responsible for ownership of Organization
:type user: django.contrib.auth.models.User
"""
# We use the organization slug + project name
# to form the final project slug.
# A valid project slug is 63 chars long.
name = forms.CharField(max_length=32)
class Meta:
model = Organization
fields = ["name", "email", "avatar", "description", "url"]
labels = {
"name": _("Organization Name"),
"email": _("Billing Email"),
}
widgets = {
"email": forms.EmailInput(attrs={"placeholder": "accounting@example.com"}),
# Make description less prominent on the page, we don't want long descriptions
"description": forms.TextInput(
attrs={"placeholder": "Engineering docs for Example company"}
),
# Don't use a URLField as a widget, the validation is too strict on FF
"url": forms.TextInput(attrs={"placeholder": "https://"}),
}
def __init__(self, *args, **kwargs):
try:
self.user = kwargs.pop("user")
except KeyError:
raise TypeError(
"OrganizationForm expects a `user` keyword argument",
)
super().__init__(*args, **kwargs)
def clean_slug(self):
slug_source = self.cleaned_data["slug"]
# Skip slug validation on already created organizations.
if self.instance.pk:
return slug_source
slug = slugify(slug_source, dns_safe=True)
if not slug:
# If the was not empty, but renders down to something empty, the
# user gave an invalid slug. However, we can't suggest anything
# useful because the slug is empty. This is an edge case for input
# like `---`, so the error here doesn't need to be very specific.
raise forms.ValidationError(_("Invalid slug, use more valid characters."))
elif slug != slug_source:
# There is a difference between the slug from the front end code, or
# the user is trying to submit the form without our front end code.
raise forms.ValidationError(
_("Invalid slug, use suggested slug '%(slug)s' instead"),
params={"slug": slug},
)
if Organization.objects.filter(slug=slug).exists():
raise forms.ValidationError(_("Slug is already used by another organization"))
return slug
def clean_avatar(self):
avatar = self.cleaned_data.get("avatar")
if avatar:
if avatar.size > 750 * 1024:
raise forms.ValidationError(
_("Avatar image size must not exceed 750KB."),
)
try:
img = Image.open(avatar)
except Exception:
raise ValidationError("Could not process image. Please upload a valid image file.")
width, height = img.size
if width > 500 or height > 500:
raise ValidationError("The image dimensions cannot exceed 500x500 pixels.")
return avatar
| OrganizationForm |
python | huggingface__transformers | src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | {
"start": 37145,
"end": 47513
} | class ____(InstructBlipVideoPreTrainedModel):
main_input_name = "pixel_values"
_keep_in_fp32_modules = ["query_tokens"] # TODO @ArthurZucker I don't know why this is required for FP8
def __init__(self, config: InstructBlipVideoConfig):
super().__init__(config)
self.vision_model = InstructBlipVideoVisionModel(config.vision_config)
self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
self.qformer = InstructBlipVideoQFormerModel(config.qformer_config)
self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
self.language_model = AutoModel.from_config(config.text_config)
if self.language_model._no_split_modules is not None:
self._no_split_modules.extend(self.language_model._no_split_modules)
if self.language_model._keep_in_fp32_modules is not None:
self._keep_in_fp32_modules.extend(self.language_model._keep_in_fp32_modules)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def _preprocess_accelerate(self):
r"""
Some pre-processing hacks to make the model `accelerate` compatible. Check
https://github.com/huggingface/transformers/pull/21707 for more details.
"""
hf_device_map = self.hf_device_map
if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1:
# warn users about unexpected behavior when using multi-GPU + InstructBlipVideo + `accelerate`.
logger.warning(
"The `language_model` is not in the `hf_device_map` dictionary and you are running your script"
" in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`."
" Please pass a `device_map` that contains `language_model` to remove this warning."
" Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for"
" more details on creating a `device_map` for large models.",
)
if hasattr(self.language_model, "_hf_hook"):
self.language_model._hf_hook.io_same_device = True # For `generate` compatibility
def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
qformer_input_ids: torch.FloatTensor,
qformer_attention_mask: Optional[torch.LongTensor] = None,
input_ids: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
use_cache: Optional[bool] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, InstructBlipVideoForConditionalGenerationModelOutput]:
r"""
qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided
to serve as text prompt, which the Q-Former model will encode.
Indices can be obtained using [`InstructBlipVideoProcessor`]. See [`InstructBlipVideoProcessor.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Only relevant in case an encoder-decoder language model (like T5) is used.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# step 1: forward the images through the vision encoder,
# we process in a batched way, later unbatch it back (video has frames=4 always)
batch_size, frames, channel, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width)
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
interpolate_pos_encoding=interpolate_pos_encoding,
)
image_embeds = vision_outputs[0]
# step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
# difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
if qformer_attention_mask is None:
qformer_attention_mask = torch.ones_like(qformer_input_ids)
qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0)
qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0)
qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
query_outputs = self.qformer(
input_ids=qformer_input_ids,
attention_mask=qformer_attention_mask,
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
query_output = query_outputs[0][:, : query_tokens.size(1), :]
# step 3: use the language model, conditioned on the query outputs and the prompt
language_model_inputs = self.language_projection(query_output)
# unbatch inputs back, each video-frame gets `num_query_tokens` seq length
language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1)
if inputs_embeds is None:
inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
special_image_mask = input_ids == self.config.video_token_id
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
else:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)
if self.config.use_decoder_only_language_model:
outputs = self.language_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
use_cache=use_cache,
**kwargs,
)
else:
outputs = self.language_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
use_cache=use_cache,
**kwargs,
)
return InstructBlipVideoForConditionalGenerationModelOutput(
vision_outputs=vision_outputs,
qformer_outputs=query_outputs,
language_model_outputs=outputs,
)
@auto_docstring(
custom_intro="""
InstructBlipVideo Model for generating text given an image and an optional text prompt. The model consists of a vision
encoder, Querying Transformer (Q-Former) and a language model.
One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.
"""
)
| InstructBlipVideoModel |
python | protocolbuffers__protobuf | python/google/protobuf/descriptor.py | {
"start": 42670,
"end": 53140
} | class ____(DescriptorBase):
"""Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto.
Note that :attr:`enum_types_by_name`, :attr:`extensions_by_name`, and
:attr:`dependencies` fields are only set by the
:py:mod:`google.protobuf.message_factory` module, and not by the generated
proto code.
Attributes:
name (str): Name of file, relative to root of source tree.
package (str): Name of the package
edition (Edition): Enum value indicating edition of the file
serialized_pb (bytes): Byte string of serialized
:class:`descriptor_pb2.FileDescriptorProto`.
dependencies (list[FileDescriptor]): List of other :class:`FileDescriptor`
objects this :class:`FileDescriptor` depends on.
public_dependencies (list[FileDescriptor]): A subset of
:attr:`dependencies`, which were declared as "public".
message_types_by_name (dict(str, Descriptor)): Mapping from message names to
their :class:`Descriptor`.
enum_types_by_name (dict(str, EnumDescriptor)): Mapping from enum names to
their :class:`EnumDescriptor`.
extensions_by_name (dict(str, FieldDescriptor)): Mapping from extension
names declared at file scope to their :class:`FieldDescriptor`.
services_by_name (dict(str, ServiceDescriptor)): Mapping from services'
names to their :class:`ServiceDescriptor`.
pool (DescriptorPool): The pool this descriptor belongs to. When not passed
to the constructor, the global default pool is used.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.FileDescriptor
def __new__(
cls,
name,
package,
options=None,
serialized_options=None,
serialized_pb=None,
dependencies=None,
public_dependencies=None,
syntax=None,
edition=None,
pool=None,
create_key=None,
):
# FileDescriptor() is called from various places, not only from generated
# files, to register dynamic proto files and messages.
# pylint: disable=g-explicit-bool-comparison
if serialized_pb:
return _message.default_pool.AddSerializedFile(serialized_pb)
else:
return super(FileDescriptor, cls).__new__(cls)
def __init__(
self,
name,
package,
options=None,
serialized_options=None,
serialized_pb=None,
dependencies=None,
public_dependencies=None,
syntax=None,
edition=None,
pool=None,
create_key=None,
):
"""Constructor."""
if create_key is not _internal_create_key:
_Deprecated('create function FileDescriptor()')
super(FileDescriptor, self).__init__(
self, options, serialized_options, 'FileOptions'
)
if edition and edition != 'EDITION_UNKNOWN':
self._edition = edition
elif syntax == 'proto3':
self._edition = 'EDITION_PROTO3'
else:
self._edition = 'EDITION_PROTO2'
if pool is None:
from google.protobuf import descriptor_pool
pool = descriptor_pool.Default()
self.pool = pool
self.message_types_by_name = {}
self.name = name
self.package = package
self.serialized_pb = serialized_pb
self.enum_types_by_name = {}
self.extensions_by_name = {}
self.services_by_name = {}
self.dependencies = dependencies or []
self.public_dependencies = public_dependencies or []
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.FileDescriptorProto.
Args:
proto: An empty descriptor_pb2.FileDescriptorProto.
"""
proto.ParseFromString(self.serialized_pb)
@property
def _parent(self):
return None
def _ParseOptions(message, string):
"""Parses serialized options.
This helper function is used to parse serialized options in generated
proto2 files. It must not be used outside proto2.
"""
message.ParseFromString(string)
return message
def _ToCamelCase(name):
"""Converts name to camel-case and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
if result:
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
# Lower-case the first letter.
if result and result[0].isupper():
result[0] = result[0].lower()
return ''.join(result)
def _OptionsOrNone(descriptor_proto):
"""Returns the value of the field `options`, or None if it is not set."""
if descriptor_proto.HasField('options'):
return descriptor_proto.options
else:
return None
def _ToJsonName(name):
"""Converts name to Json name and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
return ''.join(result)
def MakeDescriptor(
desc_proto,
package='',
build_file_if_cpp=True,
syntax=None,
edition=None,
file_desc=None,
):
"""Make a protobuf Descriptor given a DescriptorProto protobuf.
Handles nested descriptors. Note that this is limited to the scope of defining
a message inside of another message. Composite fields can currently only be
resolved if the message is defined in the same scope as the field.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: Optional package name for the new message Descriptor (string).
build_file_if_cpp: Update the C++ descriptor pool if api matches. Set to
False on recursion, so no duplicates are created.
syntax: The syntax/semantics that should be used. Set to "proto3" to get
proto3 field presence semantics.
edition: The edition that should be used if syntax is "edition".
file_desc: A FileDescriptor to place this descriptor into.
Returns:
A Descriptor for protobuf messages.
"""
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pb2
# Generate a random name for this proto file to prevent conflicts with any
# imported ones. We need to specify a file name so the descriptor pool
# accepts our FileDescriptorProto, but it is not important what that file
# name is actually set to.
proto_name = binascii.hexlify(os.urandom(16)).decode('ascii')
if package:
file_name = os.path.join(package.replace('.', '/'), proto_name + '.proto')
else:
file_name = proto_name + '.proto'
if api_implementation.Type() != 'python' and build_file_if_cpp:
# The C++ implementation requires all descriptors to be backed by the same
# definition in the C++ descriptor pool. To do this, we build a
# FileDescriptorProto with the same definition as this descriptor and build
# it into the pool.
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.message_type.add().MergeFrom(desc_proto)
if package:
file_descriptor_proto.package = package
file_descriptor_proto.name = file_name
_message.default_pool.Add(file_descriptor_proto)
result = _message.default_pool.FindFileByName(file_descriptor_proto.name)
if _USE_C_DESCRIPTORS:
return result.message_types_by_name[desc_proto.name]
if file_desc is None:
file_desc = FileDescriptor(
pool=None,
name=file_name,
package=package,
syntax=syntax,
edition=edition,
options=None,
serialized_pb='',
dependencies=[],
public_dependencies=[],
create_key=_internal_create_key,
)
full_message_name = [desc_proto.name]
if package:
full_message_name.insert(0, package)
# Create Descriptors for enum types
enum_types = {}
for enum_proto in desc_proto.enum_type:
full_name = '.'.join(full_message_name + [enum_proto.name])
enum_desc = EnumDescriptor(
enum_proto.name,
full_name,
None,
[
EnumValueDescriptor(
enum_val.name,
ii,
enum_val.number,
create_key=_internal_create_key,
)
for ii, enum_val in enumerate(enum_proto.value)
],
file=file_desc,
create_key=_internal_create_key,
)
enum_types[full_name] = enum_desc
# Create Descriptors for nested types
nested_types = {}
for nested_proto in desc_proto.nested_type:
full_name = '.'.join(full_message_name + [nested_proto.name])
# Nested types are just those defined inside of the message, not all types
# used by fields in the message, so no loops are possible here.
nested_desc = MakeDescriptor(
nested_proto,
package='.'.join(full_message_name),
build_file_if_cpp=False,
syntax=syntax,
edition=edition,
file_desc=file_desc,
)
nested_types[full_name] = nested_desc
fields = []
for field_proto in desc_proto.field:
full_name = '.'.join(full_message_name + [field_proto.name])
enum_desc = None
nested_desc = None
if field_proto.json_name:
json_name = field_proto.json_name
else:
json_name = None
if field_proto.HasField('type_name'):
type_name = field_proto.type_name
full_type_name = '.'.join(
full_message_name + [type_name[type_name.rfind('.') + 1 :]]
)
if full_type_name in nested_types:
nested_desc = nested_types[full_type_name]
elif full_type_name in enum_types:
enum_desc = enum_types[full_type_name]
# Else type_name references a non-local type, which isn't implemented
field = FieldDescriptor(
field_proto.name,
full_name,
field_proto.number - 1,
field_proto.number,
field_proto.type,
FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),
field_proto.label,
None,
nested_desc,
enum_desc,
None,
False,
None,
options=_OptionsOrNone(field_proto),
has_default_value=False,
json_name=json_name,
file=file_desc,
create_key=_internal_create_key,
)
fields.append(field)
desc_name = '.'.join(full_message_name)
return Descriptor(
desc_proto.name,
desc_name,
None,
None,
fields,
list(nested_types.values()),
list(enum_types.values()),
[],
options=_OptionsOrNone(desc_proto),
file=file_desc,
create_key=_internal_create_key,
)
| FileDescriptor |
python | scipy__scipy | scipy/io/matlab/_miobase.py | {
"start": 514,
"end": 591
} | class ____(UserWarning):
"""Warning class for read issues."""
| MatReadWarning |
python | docker__docker-py | tests/unit/api_image_test.py | {
"start": 247,
"end": 10984
} | class ____(BaseAPIClientTest):
def test_image_viz(self):
with pytest.raises(Exception): # noqa: B017
self.client.images('busybox', viz=True)
self.fail('Viz output should not be supported!')
def test_images(self):
self.client.images(all=True)
fake_request.assert_called_with(
'GET',
f"{url_prefix}images/json",
params={'only_ids': 0, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_name(self):
self.client.images('foo:bar')
fake_request.assert_called_with(
'GET',
f"{url_prefix}images/json",
params={'only_ids': 0, 'all': 0,
'filters': '{"reference": ["foo:bar"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_quiet(self):
self.client.images(all=True, quiet=True)
fake_request.assert_called_with(
'GET',
f"{url_prefix}images/json",
params={'only_ids': 1, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_ids(self):
self.client.images(quiet=True)
fake_request.assert_called_with(
'GET',
f"{url_prefix}images/json",
params={'only_ids': 1, 'all': 0},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_filters(self):
self.client.images(filters={'dangling': True})
fake_request.assert_called_with(
'GET',
f"{url_prefix}images/json",
params={'only_ids': 0, 'all': 0,
'filters': '{"dangling": ["true"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_pull(self):
self.client.pull('joffrey/test001')
args = fake_request.call_args
assert args[0][1] == f"{url_prefix}images/create"
assert args[1]['params'] == {
'tag': 'latest', 'fromImage': 'joffrey/test001'
}
assert not args[1]['stream']
def test_pull_stream(self):
self.client.pull('joffrey/test001', stream=True)
args = fake_request.call_args
assert args[0][1] == f"{url_prefix}images/create"
assert args[1]['params'] == {
'tag': 'latest', 'fromImage': 'joffrey/test001'
}
assert args[1]['stream']
def test_commit(self):
self.client.commit(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
f"{url_prefix}commit",
data='{}',
headers={'Content-Type': 'application/json'},
params={
'repo': None,
'comment': None,
'tag': None,
'container': fake_api.FAKE_CONTAINER_ID,
'author': None,
'pause': True,
'changes': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_image(self):
self.client.remove_image(fake_api.FAKE_IMAGE_ID)
fake_request.assert_called_with(
'DELETE',
f"{url_prefix}images/{fake_api.FAKE_IMAGE_ID}",
params={'force': False, 'noprune': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_history(self):
self.client.history(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'GET',
f"{url_prefix}images/test_image/history",
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image(self):
self.client.import_image(
fake_api.FAKE_TARBALL_PATH,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/create",
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': fake_api.FAKE_TARBALL_PATH
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_bytes(self):
stream = (i for i in range(0, 100))
self.client.import_image(
stream,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/create",
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': '-',
},
headers={
'Content-Type': 'application/tar',
},
data=stream,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_image(self):
self.client.import_image(
image=fake_api.FAKE_IMAGE_NAME,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/create",
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromImage': fake_api.FAKE_IMAGE_NAME
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image(self):
self.client.inspect_image(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'GET',
f"{url_prefix}images/test_image/json",
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image_undefined_id(self):
for arg in None, '', {True: True}:
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_image(arg)
assert excinfo.value.args[0] == 'Resource ID was not provided'
def test_push_image(self):
with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/test_image/push",
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_with_tag(self):
with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/test_image/push",
params={
'tag': fake_api.FAKE_TAG_NAME,
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_with_auth(self):
auth_config = {
'username': "test_user",
'password': "test_password",
'serveraddress': "test_server",
}
encoded_auth = auth.encode_header(auth_config)
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME,
auth_config=auth_config
)
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/test_image/push",
params={
'tag': fake_api.FAKE_TAG_NAME,
},
data='{}',
headers={'Content-Type': 'application/json',
'X-Registry-Auth': encoded_auth},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_stream(self):
with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/test_image/push",
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image(self):
self.client.tag(fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME)
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/{fake_api.FAKE_IMAGE_ID}/tag",
params={
'tag': None,
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_tag(self):
self.client.tag(
fake_api.FAKE_IMAGE_ID,
fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/{fake_api.FAKE_IMAGE_ID}/tag",
params={
'tag': 'tag',
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_force(self):
self.client.tag(
fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME, force=True)
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/{fake_api.FAKE_IMAGE_ID}/tag",
params={
'tag': None,
'repo': 'repo',
'force': 1
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_get_image(self):
self.client.get_image(fake_api.FAKE_IMAGE_ID)
fake_request.assert_called_with(
'GET',
f"{url_prefix}images/{fake_api.FAKE_IMAGE_ID}/get",
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_load_image(self):
self.client.load_image('Byte Stream....')
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/load",
data='Byte Stream....',
stream=True,
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_load_image_quiet(self):
self.client.load_image('Byte Stream....', quiet=True)
fake_request.assert_called_with(
'POST',
f"{url_prefix}images/load",
data='Byte Stream....',
stream=True,
params={'quiet': True},
timeout=DEFAULT_TIMEOUT_SECONDS
)
| ImageTest |
python | openai__openai-python | src/openai/types/eval_create_params.py | {
"start": 2432,
"end": 2862
} | class ____(TypedDict, total=False):
item_schema: Required[Dict[str, object]]
"""The json schema for each row in the data source."""
type: Required[Literal["custom"]]
"""The type of data source. Always `custom`."""
include_sample_schema: bool
"""
Whether the eval should expect you to populate the sample namespace (ie, by
generating responses off of your data source)
"""
| DataSourceConfigCustom |
python | PrefectHQ__prefect | src/prefect/server/schemas/core.py | {
"start": 26073,
"end": 26807
} | class ____(ORMBaseModel):
"""An ORM representation of a block schema reference."""
parent_block_schema_id: UUID = Field(
default=..., description="ID of block schema the reference is nested within"
)
parent_block_schema: Optional[BlockSchema] = Field(
default=None, description="The block schema the reference is nested within"
)
reference_block_schema_id: UUID = Field(
default=..., description="ID of the nested block schema"
)
reference_block_schema: Optional[BlockSchema] = Field(
default=None, description="The nested block schema"
)
name: str = Field(
default=..., description="The name that the reference is nested under"
)
| BlockSchemaReference |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_zip5.py | {
"start": 1609,
"end": 3898
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid zip5 string types.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_zip5": ["90001", "78884", "20010", "10011"],
"invalid_zip5": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_zip5"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_zip5"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_zip5"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidZip5().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidZip5 |
python | pytorch__pytorch | torch/fx/experimental/migrate_gradual_types/constraint.py | {
"start": 6941,
"end": 8242
} | class ____(Constraint):
def __init__(self, tensor_size, input_var, index1, index2, output):
"""
Args:
tensor_size: current tensor size
input_var: variable to hold input
index1: dimension 1
index2: dimension 2
output: output that stores result
"""
assert isinstance(input_var, TVar)
assert isinstance(output, TVar)
assert isinstance(index1, int)
assert isinstance(index2, int)
self.input_var = input_var
self.tensor_size = tensor_size
self.index1 = index1
self.index2 = index2
self.output = output
def __repr__(self):
return (
f" {self.output} = "
f"Transpose({self.input_var}, "
f"tensor_size: {self.tensor_size}, "
f"{self.index1}, "
f"{self.index2})"
)
def __eq__(self, other):
if isinstance(other, Transpose):
return (
self.tensor_size == other.tensor_size
and self.index1 == other.index1
and self.index2 == other.index2
and self.output == other.output
and self.input_var == other.input_var
)
else:
return False
| Transpose |
python | huggingface__transformers | src/transformers/models/doge/modeling_doge.py | {
"start": 36864,
"end": 37066
} | class ____(GenericForSequenceClassification, DogePreTrainedModel):
pass
__all__ = ["DogeForCausalLM", "DogeModel", "DogePreTrainedModel", "DogeForSequenceClassification"]
| DogeForSequenceClassification |
python | aimacode__aima-python | search.py | {
"start": 264,
"end": 2595
} | class ____:
"""The abstract class for a formal problem. You should subclass
this and implement the methods actions and result, and possibly
__init__, goal_test, and path_cost. Then you will create instances
of your subclass and solve them with the various search functions."""
def __init__(self, initial, goal=None):
"""The constructor specifies the initial state, and possibly a goal
state, if there is a unique goal. Your subclass's constructor can add
other arguments."""
self.initial = initial
self.goal = goal
def actions(self, state):
"""Return the actions that can be executed in the given
state. The result would typically be a list, but if there are
many actions, consider yielding them one at a time in an
iterator, rather than building them all at once."""
raise NotImplementedError
def result(self, state, action):
"""Return the state that results from executing the given
action in the given state. The action must be one of
self.actions(state)."""
raise NotImplementedError
def goal_test(self, state):
"""Return True if the state is a goal. The default method compares the
state to self.goal or checks for state in self.goal if it is a
list, as specified in the constructor. Override this method if
checking against a single self.goal is not enough."""
if isinstance(self.goal, list):
return is_in(state, self.goal)
else:
return state == self.goal
def path_cost(self, c, state1, action, state2):
"""Return the cost of a solution path that arrives at state2 from
state1 via action, assuming cost c to get up to state1. If the problem
is such that the path doesn't matter, this function will only look at
state2. If the path does matter, it will consider c and maybe state1
and action. The default method costs 1 for every step in the path."""
return c + 1
def value(self, state):
"""For optimization problems, each state has a value. Hill Climbing
and related algorithms try to maximize this value."""
raise NotImplementedError
# ______________________________________________________________________________
| Problem |
python | numpy__numpy | numpy/lib/tests/test_io.py | {
"start": 108005,
"end": 111400
} | class ____:
def __init__(self, base):
self.base = base
def read(self, n):
return self.base.read(n)
def seek(self, off, whence=0):
return self.base.seek(off, whence)
def test_ducktyping():
a = np.random.random((5, 5))
s = BytesIO()
f = JustWriter(s)
np.save(f, a)
f.flush()
s.seek(0)
f = JustReader(s)
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
# Thanks to another windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
# which is then read from by the loadtxt function
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
with temppath(suffix='.gz') as name:
with open(name, 'wb') as f:
f.write(s.read())
res = np.loadtxt(name)
s.close()
assert_array_equal(res, [1, 2, 3])
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
for a in z.values():
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
assert (z.get('x') == z['x']).all()
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.thread_unsafe(reason="garbage collector is global state")
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
with assert_no_gc_cycles():
np.load(f)
f.seek(0)
dt = [("a", 'u1', 2), ("b", 'u1', 2)]
with assert_no_gc_cycles():
x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
def test_load_multiple_arrays_until_eof():
f = BytesIO()
np.save(f, 1)
np.save(f, 2)
f.seek(0)
out1 = np.load(f)
assert out1 == 1
out2 = np.load(f)
assert out2 == 2
with pytest.raises(EOFError):
np.load(f)
def test_savez_nopickle():
obj_array = np.array([1, 'hello'], dtype=object)
with temppath(suffix='.npz') as tmp:
np.savez(tmp, obj_array)
with temppath(suffix='.npz') as tmp:
with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"):
np.savez(tmp, obj_array, allow_pickle=False)
with temppath(suffix='.npz') as tmp:
np.savez_compressed(tmp, obj_array)
with temppath(suffix='.npz') as tmp:
with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"):
np.savez_compressed(tmp, obj_array, allow_pickle=False)
| JustReader |
python | bokeh__bokeh | tests/unit/bokeh/model/test_model.py | {
"start": 1759,
"end": 3537
} | class ____:
def test_exception_for_no_callbacks(self) -> None:
m = SomeModel()
with pytest.raises(ValueError):
m.js_on_change('foo')
def test_exception_for_bad_callbacks(self) -> None:
m = SomeModel()
for val in [10, "bar", None, [1], {}, 10.2]:
with pytest.raises(ValueError):
m.js_on_change('foo', val)
def test_with_propname(self) -> None:
cb = CustomJS(code="")
m0 = SomeModel()
for name in m0.properties():
m = SomeModel()
m.js_on_change(name, cb)
assert m.js_property_callbacks == {f"change:{name}": [cb]}
def test_with_non_propname(self) -> None:
cb = CustomJS(code="")
m1 = SomeModel()
m1.js_on_change('foo', cb)
assert m1.js_property_callbacks == {"foo": [cb]}
m2 = SomeModel()
m2.js_on_change('change:b', cb)
assert m2.js_property_callbacks == {"change:b": [cb]}
def test_with_multple_callbacks(self) -> None:
cb1 = CustomJS(code="")
cb2 = CustomJS(code="")
m = SomeModel()
m.js_on_change('foo', cb1, cb2)
assert m.js_property_callbacks == {"foo": [cb1, cb2]}
def test_with_multple_callbacks_separately(self) -> None:
cb1 = CustomJS(code="")
cb2 = CustomJS(code="")
m = SomeModel()
m.js_on_change('foo', cb1)
assert m.js_property_callbacks == {"foo": [cb1]}
m.js_on_change('foo', cb2)
assert m.js_property_callbacks == {"foo": [cb1, cb2]}
def test_ignores_dupe_callbacks(self) -> None:
cb = CustomJS(code="")
m = SomeModel()
m.js_on_change('foo', cb, cb)
assert m.js_property_callbacks == {"foo": [cb]}
| Test_js_on_change |
python | pytorch__pytorch | torch/nn/modules/padding.py | {
"start": 17803,
"end": 19984
} | class ____(_ReflectionPadNd):
r"""Pads the input tensor using the reflection of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If is `int`, uses the same
padding in all boundaries. If a 6-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
:math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
:math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
Note that padding size should be less than the corresponding input dimension.
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
where
:math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
>>> m = nn.ReflectionPad3d(1)
>>> input = torch.arange(8, dtype=torch.float).reshape(1, 1, 2, 2, 2)
>>> m(input)
tensor([[[[[7., 6., 7., 6.],
[5., 4., 5., 4.],
[7., 6., 7., 6.],
[5., 4., 5., 4.]],
[[3., 2., 3., 2.],
[1., 0., 1., 0.],
[3., 2., 3., 2.],
[1., 0., 1., 0.]],
[[7., 6., 7., 6.],
[5., 4., 5., 4.],
[7., 6., 7., 6.],
[5., 4., 5., 4.]],
[[3., 2., 3., 2.],
[1., 0., 1., 0.],
[3., 2., 3., 2.],
[1., 0., 1., 0.]]]]])
"""
# pyrefly: ignore [bad-override]
padding: tuple[int, int, int, int, int, int]
def __init__(self, padding: _size_6_t) -> None:
super().__init__()
self.padding = _ntuple(6)(padding)
| ReflectionPad3d |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 29118,
"end": 31703
} | class ____(BaseConfigHeuristic):
"""
CPU-specific config heuristic with CPU-specific optimizations.
"""
def _get_cpu_exclude_function(
self, method: str = "bmm"
) -> Callable[[sympy.Integer, sympy.Integer, sympy.Integer], bool]:
"""
Get CPU-specific exclude function based on method type.
Returns a function that can be used as exclude condition.
Moved from mm_common._is_large_block_for_cpu and refactored to return a function.
"""
if method in ("conv"):
def exclude_conv(
m: sympy.Integer, n: sympy.Integer, k: sympy.Integer
) -> bool:
# Thresholds are experimentally determined to reduce Triton CPU compile times
if m > 256 or n > 256 or k > 256:
return True
return m * n * k > 2**17
return exclude_conv
elif method in ("mm", "addmm", "int_mm"):
def exclude_mm(
m: sympy.Integer, n: sympy.Integer, k: sympy.Integer
) -> bool:
return m * n > 2**13
return exclude_mm
else: # Default to bmm implementation for unknown methods
def exclude_bmm(
m: sympy.Integer, n: sympy.Integer, k: sympy.Integer
) -> bool:
if m > 128 or n > 128 or k > 128:
return True
return m * n > 2**12
return exclude_bmm
def preprocess_mm_configs(
self,
m: int,
n: int,
k: int,
configs: list[BaseConfig],
has_int8_tensor: bool = False,
scale: float = 1.0,
exclude: Callable[
[sympy.Integer, sympy.Integer, sympy.Integer], bool
] = lambda m, n, k: False,
dtype_size: int = 0,
op_name: str = "mm", # For preprocessing overrides e.g. on CPU
) -> Generator[TritonConfig, None, None]:
"""
CPU-specific preprocessing that applies CPU-specific scaling (0.5) and exclusion logic.
"""
# Get CPU-specific exclude function based on operation type
cpu_exclude_fn = self._get_cpu_exclude_function(op_name)
# Apply CPU-specific scaling (0.5) and exclusion logic
return super().preprocess_mm_configs(
m,
n,
k,
configs=configs,
has_int8_tensor=has_int8_tensor,
scale=0.5,
exclude=cpu_exclude_fn,
dtype_size=dtype_size,
op_name=op_name,
)
| CPUConfigHeuristic |
python | pytorch__pytorch | torch/profiler/_pattern_matcher.py | {
"start": 16154,
"end": 17685
} | class ____(Pattern):
"""
This pattern identifies if we are not setting grad to None in zero_grad.
example:
optimizer.zero_grad()
By setting set_to_none=True, we can gain speedup
Pattern:
XXXXX: _zero_grad
NOT aten::zeros
aten::zero_
aten::zero_ is called on each parameter in the model.
We also want to make sure it is not called by aten::zeros.
Algorithm:
String match
"""
def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
super().__init__(prof, should_benchmark)
self.name = "Gradient Set To Zero Instead of None Pattern"
self.description = (
"Detected gradient set to zero instead of None. "
"Please add 'set_to_none=True' when calling zero_grad()."
)
self.url = (
"https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
"#disable-gradient-calculation-for-validation-or-inference"
)
def match(self, event: _ProfilerEvent) -> bool:
if not event.name.endswith(": zero_grad"):
return False
if not event.children:
return False
for sub_event in traverse_dfs(event.children):
if (
sub_event.name == "aten::zero_"
and sub_event.parent.name != "aten::zeros"
):
return True
# TODO: We should also check if the optimizer's numerical behavior will change.
return False
| GradNotSetToNonePattern |
python | geekcomputers__Python | PongPong_Game/pong/paddle.py | {
"start": 100,
"end": 996
} | class ____(pyglet.shapes.Rectangle):
def __init__(self, *args, **kwargs):
super(Paddle, self).__init__(*args, **kwargs)
self.acc_left, self.acc_right = 0.0, 0.0
self.rightx = 0
self.key_handler = key.KeyStateHandler()
self.event_handlers = [self, self.key_handler]
def update(self, win_size: Tuple, border: float, other_object, dt):
newlx = self.x + self.acc_left
newrx = self.x + self.acc_right
if self.key_handler[key.LEFT]:
self.x = newlx
elif self.key_handler[key.RIGHT]:
self.x = newrx
self.rightx = self.x + self.width
if self.x < border:
self.x = border
self.rightx = self.x + self.width
elif self.rightx > win_size[0] - border:
self.x = win_size[0] - border - self.width
self.rightx = self.x + self.width
| Paddle |
python | doocs__leetcode | lcci/05.04.Closed Number/Solution.py | {
"start": 0,
"end": 825
} | class ____:
def findClosedNumbers(self, num: int) -> List[int]:
ans = [-1] * 2
dirs = (0, 1, 0)
for p in range(2):
a, b = dirs[p], dirs[p + 1]
x = num
for i in range(1, 31):
if (x >> i & 1) == a and (x >> (i - 1) & 1) == b:
x ^= 1 << i
x ^= 1 << (i - 1)
j, k = 0, i - 2
while j < k:
while j < k and (x >> j & 1) == b:
j += 1
while j < k and (x >> k & 1) == a:
k -= 1
if j < k:
x ^= 1 << j
x ^= 1 << k
ans[p] = x
break
return ans
| Solution |
python | walkccc__LeetCode | solutions/3269. Constructing Two Increasing Arrays/3269.py | {
"start": 0,
"end": 753
} | class ____:
def minLargest(self, nums1: list[int], nums2: list[int]) -> int:
m = len(nums1)
n = len(nums2)
# dp[i][j] := the minimum largest number for the first i nums1 and the
# first j nums2
dp = [[math.inf] * (n + 1) for _ in range(m + 1)]
dp[0][0] = 0
def f(prev: int, num: int) -> int:
"""
Returns the next number to fill in the array based on the previous number
and the current number.
"""
return prev + (2 if prev % 2 == num else 1)
for i in range(m + 1):
for j in range(n + 1):
if i > 0:
dp[i][j] = min(dp[i][j], f(dp[i - 1][j], nums1[i - 1]))
if j > 0:
dp[i][j] = min(dp[i][j], f(dp[i][j - 1], nums2[j - 1]))
return dp[m][n]
| Solution |
python | django__django | tests/custom_managers/models.py | {
"start": 2262,
"end": 2417
} | class ____(BaseCustomManager.from_queryset(CustomQuerySet)):
def __init__(self, a, b, c=1, d=2):
super().__init__(a)
| DeconstructibleCustomManager |
python | pytorch__pytorch | test/inductor/test_cpu_select_algorithm.py | {
"start": 115149,
"end": 115215
} | class ____(BaseTestSelectAlgorithm):
pass
| _DynamicShapesTestBase |
python | numpy__numpy | numpy/lib/tests/test_index_tricks.py | {
"start": 14976,
"end": 16575
} | class ____:
def test_regression_1(self):
# Test empty untyped inputs create outputs of indexing type, gh-5804
a, = np.ix_(range(0))
assert_equal(a.dtype, np.intp)
a, = np.ix_([])
assert_equal(a.dtype, np.intp)
# but if the type is specified, don't change it
a, = np.ix_(np.array([], dtype=np.float32))
assert_equal(a.dtype, np.float32)
def test_shape_and_dtype(self):
sizes = (4, 5, 3, 2)
# Test both lists and arrays
for func in (range, np.arange):
arrays = np.ix_(*[func(sz) for sz in sizes])
for k, (a, sz) in enumerate(zip(arrays, sizes)):
assert_equal(a.shape[k], sz)
assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
assert_(np.issubdtype(a.dtype, np.integer))
def test_bool(self):
bool_a = [True, False, True, True]
int_a, = np.nonzero(bool_a)
assert_equal(np.ix_(bool_a)[0], int_a)
def test_1d_only(self):
idx2d = [[1, 2, 3], [4, 5, 6]]
assert_raises(ValueError, np.ix_, idx2d)
def test_repeated_input(self):
length_of_vector = 5
x = np.arange(length_of_vector)
out = ix_(x, x)
assert_equal(out[0].shape, (length_of_vector, 1))
assert_equal(out[1].shape, (1, length_of_vector))
# check that input shape is not modified
assert_equal(x.shape, (length_of_vector,))
def test_c_():
a = c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]
assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])
| TestIx_ |
python | getsentry__sentry | src/sentry/integrations/discord/analytics.py | {
"start": 650,
"end": 833
} | class ____(analytics.Event):
provider: str
actor_id: int
actor_type: str
@analytics.eventclass("integrations.discord.message_interaction")
| DiscordIntegrationIdentityUnlinked |
python | huggingface__transformers | tests/models/layoutlmv3/test_modeling_layoutlmv3.py | {
"start": 14800,
"end": 16093
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
def test_inference_no_head(self):
model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
# forward pass
outputs = model(
input_ids=input_ids.to(torch_device),
bbox=bbox.to(torch_device),
pixel_values=pixel_values.to(torch_device),
)
# verify the logits
expected_shape = torch.Size((1, 199, 768))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
).to(torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
| LayoutLMv3ModelIntegrationTest |
python | walkccc__LeetCode | solutions/48. Rotate Image/48.py | {
"start": 0,
"end": 215
} | class ____:
def rotate(self, matrix: list[list[int]]) -> None:
matrix.reverse()
for i, j in itertools.combinations(range(len(matrix)), 2):
matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
| Solution |
python | python-attrs__attrs | tests/test_next_gen.py | {
"start": 12824,
"end": 14092
} | class ____:
"""
Verify our re-imports and mirroring works.
"""
def test_converters(self):
"""
Importing from attrs.converters works.
"""
from attrs.converters import optional
assert optional is _attr.converters.optional
def test_exceptions(self):
"""
Importing from attrs.exceptions works.
"""
from attrs.exceptions import FrozenError
assert FrozenError is _attr.exceptions.FrozenError
def test_filters(self):
"""
Importing from attrs.filters works.
"""
from attrs.filters import include
assert include is _attr.filters.include
def test_setters(self):
"""
Importing from attrs.setters works.
"""
from attrs.setters import pipe
assert pipe is _attr.setters.pipe
def test_validators(self):
"""
Importing from attrs.validators works.
"""
from attrs.validators import and_
assert and_ is _attr.validators.and_
def test_inspect_not_attrs_class():
"""
inspect() raises an error if the class is not an attrs class.
"""
with pytest.raises(attrs.exceptions.NotAnAttrsClassError):
attrs.inspect(object)
| TestImports |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/assets/asset_testing.py | {
"start": 1152,
"end": 1622
} | class ____(ConfigurableResource):
def query(self, url) -> dict[str, Any]:
return requests.get(url).json()
@asset
def uses_config_and_resource(config: MyConfig, my_api: MyAPIResource):
return my_api.query(config.api_url)
def test_uses_resource() -> None:
result = uses_config_and_resource(
config=MyConfig(api_url="https://dagster.io"), my_api=MyAPIResource()
)
assert result == {"foo": "bar"}
# end_asset_with_resource
| MyAPIResource |
python | PyCQA__pylint | tests/functional/a/arguments_differ.py | {
"start": 6683,
"end": 7032
} | class ____(AbstractFoo):
def kwonly_6(self, first, *args, **kwargs): # valid override
"One positional with the rest variadics to pass through parent params"
# Adding arguments with default values to a child class is valid
# See:
# https://github.com/pylint-dev/pylint/issues/1556
# https://github.com/pylint-dev/pylint/issues/5338
| Foo2 |
python | sqlalchemy__sqlalchemy | test/orm/test_deferred.py | {
"start": 15124,
"end": 49699
} | class ____(AssertsCompiledSQL, _fixtures.FixtureTest):
__dialect__ = "default"
def test_options(self):
"""Options on a mapper to create deferred and undeferred columns"""
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(Order, orders)
sess = fixture_session()
q = sess.query(Order).order_by(Order.id).options(defer(Order.user_id))
def go():
q.all()[0].user_id
self.sql_eq_(
go,
[
(
"SELECT orders.id AS orders_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id",
{},
),
(
"SELECT orders.user_id "
"FROM orders WHERE orders.id = :pk_1",
{"pk_1": 1},
),
],
)
sess.expunge_all()
# hypothetical for 2.0 - don't overwrite conflicting user-defined
# options, raise instead.
# not sure if this behavior will fly with the userbase. however,
# it at least gives us a clear place to affirmatively resolve
# conflicts like this if we see that we need to re-enable overwriting
# of conflicting options.
q2 = q.options(undefer(Order.user_id))
with expect_raises_message(
sa.exc.InvalidRequestError,
r"Loader strategies for ORM Path\[Mapper\[Order\(orders\)\] -> "
r"Order.user_id\] conflict",
):
q2.all()
q3 = (
sess.query(Order)
.order_by(Order.id)
.options(undefer(Order.user_id))
)
self.sql_eq_(
q3.all,
[
(
"SELECT orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id",
{},
)
],
)
def test_undefer_group(self):
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(
Order,
orders,
properties=util.OrderedDict(
[
("userident", deferred(orders.c.user_id, group="primary")),
(
"description",
deferred(orders.c.description, group="primary"),
),
("opened", deferred(orders.c.isopen, group="primary")),
]
),
)
sess = fixture_session()
q = sess.query(Order).order_by(Order.id)
def go():
result = q.options(undefer_group("primary")).all()
o2 = result[2]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, "order 3")
self.sql_eq_(
go,
[
(
"SELECT orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id",
{},
)
],
)
def test_undefer_group_multi(self):
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(
Order,
orders,
properties=util.OrderedDict(
[
("userident", deferred(orders.c.user_id, group="primary")),
(
"description",
deferred(orders.c.description, group="primary"),
),
("opened", deferred(orders.c.isopen, group="secondary")),
]
),
)
sess = fixture_session()
q = sess.query(Order).order_by(Order.id)
def go():
result = q.options(
undefer_group("primary"), undefer_group("secondary")
).all()
o2 = result[2]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, "order 3")
self.sql_eq_(
go,
[
(
"SELECT "
"orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id",
{},
)
],
)
def test_undefer_group_multi_pathed(self):
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(
Order,
orders,
properties=util.OrderedDict(
[
("userident", deferred(orders.c.user_id, group="primary")),
(
"description",
deferred(orders.c.description, group="primary"),
),
("opened", deferred(orders.c.isopen, group="secondary")),
]
),
)
sess = fixture_session()
q = sess.query(Order).order_by(Order.id)
def go():
result = q.options(
Load(Order).undefer_group("primary").undefer_group("secondary")
).all()
o2 = result[2]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, "order 3")
self.sql_eq_(
go,
[
(
"SELECT "
"orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id",
{},
)
],
)
def test_undefer_group_with_load(self):
users, Order, User, orders = (
self.tables.users,
self.classes.Order,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User,
users,
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=util.OrderedDict(
[
("userident", deferred(orders.c.user_id, group="primary")),
(
"description",
deferred(orders.c.description, group="primary"),
),
("opened", deferred(orders.c.isopen, group="primary")),
("user", relationship(User)),
]
),
)
sess = fixture_session()
q = (
sess.query(Order)
.filter(Order.id == 3)
.options(
selectinload(Order.user),
undefer_group("primary"),
)
)
def go():
result = q.all()
print(result)
o = result[0]
eq_(o.opened, 1)
eq_(o.userident, 7)
eq_(o.description, "order 3")
u = o.user
eq_(u.id, 7)
self.sql_eq_(
go,
[
(
"SELECT orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders WHERE orders.id = :id_1",
{"id_1": 3},
),
(
"SELECT users.id, users.name "
"FROM users WHERE users.id IN "
"(__[POSTCOMPILE_primary_keys])",
[{"primary_keys": [7]}],
),
],
)
def test_undefer_group_from_relationship_lazyload(self):
users, Order, User, orders = (
self.tables.users,
self.classes.Order,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(orders=relationship(Order, order_by=orders.c.id)),
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=util.OrderedDict(
[
("userident", deferred(orders.c.user_id, group="primary")),
(
"description",
deferred(orders.c.description, group="primary"),
),
("opened", deferred(orders.c.isopen, group="primary")),
]
),
)
sess = fixture_session()
q = (
sess.query(User)
.filter(User.id == 7)
.options(defaultload(User.orders).undefer_group("primary"))
)
def go():
result = q.all()
o2 = result[0].orders[1]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, "order 3")
self.sql_eq_(
go,
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :id_1",
{"id_1": 7},
),
(
"SELECT orders.id, "
"orders.user_id, "
"orders.address_id, "
"orders.description, "
"orders.isopen "
"FROM orders WHERE :param_1 = orders.user_id "
"ORDER BY orders.id",
{"param_1": 7},
),
],
)
def test_undefer_group_from_relationship_subqueryload(self):
users, Order, User, orders = (
self.tables.users,
self.classes.Order,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(orders=relationship(Order, order_by=orders.c.id)),
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=util.OrderedDict(
[
("userident", deferred(orders.c.user_id, group="primary")),
(
"description",
deferred(orders.c.description, group="primary"),
),
("opened", deferred(orders.c.isopen, group="primary")),
]
),
)
sess = fixture_session()
q = (
sess.query(User)
.filter(User.id == 7)
.options(subqueryload(User.orders).undefer_group("primary"))
)
def go():
result = q.all()
o2 = result[0].orders[1]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, "order 3")
self.sql_eq_(
go,
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :id_1",
{"id_1": 7},
),
(
"SELECT "
"orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen, "
"anon_1.users_id AS anon_1_users_id "
"FROM (SELECT users.id AS "
"users_id FROM users WHERE users.id = :id_1) AS anon_1 "
"JOIN orders ON anon_1.users_id = orders.user_id ORDER BY "
"orders.id",
[{"id_1": 7}],
),
],
)
def test_undefer_group_from_relationship_joinedload(self):
users, Order, User, orders = (
self.tables.users,
self.classes.Order,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(orders=relationship(Order, order_by=orders.c.id)),
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=util.OrderedDict(
[
("userident", deferred(orders.c.user_id, group="primary")),
(
"description",
deferred(orders.c.description, group="primary"),
),
("opened", deferred(orders.c.isopen, group="primary")),
]
),
)
sess = fixture_session()
q = (
sess.query(User)
.filter(User.id == 7)
.options(joinedload(User.orders).undefer_group("primary"))
)
def go():
result = q.all()
o2 = result[0].orders[1]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, "order 3")
self.sql_eq_(
go,
[
(
"SELECT users.id AS users_id, users.name AS users_name, "
"orders_1.id AS orders_1_id, "
"orders_1.user_id AS orders_1_user_id, "
"orders_1.address_id AS orders_1_address_id, "
"orders_1.description AS orders_1_description, "
"orders_1.isopen AS orders_1_isopen "
"FROM users "
"LEFT OUTER JOIN orders AS orders_1 ON users.id = "
"orders_1.user_id WHERE users.id = :id_1 "
"ORDER BY orders_1.id",
{"id_1": 7},
)
],
)
def test_undefer_group_from_relationship_joinedload_colexpr(self):
users, Order, User, orders = (
self.tables.users,
self.classes.Order,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(orders=relationship(Order, order_by=orders.c.id)),
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=util.OrderedDict(
[
("userident", deferred(orders.c.user_id, group="primary")),
(
"lower_desc",
deferred(
sa.func.lower(orders.c.description).label(None),
group="primary",
),
),
("opened", deferred(orders.c.isopen, group="primary")),
]
),
)
sess = fixture_session()
q = (
sess.query(User)
.filter(User.id == 7)
.options(joinedload(User.orders).undefer_group("primary"))
)
def go():
result = q.all()
o2 = result[0].orders[1]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.lower_desc, "order 3")
self.sql_eq_(
go,
[
(
"SELECT users.id AS users_id, users.name AS users_name, "
"lower(orders_1.description) AS lower_1, "
"orders_1.id AS orders_1_id, "
"orders_1.user_id AS orders_1_user_id, "
"orders_1.address_id AS orders_1_address_id, "
"orders_1.description AS orders_1_description, "
"orders_1.isopen AS orders_1_isopen "
"FROM users "
"LEFT OUTER JOIN orders AS orders_1 ON users.id = "
"orders_1.user_id WHERE users.id = :id_1 "
"ORDER BY orders_1.id",
{"id_1": 7},
)
],
)
def test_undefer_star(self):
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(
Order,
orders,
properties=util.OrderedDict(
[
("userident", deferred(orders.c.user_id)),
("description", deferred(orders.c.description)),
("opened", deferred(orders.c.isopen)),
]
),
)
sess = fixture_session()
q = sess.query(Order).options(Load(Order).undefer("*"))
self.assert_compile(
q,
"SELECT "
"orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders",
)
def test_locates_col(self):
"""changed in 1.0 - we don't search for deferred cols in the result
now."""
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(
Order,
orders,
properties={"description": deferred(orders.c.description)},
)
sess = fixture_session()
o1 = (
sess.query(Order)
.order_by(Order.id)
.add_columns(orders.c.description)
.first()
)[0]
def go():
eq_(o1.description, "order 1")
# prior to 1.0 we'd search in the result for this column
# self.sql_count_(0, go)
self.sql_count_(1, go)
def test_locates_col_rowproc_only(self):
"""changed in 1.0 - we don't search for deferred cols in the result
now.
Because the loading for ORM Query and Query from a core select
is now split off, we test loading from a plain select()
separately.
"""
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(
Order,
orders,
properties={"description": deferred(orders.c.description)},
)
sess = fixture_session()
stmt = sa.select(Order).order_by(Order.id)
o1 = (sess.query(Order).from_statement(stmt).all())[0]
def go():
eq_(o1.description, "order 1")
# prior to 1.0 we'd search in the result for this column
# self.sql_count_(0, go)
self.sql_count_(1, go)
def test_raise_on_col_rowproc_only(self):
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"description": deferred(orders.c.description, raiseload=True)
},
)
sess = fixture_session()
stmt = sa.select(Order).order_by(Order.id)
o1 = (sess.query(Order).from_statement(stmt).all())[0]
assert_raises_message(
sa.exc.InvalidRequestError,
"'Order.description' is not available due to raiseload=True",
getattr,
o1,
"description",
)
def test_raise_on_col_newstyle(self):
class Base(DeclarativeBase):
pass
class Order(Base):
__tablename__ = "orders"
id: Mapped[int] = mapped_column(primary_key=True)
user_id: Mapped[int]
address_id: Mapped[int]
isopen: Mapped[bool]
description: Mapped[str] = mapped_column(deferred_raiseload=True)
sess = fixture_session()
stmt = sa.select(Order).order_by(Order.id)
o1 = (sess.query(Order).from_statement(stmt).all())[0]
assert_raises_message(
sa.exc.InvalidRequestError,
"'Order.description' is not available due to raiseload=True",
getattr,
o1,
"description",
)
def test_locates_col_w_option_rowproc_only(self):
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(Order, orders)
sess = fixture_session()
stmt = sa.select(Order).order_by(Order.id)
o1 = (
sess.query(Order)
.from_statement(stmt)
.options(defer(Order.description))
.all()
)[0]
def go():
eq_(o1.description, "order 1")
# prior to 1.0 we'd search in the result for this column
# self.sql_count_(0, go)
self.sql_count_(1, go)
def test_raise_on_col_w_option_rowproc_only(self):
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(Order, orders)
sess = fixture_session()
stmt = sa.select(Order).order_by(Order.id)
o1 = (
sess.query(Order)
.from_statement(stmt)
.options(defer(Order.description, raiseload=True))
.all()
)[0]
assert_raises_message(
sa.exc.InvalidRequestError,
"'Order.description' is not available due to raiseload=True",
getattr,
o1,
"description",
)
def test_deep_options(self):
users, items, order_items, Order, Item, User, orders = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(description=deferred(items.c.description)),
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(items=relationship(Item, secondary=order_items)),
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(orders=relationship(Order, order_by=orders.c.id)),
)
sess = fixture_session()
q = sess.query(User).order_by(User.id)
result = q.all()
item = result[0].orders[1].items[1]
def go():
eq_(item.description, "item 4")
self.sql_count_(1, go)
eq_(item.description, "item 4")
sess.expunge_all()
result = q.options(
defaultload(User.orders)
.defaultload(Order.items)
.undefer(Item.description)
).all()
item = result[0].orders[1].items[1]
def go():
eq_(item.description, "item 4")
self.sql_count_(0, go)
eq_(item.description, "item 4")
@testing.combinations(
lazyload, joinedload, subqueryload, selectinload, immediateload
)
def test_defer_star_from_loader(self, opt_class):
User = self.classes.User
Order = self.classes.Order
users = self.tables.users
orders = self.tables.orders
self.mapper_registry.map_imperatively(
User,
users,
properties={"orders": relationship(Order)},
)
self.mapper_registry.map_imperatively(
Order,
orders,
)
sess = fixture_session()
stmt = (
select(User)
.options(opt_class(User.orders).defer("*"))
.where(User.id == 9)
)
if opt_class is joinedload:
obj = sess.scalars(stmt).unique().one()
else:
obj = sess.scalars(stmt).one()
eq_(obj.orders, [Order(id=2), Order(id=4)])
assert "description" not in obj.orders[0].__dict__
eq_(obj.orders[0].description, "order 2")
def test_path_entity(self):
r"""test the legacy \*addl_attrs argument."""
User = self.classes.User
Order = self.classes.Order
Item = self.classes.Item
users = self.tables.users
orders = self.tables.orders
items = self.tables.items
order_items = self.tables.order_items
self.mapper_registry.map_imperatively(
User,
users,
properties={"orders": relationship(Order, lazy="joined")},
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item, secondary=order_items, lazy="joined"
)
},
)
self.mapper_registry.map_imperatively(Item, items)
sess = fixture_session()
exp = (
"SELECT users.id AS users_id, users.name AS users_name, "
"items_1.id AS items_1_id, orders_1.id AS orders_1_id, "
"orders_1.user_id AS orders_1_user_id, orders_1.address_id "
"AS orders_1_address_id, orders_1.description AS "
"orders_1_description, orders_1.isopen AS orders_1_isopen "
"FROM users LEFT OUTER JOIN orders AS orders_1 "
"ON users.id = orders_1.user_id LEFT OUTER JOIN "
"(order_items AS order_items_1 JOIN items AS items_1 "
"ON items_1.id = order_items_1.item_id) "
"ON orders_1.id = order_items_1.order_id"
)
q = sess.query(User).options(
defaultload(User.orders)
.defaultload(Order.items)
.defer(Item.description)
)
self.assert_compile(q, exp)
def test_chained_multi_col_options(self):
users, User = self.tables.users, self.classes.User
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(
User, users, properties={"orders": relationship(Order)}
)
self.mapper_registry.map_imperatively(Order, orders)
sess = fixture_session()
q = sess.query(User).options(
joinedload(User.orders)
.defer(Order.description)
.defer(Order.isopen)
)
self.assert_compile(
q,
"SELECT users.id AS users_id, "
"users.name AS users_name, "
"orders_1.id AS orders_1_id, "
"orders_1.user_id AS orders_1_user_id, "
"orders_1.address_id AS orders_1_address_id "
"FROM users "
"LEFT OUTER JOIN orders AS orders_1 "
"ON users.id = orders_1.user_id",
)
def test_load_only_no_pk(self):
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(Order, orders)
sess = fixture_session()
q = sess.query(Order).options(
load_only(Order.isopen, Order.description)
)
self.assert_compile(
q,
"SELECT orders.id AS orders_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen FROM orders",
)
def test_load_only_no_pk_rt(self):
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(Order, orders)
sess = fixture_session()
q = (
sess.query(Order)
.order_by(Order.id)
.options(load_only(Order.isopen, Order.description))
)
eq_(q.first(), Order(id=1))
def test_load_only_w_deferred(self):
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(
Order,
orders,
properties={"description": deferred(orders.c.description)},
)
sess = fixture_session()
q = sess.query(Order).options(
load_only(Order.isopen, Order.description), undefer(Order.user_id)
)
self.assert_compile(
q,
"SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen FROM orders",
)
def test_load_only_synonym(self):
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(
Order,
orders,
properties={"desc": synonym("description")},
)
opt = load_only(Order.isopen, Order.desc)
sess = fixture_session()
q = sess.query(Order).options(opt)
self.assert_compile(
q,
"SELECT orders.id AS orders_id, orders.description "
"AS orders_description, orders.isopen AS orders_isopen "
"FROM orders",
)
def test_load_only_propagate_unbound(self):
self._test_load_only_propagate(False)
def test_load_only_propagate_bound(self):
self._test_load_only_propagate(True)
def _test_load_only_propagate(self, use_load):
User = self.classes.User
Address = self.classes.Address
users = self.tables.users
addresses = self.tables.addresses
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
expected = [
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])",
{"id_1": [7, 8]},
),
(
"SELECT addresses.id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id",
{"param_1": 7},
),
(
"SELECT addresses.id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id",
{"param_1": 8},
),
]
if use_load:
opt = (
Load(User)
.defaultload(User.addresses)
.load_only(Address.id, Address.email_address)
)
else:
opt = defaultload(User.addresses).load_only(
Address.id, Address.email_address
)
q = sess.query(User).options(opt).filter(User.id.in_([7, 8]))
def go():
for user in q:
user.addresses
self.sql_eq_(go, expected)
def test_load_only_parent_specific(self):
User = self.classes.User
Address = self.classes.Address
Order = self.classes.Order
users = self.tables.users
addresses = self.tables.addresses
orders = self.tables.orders
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(Order, orders)
sess = fixture_session()
q = sess.query(User, Order, Address).options(
Load(User).load_only(User.name),
Load(Order).load_only(Order.id),
Load(Address).load_only(Address.id, Address.email_address),
)
self.assert_compile(
q,
"SELECT users.id AS users_id, "
"users.name AS users_name, "
"orders.id AS orders_id, "
"addresses.id AS addresses_id, "
"addresses.email_address AS addresses_email_address "
"FROM users, orders, addresses",
)
def test_load_only_path_specific(self):
User = self.classes.User
Address = self.classes.Address
Order = self.classes.Order
users = self.tables.users
addresses = self.tables.addresses
orders = self.tables.orders
self.mapper_registry.map_imperatively(
User,
users,
properties=util.OrderedDict(
[
("addresses", relationship(Address, lazy="joined")),
("orders", relationship(Order, lazy="joined")),
]
),
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(Order, orders)
sess = fixture_session()
q = sess.query(User).options(
load_only(User.name)
.defaultload(User.addresses)
.load_only(Address.id, Address.email_address),
defaultload(User.orders).load_only(Order.id),
)
# hmmmm joinedload seems to be forcing users.id into here...
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.email_address AS addresses_1_email_address, "
"orders_1.id AS orders_1_id FROM users "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id "
"LEFT OUTER JOIN orders AS orders_1 "
"ON users.id = orders_1.user_id",
)
@testing.combinations(
(
"order_one",
True,
),
(
"order_two",
False,
),
argnames="name, rel_ordering",
id_="sa",
)
| DeferredOptionsTest |
python | dagster-io__dagster | examples/docs_projects/project_ml/src/project_ml/defs/resources.py | {
"start": 175,
"end": 679
} | class ____(dg.ConfigurableResource, ABC):
"""Abstract base class for model storage resources."""
@abstractmethod
def save_model(self, model_data: dict[str, Any], model_name: str):
pass
@abstractmethod
def load_model(self, model_name: str) -> dict[str, Any]:
pass
@abstractmethod
def list_models(self) -> list[str]:
"""List available models, sorted by modification time (newest first)."""
pass
# end_model_storage_interface
| ModelStoreResource |
python | pytorch__pytorch | benchmarks/instruction_counts/core/api.py | {
"start": 1570,
"end": 1948
} | class ____:
py_setup: str = ""
cpp_setup: str = ""
global_setup: str = ""
def __post_init__(self) -> None:
for field in dataclasses.fields(self):
assert field.type is str
value: str = getattr(self, field.name)
object.__setattr__(self, field.name, textwrap.dedent(value))
@dataclasses.dataclass(frozen=True)
| GroupedSetup |
python | walkccc__LeetCode | solutions/3416. Subsequences with a Unique Middle Mode II/3416.py | {
"start": 937,
"end": 2732
} | class ____:
# Same as 3395. Subsequences with a Unique Middle Mode I
def subsequencesWithMiddleMode(self, nums: list[int]) -> int:
MOD = 1_000_000_007
ans = 0
p = collections.Counter() # prefix counter
s = collections.Counter(nums) # suffix counter
def nC2(n: int) -> int:
return n * (n - 1) // 2
pss = 0
spp = 0
pp = 0
ss = sum(freq**2 for freq in s.values())
ps = 0
for i, a in enumerate(nums):
# Update running sums after decrementing s[a].
pss += p[a] * (-s[a]**2 + (s[a] - 1)**2)
spp += -p[a]**2 # (-s[a] + (s[a] - 1)) * p[a]**2
ss += -s[a]**2 + (s[a] - 1)**2
ps += -p[a] # -p[a] * (-s[a] + (s[a] - 1))
s[a] -= 1
l = i
r = len(nums) - i - 1
# Start with all possible subsequences with `a` as the middle number.
ans += nC2(l) * nC2(r)
# Minus the cases where the frequency of `a` is 1, so it's not a mode.
ans -= nC2(l - p[a]) * nC2(r - s[a])
# Minus the values where `b != a`.
pss_ = pss - p[a] * s[a]**2
spp_ = spp - s[a] * p[a]**2
pp_ = pp - p[a]**2
ss_ = ss - s[a]**2
ps_ = ps - p[a] * s[a]
p_ = l - p[a]
s_ = r - s[a]
# Minus the cases where the `a` is not a "unique" mode or not a mode.
ans -= ps_ * (p[a] * (r - s[a])) + pss_ * (-p[a])
ans -= ps_ * (s[a] * (l - p[a])) + spp_ * (-s[a])
ans -= (pp_ - p_) * s[a] * (r - s[a]) // 2
ans -= (ss_ - s_) * p[a] * (l - p[a]) // 2
ans %= MOD
# Update running sums after incrementing p[a].
pss += s[a]**2 # (-p[a] + (p[a] + 1)) * s[a]**2
spp += s[a] * (-p[a]**2 + (p[a] + 1)**2)
pp += -p[a]**2 + (p[a] + 1)**2
ps += s[a] # (-p[a] + (p[a] + 1)) * s[a]
p[a] += 1
return ans
| Solution |
python | django__django | tests/model_inheritance/models.py | {
"start": 1917,
"end": 2203
} | class ____(Place, Rating):
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
chef = models.ForeignKey(Chef, models.SET_NULL, null=True, blank=True)
class Meta(Rating.Meta):
db_table = "my_restaurant"
| Restaurant |
python | numpy__numpy | numpy/f2py/tests/test_return_integer.py | {
"start": 1113,
"end": 1813
} | class ____(TestReturnInteger):
sources = [
util.getpath("tests", "src", "return_integer", "foo77.f"),
util.getpath("tests", "src", "return_integer", "foo90.f90"),
]
@pytest.mark.parametrize("name",
["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"])
def test_all_f77(self, name):
self.check_function(getattr(self.module, name), name)
@pytest.mark.parametrize("name",
["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"])
def test_all_f90(self, name):
self.check_function(getattr(self.module.f90_return_integer, name),
name)
| TestFReturnInteger |
python | numpy__numpy | numpy/f2py/tests/test_crackfortran.py | {
"start": 11211,
"end": 12802
} | class ____:
@pytest.mark.parametrize(
['adversary'],
[
('@)@bind@(@',),
('@)@bind @(@',),
('@)@bind foo bar baz@(@',)
]
)
def test_nameargspattern_backtracking(self, adversary):
'''address ReDOS vulnerability:
https://github.com/numpy/numpy/issues/23338'''
trials_per_batch = 12
batches_per_regex = 4
start_reps, end_reps = 15, 25
for ii in range(start_reps, end_reps):
repeated_adversary = adversary * ii
# test times in small batches.
# this gives us more chances to catch a bad regex
# while still catching it before too long if it is bad
for _ in range(batches_per_regex):
times = []
for _ in range(trials_per_batch):
t0 = time.perf_counter()
mtch = nameargspattern.search(repeated_adversary)
times.append(time.perf_counter() - t0)
# our pattern should be much faster than 0.2s per search
# it's unlikely that a bad regex will pass even on fast CPUs
assert np.median(times) < 0.2
assert not mtch
# if the adversary is capped with @)@, it becomes acceptable
# according to the old version of the regex.
# that should still be true.
good_version_of_adversary = repeated_adversary + '@)@'
assert nameargspattern.search(good_version_of_adversary)
| TestNameArgsPatternBacktracking |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 688577,
"end": 689162
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of LinkRepositoryToProject"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "project", "repository")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
project = sgqlc.types.Field("Project", graphql_name="project")
"""The linked Project."""
repository = sgqlc.types.Field("Repository", graphql_name="repository")
"""The linked Repository."""
| LinkRepositoryToProjectPayload |
python | django-haystack__django-haystack | haystack/models.py | {
"start": 602,
"end": 8819
} | class ____:
"""
A single search result. The actual object is loaded lazily by accessing
object; until then this object only stores the model, pk, and score.
Note that iterating over SearchResults and getting the object for each
result will do O(N) database queries, which may not fit your needs for
performance.
"""
def __init__(self, app_label, model_name, pk, score, **kwargs):
self.app_label, self.model_name = app_label, model_name
self.pk = pk
self.score = score
self._object = None
self._model = None
self._verbose_name = None
self._additional_fields = []
self._point_of_origin = kwargs.pop("_point_of_origin", None)
self._distance = kwargs.pop("_distance", None)
self.stored_fields = None
self.log = self._get_log()
for key, value in kwargs.items():
if key not in self.__dict__:
self.__dict__[key] = value
self._additional_fields.append(key)
def _get_log(self):
return logging.getLogger("haystack")
def __repr__(self):
return "<SearchResult: %s.%s (pk=%r)>" % (
self.app_label,
self.model_name,
self.pk,
)
def __str__(self):
return force_str(self.__repr__())
def __getattr__(self, attr):
if attr == "__getnewargs__":
raise AttributeError
return self.__dict__.get(attr, None)
def _get_searchindex(self):
from haystack import connections
return connections[DEFAULT_ALIAS].get_unified_index().get_index(self.model)
searchindex = property(_get_searchindex)
def _get_object(self):
if self._object is None:
if self.model is None:
self.log.error("Model could not be found for SearchResult '%s'.", self)
return None
try:
try:
self._object = self.searchindex.read_queryset().get(pk=self.pk)
except NotHandled:
self.log.warning(
"Model '%s.%s' not handled by the routers.",
self.app_label,
self.model_name,
)
# Revert to old behaviour
self._object = self.model._default_manager.get(pk=self.pk)
except ObjectDoesNotExist:
self.log.error(
"Object could not be found in database for SearchResult '%s'.", self
)
self._object = None
return self._object
def _set_object(self, obj):
self._object = obj
object = property(_get_object, _set_object) # noqa A003
def _get_model(self):
if self._model is None:
try:
self._model = haystack_get_model(self.app_label, self.model_name)
except LookupError:
# this changed in change 1.7 to throw an error instead of
# returning None when the model isn't found. So catch the
# lookup error and keep self._model == None.
pass
return self._model
def _set_model(self, obj):
self._model = obj
model = property(_get_model, _set_model)
def _get_distance(self):
from django.contrib.gis.measure import Distance
if self._distance is None:
# We didn't get it from the backend & we haven't tried calculating
# it yet. Check if geopy is available to do it the "slow" way
# (even though slow meant 100 distance calculations in 0.004 seconds
# in my testing).
if geopy_distance is None:
raise SpatialError(
"The backend doesn't have 'DISTANCE_AVAILABLE' enabled & the 'geopy' library could not be imported, so distance information is not available."
)
if not self._point_of_origin:
raise SpatialError("The original point is not available.")
if not hasattr(self, self._point_of_origin["field"]):
raise SpatialError(
"The field '%s' was not included in search results, so the distance could not be calculated."
% self._point_of_origin["field"]
)
po_lng, po_lat = self._point_of_origin["point"].coords
location_field = getattr(self, self._point_of_origin["field"])
if location_field is None:
return None
lf_lng, lf_lat = location_field.coords
self._distance = Distance(
km=geopy_distance.distance((po_lat, po_lng), (lf_lat, lf_lng)).km
)
# We've either already calculated it or the backend returned it, so
# let's use that.
return self._distance
def _set_distance(self, dist):
self._distance = dist
distance = property(_get_distance, _set_distance)
def _get_verbose_name(self):
if self.model is None:
self.log.error("Model could not be found for SearchResult '%s'.", self)
return ""
return force_str(capfirst(self.model._meta.verbose_name))
verbose_name = property(_get_verbose_name)
def _get_verbose_name_plural(self):
if self.model is None:
self.log.error("Model could not be found for SearchResult '%s'.", self)
return ""
return force_str(capfirst(self.model._meta.verbose_name_plural))
verbose_name_plural = property(_get_verbose_name_plural)
def content_type(self):
"""Returns the content type for the result's model instance."""
if self.model is None:
self.log.error("Model could not be found for SearchResult '%s'.", self)
return ""
return str(self.model._meta)
def get_additional_fields(self):
"""
Returns a dictionary of all of the fields from the raw result.
Useful for serializing results. Only returns what was seen from the
search engine, so it may have extra fields Haystack's indexes aren't
aware of.
"""
additional_fields = {}
for fieldname in self._additional_fields:
additional_fields[fieldname] = getattr(self, fieldname)
return additional_fields
def get_stored_fields(self):
"""
Returns a dictionary of all of the stored fields from the SearchIndex.
Useful for serializing results. Only returns the fields Haystack's
indexes are aware of as being 'stored'.
"""
if self._stored_fields is None:
from haystack import connections
try:
index = (
connections[DEFAULT_ALIAS].get_unified_index().get_index(self.model)
)
except NotHandled:
# Not found? Return nothing.
return {}
self._stored_fields = {}
# Iterate through the index's fields, pulling out the fields that
# are stored.
for fieldname, field in index.fields.items():
if field.stored is True:
self._stored_fields[fieldname] = getattr(self, fieldname, "")
return self._stored_fields
def __getstate__(self):
"""
Returns a dictionary representing the ``SearchResult`` in order to
make it pickleable.
"""
# The ``log`` is excluded because, under the hood, ``logging`` uses
# ``threading.Lock``, which doesn't pickle well.
ret_dict = self.__dict__.copy()
del ret_dict["log"]
return ret_dict
def __setstate__(self, data_dict):
"""
Updates the object's attributes according to data passed by pickle.
"""
self.__dict__.update(data_dict)
self.log = self._get_log()
def reload_indexes(sender, *args, **kwargs):
from haystack import connections
for conn in connections.all():
ui = conn.get_unified_index()
# Note: Unlike above, we're resetting the ``UnifiedIndex`` here.
# Thi gives us a clean slate.
ui.reset()
| SearchResult |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/sanity/__init__.py | {
"start": 25008,
"end": 25215
} | class ____(TestSkipped):
"""Sanity test skipped."""
def __init__(self, test: str, python_version: t.Optional[str] = None) -> None:
super().__init__(COMMAND, test, python_version)
| SanitySkipped |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/serializers/data_source_serializer.py | {
"start": 329,
"end": 1821
} | class ____(Serializer):
def get_attrs(
self, item_list: Sequence[DataSource], user: Any, **kwargs: Any
) -> MutableMapping[DataSource, dict[str, Any]]:
attrs: dict[DataSource, dict[str, Any]] = defaultdict(dict)
ds_by_type: dict[type[DataSourceTypeHandler], list[DataSource]] = defaultdict(list)
for item in item_list:
ds_by_type[item.type_handler].append(item)
serialized_query_objs: dict[int, dict[str, Any]] = {}
for type_handler, ds_items in ds_by_type.items():
ds_query_objs = list(type_handler.bulk_get_query_object(ds_items).items())
serialized: list[dict[str, Any]] = serialize(
[query_obj for ds, query_obj in ds_query_objs], user=user
)
serialized_query_objs.update(
{
ds_id: serialized_obj
for (ds_id, query_obj), serialized_obj in zip(ds_query_objs, serialized)
}
)
for item in item_list:
attrs[item]["query_obj"] = serialized_query_objs.get(item.id, [])
return attrs
def serialize(
self, obj: DataSource, attrs: Mapping[str, Any], user, **kwargs
) -> dict[str, Any]:
return {
"id": str(obj.id),
"organizationId": str(obj.organization_id),
"type": obj.type,
"sourceId": str(obj.source_id),
"queryObj": attrs["query_obj"],
}
| DataSourceSerializer |
python | matplotlib__matplotlib | lib/matplotlib/legend.py | {
"start": 14038,
"end": 55618
} | class ____(Artist):
"""
Place a legend on the figure/axes.
"""
# 'best' is only implemented for Axes legends
codes = {'best': 0, **AnchoredOffsetbox.codes}
zorder = 5
def __str__(self):
return "Legend"
@_docstring.interpd
def __init__(
self, parent, handles, labels,
*,
loc=None,
numpoints=None, # number of points in the legend line
markerscale=None, # relative size of legend markers vs. original
markerfirst=True, # left/right ordering of legend marker and label
reverse=False, # reverse ordering of legend marker and label
scatterpoints=None, # number of scatter points
scatteryoffsets=None,
prop=None, # properties for the legend texts
fontsize=None, # keyword to set font size directly
labelcolor=None, # keyword to set the text color
# spacing & pad defined as a fraction of the font-size
borderpad=None, # whitespace inside the legend border
labelspacing=None, # vertical space between the legend entries
handlelength=None, # length of the legend handles
handleheight=None, # height of the legend handles
handletextpad=None, # pad between the legend handle and text
borderaxespad=None, # pad between the Axes and legend border
columnspacing=None, # spacing between columns
ncols=1, # number of columns
mode=None, # horizontal distribution of columns: None or "expand"
fancybox=None, # True: fancy box, False: rounded box, None: rcParam
shadow=None,
title=None, # legend title
title_fontsize=None, # legend title font size
framealpha=None, # set frame alpha
edgecolor=None, # frame patch edgecolor
facecolor=None, # frame patch facecolor
bbox_to_anchor=None, # bbox to which the legend will be anchored
bbox_transform=None, # transform for the bbox
frameon=None, # draw frame
handler_map=None,
title_fontproperties=None, # properties for the legend title
alignment="center", # control the alignment within the legend box
ncol=1, # synonym for ncols (backward compatibility)
draggable=False # whether the legend can be dragged with the mouse
):
"""
Parameters
----------
parent : `~matplotlib.axes.Axes` or `.Figure`
The artist that contains the legend.
handles : list of (`.Artist` or tuple of `.Artist`)
A list of Artists (lines, patches) to be added to the legend.
labels : list of str
A list of labels to show next to the artists. The length of handles
and labels should be the same. If they are not, they are truncated
to the length of the shorter list.
Other Parameters
----------------
%(_legend_kw_doc)s
Attributes
----------
legend_handles
List of `.Artist` objects added as legend entries.
.. versionadded:: 3.7
"""
# local import only to avoid circularity
from matplotlib.axes import Axes
from matplotlib.figure import FigureBase
super().__init__()
if prop is None:
self.prop = FontProperties(size=mpl._val_or_rc(fontsize, "legend.fontsize"))
else:
self.prop = FontProperties._from_any(prop)
if isinstance(prop, dict) and "size" not in prop:
self.prop.set_size(mpl.rcParams["legend.fontsize"])
self._fontsize = self.prop.get_size_in_points()
self.texts = []
self.legend_handles = []
self._legend_title_box = None
#: A dictionary with the extra handler mappings for this Legend
#: instance.
self._custom_handler_map = handler_map
self.numpoints = mpl._val_or_rc(numpoints, 'legend.numpoints')
self.markerscale = mpl._val_or_rc(markerscale, 'legend.markerscale')
self.scatterpoints = mpl._val_or_rc(scatterpoints, 'legend.scatterpoints')
self.borderpad = mpl._val_or_rc(borderpad, 'legend.borderpad')
self.labelspacing = mpl._val_or_rc(labelspacing, 'legend.labelspacing')
self.handlelength = mpl._val_or_rc(handlelength, 'legend.handlelength')
self.handleheight = mpl._val_or_rc(handleheight, 'legend.handleheight')
self.handletextpad = mpl._val_or_rc(handletextpad, 'legend.handletextpad')
self.borderaxespad = mpl._val_or_rc(borderaxespad, 'legend.borderaxespad')
self.columnspacing = mpl._val_or_rc(columnspacing, 'legend.columnspacing')
self.shadow = mpl._val_or_rc(shadow, 'legend.shadow')
if reverse:
labels = [*reversed(labels)]
handles = [*reversed(handles)]
handles = list(handles)
if len(handles) < 2:
ncols = 1
self._ncols = ncols if ncols != 1 else ncol
if self.numpoints <= 0:
raise ValueError("numpoints must be > 0; it was %d" % numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3. / 8., 4. / 8., 2.5 / 8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = self.scatterpoints // len(self._scatteryoffsets) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets,
reps)[:self.scatterpoints]
# _legend_box is a VPacker instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent, Axes):
self.isaxes = True
self.axes = parent
self.set_figure(parent.get_figure(root=False))
elif isinstance(parent, FigureBase):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError(
"Legend needs either Axes or FigureBase as parent"
)
self.parent = parent
self._mode = mode
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
# Figure out if self.shadow is valid
# If shadow was None, rcParams loads False
# So it shouldn't be None here
self._shadow_props = {'ox': 2, 'oy': -2} # default location offsets
if isinstance(self.shadow, dict):
self._shadow_props.update(self.shadow)
self.shadow = True
elif self.shadow in (0, 1, True, False):
self.shadow = bool(self.shadow)
else:
raise ValueError(
'Legend shadow must be a dict or bool, not '
f'{self.shadow!r} of type {type(self.shadow)}.'
)
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
facecolor = mpl._val_or_rc(facecolor, "legend.facecolor")
if facecolor == 'inherit':
facecolor = mpl.rcParams["axes.facecolor"]
edgecolor = mpl._val_or_rc(edgecolor, "legend.edgecolor")
if edgecolor == 'inherit':
edgecolor = mpl.rcParams["axes.edgecolor"]
fancybox = mpl._val_or_rc(fancybox, "legend.fancybox")
self.legendPatch = FancyBboxPatch(
xy=(0, 0), width=1, height=1,
facecolor=facecolor, edgecolor=edgecolor,
# If shadow is used, default to alpha=1 (#8943).
alpha=(framealpha if framealpha is not None
else 1 if shadow
else mpl.rcParams["legend.framealpha"]),
# The width and height of the legendPatch will be set (in draw())
# to the length that includes the padding. Thus we set pad=0 here.
boxstyle=("round,pad=0,rounding_size=0.2" if fancybox
else "square,pad=0"),
mutation_scale=self._fontsize,
snap=True,
visible=mpl._val_or_rc(frameon, "legend.frameon")
)
self._set_artist_props(self.legendPatch)
_api.check_in_list(["center", "left", "right"], alignment=alignment)
self._alignment = alignment
# init with null renderer
self._init_legend_box(handles, labels, markerfirst)
# Set legend location
self.set_loc(loc)
# figure out title font properties:
if title_fontsize is not None and title_fontproperties is not None:
raise ValueError(
"title_fontsize and title_fontproperties can't be specified "
"at the same time. Only use one of them. ")
title_prop_fp = FontProperties._from_any(title_fontproperties)
if isinstance(title_fontproperties, dict):
if "size" not in title_fontproperties:
title_fontsize = mpl.rcParams["legend.title_fontsize"]
title_prop_fp.set_size(title_fontsize)
elif title_fontsize is not None:
title_prop_fp.set_size(title_fontsize)
elif not isinstance(title_fontproperties, FontProperties):
title_fontsize = mpl.rcParams["legend.title_fontsize"]
title_prop_fp.set_size(title_fontsize)
self.set_title(title, prop=title_prop_fp)
self._draggable = None
self.set_draggable(state=draggable)
# set the text color
color_getters = { # getter function depends on line or patch
'linecolor': ['get_markerfacecolor',
'get_facecolor',
'get_markeredgecolor',
'get_edgecolor',
'get_color'],
'markerfacecolor': ['get_markerfacecolor', 'get_facecolor'],
'mfc': ['get_markerfacecolor', 'get_facecolor'],
'markeredgecolor': ['get_markeredgecolor', 'get_edgecolor'],
'mec': ['get_markeredgecolor', 'get_edgecolor'],
}
labelcolor = mpl._val_or_rc(mpl._val_or_rc(labelcolor, 'legend.labelcolor'),
'text.color')
if isinstance(labelcolor, str) and labelcolor in color_getters:
getter_names = color_getters[labelcolor]
for handle, text in zip(self.legend_handles, self.texts):
try:
if handle.get_array() is not None:
continue
except AttributeError:
pass
for getter_name in getter_names:
try:
color = getattr(handle, getter_name)()
except AttributeError:
continue
if isinstance(color, np.ndarray):
if color.size == 0:
continue
elif (color.shape[0] == 1 or np.isclose(color, color[0]).all()):
text.set_color(color[0])
else:
pass
elif cbook._str_lower_equal(color, 'none'):
continue
elif mpl.colors.to_rgba(color)[3] == 0:
continue
else:
text.set_color(color)
break
elif cbook._str_equal(labelcolor, 'none'):
for text in self.texts:
text.set_color(labelcolor)
elif np.iterable(labelcolor):
for text, color in zip(self.texts,
itertools.cycle(
colors.to_rgba_array(labelcolor))):
text.set_color(color)
else:
raise ValueError(f"Invalid labelcolor: {labelcolor!r}")
def _set_artist_props(self, a):
"""
Set the boilerplate props for artists added to Axes.
"""
a.set_figure(self.get_figure(root=False))
if self.isaxes:
a.axes = self.axes
a.set_transform(self.get_transform())
@_docstring.interpd
def set_loc(self, loc=None):
"""
Set the location of the legend.
.. versionadded:: 3.8
Parameters
----------
%(_legend_kw_set_loc_doc)s
"""
loc0 = loc
self._loc_used_default = loc is None
if loc is None:
loc = mpl.rcParams["legend.loc"]
if not self.isaxes and loc in [0, 'best']:
loc = 'upper right'
type_err_message = ("loc must be string, coordinate tuple, or"
f" an integer 0-10, not {loc!r}")
# handle outside legends:
self._outside_loc = None
if isinstance(loc, str):
if loc.split()[0] == 'outside':
# strip outside:
loc = loc.split('outside ')[1]
# strip "center" at the beginning
self._outside_loc = loc.replace('center ', '')
# strip first
self._outside_loc = self._outside_loc.split()[0]
locs = loc.split()
if len(locs) > 1 and locs[0] in ('right', 'left'):
# locs doesn't accept "left upper", etc, so swap
if locs[0] != 'center':
locs = locs[::-1]
loc = locs[0] + ' ' + locs[1]
# check that loc is in acceptable strings
loc = _api.check_getitem(self.codes, loc=loc)
elif np.iterable(loc):
# coerce iterable into tuple
loc = tuple(loc)
# validate the tuple represents Real coordinates
if len(loc) != 2 or not all(isinstance(e, numbers.Real) for e in loc):
raise ValueError(type_err_message)
elif isinstance(loc, int):
# validate the integer represents a string numeric value
if loc < 0 or loc > 10:
raise ValueError(type_err_message)
else:
# all other cases are invalid values of loc
raise ValueError(type_err_message)
if self.isaxes and self._outside_loc:
raise ValueError(
f"'outside' option for loc='{loc0}' keyword argument only "
"works for figure legends")
if not self.isaxes and loc == 0:
raise ValueError(
"Automatic legend placement (loc='best') not implemented for "
"figure legend")
tmp = self._loc_used_default
self._set_loc(loc)
self._loc_used_default = tmp # ignore changes done by _set_loc
def _set_loc(self, loc):
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
self._loc_used_default = False
self._loc_real = loc
self.stale = True
self._legend_box.set_offset(self._findoffset)
def set_ncols(self, ncols):
"""Set the number of columns."""
self._ncols = ncols
def _get_loc(self):
return self._loc_real
_loc = property(_get_loc, _set_loc)
def _findoffset(self, width, height, xdescent, ydescent, renderer):
"""Helper function to locate the legend."""
if self._loc == 0: # "best".
x, y = self._find_best_position(width, height, renderer)
elif self._loc in Legend.codes.values(): # Fixed location.
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox,
self.get_bbox_to_anchor(),
renderer)
else: # Axes or figure coordinates.
fx, fy = self._loc
bbox = self.get_bbox_to_anchor()
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
return x + xdescent, y + ydescent
@allow_rasterization
def draw(self, renderer):
# docstring inherited
if not self.get_visible():
return
renderer.open_group('legend', gid=self.get_gid())
fontsize = renderer.points_to_pixels(self._fontsize)
# if mode == fill, set the width of the legend_box to the
# width of the parent (minus pads)
if self._mode in ["expand"]:
pad = 2 * (self.borderaxespad + self.borderpad) * fontsize
self._legend_box.set_width(self.get_bbox_to_anchor().width - pad)
# update the location and size of the legend. This needs to
# be done in any case to clip the figure right.
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.bounds)
self.legendPatch.set_mutation_scale(fontsize)
# self.shadow is validated in __init__
# So by here it is a bool and self._shadow_props contains any configs
if self.shadow:
Shadow(self.legendPatch, **self._shadow_props).draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
self.stale = False
# _default_handler_map defines the default mapping between plot
# elements and the legend handlers.
_default_handler_map = {
StemContainer: legend_handler.HandlerStem(),
ErrorbarContainer: legend_handler.HandlerErrorbar(),
Line2D: legend_handler.HandlerLine2D(),
Patch: legend_handler.HandlerPatch(),
StepPatch: legend_handler.HandlerStepPatch(),
LineCollection: legend_handler.HandlerLineCollection(),
RegularPolyCollection: legend_handler.HandlerRegularPolyCollection(),
CircleCollection: legend_handler.HandlerCircleCollection(),
BarContainer: legend_handler.HandlerPatch(
update_func=legend_handler.update_from_first_child),
tuple: legend_handler.HandlerTuple(),
PatchCollection: legend_handler.HandlerPolyCollection(),
PathCollection: legend_handler.HandlerPathCollection(),
PolyCollection: legend_handler.HandlerPolyCollection()
}
# (get|set|update)_default_handler_maps are public interfaces to
# modify the default handler map.
@classmethod
def get_default_handler_map(cls):
"""Return the global default handler map, shared by all legends."""
return cls._default_handler_map
@classmethod
def set_default_handler_map(cls, handler_map):
"""Set the global default handler map, shared by all legends."""
cls._default_handler_map = handler_map
@classmethod
def update_default_handler_map(cls, handler_map):
"""Update the global default handler map, shared by all legends."""
cls._default_handler_map.update(handler_map)
def get_legend_handler_map(self):
"""Return this legend instance's handler map."""
default_handler_map = self.get_default_handler_map()
return ({**default_handler_map, **self._custom_handler_map}
if self._custom_handler_map else default_handler_map)
@staticmethod
def get_legend_handler(legend_handler_map, orig_handle):
"""
Return a legend handler from *legend_handler_map* that
corresponds to *orig_handler*.
*legend_handler_map* should be a dictionary object (that is
returned by the get_legend_handler_map method).
It first checks if the *orig_handle* itself is a key in the
*legend_handler_map* and return the associated value.
Otherwise, it checks for each of the classes in its
method-resolution-order. If no matching key is found, it
returns ``None``.
"""
try:
return legend_handler_map[orig_handle]
except (TypeError, KeyError): # TypeError if unhashable.
pass
for handle_type in type(orig_handle).mro():
try:
return legend_handler_map[handle_type]
except KeyError:
pass
return None
    def _init_legend_box(self, handles, labels, markerfirst=True):
        """
        Initialize the legend_box. The legend_box is an instance of
        the OffsetBox, which is packed with legend handles and
        texts. Once packed, their location is calculated during the
        drawing time.

        Parameters
        ----------
        handles : list of `.Artist`
            The artists to represent in the legend, in order.
        labels : list of str
            The label text for each handle, in matching order.
        markerfirst : bool, default: True
            If True, the legend handle is placed to the left of the label;
            if False, the label comes first.
        """
        fontsize = self._fontsize
        # legend_box is a HPacker, horizontally packed with columns.
        # Each column is a VPacker, vertically packed with legend items.
        # Each legend item is a HPacker packed with:
        # - handlebox: a DrawingArea which contains the legend handle.
        # - labelbox: a TextArea which contains the legend text.
        text_list = []  # the list of text instances
        handle_list = []  # the list of handle instances
        handles_and_labels = []
        # The approximate height and descent of text. These values are
        # only used for plotting the legend handle.
        descent = 0.35 * fontsize * (self.handleheight - 0.7)  # heuristic.
        height = fontsize * self.handleheight - descent
        # each handle needs to be drawn inside a box of (x, y, w, h) =
        # (0, -descent, width, height). And their coordinates should
        # be given in the display coordinates.
        # The transformation of each handle will be automatically set
        # to self.get_transform(). If the artist does not use its
        # default transform (e.g., Collections), you need to
        # manually set their transform to the self.get_transform().
        legend_handler_map = self.get_legend_handler_map()
        for orig_handle, label in zip(handles, labels):
            handler = self.get_legend_handler(legend_handler_map, orig_handle)
            if handler is None:
                _api.warn_external(
                    "Legend does not support handles for "
                    f"{type(orig_handle).__name__} "
                    "instances.\nA proxy artist may be used "
                    "instead.\nSee: https://matplotlib.org/"
                    "stable/users/explain/axes/legend_guide.html"
                    "#controlling-the-legend-entries")
                # No handle for this artist, so we just defer to None.
                handle_list.append(None)
            else:
                textbox = TextArea(label, multilinebaseline=True,
                                   textprops=dict(
                                       verticalalignment='baseline',
                                       horizontalalignment='left',
                                       fontproperties=self.prop))
                handlebox = DrawingArea(width=self.handlelength * fontsize,
                                        height=height,
                                        xdescent=0., ydescent=descent)
                text_list.append(textbox._text)
                # Create the artist for the legend which represents the
                # original artist/handle.
                handle_list.append(handler.legend_artist(self, orig_handle,
                                                         fontsize, handlebox))
                handles_and_labels.append((handlebox, textbox))
        columnbox = []
        # array_split splits n handles_and_labels into ncols columns, with the
        # first n%ncols columns having an extra entry. filter(len, ...)
        # handles the case where n < ncols: the last ncols-n columns are empty
        # and get filtered out.
        for handles_and_labels_column in filter(
                len, np.array_split(handles_and_labels, self._ncols)):
            # pack handlebox and labelbox into itembox
            itemboxes = [HPacker(pad=0,
                                 sep=self.handletextpad * fontsize,
                                 children=[h, t] if markerfirst else [t, h],
                                 align="baseline")
                         for h, t in handles_and_labels_column]
            # pack columnbox
            alignment = "baseline" if markerfirst else "right"
            columnbox.append(VPacker(pad=0,
                                     sep=self.labelspacing * fontsize,
                                     align=alignment,
                                     children=itemboxes))
        mode = "expand" if self._mode == "expand" else "fixed"
        sep = self.columnspacing * fontsize
        self._legend_handle_box = HPacker(pad=0,
                                          sep=sep, align="baseline",
                                          mode=mode,
                                          children=columnbox)
        # The title box is created empty here; set_title fills it in and
        # toggles its visibility.
        self._legend_title_box = TextArea("")
        self._legend_box = VPacker(pad=self.borderpad * fontsize,
                                   sep=self.labelspacing * fontsize,
                                   align=self._alignment,
                                   children=[self._legend_title_box,
                                             self._legend_handle_box])
        self._legend_box.set_figure(self.get_figure(root=False))
        self._legend_box.axes = self.axes
        self.texts = text_list
        self.legend_handles = handle_list
    def _auto_legend_data(self, renderer):
        """
        Return display coordinates for hit testing for "best" positioning.

        Returns
        -------
        bboxes
            List of bounding boxes of all patches.
        lines
            List of `.Path` corresponding to each line.
        offsets
            List of (x, y) offsets of all collection.
        """
        assert self.isaxes  # always holds, as this is only called internally
        bboxes = []
        lines = []
        offsets = []
        for artist in self.parent._children:
            if isinstance(artist, Line2D):
                lines.append(
                    artist.get_transform().transform_path(artist.get_path()))
            elif isinstance(artist, Rectangle):
                # Rectangles are resolved via their bbox, so this branch must
                # stay before the more general Patch branch below.
                bboxes.append(
                    artist.get_bbox().transformed(artist.get_data_transform()))
            elif isinstance(artist, Patch):
                lines.append(
                    artist.get_transform().transform_path(artist.get_path()))
            elif isinstance(artist, PolyCollection):
                # Checked before the generic Collection branch: polygons are
                # hit-tested by their outline paths rather than by offsets.
                lines.extend(artist.get_transform().transform_path(path)
                             for path in artist.get_paths())
            elif isinstance(artist, Collection):
                transform, transOffset, hoffsets, _ = artist._prepare_points()
                if len(hoffsets):
                    offsets.extend(transOffset.transform(hoffsets))
            elif isinstance(artist, Text):
                bboxes.append(artist.get_window_extent(renderer))
        return bboxes, lines, offsets
def get_children(self):
# docstring inherited
return [self._legend_box, self.get_frame()]
def get_frame(self):
"""Return the `~.patches.Rectangle` used to frame the legend."""
return self.legendPatch
def get_lines(self):
r"""Return the list of `~.lines.Line2D`\s in the legend."""
return [h for h in self.legend_handles if isinstance(h, Line2D)]
def get_patches(self):
r"""Return the list of `~.patches.Patch`\s in the legend."""
return silent_list('Patch',
[h for h in self.legend_handles
if isinstance(h, Patch)])
def get_texts(self):
r"""Return the list of `~.text.Text`\s in the legend."""
return silent_list('Text', self.texts)
def set_alignment(self, alignment):
"""
Set the alignment of the legend title and the box of entries.
The entries are aligned as a single block, so that markers always
lined up.
Parameters
----------
alignment : {'center', 'left', 'right'}.
"""
_api.check_in_list(["center", "left", "right"], alignment=alignment)
self._alignment = alignment
self._legend_box.align = alignment
def get_alignment(self):
"""Get the alignment value of the legend box"""
return self._legend_box.align
def set_title(self, title, prop=None):
"""
Set legend title and title style.
Parameters
----------
title : str
The legend title.
prop : `.font_manager.FontProperties` or `str` or `pathlib.Path`
The font properties of the legend title.
If a `str`, it is interpreted as a fontconfig pattern parsed by
`.FontProperties`. If a `pathlib.Path`, it is interpreted as the
absolute path to a font file.
"""
self._legend_title_box._text.set_text(title)
if title:
self._legend_title_box._text.set_visible(True)
self._legend_title_box.set_visible(True)
else:
self._legend_title_box._text.set_visible(False)
self._legend_title_box.set_visible(False)
if prop is not None:
self._legend_title_box._text.set_fontproperties(prop)
self.stale = True
def get_title(self):
"""Return the `.Text` instance for the legend title."""
return self._legend_title_box._text
def get_window_extent(self, renderer=None):
# docstring inherited
if renderer is None:
renderer = self.get_figure(root=True)._get_renderer()
return self._legend_box.get_window_extent(renderer=renderer)
def get_tightbbox(self, renderer=None):
# docstring inherited
return self._legend_box.get_window_extent(renderer)
def get_frame_on(self):
"""Get whether the legend box patch is drawn."""
return self.legendPatch.get_visible()
def set_frame_on(self, b):
"""
Set whether the legend box patch is drawn.
Parameters
----------
b : bool
"""
self.legendPatch.set_visible(b)
self.stale = True
draw_frame = set_frame_on # Backcompat alias.
def get_bbox_to_anchor(self):
"""Return the bbox that the legend will be anchored to."""
if self._bbox_to_anchor is None:
return self.parent.bbox
else:
return self._bbox_to_anchor
def set_bbox_to_anchor(self, bbox, transform=None):
"""
Set the bbox that the legend will be anchored to.
Parameters
----------
bbox : `~matplotlib.transforms.BboxBase` or tuple
The bounding box can be specified in the following ways:
- A `.BboxBase` instance
- A tuple of ``(left, bottom, width, height)`` in the given
transform (normalized axes coordinate if None)
- A tuple of ``(left, bottom)`` where the width and height will be
assumed to be zero.
- *None*, to remove the bbox anchoring, and use the parent bbox.
transform : `~matplotlib.transforms.Transform`, optional
A transform to apply to the bounding box. If not specified, this
will use a transform to the bounding box of the parent.
"""
if bbox is None:
self._bbox_to_anchor = None
return
elif isinstance(bbox, BboxBase):
self._bbox_to_anchor = bbox
else:
try:
l = len(bbox)
except TypeError as err:
raise ValueError(f"Invalid bbox: {bbox}") from err
if l == 2:
bbox = [bbox[0], bbox[1], 0, 0]
self._bbox_to_anchor = Bbox.from_bounds(*bbox)
if transform is None:
transform = BboxTransformTo(self.parent.bbox)
self._bbox_to_anchor = TransformedBbox(self._bbox_to_anchor,
transform)
self.stale = True
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x, y) coordinate of the bbox.
Parameters
----------
loc : int
A location code in range(1, 11). This corresponds to the possible
values for ``self._loc``, excluding "best".
bbox : `~matplotlib.transforms.Bbox`
bbox to be placed, in display coordinates.
parentbbox : `~matplotlib.transforms.Bbox`
A parent box which will contain the bbox, in display coordinates.
"""
pad = self.borderaxespad * renderer.points_to_pixels(self._fontsize)
return offsetbox._get_anchored_bbox(
loc, bbox, parentbbox,
pad, pad)
def _find_best_position(self, width, height, renderer):
"""Determine the best location to place the legend."""
assert self.isaxes # always holds, as this is only called internally
start_time = time.perf_counter()
bboxes, lines, offsets = self._auto_legend_data(renderer)
bbox = Bbox.from_bounds(0, 0, width, height)
candidates = []
for idx in range(1, len(self.codes)):
l, b = self._get_anchored_bbox(idx, bbox,
self.get_bbox_to_anchor(),
renderer)
legendBox = Bbox.from_bounds(l, b, width, height)
# XXX TODO: If markers are present, it would be good to take them
# into account when checking vertex overlaps in the next line.
badness = (sum(legendBox.count_contains(line.vertices)
for line in lines)
+ legendBox.count_contains(offsets)
+ legendBox.count_overlaps(bboxes)
+ sum(line.intersects_bbox(legendBox, filled=False)
for line in lines))
# Include the index to favor lower codes in case of a tie.
candidates.append((badness, idx, (l, b)))
if badness == 0:
break
_, _, (l, b) = min(candidates)
if self._loc_used_default and time.perf_counter() - start_time > 1:
_api.warn_external(
'Creating legend with loc="best" can be slow with large '
'amounts of data.')
return l, b
def contains(self, mouseevent):
return self.legendPatch.contains(mouseevent)
def set_draggable(self, state, use_blit=False, update='loc'):
"""
Enable or disable mouse dragging support of the legend.
Parameters
----------
state : bool
Whether mouse dragging is enabled.
use_blit : bool, optional
Use blitting for faster image composition. For details see
:ref:`func-animation`.
update : {'loc', 'bbox'}, optional
The legend parameter to be changed when dragged:
- 'loc': update the *loc* parameter of the legend
- 'bbox': update the *bbox_to_anchor* parameter of the legend
Returns
-------
`.DraggableLegend` or *None*
If *state* is ``True`` this returns the `.DraggableLegend` helper
instance. Otherwise this returns *None*.
"""
if state:
if self._draggable is None:
self._draggable = DraggableLegend(self,
use_blit,
update=update)
else:
if self._draggable is not None:
self._draggable.disconnect()
self._draggable = None
return self._draggable
def get_draggable(self):
"""Return ``True`` if the legend is draggable, ``False`` otherwise."""
return self._draggable is not None
# Helper functions to parse legend arguments for both `figure.legend` and
# `axes.legend`:
def _get_legend_handles(axs, legend_handler_map=None):
    """
    Yield artists that can be used as handles in a legend.

    Parameters
    ----------
    axs : list of `.Axes`
        The Axes (and any parasite Axes attached to them) whose children
        and containers are scanned for legend-capable artists.
    legend_handler_map : dict, optional
        Additional handler entries; these override the global defaults
        returned by `.Legend.get_default_handler_map`.
    """
    handles_original = []
    for ax in axs:
        handles_original += [
            *(a for a in ax._children
              if isinstance(a, (Line2D, Patch, Collection, Text))),
            *ax.containers]
        # support parasite Axes:
        if hasattr(ax, 'parasites'):
            for axx in ax.parasites:
                handles_original += [
                    *(a for a in axx._children
                      if isinstance(a, (Line2D, Patch, Collection, Text))),
                    *axx.containers]
    handler_map = {**Legend.get_default_handler_map(),
                   **(legend_handler_map or {})}
    has_handler = Legend.get_legend_handler
    for handle in handles_original:
        label = handle.get_label()
        # Look the handler up once (get_legend_handler is a pure lookup);
        # the original code repeated the lookup in the elif branch and ended
        # the loop body with a redundant `continue`.
        handler_found = has_handler(handler_map, handle)
        if label != '_nolegend_' and handler_found:
            yield handle
        elif label and not label.startswith('_') and not handler_found:
            _api.warn_external(
                "Legend does not support handles for "
                f"{type(handle).__name__} "
                "instances.\nSee: https://matplotlib.org/stable/"
                "tutorials/intermediate/legend_guide.html"
                "#implementing-a-custom-legend-handler")
def _get_legend_handles_labels(axs, legend_handler_map=None):
    """Return handles and labels for legend."""
    selected = []
    for handle in _get_legend_handles(axs, legend_handler_map):
        label = handle.get_label()
        # Skip unlabeled artists and those with "private" underscore labels.
        if label and not label.startswith('_'):
            selected.append((handle, label))
    handles = [handle for handle, _ in selected]
    labels = [label for _, label in selected]
    return handles, labels
def _parse_legend_args(axs, *args, handles=None, labels=None, **kwargs):
    """
    Get the handles and labels from the calls to either ``figure.legend``
    or ``axes.legend``.

    The parser is a bit involved because we support::

        legend()
        legend(labels)
        legend(handles, labels)
        legend(labels=labels)
        legend(handles=handles)
        legend(handles=handles, labels=labels)

    The behavior for a mixture of positional and keyword handles and labels
    is undefined and raises an error.

    Parameters
    ----------
    axs : list of `.Axes`
        If handles are not given explicitly, the artists in these Axes are
        used as handles.
    *args : tuple
        Positional parameters passed to ``legend()``.
    handles
        The value of the keyword argument ``legend(handles=...)``, or *None*
        if that keyword argument was not used.
    labels
        The value of the keyword argument ``legend(labels=...)``, or *None*
        if that keyword argument was not used.
    **kwargs
        All other keyword arguments passed to ``legend()``.

    Returns
    -------
    handles : list of (`.Artist` or tuple of `.Artist`)
        The legend handles.
    labels : list of str
        The legend labels.
    kwargs : dict
        *kwargs* with keywords handles and labels removed.
    """
    # NOTE: the original body created a module logger here but never used it
    # (all warnings go through _api.warn_external); the unused local has been
    # removed.
    handlers = kwargs.get('handler_map')
    if (handles is not None or labels is not None) and args:
        raise TypeError("When passing handles and labels, they must both be "
                        "passed positionally or both as keywords.")
    if (hasattr(handles, "__len__") and
            hasattr(labels, "__len__") and
            len(handles) != len(labels)):
        _api.warn_external(f"Mismatched number of handles and labels: "
                           f"len(handles) = {len(handles)} "
                           f"len(labels) = {len(labels)}")
    # if got both handles and labels as kwargs, make same length
    if handles and labels:
        handles, labels = zip(*zip(handles, labels))
    elif handles is not None and labels is None:
        labels = [handle.get_label() for handle in handles]
    elif labels is not None and handles is None:
        # Get as many handles as there are labels.
        handles = [handle for handle, label
                   in zip(_get_legend_handles(axs, handlers), labels)]
    elif len(args) == 0:  # 0 args: automatically detect labels and handles.
        handles, labels = _get_legend_handles_labels(axs, handlers)
        if not handles:
            _api.warn_external(
                "No artists with labels found to put in legend. Note that "
                "artists whose label start with an underscore are ignored "
                "when legend() is called with no argument.")
    elif len(args) == 1:  # 1 arg: user defined labels, automatic handle detection.
        labels, = args
        if any(isinstance(label, Artist) for label in labels):
            raise TypeError("A single argument passed to legend() must be a "
                            "list of labels, but found an Artist in there.")
        # Get as many handles as there are labels.
        handles = [handle for handle, label
                   in zip(_get_legend_handles(axs, handlers), labels)]
    elif len(args) == 2:  # 2 args: user defined handles and labels.
        handles, labels = args[:2]
    else:
        raise _api.nargs_error('legend', '0-2', len(args))
    return handles, labels, kwargs
| Legend |
python | ansible__ansible | test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/test_subdir/my_subdir_tests.py | {
"start": 100,
"end": 213
} | class ____(object):
def tests(self):
return {
'subdir_test': subdir_test
}
| TestModule |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 18715,
"end": 20970
} | class ____(Interface):
"""An object representing a Pyramid authentication policy.
.. deprecated:: 2.0
Authentication policies have been removed in favor of security
policies. See :ref:`upgrading_auth_20` for more information.
"""
def authenticated_userid(request):
"""Return the authenticated :term:`userid` or ``None`` if
no authenticated userid can be found. This method of the
policy should ensure that a record exists in whatever
persistent store is used related to the user (the user
should not have been deleted); if a record associated with
the current id does not exist in a persistent store, it
should return ``None``.
"""
def unauthenticated_userid(request):
"""Return the *unauthenticated* userid. This method
performs the same duty as ``authenticated_userid`` but is
permitted to return the userid based only on data present
in the request; it needn't (and shouldn't) check any
persistent store to ensure that the user record related to
the request userid exists.
This method is intended primarily a helper to assist the
``authenticated_userid`` method in pulling credentials out
of the request data, abstracting away the specific headers,
query strings, etc that are used to authenticate the request.
"""
def effective_principals(request):
"""Return a sequence representing the effective principals
typically including the :term:`userid` and any groups belonged
to by the current user, always including 'system' groups such
as ``pyramid.authorization.Everyone`` and
``pyramid.authorization.Authenticated``.
"""
def remember(request, userid, **kw):
"""Return a set of headers suitable for 'remembering' the
:term:`userid` named ``userid`` when set in a response. An
individual authentication policy and its consumers can
decide on the composition and meaning of ``**kw``.
"""
def forget(request):
"""Return a set of headers suitable for 'forgetting' the
current user on subsequent requests.
"""
| IAuthenticationPolicy |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/dagster_type.py | {
"start": 13563,
"end": 13955
} | class ____(BuiltinScalarDagsterType):
def __init__(self):
super(_String, self).__init__(
name="String",
loader=BuiltinSchemas.STRING_INPUT,
type_check_fn=self.type_check_fn,
typing_type=str,
)
def type_check_scalar_value(self, value: object) -> TypeCheck:
return _fail_if_not_of_type(value, str, "string")
| _String |
python | kamyu104__LeetCode-Solutions | Python/modify-graph-edge-weights.py | {
"start": 227,
"end": 1621
} | class ____(object):
def modifiedGraphEdges(self, n, edges, source, destination, target):
"""
:type n: int
:type edges: List[List[int]]
:type source: int
:type destination: int
:type target: int
:rtype: List[List[int]]
"""
def dijkstra(start, x):
best = [target+1]*len(adj)
best[start] = 0
min_heap = [(0, start)]
while min_heap:
curr, u = heapq.heappop(min_heap)
if curr > best[u]:
continue
for v, w in adj[u]:
if w == -1:
w = x
if curr+w >= best[v]:
continue
best[v] = curr+w
heapq.heappush(min_heap, (best[v], v))
return best
adj = [[] for _ in xrange(n)]
for u, v, w in edges:
adj[u].append((v, w))
adj[v].append((u, w))
left = dijkstra(source, 1)
if not (left[destination] <= target):
return []
right= dijkstra(destination, target+1)
if not (right[source] >= target):
return []
for e in edges:
if e[2] == -1:
e[2] = max(target-left[e[0]]-right[e[1]], target-left[e[1]]-right[e[0]], 1)
return edges
| Solution |
python | huggingface__transformers | src/transformers/integrations/tensor_parallel.py | {
"start": 24598,
"end": 26728
} | class ____(TensorParallelLayer):
"""
This class is used to replicate computation in a TP layer (used in SP regions when we don't use sequence parallelism for example)
"""
def __init__(self, use_dtensor=True, use_local_output=True, **kwargs):
super().__init__(**kwargs)
self.input_layouts = (Replicate(),)
self.output_layouts = (Replicate(),)
self.desired_input_layouts = (Replicate(),)
self.use_local_output = use_local_output
self.use_dtensor = use_dtensor
@staticmethod
def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh):
# TODO: figure out dynamo support for instance method and switch this to instance method
# annotate module input placements/sharding with input_layouts
input_tensor = inputs[0]
if not isinstance(input_tensor, DTensor):
input_tensor = DTensor.from_local(input_tensor, device_mesh, input_layouts, run_check=False)
return input_tensor
@staticmethod
def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh):
return outputs.to_local() if use_local_output and isinstance(outputs, DTensor) else outputs
def shard_tensor(
self,
param,
param_type=None,
param_casting_dtype=None,
to_contiguous=None,
rank=None,
device_mesh=None,
tensor_idx=None,
):
parameter = param[...].to(param_casting_dtype)
shard = [Replicate()]
self.shard = shard
return parameter, shard
def partition_tensor(self, param, empty_param, param_type, param_casting_dtype, to_contiguous, rank, device_mesh):
parameter, shard = self.shard_tensor(
param,
param_type=param_type,
param_casting_dtype=param_casting_dtype,
to_contiguous=to_contiguous,
rank=rank,
device_mesh=device_mesh,
)
if self.use_dtensor:
parameter = DTensor.from_local(parameter, device_mesh, shard, run_check=False)
return parameter
| ReplicateParallel |
python | pandas-dev__pandas | asv_bench/benchmarks/multiindex_object.py | {
"start": 4324,
"end": 5030
} | class ____:
def setup(self):
n = 1182720
low, high = -4096, 4096
arrs = [
np.repeat(np.random.randint(low, high, (n // k)), k)
for k in [11, 7, 5, 3, 1]
]
self.mi_int = MultiIndex.from_arrays(arrs)[np.random.permutation(n)]
a = np.repeat(np.arange(100), 1000)
b = np.tile(np.arange(1000), 100)
self.mi = MultiIndex.from_arrays([a, b])
self.mi = self.mi.take(np.random.permutation(np.arange(100000)))
def time_sortlevel_int64(self):
self.mi_int.sortlevel()
def time_sortlevel_zero(self):
self.mi.sortlevel(0)
def time_sortlevel_one(self):
self.mi.sortlevel(1)
| Sortlevel |
python | pallets__werkzeug | src/werkzeug/wsgi.py | {
"start": 7606,
"end": 10226
} | class ____:
"""The WSGI specification requires that all middlewares and gateways
respect the `close` callback of the iterable returned by the application.
Because it is useful to add another close action to a returned iterable
and adding a custom iterable is a boring task this class can be used for
that::
return ClosingIterator(app(environ, start_response), [cleanup_session,
cleanup_locals])
If there is just one close function it can be passed instead of the list.
A closing iterator is not needed if the application uses response objects
and finishes the processing if the response is started::
try:
return response(environ, start_response)
finally:
cleanup_session()
cleanup_locals()
"""
def __init__(
self,
iterable: t.Iterable[bytes],
callbacks: None
| (t.Callable[[], None] | t.Iterable[t.Callable[[], None]]) = None,
) -> None:
iterator = iter(iterable)
self._next = t.cast(t.Callable[[], bytes], partial(next, iterator))
if callbacks is None:
callbacks = []
elif callable(callbacks):
callbacks = [callbacks]
else:
callbacks = list(callbacks)
iterable_close = getattr(iterable, "close", None)
if iterable_close:
callbacks.insert(0, iterable_close)
self._callbacks = callbacks
def __iter__(self) -> ClosingIterator:
return self
def __next__(self) -> bytes:
return self._next()
def close(self) -> None:
for callback in self._callbacks:
callback()
def wrap_file(
environ: WSGIEnvironment, file: t.IO[bytes], buffer_size: int = 8192
) -> t.Iterable[bytes]:
"""Wraps a file. This uses the WSGI server's file wrapper if available
or otherwise the generic :class:`FileWrapper`.
.. versionadded:: 0.5
If the file wrapper from the WSGI server is used it's important to not
iterate over it from inside the application but to pass it through
unchanged. If you want to pass out a file wrapper inside a response
object you have to set :attr:`Response.direct_passthrough` to `True`.
More information about file wrappers are available in :pep:`333`.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
return environ.get("wsgi.file_wrapper", FileWrapper)( # type: ignore
file, buffer_size
)
| ClosingIterator |
python | keon__algorithms | algorithms/tree/red_black_tree/red_black_tree.py | {
"start": 44,
"end": 265
} | class ____:
def __init__(self, val, is_red, parent=None, left=None, right=None):
self.val = val
self.parent = parent
self.left = left
self.right = right
self.color = is_red
| RBNode |
python | django__django | tests/test_utils/tests.py | {
"start": 88404,
"end": 89888
} | class ____(SimpleTestCase):
@mock.patch.object(DoNothingDecorator, "disable")
def test_exception_in_setup(self, mock_disable):
"""An exception is setUp() is reraised after disable() is called."""
class ExceptionInSetUp(unittest.TestCase):
def setUp(self):
raise NotImplementedError("reraised")
decorator = DoNothingDecorator()
decorated_test_class = decorator.__call__(ExceptionInSetUp)()
self.assertFalse(mock_disable.called)
with self.assertRaisesMessage(NotImplementedError, "reraised"):
decorated_test_class.setUp()
decorated_test_class.doCleanups()
self.assertTrue(mock_disable.called)
def test_cleanups_run_after_tearDown(self):
calls = []
class SaveCallsDecorator(TestContextDecorator):
def enable(self):
calls.append("enable")
def disable(self):
calls.append("disable")
class AddCleanupInSetUp(unittest.TestCase):
def setUp(self):
calls.append("setUp")
self.addCleanup(lambda: calls.append("cleanup"))
decorator = SaveCallsDecorator()
decorated_test_class = decorator.__call__(AddCleanupInSetUp)()
decorated_test_class.setUp()
decorated_test_class.tearDown()
decorated_test_class.doCleanups()
self.assertEqual(calls, ["enable", "setUp", "cleanup", "disable"])
| TestContextDecoratorTests |
python | apache__airflow | providers/openlineage/tests/system/openlineage/operator.py | {
"start": 5455,
"end": 9586
} | class ____(BaseOperator):
"""
This operator is added for system testing purposes.
It compares expected event templates set on initialization with ones emitted by OpenLineage integration
and stored in Variables by VariableTransport.
Note:
If `clear_variables` is True, only the Airflow Variables listed in `event_templates`
(or derived from `file_path`) will be deleted - those that are supposed to be checked by the Operator.
We won't remove all Airflow Variables to avoid interfering with other instances of this Operator
running in parallel on different DAGs. Running continuous system tests without clearing Variables
may lead to leftover or growing Variables size. We recommend implementing a process to remove all
Airflow Variables after all system tests have run to ensure a clean environment for each test run.
:param event_templates: dictionary where key is the key used by VariableTransport in format of <DAG_ID>.<TASK_ID>.event.<EVENT_TYPE>, and value is event template (fragment) that need to be in received events.
:param file_path: alternatively, file_path pointing to file with event templates will be used
:param env: jinja environment used to render event templates
:param allow_duplicate_events: if set to True, allows multiple events for the same key
:param clear_variables: if set to True, clears only variables to be checked after all events are checked or if any check fails
:raises: ValueError if the received events do not match with expected ones.
"""
def __init__(
self,
event_templates: dict[str, dict] | None = None,
file_path: str | None = None,
env: Environment = setup_jinja(),
allow_duplicate_events: bool = False,
clear_variables: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.event_templates = event_templates
self.file_path = file_path
self.env = env
self.multiple_events = allow_duplicate_events
self.delete = clear_variables
if self.event_templates and self.file_path:
raise ValueError("Can't pass both event_templates and file_path")
def execute(self, context: Context) -> None:
time.sleep(10) # Wait for all variables to update properly
if self.file_path is not None:
self.event_templates = {}
self.log.info("Reading OpenLineage event templates from file `%s`", self.file_path)
with open(self.file_path) as f:
events = json.load(f)
for event in events:
# Just a single event per job and event type is loaded as this is the most common scenario
key = event["job"]["name"] + ".event." + event["eventType"].lower()
self.event_templates[key] = event
try:
for key, template in self.event_templates.items(): # type: ignore[union-attr]
log.info("Checking key: `%s`", key)
actual_events = Variable.get(key=key, deserialize_json=True)
self.log.info(
"Events: len=`%s`, type=`%s`, value=%s",
len(actual_events),
type(actual_events),
actual_events,
)
if len(actual_events) == 0:
raise ValueError(f"No event for key {key}")
if len(actual_events) != 1 and not self.multiple_events:
raise ValueError(f"Expected one event for key {key}, got {len(actual_events)}")
# Last event is checked against the template, this will allow to f.e. check change in try_num
if not match(template, json.loads(actual_events[-1]), self.env):
raise ValueError("Event received does not match one specified in test")
finally:
if self.delete:
for key in self.event_templates: # type: ignore[union-attr]
log.info("Removing variable `%s`", key)
Variable.delete(key=key)
| OpenLineageTestOperator |
python | django__django | tests/custom_managers/models.py | {
"start": 1550,
"end": 1952
} | class ____(models.Manager):
def __init__(self, arg):
super().__init__()
self.init_arg = arg
def filter(self, *args, **kwargs):
queryset = super().filter(fun=True)
queryset._filter_CustomManager = True
return queryset
def manager_only(self):
return self.all()
CustomManager = BaseCustomManager.from_queryset(CustomQuerySet)
| BaseCustomManager |
python | PrefectHQ__prefect | tests/server/schemas/test_schedules.py | {
"start": 30070,
"end": 31898
} | class ____:
async def test_rrule_is_required(self):
with pytest.raises(ValidationError):
RRuleSchedule()
async def test_create_from_rrule_str(self):
assert RRuleSchedule(rrule=RRDaily)
async def test_create_from_rrule_obj(self):
s = RRuleSchedule.from_rrule(rrule.rrulestr("FREQ=DAILY"))
assert "RRULE:FREQ=DAILY" in s.rrule
s = RRuleSchedule.from_rrule(rrule.rrule(freq=rrule.MONTHLY))
assert "RRULE:FREQ=MONTHLY" in s.rrule
async def test_create_from_rrule_obj_reads_timezone(self):
s = RRuleSchedule.from_rrule(
rrule.rrule(
rrule.DAILY,
dtstart=datetime(2020, 1, 1, tzinfo=ZoneInfo("America/New_York")),
)
)
assert s.timezone == "America/New_York"
async def test_default_timezone_is_utc(self):
s = RRuleSchedule(rrule=RRDaily)
assert s.timezone == "UTC"
async def test_create_with_dtstart(self):
s = RRuleSchedule(rrule="DTSTART:20210905T000000\nFREQ=DAILY")
assert "DTSTART:20210905T000000" in str(s.rrule)
assert s.timezone == "UTC"
async def test_create_with_timezone(self):
s = RRuleSchedule(
rrule="DTSTART:20210101T000000\nFREQ=DAILY", timezone="America/New_York"
)
assert s.timezone == "America/New_York"
# Use a fixed start date to avoid DST-related flakiness
start = datetime(2025, 1, 15, tzinfo=ZoneInfo("UTC"))
dates = await s.get_dates(5, start=start)
assert dates[0].tzinfo.key == "America/New_York"
expected = [
datetime(2025, 1, 15, 0, 0, 0, tzinfo=ZoneInfo("America/New_York"))
+ timedelta(days=i)
for i in range(5)
]
assert dates == expected
| TestCreateRRuleSchedule |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-slack/components.py | {
"start": 6239,
"end": 8601
} | class ____(StateMigration):
"""
The logic for incrementally syncing threads is not very obvious, so buckle up.
To get all messages in a thread, one must specify the channel and timestamp of the parent (first) message of that thread,
basically its ID.
One complication is that threads can be updated at Any time in the future. Therefore, if we wanted to comprehensively sync data
i.e: get every single response in a thread, we'd have to read every message in the slack instance every time we ran a sync,
because otherwise there is no way to guarantee that a thread deep in the past didn't receive a new message.
A pragmatic workaround is to say we want threads to be at least N days fresh i.e: look back N days into the past,
get every message since, and read all of the thread responses. This is essentially the approach we're taking here via slicing:
create slices from N days into the past and read all messages in threads since then. We could optionally filter out records we have
already read, but that's omitted to keep the logic simple to reason about.
Good luck.
"""
config: Config
def __init__(self, config: Config):
self._config = config
def should_migrate(self, stream_state: Mapping[str, Any]) -> bool:
return True
def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
if not stream_state:
return {}
start_date_state = ab_datetime_parse(self._config["start_date"]).timestamp() # start date is required
# for migrated state
if stream_state.get("states"):
for state in stream_state["states"]:
start_date_state = max(start_date_state, float(state.get("cursor", {}).get("float_ts", start_date_state)))
# for old-stype state
if stream_state.get("float_ts"):
start_date_state = max(start_date_state, float(stream_state["float_ts"]))
lookback_window = timedelta(days=self._config.get("lookback_window", 0)) # lookback window in days
final_state = {"float_ts": (ab_datetime_parse(int(start_date_state)) - lookback_window).timestamp()}
stream_state["parent_state"] = {"channel_messages": final_state}
return stream_state
MESSAGES_AND_THREADS_RATE = Rate(limit=1, interval=timedelta(seconds=60))
| ThreadsStateMigration |
python | zarr-developers__zarr-python | src/zarr/errors.py | {
"start": 1848,
"end": 2268
} | class ____(BaseZarrError):
"""Raised when both array and group metadata are found at the same path."""
_msg = (
"Array and group metadata documents (.zarray and .zgroup) were both found in store "
"{!r} at path {!r}. "
"Only one of these files may be present in a given directory / prefix. "
"Remove the .zarray file, or the .zgroup file, or both."
)
| ContainsArrayAndGroupError |
python | Pylons__pyramid | tests/test_location.py | {
"start": 783,
"end": 1332
} | class ____(unittest.TestCase):
def _callFUT(self, context):
from pyramid.location import lineage
return lineage(context)
def test_lineage(self):
o1 = Location()
o2 = Location()
o2.__parent__ = o1
o3 = Location()
o3.__parent__ = o2
o4 = Location()
o4.__parent__ = o3
result = list(self._callFUT(o3))
self.assertEqual(result, [o3, o2, o1])
result = list(self._callFUT(o1))
self.assertEqual(result, [o1])
@implementer(ILocation)
| TestLineage |
python | PrefectHQ__prefect | tests/cli/test_profile.py | {
"start": 1047,
"end": 19109
} | class ____:
@pytest.fixture
def profiles(self):
prefect_cloud_api_url = "https://api.prefect.cloud/api"
prefect_cloud_server_api_url = (
f"{prefect_cloud_api_url}/accounts/{uuid4()}/workspaces/{uuid4()}"
)
hosted_server_api_url = "https://hosted-server.prefect.io/api"
return ProfilesCollection(
profiles=[
Profile(
name="prefect-cloud",
settings={
"PREFECT_API_URL": prefect_cloud_server_api_url,
"PREFECT_API_KEY": "a working cloud api key",
},
),
Profile(
name="prefect-cloud-with-invalid-key",
settings={
"PREFECT_API_URL": prefect_cloud_server_api_url,
"PREFECT_API_KEY": "a broken cloud api key",
},
),
Profile(
name="hosted-server",
settings={
"PREFECT_API_URL": hosted_server_api_url,
},
),
Profile(
name="ephemeral",
settings={"PREFECT_SERVER_ALLOW_EPHEMERAL_MODE": True},
),
],
active=None,
)
@pytest.fixture
def authorized_cloud(self):
# attempts to reach the Cloud API implies a good connection
# to Prefect Cloud as opposed to a hosted Prefect server instance
with respx.mock(using="httpx", assert_all_called=False) as respx_mock:
# Mock the health endpoint for cloud
health = respx_mock.get(
"https://api.prefect.cloud/api/health",
).mock(return_value=Response(200, json={}))
# Keep the workspaces endpoint mock for backward compatibility
respx_mock.get(
"https://api.prefect.cloud/api/me/workspaces",
).mock(return_value=Response(200, json=[]))
yield health
@pytest.fixture
def unauthorized_cloud(self):
# requests to cloud with an invalid key will result in a 401 response
with respx.mock(using="httpx", assert_all_called=False) as respx_mock:
# Mock the health endpoint for cloud
health = respx_mock.get(
"https://api.prefect.cloud/api/health",
).mock(side_effect=CloudUnauthorizedError("Invalid API key"))
# Keep the workspaces endpoint mock for backward compatibility
respx_mock.get(
"https://api.prefect.cloud/api/me/workspaces",
).mock(side_effect=CloudUnauthorizedError("Invalid API key"))
yield health
@pytest.fixture
def unhealthy_cloud(self):
# requests to cloud with an invalid key will result in a 401 response
with respx.mock(using="httpx", assert_all_called=False) as respx_mock:
# Mock the health endpoint for cloud with an error
unhealthy = respx_mock.get(
"https://api.prefect.cloud/api/health",
).mock(side_effect=self.connection_error)
# Keep the workspaces endpoint mock for backward compatibility
respx_mock.get(
"https://api.prefect.cloud/api/me/workspaces",
).mock(side_effect=self.connection_error)
yield unhealthy
@pytest.fixture
def hosted_server_has_no_cloud_api(self):
# if the API URL points to a hosted Prefect server instance, no Cloud API will be found
with respx.mock(using="httpx", assert_all_called=False) as respx_mock:
# We don't need to mock the cloud API endpoint anymore since we check server type first
hosted = respx_mock.get(
"https://hosted-server.prefect.io/api/me/workspaces",
).mock(return_value=Response(404, json={}))
yield hosted
@pytest.fixture
def healthy_hosted_server(self):
with respx.mock(using="httpx", assert_all_called=False) as respx_mock:
hosted = respx_mock.get(
"https://hosted-server.prefect.io/api/health",
).mock(return_value=Response(200, json={}))
yield hosted
def connection_error(self, *args):
raise Exception
@pytest.fixture
def unhealthy_hosted_server(self):
with respx.mock(using="httpx", assert_all_called=False) as respx_mock:
badly_hosted = respx_mock.get(
"https://hosted-server.prefect.io/api/health",
).mock(side_effect=self.connection_error)
yield badly_hosted
@pytest.mark.usefixtures("authorized_cloud")
def test_authorized_cloud_connection(self, profiles: ProfilesCollection):
save_profiles(profiles)
invoke_and_assert(
["profile", "use", "prefect-cloud"],
expected_output_contains=(
"Connected to Prefect Cloud using profile 'prefect-cloud'"
),
expected_code=0,
)
profiles = load_profiles()
assert profiles.active_name == "prefect-cloud"
@pytest.mark.usefixtures("unauthorized_cloud")
def test_unauthorized_cloud_connection(self, profiles: ProfilesCollection):
save_profiles(profiles)
invoke_and_assert(
["profile", "use", "prefect-cloud-with-invalid-key"],
expected_output_contains=(
"Error authenticating with Prefect Cloud using profile"
" 'prefect-cloud-with-invalid-key'"
),
expected_code=1,
)
profiles = load_profiles()
assert profiles.active_name == "prefect-cloud-with-invalid-key"
@pytest.mark.usefixtures("unhealthy_cloud")
def test_unhealthy_cloud_connection(self, profiles: ProfilesCollection):
save_profiles(profiles)
invoke_and_assert(
["profile", "use", "prefect-cloud"],
expected_output_contains="Error connecting to Prefect Cloud",
expected_code=1,
)
profiles = load_profiles()
assert profiles.active_name == "prefect-cloud"
@pytest.mark.usefixtures("hosted_server_has_no_cloud_api", "healthy_hosted_server")
def test_using_hosted_server(self, profiles: ProfilesCollection):
save_profiles(profiles)
invoke_and_assert(
["profile", "use", "hosted-server"],
expected_output_contains=(
"Connected to Prefect server using profile 'hosted-server'"
),
expected_code=0,
)
profiles = load_profiles()
assert profiles.active_name == "hosted-server"
@pytest.mark.usefixtures(
"hosted_server_has_no_cloud_api", "unhealthy_hosted_server"
)
def test_unhealthy_hosted_server(self, profiles: ProfilesCollection):
save_profiles(profiles)
invoke_and_assert(
["profile", "use", "hosted-server"],
expected_output_contains="Error connecting to Prefect server",
expected_code=1,
)
profiles = load_profiles()
assert profiles.active_name == "hosted-server"
def test_using_ephemeral_server(self, profiles: ProfilesCollection):
save_profiles(profiles)
invoke_and_assert(
["profile", "use", "ephemeral"],
expected_output_contains=(
"No Prefect server specified using profile 'ephemeral'"
),
expected_code=0,
)
profiles = load_profiles()
assert profiles.active_name == "ephemeral"
def test_ls_additional_profiles():
# 'ephemeral' is not the current profile because we have a temporary profile in-use
# during tests
save_profiles(
ProfilesCollection(
profiles=[
Profile(name="foo", settings={}),
Profile(name="bar", settings={}),
],
active=None,
)
)
invoke_and_assert(
["profile", "ls"],
expected_output_contains=(
"foo",
"bar",
),
)
def test_ls_respects_current_from_profile_flag():
save_profiles(
ProfilesCollection(
profiles=[
Profile(name="foo", settings={}),
],
active=None,
)
)
invoke_and_assert(
["--profile", "foo", "profile", "ls"],
expected_output_contains=("* foo",),
)
def test_ls_respects_current_from_context():
save_profiles(
ProfilesCollection(
profiles=[
Profile(name="foo", settings={}),
Profile(name="bar", settings={}),
],
active=None,
)
)
with use_profile("bar"):
invoke_and_assert(
["profile", "ls"],
expected_output_contains=(
"foo",
"* bar",
),
)
def test_create_profile():
invoke_and_assert(
["profile", "create", "foo"],
expected_output="""
Created profile with properties:
name - 'foo'
from name - None
Use created profile for future, subsequent commands:
prefect profile use 'foo'
Use created profile temporarily for a single command:
prefect -p 'foo' config view
""",
)
profiles = load_profiles()
assert profiles["foo"] == Profile(
name="foo", settings={}, source=PREFECT_PROFILES_PATH.value()
)
def test_create_profile_from_existing():
save_profiles(
ProfilesCollection(
profiles=[
Profile(name="foo", settings={PREFECT_API_KEY: "foo"}),
],
active=None,
)
)
invoke_and_assert(
["profile", "create", "bar", "--from", "foo"],
expected_output="""
Created profile with properties:
name - 'bar'
from name - foo
Use created profile for future, subsequent commands:
prefect profile use 'bar'
Use created profile temporarily for a single command:
prefect -p 'bar' config view
""",
)
profiles = load_profiles()
assert profiles["foo"].settings == {PREFECT_API_KEY: "foo"}, "Foo is unchanged"
assert profiles["bar"] == Profile(
name="bar",
settings={PREFECT_API_KEY: "foo"},
source=PREFECT_PROFILES_PATH.value(),
)
def test_create_profile_from_unknown_profile():
invoke_and_assert(
["profile", "create", "bar", "--from", "foo"],
expected_output="Profile 'foo' not found.",
expected_code=1,
)
def test_create_profile_with_existing_profile():
save_profiles(
ProfilesCollection(
profiles=[
Profile(name="foo", settings={PREFECT_API_KEY: "foo"}),
],
active=None,
)
)
invoke_and_assert(
["profile", "create", "foo"],
expected_output="""
Profile 'foo' already exists.
To create a new profile, remove the existing profile first:
prefect profile delete 'foo'
""",
expected_code=1,
)
def test_create_profile_with_name_conflict_vs_unsaved_default():
"""
Regression test for https://github.com/PrefectHQ/prefect/issues/15643
"""
invoke_and_assert(
["profile", "create", "local"],
expected_output="""
Created profile with properties:
name - 'local'
from name - None
Use created profile for future, subsequent commands:
prefect profile use 'local'
Use created profile temporarily for a single command:
prefect -p 'local' config view
""",
)
def test_delete_profile():
save_profiles(
ProfilesCollection(
profiles=[
Profile(name="foo", settings={PREFECT_API_KEY: "foo"}),
Profile(name="bar", settings={PREFECT_API_KEY: "bar"}),
],
active=None,
)
)
invoke_and_assert(
["profile", "delete", "bar"],
user_input="y",
expected_output_contains="Removed profile 'bar'.",
)
profiles = load_profiles()
assert "foo" in profiles
assert "bar" not in profiles
def test_delete_profile_unknown_name():
invoke_and_assert(
["profile", "delete", "foo"],
expected_output="Profile 'foo' not found.",
expected_code=1,
)
def test_delete_profile_cannot_target_active_profile():
save_profiles(
ProfilesCollection(
profiles=[
Profile(name="foo", settings={PREFECT_API_KEY: "foo"}),
],
active=None,
)
)
with use_profile("foo"):
invoke_and_assert(
["profile", "delete", "foo"],
expected_output=(
"Profile 'foo' is the active profile. You must switch profiles before"
" it can be deleted."
),
expected_code=1,
)
def test_rename_profile_name_exists():
save_profiles(
ProfilesCollection(
profiles=[
Profile(name="foo", settings={}),
Profile(name="bar", settings={}),
],
active=None,
)
)
invoke_and_assert(
["profile", "rename", "foo", "bar"],
expected_output="Profile 'bar' already exists.",
expected_code=1,
)
def test_rename_profile_unknown_name():
invoke_and_assert(
["profile", "rename", "foo", "bar"],
expected_output="Profile 'foo' not found.",
expected_code=1,
)
def test_rename_profile_renames_profile():
save_profiles(
ProfilesCollection(
profiles=[
Profile(name="foo", settings={PREFECT_API_KEY: "foo"}),
],
active=None,
)
)
invoke_and_assert(
["profile", "rename", "foo", "bar"],
expected_output="Renamed profile 'foo' to 'bar'.",
expected_code=0,
)
profiles = load_profiles()
assert "foo" not in profiles, "The original profile should not exist anymore"
assert profiles["bar"].settings == {PREFECT_API_KEY: "foo"}, (
"Settings should be retained"
)
assert profiles.active_name != "bar", "The active profile should not be changed"
def test_rename_profile_changes_active_profile():
save_profiles(
ProfilesCollection(
profiles=[
Profile(name="foo", settings={PREFECT_API_KEY: "foo"}),
],
active="foo",
)
)
invoke_and_assert(
["profile", "rename", "foo", "bar"],
expected_output="Renamed profile 'foo' to 'bar'.",
expected_code=0,
)
profiles = load_profiles()
assert profiles.active_name == "bar"
def test_rename_profile_warns_on_environment_variable_active_profile(
monkeypatch: pytest.MonkeyPatch,
):
save_profiles(
ProfilesCollection(
profiles=[
Profile(name="foo", settings={PREFECT_API_KEY: "foo"}),
],
active=None,
)
)
monkeypatch.setenv("PREFECT_PROFILE", "foo")
invoke_and_assert(
["profile", "rename", "foo", "bar"],
expected_output_contains=(
"You have set your current profile to 'foo' with the PREFECT_PROFILE "
"environment variable. You must update this variable to 'bar' "
"to continue using the profile."
),
expected_code=0,
)
profiles = load_profiles()
assert profiles.active_name != "foo", (
"The active profile should not be updated in the file"
)
def test_inspect_profile_unknown_name():
invoke_and_assert(
["profile", "inspect", "foo"],
expected_output="Profile 'foo' not found.",
expected_code=1,
)
def test_inspect_profile():
save_profiles(
ProfilesCollection(
profiles=[
Profile(
name="foo",
settings={PREFECT_API_KEY: "foo", PREFECT_DEBUG_MODE: True},
),
],
active=None,
)
)
invoke_and_assert(
["profile", "inspect", "foo"],
expected_output="""
PREFECT_API_KEY='foo'
PREFECT_DEBUG_MODE='True'
""",
)
def test_inspect_profile_without_settings():
save_profiles(
ProfilesCollection(
profiles=[Profile(name="foo", settings={})],
active=None,
)
)
invoke_and_assert(
["profile", "inspect", "foo"],
expected_output="""
Profile 'foo' is empty.
""",
)
def test_inspect_profile_with_json_output():
"""Test profile inspect command with JSON output flag."""
import json
save_profiles(
ProfilesCollection(
profiles=[
Profile(
name="test-profile",
settings={
PREFECT_API_URL: "https://test.prefect.cloud/api",
PREFECT_DEBUG_MODE: True,
},
)
],
active="test-profile",
)
)
result = invoke_and_assert(
["profile", "inspect", "test-profile", "--output", "json"],
expected_code=0,
)
# Parse JSON output and verify it's valid JSON
output_data = json.loads(result.stdout.strip())
# Verify key fields are present
assert "PREFECT_API_URL" in output_data
assert "PREFECT_DEBUG_MODE" in output_data
assert output_data["PREFECT_API_URL"] == "https://test.prefect.cloud/api"
assert (
output_data["PREFECT_DEBUG_MODE"] == "True"
) # Settings are serialized as strings
| TestChangingProfileAndCheckingServerConnection |
python | doocs__leetcode | solution/0700-0799/0785.Is Graph Bipartite/Solution.py | {
"start": 0,
"end": 473
} | class ____:
def isBipartite(self, graph: List[List[int]]) -> bool:
def dfs(a: int, c: int) -> bool:
color[a] = c
for b in graph[a]:
if color[b] == c or (color[b] == 0 and not dfs(b, -c)):
return False
return True
n = len(graph)
color = [0] * n
for i in range(n):
if color[i] == 0 and not dfs(i, 1):
return False
return True
| Solution |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 2271,
"end": 2417
} | class ____(Error):
"""An exception raised when a file or folder cannot be accessed after multiple retries."""
pass
| StreamlitMaxRetriesError |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/emr.py | {
"start": 1134,
"end": 2620
} | class ____(AwsBaseWaiterTrigger):
"""
Poll for the status of EMR steps until they reach terminal state.
:param job_flow_id: job_flow_id which contains the steps to check the state of
:param step_ids: steps to check the state of
:param waiter_delay: polling period in seconds to check for the status
:param waiter_max_attempts: The maximum number of attempts to be made
:param aws_conn_id: Reference to AWS connection id
"""
def __init__(
self,
job_flow_id: str,
step_ids: list[str],
waiter_delay: int,
waiter_max_attempts: int,
aws_conn_id: str | None = "aws_default",
):
super().__init__(
serialized_fields={"job_flow_id": job_flow_id, "step_ids": step_ids},
waiter_name="steps_wait_for_terminal",
waiter_args={"ClusterId": job_flow_id, "StepIds": step_ids},
failure_message=f"Error while waiting for steps {step_ids} to complete",
status_message=f"Step ids: {step_ids}, Steps are still in non-terminal state",
status_queries=[
"Steps[].Status.State",
"Steps[].Status.FailureDetails",
],
return_value=step_ids,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return EmrHook(aws_conn_id=self.aws_conn_id)
| EmrAddStepsTrigger |
python | astropy__astropy | astropy/units/tests/test_quantity_non_ufuncs.py | {
"start": 68094,
"end": 68383
} | class ____:
def test_meshgrid(self):
q1 = np.arange(3.0) * u.m
q2 = np.arange(5.0) * u.s
o1, o2 = np.meshgrid(q1, q2)
e1, e2 = np.meshgrid(q1.value, q2.value)
assert np.all(o1 == e1 * q1.unit)
assert np.all(o2 == e2 * q2.unit)
| TestMeshGrid |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructorCallable2.py | {
"start": 935,
"end": 1113
} | class ____:
def __new__(cls, x: int) -> int: ...
r4 = accepts_callable(Class4)
reveal_type(r4, expected_text="(x: int) -> int")
reveal_type(r4(1), expected_text="int")
| Class4 |
python | scipy__scipy | scipy/sparse/linalg/tests/test_funm_multiply_krylov.py | {
"start": 732,
"end": 7289
} | class ____:
def test_krylov_funm_zero_vector(self):
n = 20
A = np.zeros((n, n))
b = np.zeros(n)
observed = funm_multiply_krylov(expm, A, b)
expected = np.zeros(n)
assert_allclose(observed, expected)
@pytest.mark.parametrize("f", FUNCS)
def test_funm_multiply_krylov_nonhermitian_dense(self, f):
rng = np.random.default_rng(1738151906092735)
n = 60
nsamples = 10
for i in range(nsamples):
A = rng.standard_normal((n, n))
b = rng.standard_normal(n)
fA = f(A)
expected = fA @ b
observed = funm_multiply_krylov(f, A, b)
assert_allclose(observed, expected, rtol = 1E-6, atol = 1E-8)
observed = funm_multiply_krylov(f, aslinearoperator(A), b)
assert_allclose(observed, expected, rtol = 1E-6, atol = 1E-8)
@pytest.mark.parametrize("f", FUNCS)
def test_funm_multiply_krylov_nonhermitian_sparse(self, f, num_parallel_threads):
rng = np.random.default_rng(1738151906092735)
n = 100
nsamples = 1 + 9 // num_parallel_threads # Very slow otherwise
for i in range(nsamples):
D = scipy.sparse.diags(rng.standard_normal(n))
A = scipy.sparse.random_array((n, n), density = 0.01, rng = rng) + D
denseA = A.todense()
b = rng.standard_normal(n)
fA = f(denseA)
expected = fA @ b
observed = funm_multiply_krylov(f, A, b)
assert_allclose(observed, expected, rtol = 1E-6, atol = 1E-8)
observed = funm_multiply_krylov(f, aslinearoperator(A), b)
assert_allclose(observed, expected, rtol = 1E-6, atol = 1E-8)
@pytest.mark.parametrize("f", FUNCS)
def test_funm_multiply_krylov_hermitian_dense(self, f):
rng = np.random.default_rng(1738151906092735)
n = 60
nsamples = 10
for i in range(nsamples):
R = np.triu(rng.standard_normal((n, n)))
A = R.T + R
b = rng.standard_normal(n)
fA = f(A)
expected = fA @ b
observed = funm_multiply_krylov(f, A, b, assume_a = 'her')
assert_allclose(observed, expected, rtol = 1E-6, atol = 1E-8)
observed = funm_multiply_krylov(f, aslinearoperator(A), b, assume_a = 'her')
assert_allclose(observed, expected, rtol = 1E-6, atol = 1E-8)
@pytest.mark.parametrize("f", FUNCS)
def test_funm_multiply_krylov_hermitian_sparse(self, f, num_parallel_threads):
rng = np.random.default_rng(1738151906092735)
n = 100
nsamples = 1 + 9 // num_parallel_threads # Very slow otherwise
for i in range(nsamples):
D = scipy.sparse.diags(rng.standard_normal(n))
A = scipy.sparse.random_array((n, n), density = 0.01, rng = rng)
R = scipy.sparse.triu(A)
A = R + R.T + D
denseA = A.todense()
b = rng.standard_normal(n)
fA = f(denseA)
expected = fA @ b
observed = funm_multiply_krylov(f, A, b, assume_a = 'her')
assert_allclose(observed, expected, rtol = 1E-6, atol = 1E-8)
observed = funm_multiply_krylov(f, aslinearoperator(A), b, assume_a = 'her')
assert_allclose(observed, expected, rtol = 1E-6, atol = 1E-8)
def test_funm_multiply_krylov_breakdown(self):
rng = np.random.default_rng(1738151906092735)
# From test_iterative
A = np.array([[0, 0, 0, 0, 0, 1, -1, -0, -0, -0, -0],
[0, 0, 0, 0, 0, 2, -0, -1, -0, -0, -0],
[0, 0, 0, 0, 0, 2, -0, -0, -1, -0, -0],
[0, 0, 0, 0, 0, 2, -0, -0, -0, -1, -0],
[0, 0, 0, 0, 0, 1, -0, -0, -0, -0, -1],
[1, 2, 2, 2, 1, 0, -0, -0, -0, -0, -0],
[-1, 0, 0, 0, 0, 0, -1, -0, -0, -0, -0],
[0, -1, 0, 0, 0, 0, -0, -1, -0, -0, -0],
[0, 0, -1, 0, 0, 0, -0, -0, -1, -0, -0],
[0, 0, 0, -1, 0, 0, -0, -0, -0, -1, -0],
[0, 0, 0, 0, -1, 0, -0, -0, -0, -0, -1]], dtype = float)
b = rng.standard_normal(A.shape[0])
fA = expm(A)
expected = fA @ b
observed = funm_multiply_krylov(expm, A, b, restart_every_m = 40)
assert_allclose(observed, expected)
def test_funm_multiply_krylov_invalid_input(self):
A = np.array([[1, 2], [3, 4]]) # Non-hermitian matrix
b = np.array([1.0, 2.0]) # Ensure 'b' is a 1D array of floats
# Test for invalid 'b' (not 1D)
b_invalid = np.array([[1.0], [2.0]]) # 2D array
with pytest.raises(ValueError,
match="argument 'b' must be a 1D array."):
funm_multiply_krylov(np.exp, A, b_invalid)
# Test for invalid restart parameter
with pytest.raises(ValueError,
match="argument 'restart_every_m' must be positive."):
funm_multiply_krylov(np.exp, A, b, restart_every_m=0)
# Test for invalid max_restarts
with pytest.raises(ValueError,
match="argument 'max_restarts' must be positive."):
funm_multiply_krylov(np.exp, A, b, max_restarts=0)
# Test for invalid 'assume_a' string
with pytest.raises(ValueError,
match="is not a recognized matrix structure"):
funm_multiply_krylov(np.exp, A, b, assume_a='invalid')
@pytest.mark.parametrize("dtype_a", DTYPES)
@pytest.mark.parametrize("dtype_b", DTYPES)
def test_funm_multiply_krylov_types(dtype_a, dtype_b):
assert_allclose_ = (partial(assert_allclose, rtol = 1.8e-3, atol = 1e-5)
if {dtype_a, dtype_b} else assert_allclose)
rng = np.random.default_rng(1738151906092735)
n = 50
if dtype_a in REAL_DTYPES:
A = rng.random([n, n]).astype(dtype_a)
else:
A = (rng.random([n, n]) + 1j * rng.random([n, n])).astype(dtype_a)
if dtype_b in REAL_DTYPES:
b = (2 * rng.random(n)).astype(dtype_b)
else:
b = (rng.random(n) + 1j * rng.random(n)).astype(dtype_b)
expA = expm(A)
expected = expA @ b
observed = funm_multiply_krylov(expm, A, b)
assert_allclose_(observed, expected)
observed = funm_multiply_krylov(expm, aslinearoperator(A), b)
assert_allclose_(observed, expected)
| TestKrylovFunmv |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.