| language (string, 1 distinct value) | repo (string, 346 distinct values) | path (string, length 6–201) | class_span (dict) | source (string, length 21–2.38M) | target (string, length 1–96) |
|---|---|---|---|---|---|
python | pikepdf__pikepdf | src/pikepdf/_methods.py | {"start": 16680, "end": 23143} | class ____:
@property
def mediabox(self):
return self._get_mediabox(True)
@mediabox.setter
def mediabox(self, value):
check_is_box(value)
self.obj['/MediaBox'] = value
@property
def artbox(self):
return self._get_artbox(True, False)
@artbox.setter
def artbox(self, value):
check_is_box(value)
self.obj['/ArtBox'] = value
@property
def bleedbox(self):
return self._get_bleedbox(True, False)
@bleedbox.setter
def bleedbox(self, value):
check_is_box(value)
self.obj['/BleedBox'] = value
@property
def cropbox(self):
return self._get_cropbox(True, False)
@cropbox.setter
def cropbox(self, value):
check_is_box(value)
self.obj['/CropBox'] = value
@property
def trimbox(self):
return self._get_trimbox(True, False)
@trimbox.setter
def trimbox(self, value):
check_is_box(value)
self.obj['/TrimBox'] = value
@property
def images(self) -> _ObjectMapping:
return self._images
@property
def form_xobjects(self) -> _ObjectMapping:
return self._form_xobjects
@property
def resources(self) -> Dictionary:
if Name.Resources not in self.obj:
self.obj.Resources = Dictionary()
elif not isinstance(self.obj.Resources, Dictionary):
raise TypeError("Page /Resources exists but is not a dictionary")
return self.obj.Resources
def add_resource(
self,
res: Object,
res_type: Name,
name: Name | None = None,
*,
prefix: str = '',
replace_existing: bool = True,
) -> Name:
resources = self.resources
if res_type not in resources:
resources[res_type] = Dictionary()
if name is not None and prefix:
raise ValueError("Must specify one of name= or prefix=")
if name is None:
name = Name.random(prefix=prefix)
for res_dict in resources.as_dict().values():
if not isinstance(res_dict, Dictionary):
continue
if name in res_dict:
if replace_existing:
del res_dict[name]
else:
raise ValueError(f"Name {name} already exists in page /Resources")
resources[res_type][name] = res.with_same_owner_as(self.obj)
return name
def _over_underlay(
self,
other,
rect: Rectangle | None,
under: bool,
push_stack: bool,
shrink: bool,
expand: bool,
) -> Name:
formx = None
if isinstance(other, Page):
formx = other.as_form_xobject()
elif isinstance(other, Dictionary) and other.get(Name.Type) == Name.Page:
formx = Page(other).as_form_xobject()
elif (
isinstance(other, Stream)
and other.get(Name.Type) == Name.XObject
and other.get(Name.Subtype) == Name.Form
):
formx = other
if formx is None:
raise TypeError(
"other object is not something we can convert to Form XObject"
)
if rect is None:
rect = Rectangle(self.trimbox)
formx_placed_name = self.add_resource(formx, Name.XObject)
cs = self.calc_form_xobject_placement(
formx, formx_placed_name, rect, allow_shrink=shrink, allow_expand=expand
)
if push_stack:
self.contents_add(b'q\n', prepend=True) # prepend q
self.contents_add(b'Q\n', prepend=False) # i.e. append Q
self.contents_add(cs, prepend=under)
self.contents_coalesce()
return formx_placed_name
def add_overlay(
self,
other: Object | Page,
rect: Rectangle | None = None,
*,
push_stack: bool = True,
shrink: bool = True,
expand: bool = True,
) -> Name:
return self._over_underlay(
other,
rect,
under=False,
push_stack=push_stack,
expand=expand,
shrink=shrink,
)
def add_underlay(
self,
other: Object | Page,
rect: Rectangle | None = None,
*,
shrink: bool = True,
expand: bool = True,
) -> Name:
return self._over_underlay(
other, rect, under=True, push_stack=False, expand=expand, shrink=shrink
)
def contents_add(self, contents: Stream | bytes, *, prepend: bool = False):
return self._contents_add(contents, prepend=prepend)
def __getattr__(self, name):
return getattr(self.obj, name)
@augment_override_cpp
def __setattr__(self, name, value):
if hasattr(self.__class__, name):
object.__setattr__(self, name, value)
else:
setattr(self.obj, name, value)
@augment_override_cpp
def __delattr__(self, name):
if hasattr(self.__class__, name):
object.__delattr__(self, name)
else:
delattr(self.obj, name)
def __getitem__(self, key):
return self.obj[key]
def __setitem__(self, key, value):
self.obj[key] = value
def __delitem__(self, key):
del self.obj[key]
def __contains__(self, key):
return key in self.obj
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def emplace(self, other: Page, retain=(Name.Parent,)):
return self.obj.emplace(other.obj, retain=retain)
def __repr__(self):
return (
repr(self.obj)
.replace('Dictionary', 'Page', 1)
.replace('(Type="/Page")', '', 1)
)
def _repr_mimebundle_(self, include=None, exclude=None):
data = {}
bundle = {'application/pdf', 'image/svg+xml'}
if include:
bundle = {k for k in bundle if k in include}
if exclude:
bundle = {k for k in bundle if k not in exclude}
pagedata = _single_page_pdf(self)
if 'application/pdf' in bundle:
data['application/pdf'] = pagedata
if 'image/svg+xml' in bundle:
with suppress(FileNotFoundError, RuntimeError):
data['image/svg+xml'] = _mudraw(pagedata, 'svg').decode('utf-8')
return data
@augments(Token)
| Extend_Page |
python | astropy__astropy | astropy/modeling/functional_models.py | {"start": 44089, "end": 45647} | class ____(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
"""
slope = Parameter(default=1, description="Slope of the straight line")
intercept = Parameter(default=0, description="Intercept of the straight line")
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function."""
return slope * x + intercept
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Line model derivative with respect to parameters."""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope**-1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.input_unit is None and self.slope.input_unit is None:
return None
return {self.inputs[0]: self.intercept.input_unit / self.slope.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit[self.outputs[0]],
"slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
}
| Linear1D |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline_ref.py | {"start": 18, "end": 588} | class ____(graphene.Interface):
"""This interface supports the case where we can look up a pipeline successfully in the
repository available to the DagsterInstance/graphql context, as well as the case where we know
that a pipeline exists/existed thanks to materialized data such as logs and run metadata, but
where we can't look the concrete pipeline up.
"""
name = graphene.NonNull(graphene.String)
solidSelection = graphene.List(graphene.NonNull(graphene.String))
class Meta:
name = "PipelineReference"
| GraphenePipelineReference |
python | pytorch__pytorch | test/functorch/test_eager_transforms.py | {"start": 122506, "end": 134994} | class ____(TestCase):
@parametrize("disable_autograd_tracking", [True, False])
def test_disable_autograd_tracking(self, disable_autograd_tracking):
class Foo(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
mod = Foo()
_, params = make_functional(
mod, disable_autograd_tracking=disable_autograd_tracking
)
self.assertEqual(len(params), 2)
for param in params:
self.assertEqual(param.requires_grad, not disable_autograd_tracking)
def test_parameter_tying(self):
class Foo(nn.Module):
def __init__(self) -> None:
super().__init__()
self.bias = nn.Parameter(torch.randn(3))
self.linear = nn.Linear(3, 3)
self.linear.bias = self.bias
self.linear_tied = self.linear
def forward(self, x):
x = self.linear(x)
x = self.linear_tied(x)
x = x + self.bias
return x
torch.manual_seed(1)
mod = Foo()
func, _ = make_functional(mod)
torch.manual_seed(0)
mod = Foo()
_, params = make_functional(mod)
self.assertEqual(len(params), 2)
x = torch.randn(2, 3)
result = func(params, x)
expected = mod(x)
self.assertEqual(result, expected)
def test_buffer_tying(self):
class Foo(nn.Module):
def __init__(self) -> None:
super().__init__()
self.bias = nn.Parameter(torch.randn(3))
self.linear = nn.Linear(3, 3)
self.buffer = nn.Buffer(torch.randn(3))
self.buffer_tied = self.buffer
def forward(self, x):
x = self.linear(x)
x = x + self.bias
x = x + self.buffer
x = x + self.buffer_tied
return x
torch.manual_seed(1)
mod = Foo()
func, _, _ = make_functional_with_buffers(mod)
torch.manual_seed(0)
mod = Foo()
_, params, buffers = make_functional_with_buffers(mod)
self.assertEqual(len(params), 3)
self.assertEqual(len(buffers), 1)
x = torch.randn(2, 3)
result = func(params, buffers, x)
expected = mod(x)
self.assertEqual(result, expected)
@parametrize("disable_autograd_tracking", [True, False])
def test_with_buffers_disable_autograd_tracking(self, disable_autograd_tracking):
class Foo(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(3, 3)
self.buffer = nn.Buffer(torch.randn(3))
def forward(self, x):
x = self.linear(x)
x = x + self.buffer
return x
mod = Foo()
_, params, buffers = make_functional_with_buffers(
mod, disable_autograd_tracking=disable_autograd_tracking
)
self.assertEqual(len(params), 2)
self.assertEqual(len(buffers), 1)
for param in params:
self.assertEqual(param.requires_grad, not disable_autograd_tracking)
@parametrize("detach_params", [True, False])
def test_using_detach_functional_call(self, detach_params):
class Foo(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(3, 3)
self.buffer = nn.Buffer(torch.randn(3))
def forward(self, x):
x = self.linear(x)
x = x + self.buffer
return x
def params_dict(mod):
named_params = mod.named_parameters()
return (
{k: v.detach() for k, v in named_params}
if detach_params
else dict(named_params)
)
mod = Foo()
x = torch.randn(3, 3)
d = (params_dict(mod), dict(mod.named_buffers()))
out = functional_call(mod, d, x)
self.assertEqual(out.grad_fn is None, detach_params)
def test_parameter_tying_grad(self):
class Foo(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(3, 3)
self.weight = self.linear.weight
self.bias = self.linear.bias
def forward(self, x):
x = self.linear(x)
x = F.linear(x, self.weight, self.bias)
return x
x = torch.randn(2, 3)
torch.manual_seed(0)
mod = Foo()
loss = mod(x).sum()
expected = torch.autograd.grad(loss, mod.parameters())
mod = Foo()
fmod, _, _ = make_functional_with_buffers(mod)
torch.manual_seed(0)
mod = Foo()
_, params, buffers = make_functional_with_buffers(mod)
def compute_loss(params, buffers, x):
return fmod(params, buffers, x).sum()
result = grad(compute_loss)(params, buffers, x)
self.assertEqual(result, expected)
def test_parameter_tying_ensemble(self):
class Foo(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(3, 3)
self.weight = self.linear.weight
self.bias = self.linear.bias
self.buffer = nn.Buffer(torch.randn(3))
self.buffer_tied = self.buffer
def forward(self, x):
x = self.linear(x)
x = F.linear(x, self.weight, self.bias)
x = x + self.buffer
x = x + self.buffer_tied
return x
num_models = 2
xs = torch.randn(num_models, 64, 3)
models = [Foo() for _ in range(num_models)]
fmodel, _, _ = combine_state_for_ensemble(models)
torch.manual_seed(0)
models = [Foo() for _ in range(num_models)]
_, params, buffers = combine_state_for_ensemble(models)
result = vmap(fmodel)(params, buffers, xs)
torch.manual_seed(0)
models = [Foo() for _ in range(num_models)]
expected = torch.stack([model(x) for model, x in zip(models, xs)])
self.assertEqual(result, expected)
@parametrize("mechanism", ["make_functional", "functional_call"])
def test_correctness_mnist(self, mechanism):
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
x = torch.randn(64, 1, 32, 32)
torch.manual_seed(301)
fnet, _ = _get_weights_and_functional_call(Net(), mechanism)
torch.manual_seed(0)
_, params = _get_weights_and_functional_call(Net(), mechanism)
result = fnet(params, x)
torch.manual_seed(0)
net = Net()
expected = net(x)
self.assertEqual(result, expected)
def test_combine_state_for_ensemble_error(self):
in_features = 2
out_features = 2
models = []
with self.assertRaisesRegex(RuntimeError, "Expected at least one model"):
_ = combine_state_for_ensemble(models)
num_models = 3
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
models[1].eval()
with self.assertRaisesRegex(RuntimeError, "same training/eval mode"):
_ = combine_state_for_ensemble(models)
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
models[1] = torch.nn.Conv2d(3, 3, (3, 3))
with self.assertRaisesRegex(RuntimeError, "models to be of the same class"):
_ = combine_state_for_ensemble(models)
def test_combine_state_for_ensemble_smoke(self):
in_features = 2
out_features = 2
num_models = 3
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
_ = combine_state_for_ensemble(models)
def test_stack_module_state_smoke(self):
in_features = 2
out_features = 2
num_models = 3
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
_ = stack_module_state(models)
def test_stack_module_state_leaf(self):
in_features = 2
out_features = 2
num_models = 3
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
params, buffers = stack_module_state(models)
for param in params.values():
self.assertTrue(param.requires_grad)
self.assertTrue(param.is_leaf)
def test_stack_module_state_mismatch_error(self):
in_features = 2
out_features = 2
num_models = 3
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
models[0].weight.requires_grad_(False)
with self.assertRaisesRegex(RuntimeError, "same .requires_grad"):
params, buffers = stack_module_state(models)
def test_stack_module_state_error(self):
in_features = 2
out_features = 2
models = []
with self.assertRaisesRegex(
RuntimeError, "stack_module_state:.* Expected at least one model"
):
_ = stack_module_state(models)
num_models = 3
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
models[1].eval()
with self.assertRaisesRegex(
RuntimeError, "stack_module_state:.* same training/eval mode."
):
_ = stack_module_state(models)
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
models[1] = torch.nn.Conv2d(3, 3, (3, 3))
with self.assertRaisesRegex(
RuntimeError, "stack_module_state:.* models to be of the same class"
):
_ = stack_module_state(models)
@parametrize("mechanism", ["make_functional", "functional_call"])
def test_make_functional_state_correctly_returned_after_forward(self, mechanism):
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
def get_module_info(mod):
if mechanism == "make_functional":
return make_functional(mod)
else:
assert mechanism == "functional_call"
return mod, dict(mod.named_parameters())
mod = Net()
func_mod, params = get_module_info(mod)
# state in func.names_map
mod = func_mod.stateless_model if mechanism == "make_functional" else func_mod
old_state_linear_weight = mod.linear.weight
old_state_linear_bias = mod.linear.bias
self.assertIsNotNone(old_state_linear_weight)
self.assertIsNotNone(old_state_linear_bias)
x = torch.randn(4, 3)
if mechanism == "make_functional":
func_mod(params, x)
else:
assert mechanism == "functional_call"
functional_call(func_mod, params, x)
mod = func_mod.stateless_model if mechanism == "make_functional" else func_mod
new_state_linear_weight = mod.linear.weight
new_state_linear_bias = mod.linear.bias
self.assertIsNotNone(new_state_linear_weight)
self.assertIsNotNone(new_state_linear_bias)
self.assertEqual(old_state_linear_weight, new_state_linear_weight)
self.assertEqual(old_state_linear_bias, new_state_linear_bias)
@markDynamoStrictTest
| TestMakeFunctional |
python | mlflow__mlflow | dev/clint/src/clint/rules/base.py | {"start": 207, "end": 798} | class ____(ABC):
id: str
name: str
def __init_subclass__(cls, **kwargs: Any) -> None:
super().__init_subclass__(**kwargs)
# Only generate ID for concrete classes
if not inspect.isabstract(cls):
id_ = next(_id_counter)
cls.id = f"MLF{id_:04d}"
cls.name = _CLASS_NAME_TO_RULE_NAME_REGEX.sub("-", cls.__name__).lower()
@abstractmethod
def _message(self) -> str:
"""
Return a message that explains this rule.
"""
@property
def message(self) -> str:
return self._message()
| Rule |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/commands/ci/checks.py | {"start": 2165, "end": 4071} | class ____(Enum):
failed = "failed"
warning = "warning"
skipped = "skipped"
passed = "passed"
def handle_result(
result: CheckResult,
check: Check,
prefix_message: str,
success_message: str,
failure_message: str,
) -> Verdict:
def full_msg(msg):
return prefix_message + msg
def passed(msg):
ui.print("✅" + full_msg(msg))
def warning(msg):
ui.print("🟡" + ui.yellow(full_msg(msg)))
def failed(msg):
ui.print("🚫" + ui.red(full_msg(msg)))
def print_indented(msgs):
lines = "\n".join(msgs).splitlines(keepends=False)
for line in lines:
ui.print(" | " + line)
if check == Check.skip:
return Verdict.skipped
if result.errors:
if check == Check.error:
failed(failure_message)
print_indented(result.messages)
print_indented(result.errors)
ui.print("\n")
return Verdict.failed
elif check == Check.warn:
warning(failure_message)
print_indented(result.messages)
print_indented(result.errors)
ui.print("\n")
return Verdict.warning
passed(success_message)
print_indented(result.messages)
ui.print("\n")
return Verdict.passed
def check_connect_dagster_cloud(url) -> CheckResult:
if TOKEN_ENV_VAR_NAME not in os.environ:
return CheckResult([f"{TOKEN_ENV_VAR_NAME} not set"])
result = CheckResult()
result.messages.append(f"Connecting to {url} using {TOKEN_ENV_VAR_NAME}")
with gql.graphql_client_from_url(url, os.environ[TOKEN_ENV_VAR_NAME]) as client:
try:
gql.get_organization_settings(client)
result.messages.append("Connection successful")
except Exception as err:
result.errors.append(f"Failed to connect to {url}: {err}")
return result
| Verdict |
python | pallets__jinja | src/jinja2/nodes.py | {"start": 32291, "end": 32542} | class ____(Expr):
"""Return the current template context including locals. Behaves
exactly like :class:`ContextReference`, but includes local
variables, such as from a ``for`` loop.
.. versionadded:: 2.11
"""
| DerivedContextReference |
python | PrefectHQ__prefect | src/prefect/server/schemas/actions.py | {"start": 2188, "end": 2435} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to update a flow."""
tags: List[str] = Field(
default_factory=list,
description="A list of flow tags",
examples=[["tag-1", "tag-2"]],
)
| FlowUpdate |
python | dagster-io__dagster | python_modules/libraries/dagster-powerbi/dagster_powerbi/components/power_bi_workspace/component.py | {"start": 1757, "end": 1806} | class ____(Model):
token: str
| PowerBITokenModel |
python | coleifer__peewee | tests/models.py | {"start": 133283, "end": 140948} | class ____(OnConflictTests):
@requires_upsert
def test_update(self):
# Conflict on empno - we'll preserve name and update the ID. This will
# overwrite the previous row and set a new ID.
res = (Emp
.insert(first='foo', last='bar', empno='125')
.on_conflict(
conflict_target=(Emp.empno,),
preserve=(Emp.first, Emp.last),
update={Emp.empno: '125.1'})
.execute())
self.assertData([
('huey', 'cat', '123'),
('zaizee', 'cat', '124'),
('foo', 'bar', '125.1')])
# Conflicts on first/last name. The first name is preserved while the
# last-name is updated. The new empno is thrown out.
res = (Emp
.insert(first='foo', last='bar', empno='126')
.on_conflict(
conflict_target=(Emp.first, Emp.last),
preserve=(Emp.first,),
update={Emp.last: 'baze'})
.execute())
self.assertData([
('huey', 'cat', '123'),
('zaizee', 'cat', '124'),
('foo', 'baze', '125.1')])
@requires_upsert
@requires_models(OCTest)
def test_update_ignore_with_conflict_target(self):
query = OCTest.insert(a='foo', b=1).on_conflict(
action='IGNORE',
conflict_target=(OCTest.a,))
rowid1 = query.execute()
self.assertTrue(rowid1 is not None)
query.clone().execute() # Nothing happens, insert is ignored.
self.assertEqual(OCTest.select().count(), 1)
OCTest.insert(a='foo', b=2).on_conflict_ignore().execute()
self.assertEqual(OCTest.select().count(), 1)
OCTest.insert(a='bar', b=1).on_conflict_ignore().execute()
self.assertEqual(OCTest.select().count(), 2)
@requires_upsert
@requires_models(OCTest)
def test_update_atomic(self):
# Add a new row with the given "a" value. If a conflict occurs,
# re-insert with b=b+2.
query = OCTest.insert(a='foo', b=1).on_conflict(
conflict_target=(OCTest.a,),
update={OCTest.b: OCTest.b + 2})
# First execution returns rowid=1. Second execution hits the conflict-
# resolution, and will update the value in "b" from 1 -> 3.
rowid1 = query.execute()
rowid2 = query.clone().execute()
self.assertEqual(rowid1, rowid2)
obj = OCTest.get()
self.assertEqual(obj.a, 'foo')
self.assertEqual(obj.b, 3)
query = OCTest.insert(a='foo', b=4, c=5).on_conflict(
conflict_target=[OCTest.a],
preserve=[OCTest.c],
update={OCTest.b: OCTest.b + 100})
self.assertEqual(query.execute(), rowid2)
obj = OCTest.get()
self.assertEqual(obj.a, 'foo')
self.assertEqual(obj.b, 103)
self.assertEqual(obj.c, 5)
@requires_upsert
@requires_models(OCTest)
def test_update_where_clause(self):
# Add a new row with the given "a" value. If a conflict occurs,
# re-insert with b=b+2 so long as the original b < 3.
query = OCTest.insert(a='foo', b=1).on_conflict(
conflict_target=(OCTest.a,),
update={OCTest.b: OCTest.b + 2},
where=(OCTest.b < 3))
# First execution returns rowid=1. Second execution hits the conflict-
# resolution, and will update the value in "b" from 1 -> 3.
rowid1 = query.execute()
rowid2 = query.clone().execute()
self.assertEqual(rowid1, rowid2)
obj = OCTest.get()
self.assertEqual(obj.a, 'foo')
self.assertEqual(obj.b, 3)
# Third execution also returns rowid=1. The WHERE clause prevents us
# from updating "b" again. If this is SQLite, we get the rowid back, if
# this is Postgresql we get None (since nothing happened).
rowid3 = query.clone().execute()
if IS_SQLITE:
self.assertEqual(rowid1, rowid3)
else:
self.assertTrue(rowid3 is None)
# Because we didn't satisfy the WHERE clause, the value in "b" is
# not incremented again.
obj = OCTest.get()
self.assertEqual(obj.a, 'foo')
self.assertEqual(obj.b, 3)
@requires_upsert
@requires_models(Emp) # Has unique on first/last, unique on empno.
def test_conflict_update_excluded(self):
e1 = Emp.create(first='huey', last='c', empno='10')
e2 = Emp.create(first='zaizee', last='c', empno='20')
res = (Emp.insert(first='huey', last='c', empno='30')
.on_conflict(conflict_target=(Emp.first, Emp.last),
update={Emp.empno: Emp.empno + EXCLUDED.empno},
where=(EXCLUDED.empno != Emp.empno))
.execute())
data = sorted(Emp.select(Emp.first, Emp.last, Emp.empno).tuples())
self.assertEqual(data, [('huey', 'c', '1030'), ('zaizee', 'c', '20')])
@requires_upsert
@requires_models(KV)
def test_conflict_update_excluded2(self):
KV.create(key='k1', value=1)
query = (KV.insert(key='k1', value=10)
.on_conflict(conflict_target=[KV.key],
update={KV.value: KV.value + EXCLUDED.value},
where=(EXCLUDED.value > KV.value)))
query.execute()
self.assertEqual(KV.select(KV.key, KV.value).tuples()[:], [('k1', 11)])
# Running it again will have no effect this time, since the new value
# (10) is not greater than the pre-existing row value (11).
query.execute()
self.assertEqual(KV.select(KV.key, KV.value).tuples()[:], [('k1', 11)])
@requires_upsert
@skip_if(IS_CRDB, 'crdb does not support the WHERE clause')
@requires_models(UKVP)
def test_conflict_target_constraint_where(self):
u1 = UKVP.create(key='k1', value=1, extra=1)
u2 = UKVP.create(key='k2', value=2, extra=2)
fields = [UKVP.key, UKVP.value, UKVP.extra]
data = [('k1', 1, 2), ('k2', 2, 3)]
# XXX: SQLite does not seem to accept parameterized values for the
# conflict target WHERE clause (e.g., the partial index). So we have to
# express this literally as ("extra" > 1) rather than using an
# expression which will be parameterized. Hopefully SQLite's authors
# decide this is a bug and fix it.
if IS_SQLITE:
conflict_where = UKVP.extra > SQL('1')
else:
conflict_where = UKVP.extra > 1
res = (UKVP.insert_many(data, fields)
.on_conflict(conflict_target=(UKVP.key, UKVP.value),
conflict_where=conflict_where,
preserve=(UKVP.extra,))
.execute())
# How many rows exist? The first one would not have triggered the
# conflict resolution, since the existing k1/1 row's "extra" value was
# not greater than 1, thus it did not satisfy the index condition.
# The second row (k2/2/3) would have triggered the resolution.
self.assertEqual(UKVP.select().count(), 3)
query = (UKVP
.select(UKVP.key, UKVP.value, UKVP.extra)
.order_by(UKVP.key, UKVP.value, UKVP.extra)
.tuples())
self.assertEqual(list(query), [
('k1', 1, 1),
('k1', 1, 2),
('k2', 2, 3)])
# Verify the primary-key of k2 did not change.
u2_db = UKVP.get(UKVP.key == 'k2')
self.assertEqual(u2_db.id, u2.id)
@requires_mysql
| PGOnConflictTests |
python | bokeh__bokeh | src/bokeh/protocol/receiver.py | {"start": 1743, "end": 6927} | class ____:
''' Receive wire message fragments and assemble complete Bokeh server
message objects.
On ``MessageError`` or ``ValidationError``, the receiver will reset its
state and attempt to consume a new message.
The *fragment* received can be either bytes or unicode, depending on
the transport's semantics (WebSocket allows both).
.. code-block:: python
[
# these are required
b'{header}', # serialized header dict
b'{metadata}', # serialized metadata dict
b'{content}, # serialized content dict
# these are optional, and come in pairs; header contains num_buffers
b'{buf_header}', # serialized buffer header dict
b'array' # raw buffer payload data
...
]
The ``header`` fragment will have the form:
.. code-block:: python
header = {
# these are required
'msgid' : <str> # a unique id for the message
'msgtype' : <str> # a message type, e.g. 'ACK', 'PATCH-DOC', etc
# these are optional
'num_buffers' : <int> # the number of additional buffers, if any
}
The ``metadata`` fragment may contain any arbitrary information. It is not
processed by Bokeh for any purpose, but may be useful for external
monitoring or instrumentation tools.
The ``content`` fragment is defined by the specific message type.
'''
_current_consumer: Callable[[Fragment], None]
_fragments: list[Fragment]
_message: Message[Any] | None
_buf_header: BufferHeader | None
_partial: Message[Any] | None
def __init__(self, protocol: Protocol) -> None:
''' Configure a Receiver with a specific Bokeh protocol.
Args:
protocol (Protocol) :
A Bokeh protocol object to use to assemble collected message
fragments.
'''
self._protocol = protocol
self._current_consumer = self._HEADER
self._message = None
self._partial = None
self._buf_header = None
async def consume(self, fragment: Fragment) -> Message[Any]|None:
''' Consume individual protocol message fragments.
Args:
fragment (``JSON``) :
A message fragment to assemble. When a complete message is
assembled, the receiver state will reset to begin consuming a
new message.
'''
self._current_consumer(fragment)
return self._message
def _HEADER(self, fragment: Fragment) -> None:
self._message = None
self._partial = None
self._fragments = [self._assume_text(fragment)]
self._current_consumer = self._METADATA
def _METADATA(self, fragment: Fragment) -> None:
metadata = self._assume_text(fragment)
self._fragments.append(metadata)
self._current_consumer = self._CONTENT
def _CONTENT(self, fragment: Fragment) -> None:
content = self._assume_text(fragment)
self._fragments.append(content)
header_json, metadata_json, content_json = (self._assume_text(x) for x in self._fragments[:3])
self._partial = self._protocol.assemble(header_json, metadata_json, content_json)
self._check_complete()
def _BUFFER_HEADER(self, fragment: Fragment) -> None:
header = json.loads(self._assume_text(fragment))
if set(header) != { "id" }:
raise ValidationError(f"Malformed buffer header {header!r}")
self._buf_header = header
self._current_consumer = self._BUFFER_PAYLOAD
def _BUFFER_PAYLOAD(self, fragment: Fragment) -> None:
payload = self._assume_binary(fragment)
if self._buf_header is None:
raise ValidationError("Consuming a buffer payload, but current buffer header is None")
header = BufferHeader(id=self._buf_header["id"])
cast(Message[Any], self._partial).assemble_buffer(header, payload)
self._check_complete()
def _check_complete(self) -> None:
if self._partial and self._partial.complete:
self._message = self._partial
self._current_consumer = self._HEADER
else:
self._current_consumer = self._BUFFER_HEADER
def _assume_text(self, fragment: Fragment) -> str:
if not isinstance(fragment, str):
raise ValidationError(f"expected text fragment but received binary fragment for {self._current_consumer.__name__}")
return fragment
def _assume_binary(self, fragment: Fragment) -> bytes:
if not isinstance(fragment, bytes):
raise ValidationError(f"expected binary fragment but received text fragment for {self._current_consumer.__name__}")
return fragment
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Receiver |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_task_instances.py | {"start": 59973, "end": 63366} | class ____:
def setup_method(self):
clear_db_runs()
def teardown_method(self):
clear_db_runs()
def test_ti_previous_dag_run(self, client, session, create_task_instance, dag_maker):
"""Test that the previous dag run is returned correctly for a task instance."""
ti = create_task_instance(
task_id="test_ti_previous_dag_run",
dag_id="test_dag",
logical_date=timezone.datetime(2025, 1, 19),
state=State.RUNNING,
start_date=timezone.datetime(2024, 1, 17),
session=session,
)
session.commit()
# Create 2 DagRuns for the same DAG to verify that the correct DagRun (last) is returned
dr1 = dag_maker.create_dagrun(
run_id="test_run_id_1",
logical_date=timezone.datetime(2025, 1, 17),
run_type="scheduled",
state=State.SUCCESS,
session=session,
)
dr1.end_date = timezone.datetime(2025, 1, 17, 1, 0, 0)
dr2 = dag_maker.create_dagrun(
run_id="test_run_id_2",
logical_date=timezone.datetime(2025, 1, 18),
run_type="scheduled",
state=State.SUCCESS,
session=session,
)
dr2.end_date = timezone.datetime(2025, 1, 18, 1, 0, 0)
session.commit()
response = client.get(f"/execution/task-instances/{ti.id}/previous-successful-dagrun")
assert response.status_code == 200
assert response.json() == {
"data_interval_start": "2025-01-18T00:00:00Z",
"data_interval_end": "2025-01-19T00:00:00Z",
"start_date": "2024-01-17T00:00:00Z",
"end_date": "2025-01-18T01:00:00Z",
}
def test_ti_previous_dag_run_not_found(self, client, session):
ti_id = "0182e924-0f1e-77e6-ab50-e977118bc139"
assert session.get(TaskInstance, ti_id) is None
response = client.get(f"/execution/task-instances/{ti_id}/previous-successful-dagrun")
assert response.status_code == 200
assert response.json() == {
"data_interval_start": None,
"data_interval_end": None,
"start_date": None,
"end_date": None,
}
def test_ti_with_none_as_logical_date(self, client, session, create_task_instance, dag_maker):
ti = create_task_instance(
task_id="test_ti_with_none_as_logical_date",
dag_id="test_dag",
logical_date=None,
state=State.RUNNING,
start_date=timezone.datetime(2024, 1, 17),
session=session,
)
session.commit()
assert ti.logical_date is None
dr1 = dag_maker.create_dagrun(
run_id="test_ti_with_none_as_logical_date",
logical_date=timezone.datetime(2025, 1, 17),
run_type="scheduled",
state=State.SUCCESS,
session=session,
)
dr1.end_date = timezone.datetime(2025, 1, 17, 1, 0, 0)
session.commit()
response = client.get(f"/execution/task-instances/{ti.id}/previous-successful-dagrun")
assert response.status_code == 200
assert response.json() == {
"data_interval_start": None,
"data_interval_end": None,
"start_date": None,
"end_date": None,
}
| TestPreviousDagRun |
python | langchain-ai__langchain | libs/core/langchain_core/outputs/chat_generation.py | {"start": 2296, "end": 4771} | class ____(ChatGeneration):
"""`ChatGeneration` chunk.
`ChatGeneration` chunks can be concatenated with other `ChatGeneration` chunks.
"""
message: BaseMessageChunk
"""The message chunk output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def __add__(
self, other: ChatGenerationChunk | list[ChatGenerationChunk]
) -> ChatGenerationChunk:
"""Concatenate two `ChatGenerationChunk`s.
Args:
other: The other `ChatGenerationChunk` or list of `ChatGenerationChunk`
to concatenate.
Raises:
TypeError: If other is not a `ChatGenerationChunk` or list of
`ChatGenerationChunk`.
Returns:
A new `ChatGenerationChunk` concatenated from self and other.
"""
if isinstance(other, ChatGenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return ChatGenerationChunk(
message=self.message + other.message,
generation_info=generation_info or None,
)
if isinstance(other, list) and all(
isinstance(x, ChatGenerationChunk) for x in other
):
generation_info = merge_dicts(
self.generation_info or {},
*[chunk.generation_info for chunk in other if chunk.generation_info],
)
return ChatGenerationChunk(
message=self.message + [chunk.message for chunk in other],
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
def merge_chat_generation_chunks(
chunks: list[ChatGenerationChunk],
) -> ChatGenerationChunk | None:
"""Merge a list of `ChatGenerationChunk`s into a single `ChatGenerationChunk`.
Args:
chunks: A list of `ChatGenerationChunk` to merge.
Returns:
A merged `ChatGenerationChunk`, or None if the input list is empty.
"""
if not chunks:
return None
if len(chunks) == 1:
return chunks[0]
return chunks[0] + chunks[1:]
| ChatGenerationChunk |
python | zarr-developers__zarr-python | examples/custom_dtype/custom_dtype.py | {"start": 857, "end": 9008} | class ____(ZDType[int2_dtype_cls, int2_scalar_cls]):
"""
This class provides a Zarr compatibility layer around the int2 data type (the ``dtype`` of a
NumPy array of type int2) and the int2 scalar type (the ``dtype`` of the scalar value inside an int2 array).
"""
# This field is as the key for the data type in the internal data type registry, and also
# as the identifier for the data type when serializaing the data type to disk for zarr v3
_zarr_v3_name: ClassVar[Literal["int2"]] = "int2"
# this field will be used internally
_zarr_v2_name: ClassVar[Literal["int2"]] = "int2"
# we bind a class variable to the native data type class so we can create instances of it
dtype_cls = int2_dtype_cls
@classmethod
def from_native_dtype(cls, dtype: np.dtype) -> Self:
"""Create an instance of this ZDType from a native dtype."""
if cls._check_native_dtype(dtype):
return cls()
raise DataTypeValidationError(
f"Invalid data type: {dtype}. Expected an instance of {cls.dtype_cls}"
)
def to_native_dtype(self: Self) -> int2_dtype_cls:
"""Create an int2 dtype instance from this ZDType"""
return self.dtype_cls()
@classmethod
def _check_json_v2(cls, data: DTypeJSON) -> TypeGuard[DTypeConfig_V2[Literal["|b1"], None]]:
"""
Type check for Zarr v2-flavored JSON.
This will check that the input is a dict like this:
.. code-block:: json
{
"name": "int2",
"object_codec_id": None
}
Note that this representation differs from the ``dtype`` field looks like in zarr v2 metadata.
Specifically, whatever goes into the ``dtype`` field in metadata is assigned to the ``name`` field here.
See the Zarr docs for more information about the JSON encoding for data types.
"""
return (
check_dtype_spec_v2(data) and data["name"] == "int2" and data["object_codec_id"] is None
)
@classmethod
def _check_json_v3(cls, data: DTypeJSON) -> TypeGuard[Literal["int2"]]:
"""
Type check for Zarr V3-flavored JSON.
Checks that the input is the string "int2".
"""
return data == cls._zarr_v3_name
@classmethod
def _from_json_v2(cls, data: DTypeJSON) -> Self:
"""
Create an instance of this ZDType from Zarr V3-flavored JSON.
"""
if cls._check_json_v2(data):
return cls()
# This first does a type check on the input, and if that passes we create an instance of the ZDType.
msg = f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected the string {cls._zarr_v2_name!r}"
raise DataTypeValidationError(msg)
@classmethod
def _from_json_v3(cls: type[Self], data: DTypeJSON) -> Self:
"""
Create an instance of this ZDType from Zarr V3-flavored JSON.
This first does a type check on the input, and if that passes we create an instance of the ZDType.
"""
if cls._check_json_v3(data):
return cls()
msg = f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected the string {cls._zarr_v3_name!r}"
raise DataTypeValidationError(msg)
@overload # type: ignore[override]
def to_json(self, zarr_format: Literal[2]) -> DTypeConfig_V2[Literal["int2"], None]: ...
@overload
def to_json(self, zarr_format: Literal[3]) -> Literal["int2"]: ...
def to_json(
self, zarr_format: ZarrFormat
) -> DTypeConfig_V2[Literal["int2"], None] | Literal["int2"]:
"""
Serialize this ZDType to v2- or v3-flavored JSON
If the zarr_format is 2, then return a dict like this:
.. code-block:: json
{
"name": "int2",
"object_codec_id": None
}
If the zarr_format is 3, then return the string "int2"
"""
if zarr_format == 2:
return {"name": "int2", "object_codec_id": None}
elif zarr_format == 3:
return self._zarr_v3_name
raise ValueError(f"zarr_format must be 2 or 3, got {zarr_format}") # pragma: no cover
def _check_scalar(self, data: object) -> TypeGuard[int | ml_dtypes.int2]:
"""
Check if a python object is a valid int2-compatible scalar
The strictness of this type check is an implementation degree of freedom.
You could be strict here, and only accept int2 values, or be open and accept any integer
or any object and rely on exceptions from the int2 constructor that will be called in
cast_scalar.
"""
return isinstance(data, (int, int2_scalar_cls))
def cast_scalar(self, data: object) -> ml_dtypes.int2:
"""
Attempt to cast a python object to an int2.
We first perform a type check to ensure that the input type is appropriate, and if that
passes we call the int2 scalar constructor.
"""
if self._check_scalar(data):
return ml_dtypes.int2(data)
msg = (
f"Cannot convert object {data!r} with type {type(data)} to a scalar compatible with the "
f"data type {self}."
)
raise TypeError(msg)
def default_scalar(self) -> ml_dtypes.int2:
"""
Get the default scalar value. This will be used when automatically selecting a fill value.
"""
return ml_dtypes.int2(0)
def to_json_scalar(self, data: object, *, zarr_format: ZarrFormat) -> int:
"""
Convert a python object to a JSON representation of an int2 scalar.
This is necessary for taking user input for the ``fill_value`` attribute in array metadata.
In this implementation, we optimistically convert the input to an int,
and then check that it lies in the acceptable range for this data type.
"""
# We could add a type check here, but we don't need to for this example
val: int = int(data) # type: ignore[call-overload]
if val not in (-2, -1, 0, 1):
raise ValueError("Invalid value. Expected -2, -1, 0, or 1.")
return val
def from_json_scalar(self, data: JSON, *, zarr_format: ZarrFormat) -> ml_dtypes.int2:
"""
Read a JSON-serializable value as an int2 scalar.
We first perform a type check to ensure that the JSON value is well-formed, then call the
int2 scalar constructor.
The base definition of this method requires that it take a zarr_format parameter because
other data types serialize scalars differently in zarr v2 and v3, but we don't use this here.
"""
if self._check_scalar(data):
return ml_dtypes.int2(data)
raise TypeError(f"Invalid type: {data}. Expected an int.")
# after defining dtype class, it must be registered with the data type registry so zarr can use it
data_type_registry.register(Int2._zarr_v3_name, Int2)
# this parametrized function will create arrays in zarr v2 and v3 using our new data type
@pytest.mark.parametrize("zarr_format", [2, 3])
def test_custom_dtype(tmp_path: Path, zarr_format: Literal[2, 3]) -> None:
# create array and write values
z_w = zarr.create_array(
store=tmp_path, shape=(4,), dtype="int2", zarr_format=zarr_format, compressors=None
)
z_w[:] = [-1, -2, 0, 1]
# open the array
z_r = zarr.open_array(tmp_path, mode="r")
print(z_r.info_complete())
# look at the array metadata
if zarr_format == 2:
meta_file = tmp_path / ".zarray"
else:
meta_file = tmp_path / "zarr.json"
print(json.dumps(json.loads(meta_file.read_text()), indent=2))
if __name__ == "__main__":
# Run the example with printed output, and a dummy pytest configuration file specified.
# Without the dummy configuration file, at test time pytest will attempt to use the
# configuration file in the project root, which will error because Zarr is using some
# plugins that are not installed in this example.
sys.exit(pytest.main(["-s", __file__, f"-c {__file__}"]))
| Int2 |
python | pytorch__pytorch | torch/ao/nn/intrinsic/quantized/modules/bn_relu.py | {"start": 1731, "end": 3285} | class ____(nnq.BatchNorm3d):
r"""
A BNReLU3d module is a fused module of BatchNorm3d and ReLU
We adopt the same interface as :class:`torch.ao.nn.quantized.BatchNorm3d`.
Attributes:
Same as torch.ao.nn.quantized.BatchNorm3d
"""
_FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU3d
def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
super().__init__(
num_features, eps=eps, momentum=momentum, device=device, dtype=dtype
)
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 5:
raise ValueError("Input shape must be `(N, C, D, H, W)`!")
return torch.ops.quantized.batch_norm3d_relu(
input,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.eps,
self.scale,
self.zero_point,
)
def _get_name(self):
return "QuantizedBNReLU3d"
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False): # type: ignore[override]
# TODO: Add qat support for BNReLU3d
return super().from_float(
mod, use_precomputed_fake_quant=use_precomputed_fake_quant
)
@classmethod
def from_reference(cls, bn_relu, output_scale, output_zero_point):
return super().from_reference(bn_relu[0], output_scale, output_zero_point)
| BNReLU3d |
python | huggingface__transformers | src/transformers/models/moonshine/configuration_moonshine.py | {"start": 1283, "end": 10578} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MoonshineModel`]. It is used to instantiate a Moonshine
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Moonshine
[UsefulSensors/moonshine-tiny](https://huggingface.co/UsefulSensors/moonshine-tiny).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32768):
Vocabulary size of the Moonshine model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MoonshineModel`].
hidden_size (`int`, *optional*, defaults to 288):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
encoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
decoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer decoder.
encoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`encoder_num_key_value_heads=encoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
`encoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
decoder_num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`decoder_num_key_value_heads=decoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
`decoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`decoder_num_attention_heads`.
pad_head_dim_to_multiple_of (`int`, *optional*):
Pad head dimension in encoder and decoder to the next multiple of this value. Necessary for using certain
optimized attention implementations.
encoder_hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder.
decoder_hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
decoder_start_token_id (`int`, *optional*, defaults to 1):
Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
are provided to the `generate` function. It is used to guide the model`s generation process depending on
the task.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
bos_token_id (`int`, *optional*, defaults to 1):
Denotes beginning of sequences token id.
eos_token_id (`int`, *optional*, defaults to 2):
Denotes end of sequences token id.
Example:
```python
>>> from transformers import MoonshineModel, MoonshineConfig
>>> # Initializing a Moonshine style configuration
>>> configuration = MoonshineConfig().from_pretrained("UsefulSensors/moonshine-tiny")
>>> # Initializing a model from the configuration
>>> model = MoonshineModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "moonshine"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"num_key_value_heads": "encoder_num_key_value_heads",
"num_attention_heads": "encoder_num_attention_heads",
"num_hidden_layers": "encoder_num_hidden_layers",
}
def __init__(
self,
vocab_size: Optional[int] = 32768,
hidden_size: Optional[int] = 288,
intermediate_size: Optional[int] = 1152,
encoder_num_hidden_layers: Optional[int] = 6,
decoder_num_hidden_layers: Optional[int] = 6,
encoder_num_attention_heads: Optional[int] = 8,
decoder_num_attention_heads: Optional[int] = 8,
encoder_num_key_value_heads: Optional[int] = None,
decoder_num_key_value_heads: Optional[int] = None,
pad_head_dim_to_multiple_of: Optional[int] = None,
encoder_hidden_act: Optional[str] = "gelu",
decoder_hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 512,
initializer_range: Optional[float] = 0.02,
decoder_start_token_id: Optional[int] = 1,
use_cache: Optional[bool] = True,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
is_encoder_decoder: Optional[bool] = True,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.encoder_num_hidden_layers = encoder_num_hidden_layers
self.decoder_num_hidden_layers = decoder_num_hidden_layers
self.encoder_num_attention_heads = encoder_num_attention_heads
self.decoder_num_attention_heads = decoder_num_attention_heads
if encoder_num_key_value_heads is None:
encoder_num_key_value_heads = encoder_num_attention_heads
self.encoder_num_key_value_heads = encoder_num_key_value_heads
if decoder_num_key_value_heads is None:
decoder_num_key_value_heads = decoder_num_attention_heads
self.decoder_num_key_value_heads = decoder_num_key_value_heads
self.pad_head_dim_to_multiple_of = pad_head_dim_to_multiple_of
self.encoder_hidden_act = encoder_hidden_act
self.decoder_hidden_act = decoder_hidden_act
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.is_encoder_decoder = is_encoder_decoder
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.rope_parameters = rope_parameters
kwargs.setdefault("partial_rotary_factor", 0.9) # assign default for BC
super().__init__(
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
__all__ = ["MoonshineConfig"]
| MoonshineConfig |
python | tornadoweb__tornado | tornado/locks.py | {"start": 4905, "end": 7753} | class ____:
"""An event blocks coroutines until its internal flag is set to True.
Similar to `threading.Event`.
A coroutine can wait for an event to be set. Once it is set, calls to
``yield event.wait()`` will not block unless the event has been cleared:
.. testcode::
import asyncio
from tornado import gen
from tornado.locks import Event
event = Event()
async def waiter():
print("Waiting for event")
await event.wait()
print("Not waiting this time")
await event.wait()
print("Done")
async def setter():
print("About to set the event")
event.set()
async def runner():
await gen.multi([waiter(), setter()])
asyncio.run(runner())
.. testoutput::
Waiting for event
About to set the event
Not waiting this time
Done
"""
def __init__(self) -> None:
self._value = False
self._waiters = set() # type: Set[Future[None]]
def __repr__(self) -> str:
return "<{} {}>".format(
self.__class__.__name__,
"set" if self.is_set() else "clear",
)
def is_set(self) -> bool:
"""Return ``True`` if the internal flag is true."""
return self._value
def set(self) -> None:
"""Set the internal flag to ``True``. All waiters are awakened.
Calling `.wait` once the flag is set will not block.
"""
if not self._value:
self._value = True
for fut in self._waiters:
if not fut.done():
fut.set_result(None)
def clear(self) -> None:
"""Reset the internal flag to ``False``.
Calls to `.wait` will block until `.set` is called.
"""
self._value = False
def wait(
self, timeout: Optional[Union[float, datetime.timedelta]] = None
) -> Awaitable[None]:
"""Block until the internal flag is true.
Returns an awaitable, which raises `tornado.util.TimeoutError` after a
timeout.
"""
fut = Future() # type: Future[None]
if self._value:
fut.set_result(None)
return fut
self._waiters.add(fut)
fut.add_done_callback(lambda fut: self._waiters.remove(fut))
if timeout is None:
return fut
else:
timeout_fut = gen.with_timeout(timeout, fut)
# This is a slightly clumsy workaround for the fact that
# gen.with_timeout doesn't cancel its futures. Cancelling
# fut will remove it from the waiters list.
timeout_fut.add_done_callback(
lambda tf: fut.cancel() if not fut.done() else None
)
return timeout_fut
| Event |
python | google__pytype | pytype/pytd/visitors.py | {"start": 32752, "end": 33267} | class ____(Visitor):
"""Visitor for replacing type parameters with actual types."""
def __init__(self, mapping):
super().__init__()
self.mapping = mapping
def VisitTypeParameter(self, p):
return self.mapping[p]
def ClassAsType(cls):
"""Converts a pytd.Class to an instance of pytd.Type."""
params = tuple(item.type_param for item in cls.template)
if not params:
return pytd.NamedType(cls.name)
else:
return pytd.GenericType(pytd.NamedType(cls.name), params)
| ReplaceTypeParameters |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {"start": 34667, "end": 35298} | class ____(BaseModel):
"""
Object used for create backfill request.
"""
model_config = ConfigDict(
extra="forbid",
)
dag_id: Annotated[str, Field(title="Dag Id")]
from_date: Annotated[datetime, Field(title="From Date")]
to_date: Annotated[datetime, Field(title="To Date")]
run_backwards: Annotated[bool | None, Field(title="Run Backwards")] = False
dag_run_conf: Annotated[dict[str, Any] | None, Field(title="Dag Run Conf")] = {}
reprocess_behavior: ReprocessBehavior | None = "none"
max_active_runs: Annotated[int | None, Field(title="Max Active Runs")] = 10
| BackfillPostBody |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {"start": 193671, "end": 198819} | class ____(fixtures.TablesTest, _RangeTests):
__requires__ = ("multirange_types",)
__sparse_driver_backend__ = True
@testing.fixture(params=(True, False), ids=["multirange", "plain_list"])
def data_obj(self, request):
if request.param:
return MultiRange(self._data_obj())
else:
return list(self._data_obj())
@classmethod
def define_tables(cls, metadata):
# no reason ranges shouldn't be primary keys,
# so lets just use them as such
table = Table(
"data_table",
metadata,
Column("range", cls._col_type, primary_key=True),
)
cls.col = table.c.range
def test_auto_cast_back_to_type(self, connection, data_obj):
"""test that a straight pass of the range type without any context
will send appropriate casting info so that the driver can round
trip it.
This doesn't happen in general across other backends and not for
types like JSON etc., although perhaps it should, as we now have
pretty straightforward infrastructure to turn it on; asyncpg
for example does cast JSONs now in place. But that's a
bigger issue; for PG ranges it's likely useful to do this for
PG backends as this is a fairly narrow use case.
Brought up in #8540.
"""
# see also CompileTest::test_multirange_custom_object_hook
stmt = select(literal(data_obj, type_=self._col_type))
round_trip = connection.scalar(stmt)
eq_(round_trip, data_obj)
def test_auto_cast_back_to_type_without_type(self, connection):
"""use _resolve_for_literal to cast"""
# see also CompileTest::test_multirange_custom_object_hook
data_obj = MultiRange(self._data_obj())
lit = literal(data_obj)
round_trip = connection.scalar(select(lit))
eq_(round_trip, data_obj)
eq_(type(lit.type), self._col_type)
@testing.fails("no automatic adaptation of plain list")
def test_auto_cast_back_to_type_without_type_plain_list(self, connection):
"""use _resolve_for_literal to cast"""
# see also CompileTest::test_multirange_custom_object_hook
data_obj = list(self._data_obj())
lit = literal(data_obj)
r = connection.scalar(select(lit))
eq_(type(r), list)
def test_actual_type(self):
eq_(str(self._col_type()), self._col_str)
def test_reflect(self, connection):
from sqlalchemy import inspect
insp = inspect(connection)
cols = insp.get_columns("data_table")
assert isinstance(cols[0]["type"], self._col_type)
def _assert_data(self, conn):
data = conn.execute(select(self.tables.data_table.c.range)).fetchall()
eq_(data, [(self._data_obj(),)])
eq_(type(data[0][0]), MultiRange)
def test_textual_round_trip_w_dialect_type(self, connection, data_obj):
"""test #8690"""
data_table = self.tables.data_table
connection.execute(
self.tables.data_table.insert(), {"range": data_obj}
)
q1 = text("SELECT range from data_table")
v = connection.scalar(q1)
q2 = select(data_table).where(data_table.c.range == v)
v2 = connection.scalar(q2)
eq_(data_obj, v2)
def test_insert_obj(self, connection, data_obj):
connection.execute(
self.tables.data_table.insert(), {"range": data_obj}
)
self._assert_data(connection)
@testing.requires.psycopg_or_pg8000_compatibility
def test_insert_text(self, connection):
connection.execute(
self.tables.data_table.insert(), {"range": self._data_str()}
)
self._assert_data(connection)
@testing.requires.psycopg_or_pg8000_compatibility
def test_union_result_text(self, connection):
# insert
connection.execute(
self.tables.data_table.insert(), {"range": self._data_str()}
)
# select
range_ = self.tables.data_table.c.range
data = connection.execute(select(range_ + range_)).fetchall()
eq_(data, [(self._data_obj(),)])
eq_(type(data[0][0]), MultiRange)
@testing.requires.psycopg_or_pg8000_compatibility
def test_intersection_result_text(self, connection):
# insert
connection.execute(
self.tables.data_table.insert(), {"range": self._data_str()}
)
# select
range_ = self.tables.data_table.c.range
data = connection.execute(select(range_ * range_)).fetchall()
eq_(data, [(self._data_obj(),)])
eq_(type(data[0][0]), MultiRange)
@testing.requires.psycopg_or_pg8000_compatibility
def test_difference_result_text(self, connection):
# insert
connection.execute(
self.tables.data_table.insert(), {"range": self._data_str()}
)
# select
range_ = self.tables.data_table.c.range
data = connection.execute(select(range_ - range_)).fetchall()
eq_(data, [([],)])
eq_(type(data[0][0]), MultiRange)
| _MultiRangeTypeRoundTrip |
python | huggingface__transformers | src/transformers/models/yolos/modular_yolos.py | {
"start": 2021,
"end": 5453
} | class ____(DetrImageProcessorFast):
def post_process_object_detection(
self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, list[tuple]] = None, top_k: int = 100
):
"""
Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`YolosObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
top_k (`int`, *optional*, defaults to 100):
Keep only top k bounding boxes before filtering by thresholding.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
if target_sizes is not None:
if len(out_logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
prob = out_logits.sigmoid()
prob = prob.view(out_logits.shape[0], -1)
k_value = min(top_k, prob.size(1))
topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
scores = topk_values
topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
labels = topk_indexes % out_logits.shape[2]
boxes = center_to_corners_format(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
if isinstance(target_sizes, list):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = []
for s, l, b in zip(scores, labels, boxes):
score = s[s > threshold]
label = l[s > threshold]
box = b[s > threshold]
results.append({"scores": score, "labels": label, "boxes": box})
return results
def post_process_instance_segmentation(self):
raise NotImplementedError("Segmentation post-processing is not implemented for Deformable DETR yet.")
def post_process_semantic_segmentation(self):
raise NotImplementedError("Semantic segmentation post-processing is not implemented for Deformable DETR yet.")
def post_process_panoptic_segmentation(self):
raise NotImplementedError("Panoptic segmentation post-processing is not implemented for Deformable DETR yet.")
__all__ = ["YolosImageProcessorFast"]
| YolosImageProcessorFast |
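A standalone sketch of the top-k selection and box-rescaling steps performed in post_process_object_detection above, using random tensors in place of real YOLOS outputs; the shapes, k, image size, and the inlined centre-to-corner conversion are illustrative assumptions.

import torch

batch, num_queries, num_labels = 1, 10, 3
logits = torch.randn(batch, num_queries, num_labels)
pred_boxes = torch.rand(batch, num_queries, 4)  # (cx, cy, w, h), relative [0, 1]

prob = logits.sigmoid().view(batch, -1)
k = min(5, prob.size(1))
topk_values, topk_indexes = torch.topk(prob, k, dim=1)
topk_boxes = torch.div(topk_indexes, num_labels, rounding_mode="floor")
labels = topk_indexes % num_labels

# (cx, cy, w, h) -> (x0, y0, x1, y1), then scale to a 480x640 (height, width) image
cx, cy, w, h = pred_boxes.unbind(-1)
corners = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)
corners = torch.gather(corners, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
scale = torch.tensor([640.0, 480.0, 640.0, 480.0])
print(topk_values, labels, corners * scale)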
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-box/llama_index/tools/box/ai_extract/base.py | {
"start": 411,
"end": 2656
} | class ____(BaseToolSpec):
"""
Extracts AI generated content from a Box file.
Args:
box_client (BoxClient): A BoxClient instance for interacting with Box API.
Attributes:
spec_functions (list): A list of supported functions.
_box_client (BoxClient): An instance of BoxClient for interacting with Box API.
Methods:
ai_extract(file_id, ai_prompt): Extracts AI generated content from a Box file.
Args:
file_id (str): The ID of the Box file.
ai_prompt (str): The AI prompt to use for extraction.
Returns:
Document: A Document object containing the extracted AI content.
"""
spec_functions = ["ai_extract"]
_box_client: BoxClient
def __init__(self, box_client: BoxClient) -> None:
"""
Initializes the BoxAIExtractToolSpec with a BoxClient instance.
Args:
box_client (BoxClient): The BoxClient instance to use for interacting with the Box API.
"""
self._box_client = add_extra_header_to_box_client(box_client)
def ai_extract(
self,
file_id: str,
ai_prompt: str,
) -> Document:
"""
Extracts AI generated content from a Box file using the provided AI prompt.
Args:
file_id (str): The ID of the Box file to process.
ai_prompt (str): The AI prompt to use for content extraction.
Returns:
Document: A Document object containing the extracted AI content,
including metadata about the original Box file.
"""
# Connect to Box
box_check_connection(self._box_client)
# get payload information
box_file = get_box_files_details(
box_client=self._box_client, file_ids=[file_id]
)[0]
box_file = get_files_ai_extract_data(
box_client=self._box_client,
box_files=[box_file],
ai_prompt=ai_prompt,
)[0]
doc = box_file_to_llama_document(box_file)
doc.text = box_file.ai_response if box_file.ai_response else ""
doc.metadata["ai_prompt"] = box_file.ai_prompt
doc.metadata["ai_response"] = box_file.ai_response
return doc
| BoxAIExtractToolSpec |
python | mlflow__mlflow | mlflow/genai/judges/tools/get_traces_in_session.py | {
"start": 796,
"end": 4459
} | class ____(JudgeTool):
"""
Tool for retrieving traces from the same session for multi-turn evaluation.
This tool extracts the session ID from the current trace and searches for other traces
within the same session to provide conversational context to judges.
"""
@property
def name(self) -> str:
return ToolNames._GET_TRACES_IN_SESSION
def get_definition(self) -> ToolDefinition:
return ToolDefinition(
function=FunctionToolDefinition(
name=ToolNames._GET_TRACES_IN_SESSION,
description=(
"Retrieve traces from the same session for multi-turn evaluation. "
"Extracts the session ID from the current trace and searches for other "
"traces in the same session to provide conversational context. "
"Returns a list of JudgeToolTraceInfo objects containing trace metadata, "
"request, and response."
),
parameters=ToolParamsSchema(
type="object",
properties={
"max_results": {
"type": "integer",
"description": "Maximum number of traces to return (default: 20)",
"default": 20,
},
"order_by": {
"type": "array",
"items": {"type": "string"},
"description": (
"List of order by clauses for sorting results "
"(default: ['timestamp ASC'] for chronological order)"
),
},
},
required=[],
),
),
type="function",
)
def invoke(
self,
trace: Trace,
max_results: int = 20,
order_by: list[str] | None = None,
) -> list[JudgeToolTraceInfo]:
"""
Retrieve traces from the same session.
Args:
trace: The current MLflow trace object
max_results: Maximum number of traces to return
order_by: List of order by clauses for sorting results
Returns:
List of JudgeToolTraceInfo objects containing trace metadata, request, and response
Raises:
MlflowException: If session ID is not found or has invalid format
"""
session_id = trace.info.trace_metadata.get(TraceMetadataKey.TRACE_SESSION)
if not session_id:
raise MlflowException(
"No session ID found in trace metadata. Traces in session require a session ID "
"to identify related traces within the same conversation session.",
error_code=INVALID_PARAMETER_VALUE,
)
if not session_id.replace("-", "").replace("_", "").isalnum():
raise MlflowException(
(
f"Invalid session ID format: {session_id}. Session IDs should contain only "
"alphanumeric characters, hyphens, and underscores."
),
error_code=INVALID_PARAMETER_VALUE,
)
filter_string = (
f"metadata.`{TraceMetadataKey.TRACE_SESSION}` = '{session_id}' "
f"AND trace.timestamp < {trace.info.request_time}"
)
return SearchTracesTool().invoke(
trace=trace, filter_string=filter_string, order_by=order_by, max_results=max_results
)
| GetTracesInSession |
python | ray-project__ray | python/ray/data/preprocessors/transformer.py | {
"start": 197,
"end": 3843
} | class ____(Preprocessor):
"""Apply a `power transform <https://en.wikipedia.org/wiki/Power_transform>`_ to
make your data more normally distributed.
Some models expect data to be normally distributed. By making your data more
Gaussian-like, you might be able to improve your model's performance.
This preprocessor supports the following transformations:
* `Yeo-Johnson <https://en.wikipedia.org/wiki/Power_transform#Yeo%E2%80%93Johnson_transformation>`_
* `Box-Cox <https://en.wikipedia.org/wiki/Power_transform#Box%E2%80%93Cox_transformation>`_
Box-Cox requires all data to be positive.
.. warning::
You need to manually specify the transform's power parameter. If you
choose a bad value, the transformation might not work well.
Args:
columns: The columns to separately transform.
        power: A parameter that determines how your data is transformed. Practitioners
typically set ``power`` between :math:`-2.5` and :math:`2.5`, although you
may need to try different values to find one that works well.
method: A string representing which transformation to apply. Supports
``"yeo-johnson"`` and ``"box-cox"``. If you choose ``"box-cox"``, your data
needs to be positive. Defaults to ``"yeo-johnson"``.
output_columns: The names of the transformed columns. If None, the transformed
columns will be the same as the input columns. If not None, the length of
``output_columns`` must match the length of ``columns``, othwerwise an error
will be raised.
""" # noqa: E501
_valid_methods = ["yeo-johnson", "box-cox"]
_is_fittable = False
def __init__(
self,
columns: List[str],
power: float,
method: str = "yeo-johnson",
*,
output_columns: Optional[List[str]] = None,
):
super().__init__()
self.columns = columns
self.method = method
self.power = power
self.output_columns = Preprocessor._derive_and_validate_output_columns(
columns, output_columns
)
if method not in self._valid_methods:
raise ValueError(
f"Method {method} is not supported."
f"Supported values are: {self._valid_methods}"
)
def _transform_pandas(self, df: pd.DataFrame):
def column_power_transformer(s: pd.Series):
if self.method == "yeo-johnson":
result = np.zeros_like(s, dtype=np.float64)
pos = s >= 0 # binary mask
if self.power != 0:
result[pos] = (np.power(s[pos] + 1, self.power) - 1) / self.power
else:
result[pos] = np.log(s[pos] + 1)
if self.power != 2:
result[~pos] = -(np.power(-s[~pos] + 1, 2 - self.power) - 1) / (
2 - self.power
)
else:
result[~pos] = -np.log(-s[~pos] + 1)
return result
else: # box-cox
if self.power != 0:
return (np.power(s, self.power) - 1) / self.power
else:
return np.log(s)
df[self.output_columns] = df[self.columns].transform(column_power_transformer)
return df
def __repr__(self):
return (
f"{self.__class__.__name__}(columns={self.columns!r}, "
f"power={self.power!r}, method={self.method!r}, "
f"output_columns={self.output_columns!r})"
)
| PowerTransformer |
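A standalone numeric sketch of the Yeo-Johnson branch implemented in _transform_pandas above; the power value and the sample data are illustrative, not Ray defaults.

import numpy as np
import pandas as pd

power = 0.5
s = pd.Series([-2.0, -0.5, 0.0, 1.0, 4.0])

result = np.zeros_like(s, dtype=np.float64)
pos = s >= 0
# x >= 0: ((x + 1) ** power - 1) / power            (log(x + 1) when power == 0)
result[pos] = (np.power(s[pos] + 1, power) - 1) / power
# x < 0: -(((-x + 1) ** (2 - power) - 1) / (2 - power))   (-log(-x + 1) when power == 2)
result[~pos] = -(np.power(-s[~pos] + 1, 2 - power) - 1) / (2 - power)
print(result)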
python | walkccc__LeetCode | solutions/554. Brick Wall/554.py | {
"start": 0,
"end": 333
} | class ____:
def leastBricks(self, wall: list[list[int]]) -> int:
maxFreq = 0
count = collections.defaultdict(int)
for row in wall:
prefix = 0
for i in range(len(row) - 1):
prefix += row[i]
count[prefix] += 1
maxFreq = max(maxFreq, count[prefix])
return len(wall) - maxFreq
| Solution |
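A standalone walk-through of the prefix-sum counting above, using the classic LeetCode 554 example wall: the most common inner-edge position (4) is shared by four of the six rows, so a vertical line there crosses only 6 - 4 = 2 bricks.

import collections

wall = [[1, 2, 2, 1], [3, 1, 2], [1, 3, 2], [2, 4], [3, 1, 2], [1, 3, 1, 1]]
count = collections.defaultdict(int)
for row in wall:
    prefix = 0
    for width in row[:-1]:  # skip the last brick: the wall's right edge never counts
        prefix += width
        count[prefix] += 1
max_freq = max(count.values(), default=0)
print(len(wall) - max_freq)  # 2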
python | walkccc__LeetCode | solutions/26. Remove Duplicates from Sorted Array/26.py | {
"start": 0,
"end": 190
} | class ____:
def removeDuplicates(self, nums: list[int]) -> int:
i = 0
for num in nums:
if i < 1 or num > nums[i - 1]:
nums[i] = num
i += 1
return i
| Solution |
python | pydata__xarray | xarray/backends/locks.py | {
"start": 6851,
"end": 7852
} | class ____(Lock):
"""DummyLock provides the lock API without any actual locking."""
def acquire(self, blocking=True):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def locked(self):
return False
def combine_locks(locks: Sequence[Lock]) -> Lock:
"""Combine a sequence of locks into a single lock."""
all_locks: list[Lock] = []
for lock in locks:
if isinstance(lock, CombinedLock):
all_locks.extend(lock.locks)
elif lock is not None:
all_locks.append(lock)
num_locks = len(all_locks)
if num_locks > 1:
return CombinedLock(all_locks)
elif num_locks == 1:
return all_locks[0]
else:
return DummyLock()
def ensure_lock(lock: Lock | None | Literal[False]) -> Lock:
"""Ensure that the given object is a lock."""
if lock is None or lock is False:
return DummyLock()
return lock
| DummyLock |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dlp.py | {
"start": 15663,
"end": 16456
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_get_job_trigger(self, mock_hook):
mock_hook.return_value.get_job_trigger.return_value = JobTrigger()
operator = CloudDLPGetDLPJobTriggerOperator(
job_trigger_id=TRIGGER_ID, project_id=PROJECT_ID, task_id="id"
)
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.get_job_trigger.assert_called_once_with(
job_trigger_id=TRIGGER_ID,
project_id=PROJECT_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudDLPGetJobTripperOperator |
python | getsentry__sentry | tests/sentry/api/endpoints/test_api_tokens.py | {
"start": 359,
"end": 1506
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
for _ in range(2):
ApiToken.objects.create(user=self.user)
def test_simple(self) -> None:
self.login_as(self.user)
url = reverse("sentry-api-0-api-tokens")
response = self.client.get(url)
assert response.status_code == 200, response.content
assert len(response.data) == 2
def test_never_cache(self) -> None:
self.login_as(self.user)
url = reverse("sentry-api-0-api-tokens")
response = self.client.get(url)
assert response.status_code == 200, response.content
assert (
response.get("cache-control")
== "max-age=0, no-cache, no-store, must-revalidate, private"
)
def test_deny_token_access(self) -> None:
token = ApiToken.objects.create(user=self.user, scope_list=[])
url = reverse("sentry-api-0-api-tokens")
response = self.client.get(url, format="json", HTTP_AUTHORIZATION=f"Bearer {token.token}")
assert response.status_code == 403, response.content
@control_silo_test
| ApiTokensListTest |
python | neetcode-gh__leetcode | python/0721-accounts-merge.py | {
"start": 0,
"end": 615
} | class ____:
def __init__(self, n):
self.par = [i for i in range(n)]
self.rank = [1] * n
def find(self, x):
while x != self.par[x]:
self.par[x] = self.par[self.par[x]]
x = self.par[x]
return x
def union(self, x1, x2):
p1, p2 = self.find(x1), self.find(x2)
if p1 == p2:
return False
if self.rank[p1] > self.rank[p2]:
self.par[p2] = p1
self.rank[p1] += self.rank[p2]
else:
self.par[p1] = p2
self.rank[p2] += self.rank[p1]
return True
| UnionFind |
python | ray-project__ray | python/ray/dashboard/modules/aggregator/publisher/async_publisher_client.py | {
"start": 812,
"end": 987
} | class ____:
"""Data class that represents a batch of events to publish."""
# The list of events to publish
events: list[events_base_event_pb2.RayEvent]
| PublishBatch |
python | openai__openai-python | src/openai/resources/fine_tuning/checkpoints/permissions.py | {
"start": 8145,
"end": 15281
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncPermissionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncPermissionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncPermissionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncPermissionsWithStreamingResponse(self)
def create(
self,
fine_tuned_model_checkpoint: str,
*,
project_ids: SequenceNotStr[str],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[PermissionCreateResponse, AsyncPage[PermissionCreateResponse]]:
"""
**NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).
This enables organization owners to share fine-tuned models with other projects
in their organization.
Args:
project_ids: The project identifiers to grant access to.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
return self._get_api_list(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
page=AsyncPage[PermissionCreateResponse],
body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
model=PermissionCreateResponse,
method="post",
)
async def retrieve(
self,
fine_tuned_model_checkpoint: str,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["ascending", "descending"] | Omit = omit,
project_id: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> PermissionRetrieveResponse:
"""
**NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
Organization owners can use this endpoint to view all permissions for a
fine-tuned model checkpoint.
Args:
after: Identifier for the last permission ID from the previous pagination request.
limit: Number of permissions to retrieve.
order: The order in which to retrieve permissions.
project_id: The ID of the project to get permissions for.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
return await self._get(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=await async_maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"project_id": project_id,
},
permission_retrieve_params.PermissionRetrieveParams,
),
),
cast_to=PermissionRetrieveResponse,
)
async def delete(
self,
permission_id: str,
*,
fine_tuned_model_checkpoint: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> PermissionDeleteResponse:
"""
**NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
Organization owners can use this endpoint to delete a permission for a
fine-tuned model checkpoint.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
if not permission_id:
raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
return await self._delete(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=PermissionDeleteResponse,
)
| AsyncPermissions |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/cumulative_vrange_middle/package.py | {
"start": 216,
"end": 632
} | class ____(Package):
"""Test that creating cumulative version ranges of the
form X.Y:X works and allows for the selection of all the
versions >= X.Y with major == X
"""
homepage = "https://www.example.org"
url = "https://example.org/files/v3.4/cmake-3.4.3.tar.gz"
version("1.0", md5="4cb3ff35b2472aae70f542116d616e63")
depends_on("cumulative-vrange-bottom@2.1:")
| CumulativeVrangeMiddle |
python | PyCQA__pylint | doc/data/messages/b/bad-dunder-name/good.py | {
"start": 0,
"end": 96
} | class ____:
def __init__(self):
pass
def hello(self):
print("hello")
| Apples |
python | django__django | tests/apps/namespace_package_base/nsapp/apps.py | {
"start": 47,
"end": 153
} | class ____(AppConfig):
default = False
name = "nsapp"
path = os.path.dirname(__file__)
| NSAppConfig |
python | PyCQA__flake8 | src/flake8/style_guide.py | {
"start": 1650,
"end": 6569
} | class ____:
"""A class for managing the decision process around violations.
This contains the logic for whether a violation should be reported or
ignored.
"""
def __init__(self, options: argparse.Namespace) -> None:
"""Initialize the engine."""
self.cache: dict[str, Decision] = {}
self.selected_explicitly = _explicitly_chosen(
option=options.select,
extend=options.extend_select,
)
self.ignored_explicitly = _explicitly_chosen(
option=options.ignore,
extend=options.extend_ignore,
)
self.selected = _select_ignore(
option=options.select,
default=(),
extended_default=options.extended_default_select,
extend=options.extend_select,
)
self.ignored = _select_ignore(
option=options.ignore,
default=defaults.IGNORE,
extended_default=options.extended_default_ignore,
extend=options.extend_ignore,
)
def was_selected(self, code: str) -> Selected | Ignored:
"""Determine if the code has been selected by the user.
:param code: The code for the check that has been run.
:returns:
Selected.Implicitly if the selected list is empty,
Selected.Explicitly if the selected list is not empty and a match
was found,
Ignored.Implicitly if the selected list is not empty but no match
was found.
"""
if code.startswith(self.selected_explicitly):
return Selected.Explicitly
elif code.startswith(self.selected):
return Selected.Implicitly
else:
return Ignored.Implicitly
def was_ignored(self, code: str) -> Selected | Ignored:
"""Determine if the code has been ignored by the user.
:param code:
The code for the check that has been run.
:returns:
Selected.Implicitly if the ignored list is empty,
Ignored.Explicitly if the ignored list is not empty and a match was
found,
Selected.Implicitly if the ignored list is not empty but no match
was found.
"""
if code.startswith(self.ignored_explicitly):
return Ignored.Explicitly
elif code.startswith(self.ignored):
return Ignored.Implicitly
else:
return Selected.Implicitly
def make_decision(self, code: str) -> Decision:
"""Decide if code should be ignored or selected."""
selected = self.was_selected(code)
ignored = self.was_ignored(code)
LOG.debug(
"The user configured %r to be %r, %r",
code,
selected,
ignored,
)
if isinstance(selected, Selected) and isinstance(ignored, Selected):
return Decision.Selected
elif isinstance(selected, Ignored) and isinstance(ignored, Ignored):
return Decision.Ignored
elif (
selected is Selected.Explicitly
and ignored is not Ignored.Explicitly
):
return Decision.Selected
elif (
selected is not Selected.Explicitly
and ignored is Ignored.Explicitly
):
return Decision.Ignored
elif selected is Ignored.Implicitly and ignored is Selected.Implicitly:
return Decision.Ignored
elif (
selected is Selected.Explicitly and ignored is Ignored.Explicitly
) or (
selected is Selected.Implicitly and ignored is Ignored.Implicitly
):
# we only get here if it was in both lists: longest prefix wins
select = next(s for s in self.selected if code.startswith(s))
ignore = next(s for s in self.ignored if code.startswith(s))
if len(select) > len(ignore):
return Decision.Selected
else:
return Decision.Ignored
else:
raise AssertionError(f"unreachable {code} {selected} {ignored}")
def decision_for(self, code: str) -> Decision:
"""Return the decision for a specific code.
This method caches the decisions for codes to avoid retracing the same
logic over and over again. We only care about the select and ignore
rules as specified by the user in their configuration files and
command-line flags.
This method does not look at whether the specific line is being
ignored in the file itself.
:param code: The code for the check that has been run.
"""
decision = self.cache.get(code)
if decision is None:
decision = self.make_decision(code)
self.cache[code] = decision
LOG.debug('"%s" will be "%s"', code, decision)
return decision
| DecisionEngine |
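A standalone sketch of the "longest prefix wins" tie-break described in make_decision above; the selected/ignored prefixes and the example codes are illustrative, not flake8 defaults.

selected = ("E", "W6")
ignored = ("E501", "W")


def decide(code: str) -> str:
    sel = max((p for p in selected if code.startswith(p)), key=len, default=None)
    ign = max((p for p in ignored if code.startswith(p)), key=len, default=None)
    if sel is None:
        return "ignored"
    if ign is None or len(sel) > len(ign):
        return "selected"
    return "ignored"


print(decide("E123"))  # selected: only "E" matches
print(decide("E501"))  # ignored: "E501" is a longer match than "E"
print(decide("W605"))  # selected: "W6" beats "W"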
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_requests/sponsored_brands_request_builder.py | {
"start": 162,
"end": 2319
} | class ____(AmazonAdsBaseRequestBuilder):
@classmethod
def ad_groups_endpoint(cls, client_id: str, client_access_token: str, profile_id: str) -> "SponsoredBrandsRequestBuilder":
return (
cls("sb/v4/adGroups/list").with_client_id(client_id).with_client_access_token(client_access_token).with_profile_id(profile_id)
)
@classmethod
def keywords_endpoint(
cls, client_id: str, client_access_token: str, profile_id: str, limit: Optional[int] = 100, start_index: Optional[int] = 0
) -> "SponsoredBrandsRequestBuilder":
return (
cls("sb/keywords")
.with_client_id(client_id)
.with_client_access_token(client_access_token)
.with_profile_id(profile_id)
.with_limit(limit)
.with_start_index(start_index)
)
@classmethod
def campaigns_endpoint(cls, client_id: str, client_access_token: str, profile_id: str) -> "SponsoredBrandsRequestBuilder":
return (
cls("sb/v4/campaigns/list").with_client_id(client_id).with_client_access_token(client_access_token).with_profile_id(profile_id)
)
def __init__(self, resource: str) -> None:
super().__init__(resource)
self._limit: Optional[int] = None
self._start_index: Optional[int] = None
self._body: dict = None
@property
def query_params(self) -> Dict[str, Any]:
query_params = {}
if self._limit is not None:
query_params["count"] = self._limit
if self._start_index:
query_params["startIndex"] = self._start_index
return query_params
@property
def request_body(self) -> Optional[str]:
return self._body
def with_limit(self, limit: int) -> "SponsoredBrandsRequestBuilder":
self._limit: int = limit
return self
def with_start_index(self, offset: int) -> "SponsoredBrandsRequestBuilder":
self._start_index: int = offset
return self
def with_request_body(self, body: dict) -> "SponsoredBrandsRequestBuilder":
self._body: dict = body
return self
| SponsoredBrandsRequestBuilder |
python | django-crispy-forms__django-crispy-forms | crispy_forms/bootstrap.py | {
"start": 3789,
"end": 5948
} | class ____(PrependedAppendedText):
"""
Layout object for rendering a field with appended text.
Attributes
----------
template : str
The default template which this Layout Object will be rendered
with.
attrs : dict
Attributes to be applied to the field. These are converted into html
attributes. e.g. ``data_id: 'test'`` in the attrs dict will become
``data-id='test'`` on the field's ``<input>``.
Parameters
----------
field : str
The name of the field to be rendered.
text : str
The appended text, can be HTML like.
input_size : str, optional
For Bootstrap4+ additional classes to customise the input-group size
e.g. ``input-group-sm``. By default None
active : bool
For Bootstrap3, a boolean to render the text active. By default
``False``.
css_class : str, optional
CSS classes to be applied to the field. These are added to any classes
included in the ``attrs`` dict. By default ``None``.
wrapper_class: str, optional
CSS classes to be used when rendering the Field. This class is usually
applied to the ``<div>`` which wraps the Field's ``<label>`` and
``<input>`` tags. By default ``None``.
template : str, optional
Overrides the default template, if provided. By default ``None``.
**kwargs : dict, optional
Additional attributes are converted into key="value", pairs. These
attributes are added to the ``<div>``.
Examples
--------
Example::
AppendedText('amount', '.00')
"""
def __init__(
self,
field,
text,
*,
input_size=None,
active=False,
css_class=None,
wrapper_class=None,
template=None,
**kwargs,
):
self.text = text
super().__init__(
field,
appended_text=text,
input_size=input_size,
active=active,
css_class=css_class,
wrapper_class=wrapper_class,
template=template,
**kwargs,
)
| AppendedText |
python | keras-team__keras | keras/src/backend/tensorflow/trainer.py | {
"start": 27517,
"end": 37191
} | class ____(EpochIterator):
def __init__(self, distribute_strategy=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._distribute_strategy = distribute_strategy
dataset = self.data_adapter.get_tf_dataset()
if not isinstance(dataset, tf.distribute.DistributedDataset):
dataset = self._distribute_strategy.experimental_distribute_dataset(
dataset
)
self._distributed_dataset = dataset
def _get_iterator(self):
return self._distributed_dataset
def tf_sync(self):
tf_context.async_wait()
def __next__(self):
return next(self._epoch_iterator)
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
with super().catch_stop_iteration():
try:
yield
self.tf_sync()
except tf.errors.OutOfRangeError:
raise StopIteration
def reduce_per_replica(values, strategy, reduction):
"""Attempt to reduce the structure `values` to single values.
Given `values` (a `tf.Tensor` or a `PerReplica` structure),
which represents the values across all the replicas, `reduce_per_replica`
attempts to "reduce" those values and returns the corresponding structure
that represents only single values.
Currently, `reduce_per_replica` is only used for reducing the metric results
from `tf.distribute.Strategy.run()`. Depending on the underlying
`Strategy` implementation, `values` may be a `PerReplica` object,
which can be thought of as a collection of values across the replicas,
or a `tf.Tensor`, if the strategy has already conducted the reduction
for the downstream library.
There are five possible outcomes of reduction:
1) if the `values` is a structure of simple `tf.Tensor`s, meaning that
reduction is not actually needed, `reduce_per_replica` returns the
structure as-is.
2) else, if `reduction="auto"`, then the best reduction strategy is
chosen based on the current environment. This should only be used
for training cases (`fit()`).
3) else, if `reduction="first"`, then `reduce_per_replica`
returns the values of the first replica. This is used in the case of
training and evaluation, where `values` is expected to hold the same
value across the replicas as a result of `Strategy`'s synchronization
across the replicas.
`reduce_per_replica` does not synchronize the values.
4) else, if `reduction="sum"`, then `reduce_per_replica` returns the sum
of values for all replicas. This may be used in the custom training loop
    case, where each replica contains different values which are not
synchronized.
5) else, if `reduction="concat"`, then `reduce_per_replica`
returns the concatenation of the values across the replicas, along the
axis of dimension 0. This is used in the inference case (`predict()`).
Args:
values: Structure of `PerReplica` objects or `tf.Tensor`s.
`tf.Tensor`s are returned as-is.
strategy: `tf.distribute.Strategy` object.
reduction: One of `"auto"`, `"first"`, `"concat"`, `"mean"`, or `"sum"`.
`"auto"` will select `"first"` when used under a TPUStrategy, or
`"mean"` otherwise.
Returns:
Structure of `Tensor`s, representing the result of reduction.
"""
if reduction == "auto":
if isinstance(strategy, tf.distribute.TPUStrategy):
reduction = "first"
else:
reduction = "mean"
def _reduce(v):
"""Reduce a single `PerReplica` object."""
if _collective_all_reduce_multi_worker(strategy):
if reduction == "concat":
return _multi_worker_concat(v, strategy)
elif reduction == "sum":
return strategy.reduce("SUM", v)
elif reduction == "mean":
return strategy.reduce("MEAN", v, axis=0)
if not _is_per_replica_instance(v):
return v
elif reduction == "first":
return strategy.experimental_local_results(v)[0]
elif reduction == "concat":
if _is_tpu_multi_host(strategy):
return _tpu_multi_host_concat(v, strategy)
else:
return concat(strategy.experimental_local_results(v))
elif reduction == "sum":
return tf.reduce_sum(strategy.experimental_local_results(v))
elif reduction == "mean":
return tf.reduce_mean(
strategy.experimental_local_results(v), axis=0
)
else:
raise ValueError(
"`reduction` must be one of "
'"first", "concat", "mean", "sum", or "auto". '
f"Received: reduction={reduction}."
)
return tree.map_structure(_reduce, values)
def _multi_worker_concat(v, strategy):
"""Order PerReplica objects for CollectiveAllReduceStrategy and concat."""
replicas = strategy.gather(v, axis=0)
# v might not have the same shape on different replicas
if _is_per_replica_instance(v):
shapes = tf.concat(
[
tf.expand_dims(tf.shape(single_value)[0], axis=0)
for single_value in v.values
],
axis=0,
)
all_shapes = strategy.gather(shapes, axis=0)
else:
# v is a tensor. This may happen when, say, we have 2x1 multi-worker.
all_shapes = strategy.gather(
tf.expand_dims(tf.shape(v)[0], axis=0), axis=0
)
replicas = tf.split(
replicas,
num_or_size_splits=all_shapes,
num=strategy.num_replicas_in_sync,
)
ordered_replicas = []
num_replicas_per_worker = len(strategy.extended.worker_devices)
for replica_id in range(num_replicas_per_worker):
ordered_replicas += replicas[replica_id::num_replicas_per_worker]
return concat(ordered_replicas)
def concat(tensors, axis=0):
"""Concats `tensor`s along `axis`."""
if isinstance(tensors[0], tf.SparseTensor):
return tf.sparse.concat(axis=axis, sp_inputs=tensors)
elif _is_scalar(tensors[0]):
return tf.stack(tensors, axis=axis)
else:
return tf.concat(tensors, axis=axis)
def _tpu_multi_host_concat(v, strategy):
"""Correctly order TPU PerReplica objects."""
replicas = strategy.experimental_local_results(v)
# When distributed datasets are created from Tensors / NumPy,
# TPUStrategy.experimental_distribute_dataset shards data in
# (Replica, Host) order, and TPUStrategy.experimental_local_results returns
# it in (Host, Replica) order.
num_replicas_per_host = strategy.extended.num_replicas_per_host
ordered_replicas = []
for replica_id in range(num_replicas_per_host):
ordered_replicas += replicas[replica_id::num_replicas_per_host]
return concat(ordered_replicas)
def _collective_all_reduce_multi_worker(strategy):
return (
isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy)
) and strategy.extended._in_multi_worker_mode()
def _is_per_replica_instance(obj):
return isinstance(obj, tf.distribute.DistributedValues) and isinstance(
obj, tf.__internal__.CompositeTensor
)
def _is_scalar(x):
return isinstance(x, (tf.Tensor, tf.Variable)) and x.shape.rank == 0
def _is_tpu_multi_host(strategy):
return _is_tpu_strategy(strategy) and strategy.extended.num_hosts > 1
def _is_tpu_strategy(strategy):
return _is_tpu_strategy_class(strategy.__class__)
def _is_tpu_strategy_class(clz):
def is_tpu_strat(k):
return k.__name__.startswith("TPUStrategy")
if is_tpu_strat(clz):
return True
return any(map(_is_tpu_strategy_class, clz.__bases__))
def convert_to_np_if_not_ragged(x):
if isinstance(x, tf.RaggedTensor):
return x
elif isinstance(x, tf.SparseTensor):
return x
return x.numpy()
def potentially_ragged_concat(tensors):
"""Concats `Tensor`s along their first dimension.
Args:
tensors: List of `Tensor`s.
Returns:
Concatenation of the inputs along the first dimension -- of type
`np.ndarray` if all input shapes are compatible, or `tf.RaggedTensor`
if not.
"""
if len(tensors) == 1:
return tensors[0]
elif isinstance(tensors[0], tf.SparseTensor):
return tf.sparse.concat(axis=0, sp_inputs=tensors)
elif isinstance(tensors[0], tf.RaggedTensor):
return tf.concat(tensors, axis=0)
non_batch_shapes = tf.stack([tf.shape(tensor)[1:] for tensor in tensors])
constant_dims = tf.math.reduce_all(
non_batch_shapes == non_batch_shapes[:1], axis=0
)
if tf.math.reduce_all(constant_dims).numpy().item():
# All non-batch dims are constant
if _is_scalar(tensors[0]):
return tf.stack(tensors, axis=0)
else:
return tf.concat(tensors, axis=0)
# First, identify constant inner dimensions by finding the
# rightmost dimension that is not constant
constant_inner_dimensions = (
constant_dims.numpy().tolist()[::-1].index(False)
)
# If there are constant inner dimensions, define a constant inner shape
if constant_inner_dimensions == 0:
constant_inner_shape = None
else:
constant_inner_shape = tensors[0].shape[-constant_inner_dimensions:]
return tf.ragged.constant(
[tensor.numpy() for tensor in tensors], inner_shape=constant_inner_shape
).merge_dims(0, 1)
| TFEpochIterator |
python | django__django | tests/expressions/models.py | {
"start": 1295,
"end": 1783
} | class ____(models.Model):
integer = models.BigIntegerField(db_column="the_integer")
float = models.FloatField(null=True, db_column="the_float")
decimal_value = models.DecimalField(max_digits=20, decimal_places=17, null=True)
def __str__(self):
return "%i, %s, %s" % (
self.integer,
"%.3f" % self.float if self.float is not None else None,
"%.17f" % self.decimal_value if self.decimal_value is not None else None,
)
| Number |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_doc_integration_details.py | {
"start": 12654,
"end": 15230
} | class ____(DocIntegrationDetailsTest):
method = "DELETE"
def test_staff_delete_valid(self) -> None:
"""
Tests that the delete method works for those with superuser
permissions, deleting the DocIntegration and associated
IntegrationFeatures and DocIntegrationAvatar
"""
self.login_as(user=self.staff_user, staff=True)
features = IntegrationFeature.objects.filter(
target_id=self.doc_delete.id, target_type=IntegrationTypes.DOC_INTEGRATION.value
)
assert features.exists()
assert self.doc_delete.avatar.exists()
self.get_success_response(self.doc_delete.slug, status_code=status.HTTP_204_NO_CONTENT)
with pytest.raises(DocIntegration.DoesNotExist):
DocIntegration.objects.get(id=self.doc_delete.id)
assert not features.exists()
assert not self.doc_delete.avatar.exists()
# TODO(schew2381): Change test to check that superusers cannot delete DocIntegrations
def test_superuser_delete_valid(self) -> None:
"""
Tests that the delete method works for those with superuser
permissions, deleting the DocIntegration and associated
IntegrationFeatures and DocIntegrationAvatar
"""
self.login_as(user=self.superuser, superuser=True)
features = IntegrationFeature.objects.filter(
target_id=self.doc_delete.id, target_type=IntegrationTypes.DOC_INTEGRATION.value
)
assert features.exists()
assert self.doc_delete.avatar.exists()
self.get_success_response(self.doc_delete.slug, status_code=status.HTTP_204_NO_CONTENT)
with pytest.raises(DocIntegration.DoesNotExist):
DocIntegration.objects.get(id=self.doc_delete.id)
assert not features.exists()
assert not self.doc_delete.avatar.exists()
def test_public_delete_invalid(self) -> None:
"""
Tests that the delete method is not accessible by those with regular member
permissions, and no changes occur in the database.
"""
self.login_as(user=self.user)
self.get_error_response(self.doc_delete.slug, status_code=status.HTTP_403_FORBIDDEN)
assert DocIntegration.objects.get(id=self.doc_delete.id)
features = IntegrationFeature.objects.filter(
target_id=self.doc_delete.id, target_type=IntegrationTypes.DOC_INTEGRATION.value
)
assert features.exists()
assert len(features) == 7
assert self.doc_delete.avatar.exists()
| DeleteDocIntegrationDetailsTest |
python | django-compressor__django-compressor | compressor/tests/test_base.py | {
"start": 18646,
"end": 20806
} | class ____(SimpleTestCase):
def setUp(self):
self.css = (
'<link rel="stylesheet" href="/static/css/one.css" type="text/css" />'
)
self.expected_css_hash = "5c6a60375256"
self.tmpdir = mkdtemp()
new_static_root = os.path.join(self.tmpdir, "static")
copytree(settings.STATIC_ROOT, new_static_root)
self.override_settings = self.settings(
COMPRESS_ENABLED=True,
COMPRESS_PRECOMPILERS=(),
COMPRESS_DEBUG_TOGGLE="nocompress",
DEBUG=True,
STATIC_ROOT=new_static_root,
COMPRESS_ROOT=new_static_root,
STATICFILES_DIRS=[settings.COMPRESS_ROOT],
)
self.override_settings.__enter__()
def tearDown(self):
rmtree(self.tmpdir)
self.override_settings.__exit__(None, None, None)
def test_filename_in_debug_mode(self):
# In debug mode, compressor should look for files using staticfiles
# finders only, and not look into the global static directory, where
# files can be outdated. So compressor's output shouldn't change from
# the one pre-generated if we modify the file in STATIC_ROOT.
def compare():
expected = (
'<link rel="stylesheet" href="/static/CACHE/css/%s.css" type="text/css">'
% self.expected_css_hash
)
compressor = CssCompressor("css", self.css)
compressor.storage = DefaultStorage()
output = compressor.output()
self.assertEqual(expected, output)
compare()
filename = os.path.join(settings.COMPRESS_ROOT, "css", "one.css")
test_css_content = "p { font-family: 'test' }"
with open(filename, "a") as css:
css.write("\n")
css.write(test_css_content)
compare()
result_filename = os.path.join(
settings.COMPRESS_ROOT, "CACHE", "css", "%s.css" % self.expected_css_hash
)
with open(result_filename, "r") as f:
result = f.read()
self.assertTrue(test_css_content not in result)
| CompressorInDebugModeTestCase |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/decision_tree.py | {
"start": 7944,
"end": 9482
} | class ____(DecisionTree):
"""
Regression tree for XGBoost
- Reference -
http://xgboost.readthedocs.io/en/latest/model.html
"""
def _split(self, y):
""" y contains y_true in left half of the middle column and
y_pred in the right half. Split and return the two matrices """
col = int(np.shape(y)[1]/2)
y, y_pred = y[:, :col], y[:, col:]
return y, y_pred
def _gain(self, y, y_pred):
nominator = np.power((y * self.loss.gradient(y, y_pred)).sum(), 2)
denominator = self.loss.hess(y, y_pred).sum()
return 0.5 * (nominator / denominator)
def _gain_by_taylor(self, y, y1, y2):
# Split
y, y_pred = self._split(y)
y1, y1_pred = self._split(y1)
y2, y2_pred = self._split(y2)
true_gain = self._gain(y1, y1_pred)
false_gain = self._gain(y2, y2_pred)
gain = self._gain(y, y_pred)
return true_gain + false_gain - gain
def _approximate_update(self, y):
# y split into y, y_pred
y, y_pred = self._split(y)
# Newton's Method
gradient = np.sum(y * self.loss.gradient(y, y_pred), axis=0)
hessian = np.sum(self.loss.hess(y, y_pred), axis=0)
update_approximation = gradient / hessian
return update_approximation
def fit(self, X, y):
self._impurity_calculation = self._gain_by_taylor
self._leaf_value_calculation = self._approximate_update
super(XGBoostRegressionTree, self).fit(X, y)
| XGBoostRegressionTree |
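For reference, the split score computed by _gain and _gain_by_taylor above mirrors the standard XGBoost structure-score gain with regularisation omitted (using this file's convention of summing y * gradient for G and the Hessian for H):

\mathrm{gain} = \tfrac{1}{2}\frac{G_L^2}{H_L} + \tfrac{1}{2}\frac{G_R^2}{H_R} - \tfrac{1}{2}\frac{G^2}{H},
\qquad G = \sum_i g_i, \quad H = \sum_i h_i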
python | apache__airflow | devel-common/src/sphinx_exts/operators_and_hooks_ref.py | {
"start": 17399,
"end": 17788
} | class ____(BaseJinjaReferenceDirective):
"""Generate list of extra links"""
def render_content(
self, *, tags: set[str] | None, header_separator: str = DEFAULT_HEADER_SEPARATOR
) -> str:
return _common_render_list_content(
header_separator=header_separator, resource_type="extra-links", template="extra_links.rst.jinja2"
)
| ExtraLinksDirective |
python | pennersr__django-allauth | allauth/account/views.py | {
"start": 27253,
"end": 29240
} | class ____(NextRedirectMixin, FormView):
template_name = (
"account/confirm_password_reset_code." + app_settings.TEMPLATE_EXTENSION
)
form_class = ConfirmPasswordResetCodeForm
@method_decorator(login_not_required)
def dispatch(self, request, *args, **kwargs):
self._process = (
flows.password_reset_by_code.PasswordResetVerificationProcess.resume(
request
)
)
if not self._process:
return HttpResponseRedirect(reverse("account_login"))
if self._process.state.get("code_confirmed"):
return HttpResponseRedirect(reverse("account_complete_password_reset"))
return super().dispatch(request, *args, **kwargs)
def get_form_class(self):
return get_form_class(
app_settings.FORMS, "confirm_password_reset_code", self.form_class
)
def get_form_kwargs(self):
ret = super().get_form_kwargs()
ret["code"] = self._process.code
return ret
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
ret["email"] = self._process.state["email"]
ret["verify_form"] = ret["form"]
return ret
def form_valid(self, form):
self._process.confirm_code()
return HttpResponseRedirect(
self.passthrough_next_url(reverse("account_complete_password_reset"))
)
def form_invalid(self, form):
attempts_left = self._process.record_invalid_attempt()
if attempts_left:
return super().form_invalid(form)
adapter = get_adapter(self.request)
adapter.add_message(
self.request,
messages.ERROR,
message=adapter.error_messages["too_many_login_attempts"],
)
return HttpResponseRedirect(self.passthrough_next_url(reverse("account_login")))
confirm_password_reset_code = ConfirmPasswordResetCodeView.as_view()
| ConfirmPasswordResetCodeView |
python | numpy__numpy | numpy/distutils/cpuinfo.py | {
"start": 1822,
"end": 2788
} | class ____:
"""Holds CPU information and provides methods for requiring
the availability of various CPU features.
"""
def _try_call(self, func):
try:
return func()
except Exception:
pass
def __getattr__(self, name):
if not name.startswith('_'):
if hasattr(self, '_'+name):
attr = getattr(self, '_'+name)
if isinstance(attr, types.MethodType):
return lambda func=self._try_call,attr=attr : func(attr)
else:
return lambda : None
raise AttributeError(name)
def _getNCPUs(self):
return 1
def __get_nbits(self):
abits = platform.architecture()[0]
nbits = re.compile(r'(\d+)bit').search(abits).group(1)
return nbits
def _is_32bit(self):
return self.__get_nbits() == '32'
def _is_64bit(self):
return self.__get_nbits() == '64'
| CPUInfoBase |
python | ZoranPandovski__al-go-rithms | data_structures/red_and_black_tree/Python/red_black_tree.py | {
"start": 71,
"end": 243
} | class ____():
def __init__(self, item):
self.item = item
self.parent = None
self.left = None
self.right = None
self.color = 1
| Node |
python | google__pytype | pytype/tests/test_typevar2.py | {
"start": 134,
"end": 18270
} | class ____(test_base.BaseTest):
"""Tests for TypeVar."""
def test_id(self):
ty = self.Infer("""
import typing
T = typing.TypeVar("T")
def f(x: T) -> T:
return __any_object__
v = f(42)
w = f("")
""")
self.assertTypesMatchPytd(
ty,
"""
import typing
from typing import Any
T = TypeVar("T")
def f(x: T) -> T: ...
v = ... # type: int
w = ... # type: str
""",
)
self.assertTrue(ty.Lookup("f").signatures[0].template)
def test_extract_item(self):
ty = self.Infer("""
from typing import List, TypeVar
S = TypeVar("S") # unused
T = TypeVar("T")
def f(x: List[T]) -> T:
return __any_object__
v = f(["hello world"])
w = f([True])
""")
self.assertTypesMatchPytd(
ty,
"""
S = TypeVar("S")
T = TypeVar("T")
def f(x: typing.List[T]) -> T: ...
v = ... # type: str
w = ... # type: bool
""",
)
self.assertTrue(ty.Lookup("f").signatures[0].template)
def test_wrap_item(self):
ty = self.Infer("""
from typing import List, TypeVar
T = TypeVar("T")
def f(x: T) -> List[T]:
return __any_object__
v = f(True)
w = f(3.14)
""")
self.assertTypesMatchPytd(
ty,
"""
T = TypeVar("T")
def f(x: T) -> typing.List[T]: ...
v = ... # type: typing.List[bool]
w = ... # type: typing.List[float]
""",
)
def test_import_typevar_name_change(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import TypeVar
T = TypeVar("T")
X = TypeVar("X")
""",
)
_, errors = self.InferWithErrors(
"""
# This is illegal: A TypeVar("T") needs to be stored under the name "T".
from a import T as T2 # invalid-typevar[e1]
from a import X
Y = X # invalid-typevar[e2]
def f(x: T2) -> T2: ...
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(errors, {"e1": r"T.*T2", "e2": r"X.*Y"})
def test_typevar_in_typevar(self):
self.CheckWithErrors("""
from typing import Generic, TypeVar
T1 = TypeVar('T1')
T2 = TypeVar('T2')
S1 = TypeVar('S1', bound=T1) # invalid-typevar
S2 = TypeVar('S2', T1, T2) # invalid-typevar
# Using the invalid TypeVar should not produce an error.
class Foo(Generic[S1]):
pass
""")
def test_multiple_substitution(self):
ty = self.Infer("""
from typing import Dict, Tuple, TypeVar
K = TypeVar("K")
V = TypeVar("V")
def f(x: Dict[K, V]) -> Tuple[V, K]:
return __any_object__
v = f({})
w = f({"test": 42})
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Dict, Tuple, TypeVar
K = TypeVar("K")
V = TypeVar("V")
def f(x: Dict[K, V]) -> Tuple[V, K]: ...
v = ... # type: Tuple[Any, Any]
w = ... # type: Tuple[int, str]
""",
)
def test_union(self):
ty = self.Infer("""
from typing import TypeVar, Union
S = TypeVar("S")
T = TypeVar("T")
def f(x: S, y: T) -> Union[S, T]:
return __any_object__
v = f("", 42)
w = f(3.14, False)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar, Union
S = TypeVar("S")
T = TypeVar("T")
def f(x: S, y: T) -> Union[S, T]: ...
v = ... # type: Union[str, int]
w = ... # type: Union[float, bool]
""",
)
def test_bad_substitution(self):
errors = self.CheckWithErrors("""
from typing import List, TypeVar
S = TypeVar("S")
T = TypeVar("T")
def f1(x: S) -> List[S]:
return {x} # bad-return-type[e1]
def f2(x: S) -> S:
return 42 # no error because never called
def f3(x: S) -> S:
return 42 # bad-return-type[e2] # bad-return-type[e3]
def f4(x: S, y: T, z: T) -> List[S]:
return [y] # bad-return-type[e4]
f3("")
f3(16) # ok
f3(False)
f4(True, 3.14, 0)
f4("hello", "world", "domination") # ok
""")
self.assertErrorRegexes(
errors,
{
"e1": r"list.*set",
"e2": r"str.*int",
"e3": r"bool.*int",
"e4": r"list\[bool\].*list\[Union\[float, int\]\]",
},
)
def test_use_constraints(self):
ty, errors = self.InferWithErrors("""
from typing import TypeVar
T = TypeVar("T", int, float)
def f(x: T) -> T:
return __any_object__
v = f("") # wrong-arg-types[e]
w = f(True) # ok
u = f(__any_object__) # ok
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, TypeVar, Union
T = TypeVar("T", int, float)
def f(x: T) -> T: ...
v = ... # type: Any
w = ... # type: bool
u = ... # type: Union[int, float]
""",
)
self.assertErrorRegexes(errors, {"e": r"Union\[float, int\].*str"})
def test_type_parameter_type(self):
ty = self.Infer("""
from typing import Type, TypeVar
T = TypeVar("T")
def f(x: Type[T]) -> T:
return __any_object__
v = f(int)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Type, TypeVar
T = TypeVar("T")
def f(x: Type[T]) -> T: ...
v = ... # type: int
""",
)
def test_type_parameter_type_error(self):
errors = self.CheckWithErrors("""
from typing import Sequence, Type, TypeVar
T = TypeVar('T')
def f(x: int):
pass
def g(x: Type[Sequence[T]]) -> T:
print(f(x)) # wrong-arg-types[e]
return x()[0]
""")
self.assertErrorRegexes(
errors, {"e": r"Expected.*int.*Actual.*type\[Sequence\]"}
)
def test_print_nested_type_parameter(self):
errors = self.CheckWithErrors("""
from typing import List, TypeVar
T = TypeVar("T", int, float)
def f(x: List[T]): ...
f([""]) # wrong-arg-types[e]
""")
self.assertErrorRegexes(
errors, {"e": r"list\[Union\[float, int\]\].*list\[str\]"}
)
def test_constraint_subtyping(self):
errors = self.CheckWithErrors("""
from typing import TypeVar
T = TypeVar("T", int, float)
def f(x: T, y: T): ...
f(True, False) # ok
f(True, 42) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"Expected.*y: bool.*Actual.*y: int"})
def test_filter_value(self):
errors = self.CheckWithErrors("""
from typing import TypeVar
T = TypeVar("T", str, float)
def f(x: T, y: T): ...
x = ''
x = 42.0
f(x, '') # wrong-arg-types[e]
f(x, 42.0) # ok
""")
self.assertErrorRegexes(
errors, {"e": r"Expected.*y: float.*Actual.*y: str"}
)
def test_filter_class(self):
self.Check("""
from typing import TypeVar
class A: pass
class B: pass
T = TypeVar("T", A, B)
def f(x: T, y: T): ...
x = A()
x.__class__ = B
# Setting __class__ makes the type ambiguous to pytype.
f(x, A())
f(x, B())
""")
def test_split(self):
ty = self.Infer("""
from typing import TypeVar
T = TypeVar("T", int, type(None))
def f(x: T) -> T:
return __any_object__
if __random__:
x = None
else:
x = 3
v = id(x) if x else 42
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Optional, TypeVar
v = ... # type: int
x = ... # type: Optional[int]
T = TypeVar("T", int, None)
def f(x: T) -> T: ...
""",
)
def test_enforce_non_constrained_typevar(self):
errors = self.CheckWithErrors("""
from typing import TypeVar
T = TypeVar("T")
def f(x: T, y: T): ...
f(42, True) # ok
f(42, "") # wrong-arg-types[e1]
f(42, 16j) # ok
f(object(), 42) # ok
f(42, object()) # ok
f(42.0, "") # wrong-arg-types[e2]
""")
self.assertErrorRegexes(
errors,
{
"e1": r"Expected.*y: int.*Actual.*y: str",
"e2": r"Expected.*y: float.*Actual.*y: str",
},
)
def test_useless_typevar(self):
self.InferWithErrors("""
from typing import Tuple, TypeVar
T = TypeVar("T")
S = TypeVar("S", int, float)
def f1(x: T): ... # invalid-annotation
def f2() -> T: ... # invalid-annotation
def f3(x: Tuple[T]): ... # invalid-annotation
def f4(x: Tuple[T, T]): ... # ok
def f5(x: S): ... # ok
def f6(x: "U"): ... # invalid-annotation
def f7(x: T, y: "T"): ... # ok
def f8(x: "U") -> "U": ... # ok
U = TypeVar("U")
""")
def test_use_bound(self):
ty, errors = self.InferWithErrors("""
from typing import TypeVar
T = TypeVar("T", bound=float)
def f(x: T) -> T:
return x
v1 = f(__any_object__) # ok
v2 = f(True) # ok
v3 = f(42) # ok
v4 = f(3.14) # ok
v5 = f("") # wrong-arg-types[e]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, TypeVar
T = TypeVar("T", bound=float)
def f(x: T) -> T: ...
v1 = ... # type: float
v2 = ... # type: bool
v3 = ... # type: int
v4 = ... # type: float
v5 = ... # type: Any
""",
)
self.assertErrorRegexes(errors, {"e": r"x: float.*x: str"})
def test_bad_return(self):
self.assertNoCrash(
self.Check,
"""
from typing import AnyStr, Dict
class Foo:
def f(self) -> AnyStr: return __any_object__
def g(self) -> Dict[AnyStr, Dict[AnyStr, AnyStr]]:
return {'foo': {'bar': self.f()}}
""",
)
def test_optional_typevar(self):
errors = self.CheckWithErrors("""
from typing import Optional, TypeVar
T = TypeVar("T", bound=str)
def f() -> Optional[T]:
return 42 if __random__ else None # bad-return-type[e]
""")
self.assertErrorRegexes(errors, {"e": r"Optional\[str\].*int"})
def test_unicode_literals(self):
ty = self.Infer("""
from __future__ import unicode_literals
import typing
T = typing.TypeVar("T")
def f(x: T) -> T:
return __any_object__
v = f(42)
""")
self.assertTypesMatchPytd(
ty,
"""
import typing
from typing import Any
T = TypeVar("T")
def f(x: T) -> T: ...
v = ... # type: int
""",
)
def test_any_as_bound(self):
self.Check("""
from typing import Any, TypeVar
T = TypeVar("T", bound=Any)
def f(x: T) -> T:
return x
f(42)
""")
def test_any_as_constraint(self):
self.Check("""
from typing import Any, TypeVar
T = TypeVar("T", str, Any)
def f(x: T) -> T:
return x
f(42)
""")
def test_name_reuse(self):
self.Check("""
from typing import Generic, TypeVar
T = TypeVar("T", int, float)
class Foo(Generic[T]):
def __init__(self, x: T):
self.x = x
def f(foo: Foo[T]) -> T:
return foo.x
""")
def test_property_type_param(self):
# We should allow property signatures of the form f(self) -> X[T] without
# needing to annotate 'self' if the class is generic and we use its type
# parameter in the property's signature.
ty = self.Infer("""
from typing import TypeVar, Generic
T = TypeVar('T')
class A(Generic[T]):
def __init__(self, foo: T):
self._foo = foo
@property
def foo(self) -> T:
return self._foo
@foo.setter
def foo(self, foo: T) -> None:
self._foo = foo
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar, Generic, Any, Annotated
T = TypeVar('T')
class A(Generic[T]):
_foo: T
foo: Annotated[T, 'property']
def __init__(self, foo: T) -> None:
self = A[T]
""",
)
@test_base.skip("Needs improvements to matcher.py to detect error.")
def test_return_typevar(self):
errors = self.CheckWithErrors("""
from typing import TypeVar
T = TypeVar('T')
def f(x: T) -> T:
return T # bad-return-type[e]
""")
self.assertErrorRegexes(errors, {"e": "Expected.*T.*Actual.*TypeVar"})
def test_typevar_in_union_alias(self):
ty = self.Infer("""
from typing import Dict, List, TypeVar, Union
T = TypeVar("T")
U = TypeVar("U")
Foo = Union[T, List[T], Dict[T, List[U]], complex]
def f(x: Foo[int, str]): ...
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, List, TypeVar, Union
T = TypeVar("T")
U = TypeVar("U")
Foo = Union[T, List[T], Dict[T, List[U]], complex]
def f(x: Union[Dict[int, List[str]], List[int], complex, int]) -> None: ...
""",
)
def test_typevar_in_union_alias_error(self):
err = self.CheckWithErrors("""
from typing import Dict, List, TypeVar, Union
T = TypeVar("T")
U = TypeVar("U")
Foo = Union[T, List[T], Dict[T, List[U]], complex]
def f(x: Foo[int]): ... # invalid-annotation[e]
""")
self.assertErrorRegexes(err, {"e": "Union.*2.*got.*1"})
def test_cast_generic_tuple(self):
self.Check("""
from typing import Tuple, TypeVar, cast
T = TypeVar('T')
def f(x: T, y: T):
return cast(Tuple[T, ...], x)
assert_type(f(0, 1), Tuple[int, ...])
""")
def test_cast_in_instance_method(self):
self.Check("""
from typing import TypeVar, cast
T = TypeVar('T', bound='Base')
class Base:
def clone(self: T) -> T:
return cast(T, __any_object__)
class Child(Base):
pass
Child().clone()
""")
def test_typevar_in_nested_function(self):
self.Check("""
from typing import TypeVar
T = TypeVar('T')
def f(x: T):
def wrapper(x: T):
pass
return wrapper
""")
def test_typevar_in_nested_function_in_instance_method(self):
self.Check("""
from typing import TypeVar
T = TypeVar('T')
class Foo:
def f(self, x: T):
def g(x: T):
pass
""")
def test_pass_through_class(self):
self.Check("""
from typing import Type, TypeVar
T = TypeVar('T')
def f(cls: Type[T]) -> Type[T]:
return cls
""")
@test_base.skip("Requires completing TODO in annotation_utils.deformalize")
def test_type_of_typevar(self):
self.Check("""
from typing import Type, TypeVar
T = TypeVar('T', bound=int)
class Foo:
def f(self, x: T) -> Type[T]:
return type(x)
def g(self):
assert_type(self.f(0), Type[int])
""")
def test_instantiate_unsubstituted_typevar(self):
# TODO(b/79369981): Report an error for T appearing only once in f.
self.Check("""
from typing import Type, TypeVar
T = TypeVar('T', bound=int)
def f() -> Type[T]:
return int
def g():
return f().__name__
""")
def test_class_typevar_in_nested_method(self):
self.Check("""
from typing import Generic, TypeVar
T = TypeVar('T')
class Foo(Generic[T]):
def __init__(self, x: T):
self.x = x
def f(self):
def g() -> T:
return self.x
return g()
assert_type(Foo(0).f(), int)
""")
def test_self_annotation_in_base_class(self):
self.Check("""
from typing import TypeVar
T = TypeVar('T', bound='Base')
class Base:
def resolve(self: T) -> T:
return self
class Child(Base):
def resolve(self: T) -> T:
assert_type(Base().resolve(), Base)
return self
assert_type(Child().resolve(), Child)
""")
def test_union_against_typevar(self):
self.Check("""
from typing import Callable, Iterable, TypeVar, Union
T = TypeVar('T')
def f(x: Callable[[T], int], y: Iterable[T]):
pass
def g(x: Union[int, str]):
return 0
f(g, [0, ''])
""")
def test_callable_instance_against_callable(self):
self.CheckWithErrors("""
from typing import Any, Callable, TypeVar
T1 = TypeVar('T1')
T2 = TypeVar('T2', bound=int)
def f() -> Callable[[T2], T2]:
return __any_object__
# Passing f() to g is an error because g expects a callable with an
# unconstrained parameter type.
def g(x: Callable[[T1], T1]):
pass
g(f()) # wrong-arg-types
# Passing f() to h is okay because T1 in this Callable is just being used
# to save the parameter type for h's return type.
def h(x: Callable[[T1], Any]) -> T1:
return __any_object__
h(f())
""")
def test_future_annotations(self):
self.Check("""
from __future__ import annotations
from typing import Callable, TypeVar
T = TypeVar('T')
x: Callable[[T], T] = lambda x: x
""")
def test_imported_typevar_in_scope(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import TypeVar
T = TypeVar('T')
""",
)]):
self.Check("""
import foo
def f(x: foo.T) -> foo.T:
y: foo.T = x
return y
""")
def test_bad_typevar_in_pyi(self):
# TODO(rechen): We should report an error for the stray `T` in foo.C.f. For
# now, just make sure this doesn't crash pytype.
with self.DepTree([(
"foo.pyi",
"""
from typing import Callable, TypeVar
T = TypeVar('T')
class C:
f: Callable[..., T]
""",
)]):
self.assertNoCrash(
self.Check,
"""
import foo
class C(foo.C):
def g(self):
return self.f(0)
""",
)
| TypeVarTest |
python | django__django | tests/admin_scripts/app_raising_messages/models.py | {
"start": 62,
"end": 360
} | class ____(models.Model):
@classmethod
def check(self, **kwargs):
return [
checks.Warning("First warning", hint="Hint", obj="obj"),
checks.Warning("Second warning", obj="a"),
checks.Error("An error", hint="Error hint"),
]
| ModelRaisingMessages |
python | bokeh__bokeh | src/bokeh/server/views/autoload_js_handler.py | {
"start": 1753,
"end": 4302
} | class ____(SessionHandler):
''' Implements a custom Tornado handler for the autoload JS chunk
'''
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "*")
self.set_header("Access-Control-Allow-Credentials", "true")
async def get(self, *args, **kwargs):
if self.request.cookies and "Origin" in self.request.headers:
# If credentials, i.e. cookies, are sent with the request,
# we cannot leave the allowed origin as wildcard "*",
# but have to make it explicit.
self.set_header("Access-Control-Allow-Origin", self.request.headers["Origin"])
session = await self.get_session()
element_id = self.get_argument("bokeh-autoload-element", default=None)
if not element_id:
self.send_error(status_code=400, reason='No bokeh-autoload-element query parameter')
return
app_path = self.get_argument("bokeh-app-path", default="/")
absolute_url = self.get_argument("bokeh-absolute-url", default=None)
if absolute_url:
uri = urlparse(absolute_url)
server_url = f"{uri.scheme}://{uri.netloc}"
else:
server_url = None
resources_param = self.get_argument("resources", "default")
resources = self.application.resources(server_url) if resources_param != "none" else None
bundle = bundle_for_objs_and_resources(None, resources)
render_items = [RenderItem(token=session.token, elementid=element_id, use_for_title=False)]
bundle.add(Script(script_for_render_items({}, render_items, app_path=app_path, absolute_url=absolute_url)))
js = AUTOLOAD_JS.render(bundle=bundle, elementid=element_id)
self.set_header("Content-Type", 'application/javascript')
self.write(js)
async def options(self, *args, **kwargs):
'''Browsers make OPTIONS requests under the hood before a GET request'''
self.set_header('Access-Control-Allow-Methods', 'PUT, GET, OPTIONS')
self.set_header("Access-Control-Allow-Origin", self.request.headers["Origin"])
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| AutoloadJsHandler |
python | langchain-ai__langchain | libs/core/langchain_core/exceptions.py | {
"start": 283,
"end": 2348
} | class ____(ValueError, LangChainException): # noqa: N818
"""Exception that output parsers should raise to signify a parsing error.
This exists to differentiate parsing errors from other code or execution errors
that also may arise inside the output parser.
`OutputParserException` will be available to catch and handle in ways to fix the
parsing error, while other errors will be raised.
"""
def __init__(
self,
error: Any,
observation: str | None = None,
llm_output: str | None = None,
send_to_llm: bool = False, # noqa: FBT001,FBT002
):
"""Create an `OutputParserException`.
Args:
error: The error that's being re-raised or an error message.
observation: String explanation of error which can be passed to a model to
try and remediate the issue.
llm_output: String model output which is error-ing.
send_to_llm: Whether to send the observation and llm_output back to an Agent
after an `OutputParserException` has been raised.
This gives the underlying model driving the agent the context that the
previous output was improperly structured, in the hopes that it will
update the output to the correct format.
Raises:
ValueError: If `send_to_llm` is `True` but either observation or
`llm_output` are not provided.
"""
if isinstance(error, str):
error = create_message(
message=error, error_code=ErrorCode.OUTPUT_PARSING_FAILURE
)
super().__init__(error)
if send_to_llm and (observation is None or llm_output is None):
msg = (
"Arguments 'observation' & 'llm_output'"
" are required if 'send_to_llm' is True"
)
raise ValueError(msg)
self.observation = observation
self.llm_output = llm_output
self.send_to_llm = send_to_llm
| OutputParserException |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/vertex_ai/test_pipeline_job.py | {
"start": 5802,
"end": 9489
} | class ____:
def setup_method(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_no_default_project_id
):
self.hook = PipelineJobHook(gcp_conn_id=TEST_GCP_CONN_ID)
@mock.patch(PIPELINE_JOB_STRING.format("PipelineJobHook.get_pipeline_service_client"))
def test_create_pipeline_job(self, mock_client) -> None:
self.hook.create_pipeline_job(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
pipeline_job=TEST_PIPELINE_JOB,
pipeline_job_id=TEST_PIPELINE_JOB_ID,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.create_pipeline_job.assert_called_once_with(
request=dict(
parent=mock_client.return_value.common_location_path.return_value,
pipeline_job=TEST_PIPELINE_JOB,
pipeline_job_id=TEST_PIPELINE_JOB_ID,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.common_location_path.assert_called_once_with(TEST_PROJECT_ID, TEST_REGION)
@mock.patch(PIPELINE_JOB_STRING.format("PipelineJobHook.get_pipeline_service_client"))
def test_delete_pipeline_job(self, mock_client) -> None:
self.hook.delete_pipeline_job(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
pipeline_job_id=TEST_PIPELINE_JOB_ID,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.delete_pipeline_job.assert_called_once_with(
request=dict(
name=mock_client.return_value.pipeline_job_path.return_value,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.pipeline_job_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_REGION, TEST_PIPELINE_JOB_ID
)
@mock.patch(PIPELINE_JOB_STRING.format("PipelineJobHook.get_pipeline_service_client"))
def test_get_pipeline_job(self, mock_client) -> None:
self.hook.get_pipeline_job(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
pipeline_job_id=TEST_PIPELINE_JOB_ID,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.get_pipeline_job.assert_called_once_with(
request=dict(
name=mock_client.return_value.pipeline_job_path.return_value,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.pipeline_job_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_REGION, TEST_PIPELINE_JOB_ID
)
@mock.patch(PIPELINE_JOB_STRING.format("PipelineJobHook.get_pipeline_service_client"))
def test_list_pipeline_jobs(self, mock_client) -> None:
self.hook.list_pipeline_jobs(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.list_pipeline_jobs.assert_called_once_with(
request=dict(
parent=mock_client.return_value.common_location_path.return_value,
page_size=None,
page_token=None,
filter=None,
order_by=None,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.common_location_path.assert_called_once_with(TEST_PROJECT_ID, TEST_REGION)
| TestPipelineJobWithoutDefaultProjectIdHook |
python | Textualize__textual | src/textual/screen.py | {
"start": 2593,
"end": 3186
} | class ____(NamedTuple):
"""Result of [get_hover_widget_at][textual.screen.Screen.get_hover_widget_at]"""
mouse_over: tuple[Widget, Region]
"""Widget and region directly under the mouse."""
hover_over: tuple[Widget, Region] | None
"""Widget with a hover style under the mouse, or `None` for no hover style widget."""
@property
def widgets(self) -> tuple[Widget, Widget | None]:
"""Just the widgets."""
return (
self.mouse_over[0],
None if self.hover_over is None else self.hover_over[0],
)
@rich.repr.auto
| HoverWidgets |
python | getsentry__sentry | tests/symbolicator/test_unreal_full.py | {
"start": 1314,
"end": 5785
} | class ____(RelayStoreHelper, TransactionTestCase):
@pytest.fixture(autouse=True)
def initialize(self, live_server):
self.project.update_option("sentry:builtin_symbol_sources", [])
with (
patch("sentry.auth.system.is_internal_ip", return_value=True),
self.options({"system.url-prefix": live_server.url}),
):
# Run test case
yield
def upload_symbols(self):
url = reverse(
"sentry-api-0-dsym-files",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
},
)
self.login_as(user=self.user)
out = BytesIO()
f = zipfile.ZipFile(out, "w")
f.write(get_fixture_path("native", "unreal_crash.sym"), "crash.sym")
f.close()
response = self.client.post(
url,
{
"file": SimpleUploadedFile(
"symbols.zip", out.getvalue(), content_type="application/zip"
)
},
format="multipart",
)
assert response.status_code == 201, response.content
assert len(response.json()) == 1
def unreal_crash_test_impl(self, filename):
self.project.update_option("sentry:store_crash_reports", STORE_CRASH_REPORTS_ALL)
self.upload_symbols()
# attachments feature has to be on for the files extract stick around
with self.feature("organizations:event-attachments"):
with open(filename, "rb") as f:
event = self.post_and_retrieve_unreal(f.read())
def make_snapshot(subname=None):
self.insta_snapshot(
{
"contexts": event.data.get("contexts"),
"exception": {
"values": [
normalize_native_exception(x)
for x in get_path(event.data, "exception", "values") or ()
]
},
"stacktrace": event.data.get("stacktrace"),
"threads": event.data.get("threads"),
"extra": event.data.get("extra"),
"sdk": event.data.get("sdk"),
},
subname=subname,
)
make_snapshot()
return sorted(EventAttachment.objects.filter(event_id=event.event_id), key=lambda x: x.name)
@pytest.mark.skip(reason="temporary because of Relay change")
def test_unreal_crash_with_attachments(self) -> None:
attachments = self.unreal_crash_test_impl(get_unreal_crash_file())
assert len(attachments) == 4
context, config, minidump, log = attachments
assert context.name == "CrashContext.runtime-xml"
assert context.sha1 == "835d3e10db5d1799dc625132c819c047261ddcfb"
assert config.name == "CrashReportClient.ini"
assert config.sha1 == "5839c750bdde8cba4d2a979ea857b8154cffdab5"
assert minidump.name == "UE4Minidump.dmp"
assert minidump.sha1 == "089d9fd3b5c0cc4426339ab46ec3835e4be83c0f"
assert log.name == "YetAnother.log" # Log file is named after the project
assert log.sha1 == "24d1c5f75334cd0912cc2670168d593d5fe6c081"
@pytest.mark.skip(reason="temporary because of Relay change")
def test_unreal_apple_crash_with_attachments(self) -> None:
attachments = self.unreal_crash_test_impl(get_unreal_crash_apple_file())
assert len(attachments) == 6
context, config, diagnostics, log, info, minidump = attachments
assert context.name == "CrashContext.runtime-xml"
assert context.sha1 == "5d2723a7d25111645702fcbbcb8e1d038db56c6e"
assert config.name == "CrashReportClient.ini"
assert config.sha1 == "4d6a2736e3e4969a68b7adbe197b05c171c29ea0"
assert diagnostics.name == "Diagnostics.txt"
assert diagnostics.sha1 == "aa271bf4e307a78005410234081945352e8fb236"
assert log.name == "YetAnotherMac.log" # Log file is named after the project
assert log.sha1 == "735e751a8b6b943dbc0abce0e6d096f4d48a0c1e"
assert info.name == "info.txt"
assert info.sha1 == "279b27ac5d0e6792d088e0662ce1a18413b772bc"
assert minidump.name == "minidump.dmp"
assert minidump.sha1 == "728d0f4b09cf5a7942da3893b6db79ac842b701a"
| SymbolicatorUnrealIntegrationTest |
python | django-haystack__django-haystack | test_haystack/test_app_with_hierarchy/contrib/django/hierarchal_app_django/models.py | {
"start": 138,
"end": 214
} | class ____(Model):
title = CharField(max_length=16)
| HierarchalAppSecondModel |
python | PyCQA__pylint | tests/functional/m/membership_protocol_py3.py | {
"start": 484,
"end": 542
} | class ____(metaclass=MetaOldIterable):
pass
| IterableClass |
python | numba__numba | numba/core/types/abstract.py | {
"start": 10773,
"end": 11617
} | class ____(Type):
"""
Type class for Numpy array-compatible objects (typically, objects
exposing an __array__ method).
Derived classes should implement the *as_array* attribute.
"""
# If overridden by a subclass, it should also implement typing
# for '__array_wrap__' with arguments (input, formal result).
array_priority = 0.0
@property
@abstractmethod
def as_array(self):
"""
The equivalent array type, for operations supporting array-compatible
objects (such as ufuncs).
"""
# For compatibility with types.Array
@cached_property
def ndim(self):
return self.as_array.ndim
@cached_property
def layout(self):
return self.as_array.layout
@cached_property
def dtype(self):
return self.as_array.dtype
| ArrayCompatible |
python | pandas-dev__pandas | pandas/tests/scalar/period/test_arithmetic.py | {
"start": 13965,
"end": 16784
} | class ____:
def test_period_comparison_same_freq(self):
jan = Period("2000-01", "M")
feb = Period("2000-02", "M")
assert not jan == feb
assert jan != feb
assert jan < feb
assert jan <= feb
assert not jan > feb
assert not jan >= feb
def test_period_comparison_same_period_different_object(self):
# Separate Period objects for the same period
left = Period("2000-01", "M")
right = Period("2000-01", "M")
assert left == right
assert left >= right
assert left <= right
assert not left < right
assert not left > right
def test_period_comparison_mismatched_freq(self):
jan = Period("2000-01", "M")
day = Period("2012-01-01", "D")
assert not jan == day
assert jan != day
msg = r"Input has different freq=D from Period\(freq=M\)"
with pytest.raises(IncompatibleFrequency, match=msg):
jan < day
with pytest.raises(IncompatibleFrequency, match=msg):
jan <= day
with pytest.raises(IncompatibleFrequency, match=msg):
jan > day
with pytest.raises(IncompatibleFrequency, match=msg):
jan >= day
def test_period_comparison_invalid_type(self):
jan = Period("2000-01", "M")
assert not jan == 1
assert jan != 1
int_or_per = "'(Period|int)'"
msg = f"not supported between instances of {int_or_per} and {int_or_per}"
for left, right in [(jan, 1), (1, jan)]:
with pytest.raises(TypeError, match=msg):
left > right
with pytest.raises(TypeError, match=msg):
left >= right
with pytest.raises(TypeError, match=msg):
left < right
with pytest.raises(TypeError, match=msg):
left <= right
def test_period_comparison_nat(self):
per = Period("2011-01-01", freq="D")
ts = Timestamp("2011-01-01")
# confirm Period('NaT') work identical with Timestamp('NaT')
for left, right in [
(NaT, per),
(per, NaT),
(NaT, ts),
(ts, NaT),
]:
assert not left < right
assert not left > right
assert not left == right
assert left != right
assert not left <= right
assert not left >= right
@pytest.mark.parametrize(
"scalar, expected",
((0, False), (Period("2000-01", "M"), True)),
)
def test_period_comparison_numpy_zerodim_arr(self, scalar, expected):
zerodim_arr = np.array(scalar)
per = Period("2000-01", "M")
assert (per == zerodim_arr) is expected
assert (zerodim_arr == per) is expected
| TestPeriodComparisons |
python | huggingface__transformers | src/transformers/models/paligemma/modeling_paligemma.py | {
"start": 10390,
"end": 19454
} | class ____(PaliGemmaPreTrainedModel):
_checkpoint_conversion_mapping = {"language_model.model": "language_model"}
# we are filtering the logits/labels so we shouldn't divide the loss based on num_items_in_batch
accepts_loss_kwargs = False
def __init__(self, config: PaliGemmaConfig):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config=config.vision_config)
self.multi_modal_projector = PaliGemmaMultiModalProjector(config)
self.vocab_size = config.text_config.vocab_size
language_model = AutoModel.from_config(config=config.text_config)
self.language_model = language_model
self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
self.text_config_dtype = self.config.get_text_config().dtype or self.dtype
self.post_init()
# Copied from transformers.models.llava.modeling_llava.LlavaModel.get_input_embeddings with Llava->PaliGemma
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
# Copied from transformers.models.llava.modeling_llava.LlavaModel.set_input_embeddings with Llava->PaliGemma
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def get_image_features(self, pixel_values: torch.FloatTensor):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)
The tensors corresponding to the input images.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
"""
image_outputs = self.vision_tower(pixel_values)
selected_image_feature = image_outputs.last_hidden_state
image_features = self.multi_modal_projector(selected_image_feature)
image_features = image_features / (self.config.text_config.hidden_size**0.5)
return image_features
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
n_image_features = image_features.shape[0] * image_features.shape[1]
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
token_type_ids: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, PaligemmaModelOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
>>> model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma2-3b-mix-224")
>>> processor = AutoProcessor.from_pretrained("google/paligemma2-3b-mix-224")
>>> prompt = "Where is the cat standing?"
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text=prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs,)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Where is the cat standing?\nsnow"
```"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Replace image id with PAD if the image token if OOV, to avoid index-errors
if input_ids is not None and self.config.image_token_id >= self.vocab_size:
special_image_mask = input_ids == self.config.image_token_id
llm_input_ids = input_ids.clone()
llm_input_ids[special_image_mask] = 0
else:
llm_input_ids = input_ids
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(llm_input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0) + 1 # Paligemma positions are 1-indexed
# Merge text and images
if pixel_values is not None:
image_features = self.get_image_features(pixel_values)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
causal_mask_mapping = create_causal_mask_mapping(
self.config,
inputs_embeds,
attention_mask,
cache_position,
past_key_values,
position_ids,
token_type_ids,
pixel_values,
is_training=self.training,
)
outputs = self.language_model(
attention_mask=causal_mask_mapping,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
return PaligemmaModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
@auto_docstring(
custom_intro="""
The Base Paligemma model which consists of a vision backbone and a language model without language modeling head.,
"""
)
| PaliGemmaModel |
python | cython__cython | Tools/dataclass_test_data/test_dataclasses.py | {
"start": 75690,
"end": 76911
} | class ____(unittest.TestCase):
def test_no_eq(self):
# Test a class with no __eq__ and eq=False.
@dataclass(eq=False)
class C:
x: int
self.assertNotEqual(C(0), C(0))
c = C(3)
self.assertEqual(c, c)
# Test a class with an __eq__ and eq=False.
@dataclass(eq=False)
class C:
x: int
def __eq__(self, other):
return other == 10
self.assertEqual(C(3), 10)
def test_overwriting_eq(self):
# If the class has __eq__, use it no matter the value of
# eq=.
@dataclass
class C:
x: int
def __eq__(self, other):
return other == 3
self.assertEqual(C(1), 3)
self.assertNotEqual(C(1), 1)
@dataclass(eq=True)
class C:
x: int
def __eq__(self, other):
return other == 4
self.assertEqual(C(1), 4)
self.assertNotEqual(C(1), 1)
@dataclass(eq=False)
class C:
x: int
def __eq__(self, other):
return other == 5
self.assertEqual(C(1), 5)
self.assertNotEqual(C(1), 1)
| TestEq |
python | mozilla__bleach | bleach/_vendor/parse.py | {
"start": 4782,
"end": 5051
} | class ____(object):
"""Standard approach to encoding parsed results from str to bytes"""
__slots__ = ()
def encode(self, encoding='ascii', errors='strict'):
return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))
| _ResultMixinStr |
python | ray-project__ray | release/nightly_tests/dataset/benchmark.py | {
"start": 342,
"end": 6227
} | class ____:
"""Runs benchmarks in a way that's compatible with our release test infrastructure.
Here's an example of typical usage:
.. testcode::
import time
from benchmark import Benchmark
        def sleep(sleep_s):
time.sleep(sleep_s)
# Return any extra metrics you want to record. This can include
# configuration parameters, accuracy, etc.
return {"sleep_s": sleep_s}
benchmark = Benchmark()
benchmark.run_fn("short", sleep, 1)
benchmark.run_fn("long", sleep, 10)
benchmark.write_result()
This code outputs a JSON file with contents like this:
.. code-block:: json
{"short": {"time": 1.0, "sleep_s": 1}, "long": {"time": 10.0 "sleep_s": 10}}
"""
def __init__(self):
self.result = {}
def run_materialize_ds(
self,
name: str,
fn: Callable[..., Dataset],
*fn_args,
**fn_kwargs,
):
"""Run a benchmark on materializing a Ray Dataset. ``fn`` is expected to
return the Dataset which is to be materialized. Runtime and throughput
are automatically calculated and reported."""
gc.collect()
print(f"Running case: {name}")
start_time = time.perf_counter()
output_ds = fn(*fn_args, **fn_kwargs)
output_ds.materialize()
duration = time.perf_counter() - start_time
# TODO(chengsu): Record more metrics based on dataset stats.
num_rows = output_ds.count()
self.result[name] = {
BenchmarkMetric.RUNTIME.value: duration,
BenchmarkMetric.NUM_ROWS.value: num_rows,
BenchmarkMetric.THROUGHPUT.value: num_rows / duration,
}
print(f"Result of case {name}: {self.result[name]}")
def run_iterate_ds(
self,
name: str,
dataset: Any,
):
"""Run a benchmark iterating over a dataset. Runtime and throughput
are automatically calculated and reported. Supported dataset types are:
- Ray Dataset (`ray.data.Dataset`)
- iterator over Ray Dataset (`ray.data.iterator._IterableFromIterator` from
`.iter_batches()`,`.iter_torch_batches()`, `.iter_tf_batches()`)
- Torch DataLoader (`torch.utils.data.DataLoader`)
- TensorFlow Dataset (`tf.data.Dataset`)
"""
# Import TF/Torch within this method, as not all benchmarks
# will use/install these libraries.
import tensorflow as tf
import torch
gc.collect()
print(f"Running case: {name}")
start_time = time.perf_counter()
record_count = 0
ds_iterator = iter(dataset)
for batch in ds_iterator:
# Unwrap list to get the underlying batch format.
if isinstance(batch, (list, tuple)) and len(batch) > 0:
batch = batch[0]
# Get the batch size for various batch formats.
if isinstance(batch, dict):
feature_lengths = {k: len(batch[k]) for k in batch}
batch_size = max(feature_lengths.values())
elif isinstance(batch, (pa.Table, pd.DataFrame)):
batch_size = len(batch)
elif isinstance(batch, torch.Tensor):
batch_size = batch.size(dim=0)
elif isinstance(batch, tf.Tensor):
batch_size = batch.shape.as_list()[0]
else:
raise TypeError(f"Unexpected batch type: {type(batch)}")
record_count += batch_size
duration = time.perf_counter() - start_time
self.result[name] = {
BenchmarkMetric.RUNTIME.value: duration,
BenchmarkMetric.NUM_ROWS.value: record_count,
BenchmarkMetric.THROUGHPUT.value: record_count / duration,
}
print(f"Result of case {name}: {self.result[name]}")
def run_fn(
self,
name: str,
fn: Callable[..., Dict[Union[str, BenchmarkMetric], Any]],
*fn_args,
**fn_kwargs,
):
"""Benchmark a function.
This is the most general benchmark utility available. Use it if the other
methods are too specific.
``run_fn`` automatically records the runtime of ``fn``. To report additional
metrics, return a ``Dict[str, Any]`` of metric labels to metric values from your
function.
"""
gc.collect()
print(f"Running case: {name}")
start_time = time.perf_counter()
fn_output = fn(*fn_args, **fn_kwargs)
assert fn_output is None or isinstance(fn_output, dict), fn_output
duration = time.perf_counter() - start_time
curr_case_metrics = {
BenchmarkMetric.RUNTIME.value: duration,
}
if isinstance(fn_output, dict):
for key, value in fn_output.items():
if isinstance(key, BenchmarkMetric):
curr_case_metrics[key.value] = value
elif isinstance(key, str):
curr_case_metrics[key] = value
else:
raise ValueError(f"Unexpected metric key type: {type(key)}")
self.result[name] = curr_case_metrics
print(f"Result of case {name}: {curr_case_metrics}")
def write_result(self):
"""Write all results to the appropriate JSON file.
Our release test infrastructure consumes the JSON file and uploads the results
to our internal dashboard.
"""
# 'TEST_OUTPUT_JSON' is set in the release test environment.
test_output_json = os.environ.get("TEST_OUTPUT_JSON", "./result.json")
with open(test_output_json, "w") as f:
f.write(json.dumps(self.result))
print(f"Finished benchmark, metrics exported to '{test_output_json}':")
print(json.dumps(self.result, indent=4))
| Benchmark |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/base.py | {
"start": 12596,
"end": 13368
} | class ____:
"""
IO partitioning plan.
Notes
-----
The meaning of `factor` depends on the value of `flavor`:
- SINGLE_FILE: `factor` must be `1`.
- SPLIT_FILES: `factor` is the number of partitions per file.
- FUSED_FILES: `factor` is the number of files per partition.
- SINGLE_READ: `factor` is the total number of files.
"""
__slots__ = ("factor", "flavor")
factor: int
flavor: IOPartitionFlavor
def __init__(self, factor: int, flavor: IOPartitionFlavor) -> None:
if flavor == IOPartitionFlavor.SINGLE_FILE and factor != 1: # pragma: no cover
raise ValueError(f"Expected factor == 1 for {flavor}, got: {factor}")
self.factor = factor
self.flavor = flavor
| IOPartitionPlan |
python | getsentry__sentry | src/sentry/api/endpoints/timeseries.py | {
"start": 711,
"end": 840
} | class ____(TypedDict):
values: list[Row]
yAxis: str
groupBy: NotRequired[list[GroupBy]]
meta: SeriesMeta
| TimeSeries |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/base.py | {
"start": 16324,
"end": 16728
} | class ____(OAuthLibMixin, View):
"""
Implements an endpoint to revoke access or refresh tokens
"""
def post(self, request, *args, **kwargs):
url, headers, body, status = self.create_revocation_response(request)
response = HttpResponse(content=body or "", status=status)
for k, v in headers.items():
response[k] = v
return response
| RevokeTokenView |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 24348,
"end": 24447
} | class ____(DagsterError):
"""Import error raised while importing user-code."""
| DagsterImportError |
python | spack__spack | lib/spack/spack/util/pattern.py | {
"start": 281,
"end": 470
} | class ____(Bunch):
"""Subclass of Bunch to write argparse args more naturally."""
def __init__(self, *flags, **kwargs):
super().__init__(flags=tuple(flags), kwargs=kwargs)
| Args |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-cart/source_cart/streams.py | {
"start": 6107,
"end": 6245
} | class ____(IncrementalCartStream):
"""
Docs: https://developers.cart.com/docs/rest-api/b3A6MjMzMTc3Njc-get-addresses
"""
| Addresses |
python | huggingface__transformers | src/transformers/models/mt5/modeling_mt5.py | {
"start": 17731,
"end": 19148
} | class ____(nn.Module):
def __init__(self, config, layer_idx: Optional[int] = None):
super().__init__()
self.EncDecAttention = MT5Attention(config, has_relative_attention_bias=False, layer_idx=layer_idx)
self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
past_key_values=None,
use_cache=False,
query_length=None,
output_attentions=False,
cache_position=None,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
normed_hidden_states,
mask=attention_mask,
key_value_states=key_value_states,
position_bias=position_bias,
past_key_values=past_key_values,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
cache_position=cache_position,
)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.t5.modeling_t5.T5Block with T5->MT5
| MT5LayerCrossAttention |
python | great-expectations__great_expectations | great_expectations/core/expectation_diagnostics/supporting_types.py | {
"start": 4070,
"end": 4255
} | class ____(SerializableDictDot):
error_msg: str
stack_trace: str
test_title: Optional[str] = None
test_backend: Optional[str] = None
@dataclass
| ExpectationErrorDiagnostics |
python | tensorflow__tensorflow | tensorflow/python/ops/init_ops_test.py | {
"start": 1282,
"end": 8569
} | class ____(test.TestCase):
def _runner(self,
init,
shape,
target_mean=None,
target_std=None,
target_max=None,
target_min=None):
output = self.evaluate(init(shape))
self.assertEqual(output.shape, shape)
lim = 3e-2
if target_std is not None:
self.assertGreater(lim, abs(output.std() - target_std))
if target_mean is not None:
self.assertGreater(lim, abs(output.mean() - target_mean))
if target_max is not None:
self.assertGreater(lim, abs(output.max() - target_max))
if target_min is not None:
self.assertGreater(lim, abs(output.min() - target_min))
def test_uniform(self):
shape = (9, 6, 99)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
self._runner(
init_ops.RandomUniform(minval=-1, maxval=1, seed=124),
tensor_shape,
target_mean=0.,
target_max=1,
target_min=-1)
def test_normal(self):
shape = (8, 12, 99)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
self._runner(
init_ops.RandomNormal(mean=0, stddev=1, seed=153),
tensor_shape,
target_mean=0.,
target_std=1)
def test_truncated_normal(self):
shape = (12, 99, 7)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
self._runner(
init_ops.TruncatedNormal(mean=0, stddev=1, seed=126),
tensor_shape,
target_mean=0.,
target_max=2,
target_min=-2)
def test_constant(self):
shape = (5, 6, 4)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
self._runner(
init_ops.Constant(2),
tensor_shape,
target_mean=2,
target_max=2,
target_min=2)
def test_lecun_uniform(self):
shape = (5, 6, 4, 2)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(1. / fan_in)
self._runner(
init_ops.lecun_uniform(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_glorot_uniform_initializer(self):
shape = (5, 6, 4, 2)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
fan_in, fan_out = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / (fan_in + fan_out))
self._runner(
init_ops.glorot_uniform_initializer(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_he_uniform(self):
shape = (5, 6, 4, 2)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / fan_in)
self._runner(
init_ops.he_uniform(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_lecun_normal(self):
shape = (5, 6, 4, 2)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(1. / fan_in)
self._runner(
init_ops.lecun_normal(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_glorot_normal_initializer(self):
shape = (5, 6, 4, 2)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
fan_in, fan_out = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / (fan_in + fan_out))
self._runner(
init_ops.glorot_normal_initializer(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_he_normal(self):
shape = (5, 6, 4, 2)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
fan_in, _ = init_ops._compute_fans(tensor_shape)
std = np.sqrt(2. / fan_in)
self._runner(
init_ops.he_normal(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_Orthogonal(self):
shape = (20, 20)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
self._runner(
init_ops.Orthogonal(seed=123), tensor_shape, target_mean=0.)
@test_util.run_gpu_only
def testVariablePlacementWithOrthogonalInitializer(self):
with ops.Graph().as_default() as g:
with ops.device('gpu:0'):
variable_scope.get_variable(
name='v', shape=[8, 2], initializer=init_ops.Orthogonal)
variable_scope.get_variable(
name='w', shape=[8, 2], initializer=init_ops.RandomNormal)
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
config = config_pb2.ConfigProto(
allow_soft_placement=False, log_device_placement=True)
# Note: allow_soft_placement=False will fail whenever we cannot satisfy
# the colocation constraints.
with session.Session(config=config, graph=g) as sess:
sess.run(
variables.global_variables_initializer(),
options=run_options,
run_metadata=run_metadata)
@test_util.run_gpu_only
def test_eager_orthogonal_gpu(self):
with context.eager_mode():
v = variable_scope.get_variable(
name='v', shape=[8, 2], initializer=init_ops.Orthogonal)
w = variable_scope.get_variable(
name='w', shape=[8, 2], initializer=init_ops.RandomNormal)
self.assertTrue('GPU' in v.handle.device)
self.assertTrue('GPU' in w.handle.device)
def test_Identity(self):
with self.cached_session():
shape = (3, 4, 5)
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
with self.assertRaises(ValueError):
self._runner(
init_ops.Identity(),
tensor_shape,
target_mean=1. / int(tensor_shape[0]),
target_max=1.)
shape = (3, 3)
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
self._runner(
init_ops.Identity(),
tensor_shape,
target_mean=1. / int(tensor_shape[0]),
target_max=1.)
def test_Zeros(self):
shape = (4, 5)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
self._runner(
init_ops.Zeros(), tensor_shape, target_mean=0., target_max=0.)
def test_Ones(self):
shape = (4, 5)
with self.cached_session():
for tensor_shape in [shape, tensor_shape_lib.TensorShape(shape)]:
self._runner(
init_ops.Ones(), tensor_shape, target_mean=1., target_max=1.)
if __name__ == '__main__':
test.main()
| InitializersTest |
python | pytorch__pytorch | torch/jit/_script.py | {
"start": 65230,
"end": 67324
} | class ____:
def __init__(self) -> None:
self.profile = classes.profiling._ScriptProfile()
def enable(self):
self.profile.enable()
def disable(self):
self.profile.disable()
def dump_string(self) -> str:
outputs: list[str] = []
for source_stats in self.profile._dump_stats():
source_ref = source_stats.source()
source_lines = source_ref.text().splitlines()
dedent = min(len(line) - len(line.lstrip(" ")) for line in source_lines)
source_lines = [line[dedent:] for line in source_lines]
start_line = source_ref.starting_lineno()
end_line = start_line + len(source_lines)
source_range = range(start_line, end_line)
lineno = _ScriptProfileColumn("Line #")
hits = _ScriptProfileColumn("Hits")
time_ns = _ScriptProfileColumn("Time (ns)")
line_contents = _ScriptProfileColumn("Line Contents", 0, 1)
stats = source_stats.line_map()
for line in source_range:
lineno.add_row(line, line)
line_contents.add_row(line, source_lines[line - start_line])
stat = stats.get(line)
if stat is not None:
hits.add_row(line, stat.count())
time_ns.add_row(line, stat.duration_ns())
table = _ScriptProfileTable(
[lineno, hits, time_ns, line_contents], list(source_range)
)
outputs.append(table.dump_string())
return "\n\n".join(outputs)
def dump(self):
print(self.dump_string())
def _unwrap_optional(x):
assert x is not None, "Unwrapping null optional"
return x
_register_builtin(_unwrap_optional, "aten::_unwrap_optional")
_register_builtin(_jit_internal.is_scripting, "aten::is_scripting")
_register_builtin(has_torch_function, "aten::has_torch_function")
_register_builtin(has_torch_function_unary, "aten::has_torch_function")
_register_builtin(has_torch_function_variadic, "aten::has_torch_function")
| _ScriptProfile |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py | {
"start": 4642,
"end": 5933
} | class ____(LlamaRotaryEmbedding):
def __init__(self, config: HunYuanDenseV1Config, device=None):
        nn.Module.__init__(self)
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
# Diff from Llama - DynamicNTKAlphaRotary
if self.rope_type == "dynamic" and self.config.rope_parameters.get("alpha"):
self.dim = config.head_dim
base = self.config.rope_parameters["rope_theta"] * self.config.rope_parameters["alpha"] ** (
self.config.head_dim / (self.config.head_dim - 2)
)
inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.config.head_dim))
self.attention_scaling = 1.0
else:
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
| HunYuanDenseV1RotaryEmbedding |
python | sympy__sympy | sympy/functions/special/bessel.py | {
"start": 63011,
"end": 66326
} | class ____(DefinedFunction):
r"""
The Marcum Q-function.
Explanation
===========
The Marcum Q-function is defined by the meromorphic continuation of
.. math::
Q_m(a, b) = a^{- m + 1} \int_{b}^{\infty} x^{m} e^{- \frac{a^{2}}{2} - \frac{x^{2}}{2}} I_{m - 1}\left(a x\right)\, dx
Examples
========
>>> from sympy import marcumq
>>> from sympy.abc import m, a, b
>>> marcumq(m, a, b)
marcumq(m, a, b)
Special values:
>>> marcumq(m, 0, b)
uppergamma(m, b**2/2)/gamma(m)
>>> marcumq(0, 0, 0)
0
>>> marcumq(0, a, 0)
1 - exp(-a**2/2)
>>> marcumq(1, a, a)
1/2 + exp(-a**2)*besseli(0, a**2)/2
>>> marcumq(2, a, a)
1/2 + exp(-a**2)*besseli(0, a**2)/2 + exp(-a**2)*besseli(1, a**2)
Differentiation with respect to $a$ and $b$ is supported:
>>> from sympy import diff
>>> diff(marcumq(m, a, b), a)
a*(-marcumq(m, a, b) + marcumq(m + 1, a, b))
>>> diff(marcumq(m, a, b), b)
-a**(1 - m)*b**m*exp(-a**2/2 - b**2/2)*besseli(m - 1, a*b)
References
==========
.. [1] https://en.wikipedia.org/wiki/Marcum_Q-function
.. [2] https://mathworld.wolfram.com/MarcumQ-Function.html
"""
@classmethod
def eval(cls, m, a, b):
if a is S.Zero:
if m is S.Zero and b is S.Zero:
return S.Zero
return uppergamma(m, b**2 * S.Half) / gamma(m)
if m is S.Zero and b is S.Zero:
return 1 - 1 / exp(a**2 * S.Half)
if a == b:
if m is S.One:
return (1 + exp(-a**2) * besseli(0, a**2))*S.Half
if m == 2:
return S.Half + S.Half * exp(-a**2) * besseli(0, a**2) + exp(-a**2) * besseli(1, a**2)
if a.is_zero:
if m.is_zero and b.is_zero:
return S.Zero
return uppergamma(m, b**2*S.Half) / gamma(m)
if m.is_zero and b.is_zero:
return 1 - 1 / exp(a**2*S.Half)
def fdiff(self, argindex=2):
m, a, b = self.args
if argindex == 2:
return a * (-marcumq(m, a, b) + marcumq(1+m, a, b))
elif argindex == 3:
return (-b**m / a**(m-1)) * exp(-(a**2 + b**2)/2) * besseli(m-1, a*b)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_Integral(self, m, a, b, **kwargs):
from sympy.integrals.integrals import Integral
x = kwargs.get('x', Dummy(uniquely_named_symbol('x').name))
return a ** (1 - m) * \
Integral(x**m * exp(-(x**2 + a**2)/2) * besseli(m-1, a*x), [x, b, S.Infinity])
def _eval_rewrite_as_Sum(self, m, a, b, **kwargs):
from sympy.concrete.summations import Sum
k = kwargs.get('k', Dummy('k'))
return exp(-(a**2 + b**2) / 2) * Sum((a/b)**k * besseli(k, a*b), [k, 1-m, S.Infinity])
def _eval_rewrite_as_besseli(self, m, a, b, **kwargs):
if a == b:
if m == 1:
return (1 + exp(-a**2) * besseli(0, a**2)) / 2
if m.is_Integer and m >= 2:
s = sum(besseli(i, a**2) for i in range(1, m))
return S.Half + exp(-a**2) * besseli(0, a**2) / 2 + exp(-a**2) * s
def _eval_is_zero(self):
if all(arg.is_zero for arg in self.args):
return True
| marcumq |
python | matplotlib__matplotlib | lib/matplotlib/figure.py | {
"start": 3775,
"end": 84737
} | class ____(Artist):
"""
Base class for `.Figure` and `.SubFigure` containing the methods that add
artists to the figure or subfigure, create Axes, etc.
"""
def __init__(self, **kwargs):
super().__init__()
# remove the non-figure artist _axes property
# as it makes no sense for a figure to be _in_ an Axes
# this is used by the property methods in the artist base class
# which are over-ridden in this class
del self._axes
self._suptitle = None
self._supxlabel = None
self._supylabel = None
# groupers to keep track of x, y labels and title we want to align.
# see self.align_xlabels, self.align_ylabels,
# self.align_titles, and axis._get_tick_boxes_siblings
self._align_label_groups = {
"x": cbook.Grouper(),
"y": cbook.Grouper(),
"title": cbook.Grouper()
}
self._localaxes = [] # track all Axes
self.artists = []
self.lines = []
self.patches = []
self.texts = []
self.images = []
self.legends = []
self.subfigs = []
self.stale = True
self.suppressComposite = None
self.set(**kwargs)
def _get_draw_artists(self, renderer):
"""Also runs apply_aspect"""
artists = self.get_children()
artists.remove(self.patch)
artists = sorted(
(artist for artist in artists if not artist.get_animated()),
key=lambda artist: artist.get_zorder())
for ax in self._localaxes:
locator = ax.get_axes_locator()
ax.apply_aspect(locator(ax, renderer) if locator else None)
for child in ax.get_children():
if hasattr(child, 'apply_aspect'):
locator = child.get_axes_locator()
child.apply_aspect(
locator(child, renderer) if locator else None)
return artists
def autofmt_xdate(
self, bottom=0.2, rotation=30, ha='right', which='major'):
"""
Date ticklabels often overlap, so it is useful to rotate them
and right align them. Also, a common use case is a number of
subplots with shared x-axis where the x-axis is date data. The
ticklabels are often long, and it helps to rotate them on the
bottom subplot and turn them off on other subplots, as well as
turn off xlabels.
Parameters
----------
bottom : float, default: 0.2
The bottom of the subplots for `subplots_adjust`.
rotation : float, default: 30 degrees
The rotation angle of the xtick labels in degrees.
ha : {'left', 'center', 'right'}, default: 'right'
The horizontal alignment of the xticklabels.
which : {'major', 'minor', 'both'}, default: 'major'
Selects which ticklabels to rotate.
"""
_api.check_in_list(['major', 'minor', 'both'], which=which)
axes = [ax for ax in self.axes if ax._label != '<colorbar>']
allsubplots = all(ax.get_subplotspec() for ax in axes)
if len(axes) == 1:
for label in self.axes[0].get_xticklabels(which=which):
label.set_ha(ha)
label.set_rotation(rotation)
else:
if allsubplots:
for ax in axes:
if ax.get_subplotspec().is_last_row():
for label in ax.get_xticklabels(which=which):
label.set_ha(ha)
label.set_rotation(rotation)
else:
for label in ax.get_xticklabels(which=which):
label.set_visible(False)
ax.set_xlabel('')
engine = self.get_layout_engine()
if allsubplots and (engine is None or engine.adjust_compatible):
self.subplots_adjust(bottom=bottom)
self.stale = True
def get_children(self):
"""Get a list of artists contained in the figure."""
return [self.patch,
*self.artists,
*self._localaxes,
*self.lines,
*self.patches,
*self.texts,
*self.images,
*self.legends,
*self.subfigs]
def get_figure(self, root=None):
"""
Return the `.Figure` or `.SubFigure` instance the (Sub)Figure belongs to.
Parameters
----------
root : bool, default=True
If False, return the (Sub)Figure this artist is on. If True,
return the root Figure for a nested tree of SubFigures.
.. deprecated:: 3.10
From version 3.12 *root* will default to False.
"""
if self._root_figure is self:
# Top level Figure
return self
if self._parent is self._root_figure:
# Return early to prevent the deprecation warning when *root* does not
# matter
return self._parent
if root is None:
# When deprecation expires, consider removing the docstring and just
# inheriting the one from Artist.
message = ('From Matplotlib 3.12 SubFigure.get_figure will by default '
'return the direct parent figure, which may be a SubFigure. '
'To suppress this warning, pass the root parameter. Pass '
'`True` to maintain the old behavior and `False` to opt-in to '
'the future behavior.')
_api.warn_deprecated('3.10', message=message)
root = True
if root:
return self._root_figure
return self._parent
def set_figure(self, fig):
"""
.. deprecated:: 3.10
Currently this method will raise an exception if *fig* is anything other
than the root `.Figure` this (Sub)Figure is on. In future it will always
raise an exception.
"""
no_switch = ("The parent and root figures of a (Sub)Figure are set at "
"instantiation and cannot be changed.")
if fig is self._root_figure:
_api.warn_deprecated(
"3.10",
message=(f"{no_switch} From Matplotlib 3.12 this operation will raise "
"an exception."))
return
raise ValueError(no_switch)
figure = property(functools.partial(get_figure, root=True), set_figure,
doc=("The root `Figure`. To get the parent of a `SubFigure`, "
"use the `get_figure` method."))
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the figure.
Returns
-------
bool, {}
"""
if self._different_canvas(mouseevent):
return False, {}
inside = self.bbox.contains(mouseevent.x, mouseevent.y)
return inside, {}
def get_window_extent(self, renderer=None):
# docstring inherited
return self.bbox
def _suplabels(self, t, info, **kwargs):
"""
Add a centered %(name)s to the figure.
Parameters
----------
t : str
The %(name)s text.
x : float, default: %(x0)s
The x location of the text in figure coordinates.
y : float, default: %(y0)s
The y location of the text in figure coordinates.
horizontalalignment, ha : {'center', 'left', 'right'}, default: %(ha)s
The horizontal alignment of the text relative to (*x*, *y*).
verticalalignment, va : {'top', 'center', 'bottom', 'baseline'}, \
default: %(va)s
The vertical alignment of the text relative to (*x*, *y*).
fontsize, size : default: :rc:`figure.%(rc)ssize`
The font size of the text. See `.Text.set_size` for possible
values.
fontweight, weight : default: :rc:`figure.%(rc)sweight`
The font weight of the text. See `.Text.set_weight` for possible
values.
Returns
-------
text
The `.Text` instance of the %(name)s.
Other Parameters
----------------
fontproperties : None or dict, optional
A dict of font properties. If *fontproperties* is given the
default values for font size and weight are taken from the
`.FontProperties` defaults. :rc:`figure.%(rc)ssize` and
:rc:`figure.%(rc)sweight` are ignored in this case.
**kwargs
Additional kwargs are `matplotlib.text.Text` properties.
"""
x = kwargs.pop('x', None)
y = kwargs.pop('y', None)
if info['name'] in ['_supxlabel', '_suptitle']:
autopos = y is None
elif info['name'] == '_supylabel':
autopos = x is None
if x is None:
x = info['x0']
if y is None:
y = info['y0']
kwargs = cbook.normalize_kwargs(kwargs, Text)
kwargs.setdefault('horizontalalignment', info['ha'])
kwargs.setdefault('verticalalignment', info['va'])
kwargs.setdefault('rotation', info['rotation'])
if 'fontproperties' not in kwargs:
kwargs.setdefault('fontsize', mpl.rcParams[info['size']])
kwargs.setdefault('fontweight', mpl.rcParams[info['weight']])
suplab = getattr(self, info['name'])
if suplab is not None:
suplab.set_text(t)
suplab.set_position((x, y))
suplab.set(**kwargs)
else:
suplab = self.text(x, y, t, **kwargs)
setattr(self, info['name'], suplab)
suplab._autopos = autopos
self.stale = True
return suplab
@_docstring.Substitution(x0=0.5, y0=0.98, name='super title', ha='center',
va='top', rc='title')
@_docstring.copy(_suplabels)
def suptitle(self, t, **kwargs):
# docstring from _suplabels...
info = {'name': '_suptitle', 'x0': 0.5, 'y0': 0.98,
'ha': 'center', 'va': 'top', 'rotation': 0,
'size': 'figure.titlesize', 'weight': 'figure.titleweight'}
return self._suplabels(t, info, **kwargs)
def get_suptitle(self):
"""Return the suptitle as string or an empty string if not set."""
text_obj = self._suptitle
return "" if text_obj is None else text_obj.get_text()
@_docstring.Substitution(x0=0.5, y0=0.01, name='super xlabel', ha='center',
va='bottom', rc='label')
@_docstring.copy(_suplabels)
def supxlabel(self, t, **kwargs):
# docstring from _suplabels...
info = {'name': '_supxlabel', 'x0': 0.5, 'y0': 0.01,
'ha': 'center', 'va': 'bottom', 'rotation': 0,
'size': 'figure.labelsize', 'weight': 'figure.labelweight'}
return self._suplabels(t, info, **kwargs)
def get_supxlabel(self):
"""Return the supxlabel as string or an empty string if not set."""
text_obj = self._supxlabel
return "" if text_obj is None else text_obj.get_text()
@_docstring.Substitution(x0=0.02, y0=0.5, name='super ylabel', ha='left',
va='center', rc='label')
@_docstring.copy(_suplabels)
def supylabel(self, t, **kwargs):
# docstring from _suplabels...
info = {'name': '_supylabel', 'x0': 0.02, 'y0': 0.5,
'ha': 'left', 'va': 'center', 'rotation': 'vertical',
'rotation_mode': 'anchor', 'size': 'figure.labelsize',
'weight': 'figure.labelweight'}
return self._suplabels(t, info, **kwargs)
def get_supylabel(self):
"""Return the supylabel as string or an empty string if not set."""
text_obj = self._supylabel
return "" if text_obj is None else text_obj.get_text()
def get_edgecolor(self):
"""Get the edge color of the Figure rectangle."""
return self.patch.get_edgecolor()
def get_facecolor(self):
"""Get the face color of the Figure rectangle."""
return self.patch.get_facecolor()
def get_frameon(self):
"""
Return the figure's background patch visibility, i.e.
whether the figure background will be drawn. Equivalent to
``Figure.patch.get_visible()``.
"""
return self.patch.get_visible()
def set_linewidth(self, linewidth):
"""
Set the line width of the Figure rectangle.
Parameters
----------
linewidth : number
"""
self.patch.set_linewidth(linewidth)
def get_linewidth(self):
"""
Get the line width of the Figure rectangle.
"""
return self.patch.get_linewidth()
def set_edgecolor(self, color):
"""
Set the edge color of the Figure rectangle.
Parameters
----------
color : :mpltype:`color`
"""
self.patch.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the face color of the Figure rectangle.
Parameters
----------
color : :mpltype:`color`
"""
self.patch.set_facecolor(color)
def set_frameon(self, b):
"""
Set the figure's background patch visibility, i.e.
whether the figure background will be drawn. Equivalent to
``Figure.patch.set_visible()``.
Parameters
----------
b : bool
"""
self.patch.set_visible(b)
self.stale = True
frameon = property(get_frameon, set_frameon)
def add_artist(self, artist, clip=False):
"""
Add an `.Artist` to the figure.
Usually artists are added to `~.axes.Axes` objects using
`.Axes.add_artist`; this method can be used in the rare cases where
one needs to add artists directly to the figure instead.
Parameters
----------
artist : `~matplotlib.artist.Artist`
The artist to add to the figure. If the added artist has no
transform previously set, its transform will be set to
``figure.transSubfigure``.
clip : bool, default: False
Whether the added artist should be clipped by the figure patch.
Returns
-------
`~matplotlib.artist.Artist`
The added artist.
"""
artist.set_figure(self)
self.artists.append(artist)
artist._remove_method = self.artists.remove
if not artist.is_transform_set():
artist.set_transform(self.transSubfigure)
if clip and artist.get_clip_path() is None:
artist.set_clip_path(self.patch)
self.stale = True
return artist
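# Editor's note: illustrative sketch of ``Figure.add_artist`` (not part of the
# original source). A Line2D created without a transform is placed in figure
# coordinates, as described in the docstring above.
import matplotlib.pyplot as plt
import matplotlib.lines as mlines

fig = plt.figure()
fig.subplots(2, 2)
# Drawn directly on the figure, spanning it diagonally regardless of the Axes.
fig.add_artist(mlines.Line2D([0.05, 0.95], [0.05, 0.95], color='tab:red'))
plt.show()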
@_docstring.interpd
def add_axes(self, *args, **kwargs):
"""
Add an `~.axes.Axes` to the figure.
Call signatures::
add_axes(rect, projection=None, polar=False, **kwargs)
add_axes(ax)
Parameters
----------
rect : tuple (left, bottom, width, height)
The dimensions (left, bottom, width, height) of the new
`~.axes.Axes`. All quantities are in fractions of figure width and
height.
projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
The projection type of the `~.axes.Axes`. *str* is the name of
a custom projection, see `~matplotlib.projections`. The default
None results in a 'rectilinear' projection.
polar : bool, default: False
If True, equivalent to projection='polar'.
axes_class : subclass type of `~.axes.Axes`, optional
The `.axes.Axes` subclass that is instantiated. This parameter
is incompatible with *projection* and *polar*. See
:ref:`axisartist_users-guide-index` for examples.
sharex, sharey : `~matplotlib.axes.Axes`, optional
Share the x or y `~matplotlib.axis` with sharex and/or sharey.
The axis will have the same limits, ticks, and scale as the axis
of the shared Axes.
label : str
A label for the returned Axes.
Returns
-------
`~.axes.Axes`, or a subclass of `~.axes.Axes`
The returned Axes class depends on the projection used. It is
`~.axes.Axes` if rectilinear projection is used and
`.projections.polar.PolarAxes` if polar projection is used.
Other Parameters
----------------
**kwargs
This method also takes the keyword arguments for
the returned Axes class. The keyword arguments for the
rectilinear Axes class `~.axes.Axes` can be found in
the following table but there might also be other keyword
arguments if another projection is used, see the actual Axes
class.
%(Axes:kwdoc)s
Notes
-----
In rare circumstances, `.add_axes` may be called with a single
argument, an Axes instance already created in the present figure but
not in the figure's list of Axes.
See Also
--------
.Figure.add_subplot
.pyplot.subplot
.pyplot.axes
.Figure.subplots
.pyplot.subplots
Examples
--------
Some simple examples::
rect = l, b, w, h
fig = plt.figure()
fig.add_axes(rect)
fig.add_axes(rect, frameon=False, facecolor='g')
fig.add_axes(rect, polar=True)
ax = fig.add_axes(rect, projection='polar')
fig.delaxes(ax)
fig.add_axes(ax)
"""
if not len(args) and 'rect' not in kwargs:
raise TypeError("add_axes() missing 1 required positional argument: 'rect'")
elif 'rect' in kwargs:
if len(args):
raise TypeError("add_axes() got multiple values for argument 'rect'")
args = (kwargs.pop('rect'), )
if len(args) != 1:
raise _api.nargs_error("add_axes", 1, len(args))
if isinstance(args[0], Axes):
a, = args
key = a._projection_init
if a.get_figure(root=False) is not self:
raise ValueError(
"The Axes must have been created in the present figure")
else:
rect, = args
if not np.isfinite(rect).all():
raise ValueError(f'all entries in rect must be finite not {rect}')
projection_class, pkw = self._process_projection_requirements(**kwargs)
# create the new Axes using the Axes class given
a = projection_class(self, rect, **pkw)
key = (projection_class, pkw)
return self._add_axes_internal(a, key)
@_docstring.interpd
def add_subplot(self, *args, **kwargs):
"""
Add an `~.axes.Axes` to the figure as part of a subplot arrangement.
Call signatures::
add_subplot(nrows, ncols, index, **kwargs)
add_subplot(pos, **kwargs)
add_subplot(ax)
add_subplot()
Parameters
----------
*args : int, (int, int, *index*), or `.SubplotSpec`, default: (1, 1, 1)
The position of the subplot described by one of
- Three integers (*nrows*, *ncols*, *index*). The subplot will
take the *index* position on a grid with *nrows* rows and
*ncols* columns. *index* starts at 1 in the upper left corner
and increases to the right. *index* can also be a two-tuple
specifying the (*first*, *last*) indices (1-based, and including
*last*) of the subplot, e.g., ``fig.add_subplot(3, 1, (1, 2))``
makes a subplot that spans the upper 2/3 of the figure.
- A 3-digit integer. The digits are interpreted as if given
separately as three single-digit integers, i.e.
``fig.add_subplot(235)`` is the same as
``fig.add_subplot(2, 3, 5)``. Note that this can only be used
if there are no more than 9 subplots.
- A `.SubplotSpec`.
In rare circumstances, `.add_subplot` may be called with a single
argument, a subplot Axes instance already created in the
present figure but not in the figure's list of Axes.
projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
The projection type of the subplot (`~.axes.Axes`). *str* is the
name of a custom projection, see `~matplotlib.projections`. The
default None results in a 'rectilinear' projection.
polar : bool, default: False
If True, equivalent to projection='polar'.
axes_class : subclass type of `~.axes.Axes`, optional
The `.axes.Axes` subclass that is instantiated. This parameter
is incompatible with *projection* and *polar*. See
:ref:`axisartist_users-guide-index` for examples.
sharex, sharey : `~matplotlib.axes.Axes`, optional
Share the x or y `~matplotlib.axis` with sharex and/or sharey.
The axis will have the same limits, ticks, and scale as the axis
of the shared Axes.
label : str
A label for the returned Axes.
Returns
-------
`~.axes.Axes`
The Axes of the subplot. The returned Axes can actually be an
instance of a subclass, such as `.projections.polar.PolarAxes` for
polar projections.
Other Parameters
----------------
**kwargs
This method also takes the keyword arguments for the returned Axes
base class; except for the *figure* argument. The keyword arguments
for the rectilinear base class `~.axes.Axes` can be found in
the following table but there might also be other keyword
arguments if another projection is used.
%(Axes:kwdoc)s
See Also
--------
.Figure.add_axes
.pyplot.subplot
.pyplot.axes
.Figure.subplots
.pyplot.subplots
Examples
--------
::
fig = plt.figure()
fig.add_subplot(231)
ax1 = fig.add_subplot(2, 3, 1) # equivalent but more general
fig.add_subplot(232, frameon=False) # subplot with no frame
fig.add_subplot(233, projection='polar') # polar subplot
fig.add_subplot(234, sharex=ax1) # subplot sharing x-axis with ax1
fig.add_subplot(235, facecolor="red") # red subplot
ax1.remove() # delete ax1 from the figure
fig.add_subplot(ax1) # add ax1 back to the figure
"""
if 'figure' in kwargs:
# Axes itself allows for a 'figure' kwarg, but since we want to
# bind the created Axes to self, it is not allowed here.
raise _api.kwarg_error("add_subplot", "figure")
if (len(args) == 1
and isinstance(args[0], mpl.axes._base._AxesBase)
and args[0].get_subplotspec()):
ax = args[0]
key = ax._projection_init
if ax.get_figure(root=False) is not self:
raise ValueError("The Axes must have been created in "
"the present figure")
else:
if not args:
args = (1, 1, 1)
# Normalize correct ijk values to (i, j, k) here so that
# add_subplot(211) == add_subplot(2, 1, 1). Invalid values will
# trigger errors later (via SubplotSpec._from_subplot_args).
if (len(args) == 1 and isinstance(args[0], Integral)
and 100 <= args[0] <= 999):
args = tuple(map(int, str(args[0])))
projection_class, pkw = self._process_projection_requirements(**kwargs)
ax = projection_class(self, *args, **pkw)
key = (projection_class, pkw)
return self._add_axes_internal(ax, key)
def _add_axes_internal(self, ax, key):
"""Private helper for `add_axes` and `add_subplot`."""
self._axstack.add(ax)
if ax not in self._localaxes:
self._localaxes.append(ax)
self.sca(ax)
ax._remove_method = self.delaxes
# this is to support plt.subplot's re-selection logic
ax._projection_init = key
self.stale = True
ax.stale_callback = _stale_figure_callback
return ax
def subplots(self, nrows=1, ncols=1, *, sharex=False, sharey=False,
squeeze=True, width_ratios=None, height_ratios=None,
subplot_kw=None, gridspec_kw=None):
"""
Add a set of subplots to this figure.
This utility wrapper makes it convenient to create common layouts of
subplots in a single call.
Parameters
----------
nrows, ncols : int, default: 1
Number of rows/columns of the subplot grid.
sharex, sharey : bool or {'none', 'all', 'row', 'col'}, default: False
Controls sharing of x-axis (*sharex*) or y-axis (*sharey*):
- True or 'all': x- or y-axis will be shared among all subplots.
- False or 'none': each subplot x- or y-axis will be independent.
- 'row': each subplot row will share an x- or y-axis.
- 'col': each subplot column will share an x- or y-axis.
When subplots have a shared x-axis along a column, only the x tick
labels of the bottom subplot are created. Similarly, when subplots
have a shared y-axis along a row, only the y tick labels of the
first column subplot are created. To later turn other subplots'
ticklabels on, use `~matplotlib.axes.Axes.tick_params`.
When subplots have a shared axis that has units, calling
`.Axis.set_units` will update each axis with the new units.
Note that it is not possible to unshare axes.
squeeze : bool, default: True
- If True, extra dimensions are squeezed out from the returned
array of Axes:
- if only one subplot is constructed (nrows=ncols=1), the
resulting single Axes object is returned as a scalar.
- for Nx1 or 1xM subplots, the returned object is a 1D numpy
object array of Axes objects.
- for NxM subplots with N>1 and M>1, the returned object is a 2D array.
- If False, no squeezing at all is done: the returned Axes object
is always a 2D array containing Axes instances, even if it ends
up being 1x1.
width_ratios : array-like of length *ncols*, optional
Defines the relative widths of the columns. Each column gets a
relative width of ``width_ratios[i] / sum(width_ratios)``.
If not given, all columns will have the same width. Equivalent
to ``gridspec_kw={'width_ratios': [...]}``.
height_ratios : array-like of length *nrows*, optional
Defines the relative heights of the rows. Each row gets a
relative height of ``height_ratios[i] / sum(height_ratios)``.
If not given, all rows will have the same height. Equivalent
to ``gridspec_kw={'height_ratios': [...]}``.
subplot_kw : dict, optional
Dict with keywords passed to the `.Figure.add_subplot` call used to
create each subplot.
gridspec_kw : dict, optional
Dict with keywords passed to the
`~matplotlib.gridspec.GridSpec` constructor used to create
the grid the subplots are placed on.
Returns
-------
`~.axes.Axes` or array of Axes
Either a single `~matplotlib.axes.Axes` object or an array of Axes
objects if more than one subplot was created. The dimensions of the
resulting array can be controlled with the *squeeze* keyword, see
above.
See Also
--------
.pyplot.subplots
.Figure.add_subplot
.pyplot.subplot
Examples
--------
::
# First create some toy data:
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Create a figure
fig = plt.figure()
# Create a subplot
ax = fig.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Create two subplots and unpack the output array immediately
ax1, ax2 = fig.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Create four polar Axes and access them through the returned array
axes = fig.subplots(2, 2, subplot_kw=dict(projection='polar'))
axes[0, 0].plot(x, y)
axes[1, 1].scatter(x, y)
# Share an X-axis with each column of subplots
fig.subplots(2, 2, sharex='col')
# Share a Y-axis with each row of subplots
fig.subplots(2, 2, sharey='row')
# Share both X- and Y-axes with all subplots
fig.subplots(2, 2, sharex='all', sharey='all')
# Note that this is the same as
fig.subplots(2, 2, sharex=True, sharey=True)
"""
gridspec_kw = dict(gridspec_kw or {})
if height_ratios is not None:
if 'height_ratios' in gridspec_kw:
raise ValueError("'height_ratios' must not be defined both as "
"parameter and as key in 'gridspec_kw'")
gridspec_kw['height_ratios'] = height_ratios
if width_ratios is not None:
if 'width_ratios' in gridspec_kw:
raise ValueError("'width_ratios' must not be defined both as "
"parameter and as key in 'gridspec_kw'")
gridspec_kw['width_ratios'] = width_ratios
gs = self.add_gridspec(nrows, ncols, figure=self, **gridspec_kw)
axs = gs.subplots(sharex=sharex, sharey=sharey, squeeze=squeeze,
subplot_kw=subplot_kw)
return axs
def delaxes(self, ax):
"""
Remove the `~.axes.Axes` *ax* from the figure; update the current Axes.
"""
self._remove_axes(ax, owners=[self._axstack, self._localaxes])
def _remove_axes(self, ax, owners):
"""
Common helper for removal of standard Axes (via delaxes) and of child Axes.
Parameters
----------
ax : `~.AxesBase`
The Axes to remove.
owners
List of objects (list or _AxesStack) "owning" the Axes, from which the Axes
will be remove()d.
"""
for owner in owners:
owner.remove(ax)
self._axobservers.process("_axes_change_event", self)
self.stale = True
self._root_figure.canvas.release_mouse(ax)
for name in ax._axis_names: # Break link between any shared Axes
grouper = ax._shared_axes[name]
siblings = [other for other in grouper.get_siblings(ax) if other is not ax]
if not siblings: # Axes was not shared along this axis; we're done.
continue
grouper.remove(ax)
# Formatters and locators may previously have been associated with the now
# removed axis. Update them to point to an axis still there (we can pick
# any of them, and use the first sibling).
remaining_axis = siblings[0]._axis_map[name]
remaining_axis.get_major_formatter().set_axis(remaining_axis)
remaining_axis.get_major_locator().set_axis(remaining_axis)
remaining_axis.get_minor_formatter().set_axis(remaining_axis)
remaining_axis.get_minor_locator().set_axis(remaining_axis)
ax._twinned_axes.remove(ax) # Break link between any twinned Axes.
def clear(self, keep_observers=False):
"""
Clear the figure.
Parameters
----------
keep_observers : bool, default: False
Set *keep_observers* to True if, for example,
a gui widget is tracking the Axes in the figure.
"""
self.suppressComposite = None
# first clear the Axes in any subfigures
for subfig in self.subfigs:
subfig.clear(keep_observers=keep_observers)
self.subfigs = []
for ax in tuple(self.axes): # Iterate over the copy.
ax.clear()
self.delaxes(ax) # Remove ax from self._axstack.
self.artists = []
self.lines = []
self.patches = []
self.texts = []
self.images = []
self.legends = []
self.subplotpars.reset()
if not keep_observers:
self._axobservers = cbook.CallbackRegistry()
self._suptitle = None
self._supxlabel = None
self._supylabel = None
self.stale = True
# synonym for `clear`.
def clf(self, keep_observers=False):
"""
[*Discouraged*] Alias for the `clear()` method.
.. admonition:: Discouraged
The use of ``clf()`` is discouraged. Use ``clear()`` instead.
Parameters
----------
keep_observers : bool, default: False
Set *keep_observers* to True if, for example,
a gui widget is tracking the Axes in the figure.
"""
return self.clear(keep_observers=keep_observers)
# Note: the docstring below is modified with replace for the pyplot
# version of this function because the method name differs (plt.figlegend)
# the replacements are:
# " legend(" -> " figlegend(" for the signatures
# "fig.legend(" -> "plt.figlegend" for the code examples
# "ax.plot" -> "plt.plot" for consistency in using pyplot when able
@_docstring.interpd
def legend(self, *args, **kwargs):
"""
Place a legend on the figure.
Call signatures::
legend()
legend(handles, labels)
legend(handles=handles)
legend(labels)
The call signatures correspond to the following different ways to use
this method:
**1. Automatic detection of elements to be shown in the legend**
The elements to be added to the legend are automatically determined,
when you do not pass in any extra arguments.
In this case, the labels are taken from the artist. You can specify
them either at artist creation or by calling the
:meth:`~.Artist.set_label` method on the artist::
ax.plot([1, 2, 3], label='Inline label')
fig.legend()
or::
line, = ax.plot([1, 2, 3])
line.set_label('Label via method')
fig.legend()
Specific lines can be excluded from the automatic legend element
selection by defining a label starting with an underscore.
This is the default for all artists, so calling `.Figure.legend` without
any arguments and without setting the labels manually will result in
no legend being drawn.
**2. Explicitly listing the artists and labels in the legend**
For full control of which artists have a legend entry, it is possible
to pass an iterable of legend artists followed by an iterable of
legend labels respectively::
fig.legend([line1, line2, line3], ['label1', 'label2', 'label3'])
**3. Explicitly listing the artists in the legend**
This is similar to 2, but the labels are taken from the artists'
label properties. Example::
line1, = ax1.plot([1, 2, 3], label='label1')
line2, = ax2.plot([1, 2, 3], label='label2')
fig.legend(handles=[line1, line2])
**4. Labeling existing plot elements**
.. admonition:: Discouraged
This call signature is discouraged, because the relation between
plot elements and labels is only implicit by their order and can
easily be mixed up.
To make a legend for all artists on all Axes, call this function with
an iterable of strings, one for each legend item. For example::
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot([1, 3, 5], color='blue')
ax2.plot([2, 4, 6], color='red')
fig.legend(['the blues', 'the reds'])
Parameters
----------
handles : list of `.Artist`, optional
A list of Artists (lines, patches) to be added to the legend.
Use this together with *labels*, if you need full control on what
is shown in the legend and the automatic mechanism described above
is not sufficient.
The length of handles and labels should be the same in this
case. If they are not, they are truncated to the smaller length.
labels : list of str, optional
A list of labels to show next to the artists.
Use this together with *handles*, if you need full control on what
is shown in the legend and the automatic mechanism described above
is not sufficient.
Returns
-------
`~matplotlib.legend.Legend`
Other Parameters
----------------
%(_legend_kw_figure)s
See Also
--------
.Axes.legend
Notes
-----
Some artists are not supported by this function. See
:ref:`legend_guide` for details.
"""
handles, labels, kwargs = mlegend._parse_legend_args(self.axes, *args, **kwargs)
# explicitly set the bbox transform if the user hasn't.
kwargs.setdefault("bbox_transform", self.transSubfigure)
l = mlegend.Legend(self, handles, labels, **kwargs)
self.legends.append(l)
l._remove_method = self.legends.remove
self.stale = True
return l
@_docstring.interpd
def text(self, x, y, s, fontdict=None, **kwargs):
"""
Add text to figure.
Parameters
----------
x, y : float
The position to place the text. By default, this is in figure
coordinates, floats in [0, 1]. The coordinate system can be changed
using the *transform* keyword.
s : str
The text string.
fontdict : dict, optional
A dictionary to override the default text properties. If not given,
the defaults are determined by :rc:`font.*`. Properties passed as
*kwargs* override the corresponding ones given in *fontdict*.
Returns
-------
`~.text.Text`
Other Parameters
----------------
**kwargs : `~matplotlib.text.Text` properties
Other miscellaneous text parameters.
%(Text:kwdoc)s
See Also
--------
.Axes.text
.pyplot.text
"""
effective_kwargs = {
'transform': self.transSubfigure,
**(fontdict if fontdict is not None else {}),
**kwargs,
}
text = Text(x=x, y=y, text=s, **effective_kwargs)
text.set_figure(self)
text.stale_callback = _stale_figure_callback
self.texts.append(text)
text._remove_method = self.texts.remove
self.stale = True
return text
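# Editor's note: illustrative sketch of ``Figure.text`` (not part of the original
# source); coordinates are figure fractions as described above.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
fig.text(0.5, 0.01, 'Figure-level caption', ha='center', fontsize=9)
plt.show()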
@_docstring.interpd
def colorbar(
self, mappable, cax=None, ax=None, use_gridspec=True, **kwargs):
"""
Add a colorbar to a plot.
Parameters
----------
mappable
The `matplotlib.colorizer.ColorizingArtist` (i.e., `.AxesImage`,
`.ContourSet`, etc.) described by this colorbar. This argument is
mandatory for the `.Figure.colorbar` method but optional for the
`.pyplot.colorbar` function, which sets the default to the current
image.
Note that one can create a `.colorizer.ColorizingArtist` "on-the-fly"
to generate colorbars not attached to a previously drawn artist, e.g.
::
cr = colorizer.Colorizer(norm=norm, cmap=cmap)
fig.colorbar(colorizer.ColorizingArtist(cr), ax=ax)
cax : `~matplotlib.axes.Axes`, optional
Axes into which the colorbar will be drawn. If `None`, then a new
Axes is created and the space for it will be stolen from the Axes(s)
specified in *ax*.
ax : `~matplotlib.axes.Axes` or iterable or `numpy.ndarray` of Axes, optional
The one or more parent Axes from which space for a new colorbar Axes
will be stolen. This parameter is only used if *cax* is not set.
Defaults to the Axes that contains the mappable used to create the
colorbar.
use_gridspec : bool, optional
If *cax* is ``None``, a new *cax* is created as an instance of
Axes. If *ax* is positioned with a subplotspec and *use_gridspec*
is ``True``, then *cax* is also positioned with a subplotspec.
Returns
-------
colorbar : `~matplotlib.colorbar.Colorbar`
Other Parameters
----------------
%(_make_axes_kw_doc)s
%(_colormap_kw_doc)s
Notes
-----
If *mappable* is a `~.contour.ContourSet`, its *extend* kwarg is
included automatically.
The *shrink* kwarg provides a simple way to scale the colorbar with
respect to the Axes. Note that if *cax* is specified, it determines the
size of the colorbar, and *shrink* and *aspect* are ignored.
For more precise control, you can manually specify the positions of the
axes objects in which the mappable and the colorbar are drawn. In this
case, do not use any of the Axes properties kwargs.
It is known that some vector graphics viewers (svg and pdf) render
white gaps between segments of the colorbar. This is due to bugs in
the viewers, not Matplotlib. As a workaround, the colorbar can be
rendered with overlapping segments::
cbar = colorbar()
cbar.solids.set_edgecolor("face")
draw()
However, this has negative consequences in other circumstances, e.g.
with semi-transparent images (alpha < 1) and colorbar extensions;
therefore, this workaround is not used by default (see issue #1188).
"""
if ax is None:
ax = getattr(mappable, "axes", None)
if cax is None:
if ax is None:
raise ValueError(
'Unable to determine Axes to steal space for Colorbar. '
'Either provide the *cax* argument to use as the Axes for '
'the Colorbar, provide the *ax* argument to steal space '
'from it, or add *mappable* to an Axes.')
fig = ( # Figure of first Axes; logic copied from make_axes.
[*ax.flat] if isinstance(ax, np.ndarray)
else [*ax] if np.iterable(ax)
else [ax])[0].get_figure(root=False)
current_ax = fig.gca()
if (fig.get_layout_engine() is not None and
not fig.get_layout_engine().colorbar_gridspec):
use_gridspec = False
if (use_gridspec
and isinstance(ax, mpl.axes._base._AxesBase)
and ax.get_subplotspec()):
cax, kwargs = cbar.make_axes_gridspec(ax, **kwargs)
else:
cax, kwargs = cbar.make_axes(ax, **kwargs)
# make_axes calls add_{axes,subplot} which changes gca; undo that.
fig.sca(current_ax)
cax.grid(visible=False, which='both', axis='both')
if (hasattr(mappable, "get_figure") and
(mappable_host_fig := mappable.get_figure(root=True)) is not None):
# Warn in case of mismatch
if mappable_host_fig is not self._root_figure:
_api.warn_external(
f'Adding colorbar to a different Figure '
f'{repr(mappable_host_fig)} than '
f'{repr(self._root_figure)} which '
f'fig.colorbar is called on.')
NON_COLORBAR_KEYS = [ # remove kws that cannot be passed to Colorbar
'fraction', 'pad', 'shrink', 'aspect', 'anchor', 'panchor']
cb = cbar.Colorbar(cax, mappable, **{
k: v for k, v in kwargs.items() if k not in NON_COLORBAR_KEYS})
cax.get_figure(root=False).stale = True
return cb
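# Editor's note: illustrative sketch of ``Figure.colorbar`` (not part of the
# original source); the image data is random and the kwargs are example values.
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
im = ax.imshow(np.random.default_rng(0).random((10, 10)), cmap='viridis')
# Steal space from *ax* for the colorbar Axes and shrink it relative to *ax*.
fig.colorbar(im, ax=ax, shrink=0.8, label='intensity')
plt.show()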
def subplots_adjust(self, left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
Adjust the subplot layout parameters.
Unset parameters are left unmodified; initial values are given by
:rc:`figure.subplot.[name]`.
.. plot:: _embedded_plots/figure_subplots_adjust.py
Parameters
----------
left : float, optional
The position of the left edge of the subplots,
as a fraction of the figure width.
right : float, optional
The position of the right edge of the subplots,
as a fraction of the figure width.
bottom : float, optional
The position of the bottom edge of the subplots,
as a fraction of the figure height.
top : float, optional
The position of the top edge of the subplots,
as a fraction of the figure height.
wspace : float, optional
The width of the padding between subplots,
as a fraction of the average Axes width.
hspace : float, optional
The height of the padding between subplots,
as a fraction of the average Axes height.
"""
if (self.get_layout_engine() is not None and
not self.get_layout_engine().adjust_compatible):
_api.warn_external(
"This figure was using a layout engine that is "
"incompatible with subplots_adjust and/or tight_layout; "
"not calling subplots_adjust.")
return
self.subplotpars.update(left, bottom, right, top, wspace, hspace)
for ax in self.axes:
if ax.get_subplotspec() is not None:
ax._set_position(ax.get_subplotspec().get_position(self))
self.stale = True
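# Editor's note: illustrative sketch of ``Figure.subplots_adjust`` (not part of
# the original source); the fractions are example values.
import matplotlib.pyplot as plt

fig, axs = plt.subplots(2, 2)
# Leave room on the left/bottom for long labels and widen the gaps between Axes.
fig.subplots_adjust(left=0.12, bottom=0.15, right=0.95, top=0.92,
                    wspace=0.4, hspace=0.4)
plt.show()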
def align_xlabels(self, axs=None):
"""
Align the xlabels of subplots in the same subplot row if label
alignment is being done automatically (i.e. the label position is
not manually set).
Alignment persists for draw events after this is called.
If a label is on the bottom, it is aligned with labels on Axes that
also have their label on the bottom and that have the same
bottom-most subplot row. If the label is on the top,
it is aligned with labels on Axes with the same top-most row.
Parameters
----------
axs : list of `~matplotlib.axes.Axes`
Optional list (or `~numpy.ndarray`) of `~matplotlib.axes.Axes`
to align the xlabels.
Default is to align all Axes on the figure.
See Also
--------
matplotlib.figure.Figure.align_ylabels
matplotlib.figure.Figure.align_titles
matplotlib.figure.Figure.align_labels
Notes
-----
This assumes that all Axes in ``axs`` are from the same `.GridSpec`,
so that their `.SubplotSpec` positions correspond to figure positions.
Examples
--------
Example with rotated xtick labels::
fig, axs = plt.subplots(1, 2)
axs[0].tick_params(axis='x', rotation=55)
axs[0].set_xlabel('XLabel 0')
axs[1].set_xlabel('XLabel 1')
fig.align_xlabels()
"""
if axs is None:
axs = self.axes
axs = [ax for ax in np.ravel(axs) if ax.get_subplotspec() is not None]
for ax in axs:
_log.debug(' Working on: %s', ax.get_xlabel())
rowspan = ax.get_subplotspec().rowspan
pos = ax.xaxis.get_label_position() # top or bottom
# Search through other Axes for label positions that are same as
# this one and that share the appropriate row number.
# Add to a grouper associated with each Axes of siblings.
# This list is inspected in `axis.draw` by
# `axis._update_label_position`.
for axc in axs:
if axc.xaxis.get_label_position() == pos:
rowspanc = axc.get_subplotspec().rowspan
if (pos == 'top' and rowspan.start == rowspanc.start or
pos == 'bottom' and rowspan.stop == rowspanc.stop):
# grouper for groups of xlabels to align
self._align_label_groups['x'].join(ax, axc)
def align_ylabels(self, axs=None):
"""
Align the ylabels of subplots in the same subplot column if label
alignment is being done automatically (i.e. the label position is
not manually set).
Alignment persists for draw events after this is called.
If a label is on the left, it is aligned with labels on Axes that
also have their label on the left and that have the same
left-most subplot column. If the label is on the right,
it is aligned with labels on Axes with the same right-most column.
Parameters
----------
axs : list of `~matplotlib.axes.Axes`
Optional list (or `~numpy.ndarray`) of `~matplotlib.axes.Axes`
to align the ylabels.
Default is to align all Axes on the figure.
See Also
--------
matplotlib.figure.Figure.align_xlabels
matplotlib.figure.Figure.align_titles
matplotlib.figure.Figure.align_labels
Notes
-----
This assumes that all Axes in ``axs`` are from the same `.GridSpec`,
so that their `.SubplotSpec` positions correspond to figure positions.
Examples
--------
Example with large yticks labels::
fig, axs = plt.subplots(2, 1)
axs[0].plot(np.arange(0, 1000, 50))
axs[0].set_ylabel('YLabel 0')
axs[1].set_ylabel('YLabel 1')
fig.align_ylabels()
"""
if axs is None:
axs = self.axes
axs = [ax for ax in np.ravel(axs) if ax.get_subplotspec() is not None]
for ax in axs:
_log.debug(' Working on: %s', ax.get_ylabel())
colspan = ax.get_subplotspec().colspan
pos = ax.yaxis.get_label_position() # left or right
# Search through other Axes for label positions that are same as
# this one and that share the appropriate column number.
# Add to a list associated with each Axes of siblings.
# This list is inspected in `axis.draw` by
# `axis._update_label_position`.
for axc in axs:
if axc.yaxis.get_label_position() == pos:
colspanc = axc.get_subplotspec().colspan
if (pos == 'left' and colspan.start == colspanc.start or
pos == 'right' and colspan.stop == colspanc.stop):
# grouper for groups of ylabels to align
self._align_label_groups['y'].join(ax, axc)
def align_titles(self, axs=None):
"""
Align the titles of subplots in the same subplot row if title
alignment is being done automatically (i.e. the title position is
not manually set).
Alignment persists for draw events after this is called.
Parameters
----------
axs : list of `~matplotlib.axes.Axes`
Optional list (or ndarray) of `~matplotlib.axes.Axes`
to align the titles.
Default is to align all Axes on the figure.
See Also
--------
matplotlib.figure.Figure.align_xlabels
matplotlib.figure.Figure.align_ylabels
matplotlib.figure.Figure.align_labels
Notes
-----
This assumes that all Axes in ``axs`` are from the same `.GridSpec`,
so that their `.SubplotSpec` positions correspond to figure positions.
Examples
--------
Example with titles::
fig, axs = plt.subplots(1, 2)
axs[0].set_aspect('equal')
axs[0].set_title('Title 0')
axs[1].set_title('Title 1')
fig.align_titles()
"""
if axs is None:
axs = self.axes
axs = [ax for ax in np.ravel(axs) if ax.get_subplotspec() is not None]
for ax in axs:
_log.debug(' Working on: %s', ax.get_title())
rowspan = ax.get_subplotspec().rowspan
for axc in axs:
rowspanc = axc.get_subplotspec().rowspan
if (rowspan.start == rowspanc.start):
self._align_label_groups['title'].join(ax, axc)
def align_labels(self, axs=None):
"""
Align the xlabels and ylabels of subplots with the same subplots
row or column (respectively) if label alignment is being
done automatically (i.e. the label position is not manually set).
Alignment persists for draw events after this is called.
Parameters
----------
axs : list of `~matplotlib.axes.Axes`
Optional list (or `~numpy.ndarray`) of `~matplotlib.axes.Axes`
to align the labels.
Default is to align all Axes on the figure.
See Also
--------
matplotlib.figure.Figure.align_xlabels
matplotlib.figure.Figure.align_ylabels
matplotlib.figure.Figure.align_titles
Notes
-----
This assumes that all Axes in ``axs`` are from the same `.GridSpec`,
so that their `.SubplotSpec` positions correspond to figure positions.
"""
self.align_xlabels(axs=axs)
self.align_ylabels(axs=axs)
def add_gridspec(self, nrows=1, ncols=1, **kwargs):
"""
Low-level API for creating a `.GridSpec` that has this figure as a parent.
This is a low-level API, allowing you to create a gridspec and
subsequently add subplots based on the gridspec. Most users do
not need that freedom and should use the higher-level methods
`~.Figure.subplots` or `~.Figure.subplot_mosaic`.
Parameters
----------
nrows : int, default: 1
Number of rows in grid.
ncols : int, default: 1
Number of columns in grid.
Returns
-------
`.GridSpec`
Other Parameters
----------------
**kwargs
Keyword arguments are passed to `.GridSpec`.
See Also
--------
matplotlib.pyplot.subplots
Examples
--------
Adding a subplot that spans two rows::
fig = plt.figure()
gs = fig.add_gridspec(2, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[1, 0])
# spans two rows:
ax3 = fig.add_subplot(gs[:, 1])
"""
_ = kwargs.pop('figure', None) # pop in case user has added this...
gs = GridSpec(nrows=nrows, ncols=ncols, figure=self, **kwargs)
return gs
def subfigures(self, nrows=1, ncols=1, squeeze=True,
wspace=None, hspace=None,
width_ratios=None, height_ratios=None,
**kwargs):
"""
Add a set of subfigures to this figure or subfigure.
A subfigure has the same artist methods as a figure, and is logically
the same as a figure, but cannot print itself.
See :doc:`/gallery/subplots_axes_and_figures/subfigures`.
.. versionchanged:: 3.10
subfigures are now added in row-major order.
Parameters
----------
nrows, ncols : int, default: 1
Number of rows/columns of the subfigure grid.
squeeze : bool, default: True
If True, extra dimensions are squeezed out from the returned
array of subfigures.
wspace, hspace : float, default: None
The amount of width/height reserved for space between subfigures,
expressed as a fraction of the average subfigure width/height.
If not given, the values will be inferred from rcParams if using
constrained layout (see `~.ConstrainedLayoutEngine`), or zero if
not using a layout engine.
width_ratios : array-like of length *ncols*, optional
Defines the relative widths of the columns. Each column gets a
relative width of ``width_ratios[i] / sum(width_ratios)``.
If not given, all columns will have the same width.
height_ratios : array-like of length *nrows*, optional
Defines the relative heights of the rows. Each row gets a
relative height of ``height_ratios[i] / sum(height_ratios)``.
If not given, all rows will have the same height.
"""
gs = GridSpec(nrows=nrows, ncols=ncols, figure=self,
wspace=wspace, hspace=hspace,
width_ratios=width_ratios,
height_ratios=height_ratios,
left=0, right=1, bottom=0, top=1)
sfarr = np.empty((nrows, ncols), dtype=object)
for i in range(nrows):
for j in range(ncols):
sfarr[i, j] = self.add_subfigure(gs[i, j], **kwargs)
if self.get_layout_engine() is None and (wspace is not None or
hspace is not None):
# GridSpec wspace and hspace are ignored on subfigure instantiation,
# and no space is left, so we need to account for them here if required.
bottoms, tops, lefts, rights = gs.get_grid_positions(self)
for sfrow, bottom, top in zip(sfarr, bottoms, tops):
for sf, left, right in zip(sfrow, lefts, rights):
bbox = Bbox.from_extents(left, bottom, right, top)
sf._redo_transform_rel_fig(bbox=bbox)
if squeeze:
# Discarding unneeded dimensions that equal 1. If we only have one
# subfigure, just return it instead of a 1-element array.
return sfarr.item() if sfarr.size == 1 else sfarr.squeeze()
else:
# The returned subfigure array will always be 2-d, even if nrows=ncols=1.
return sfarr
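# Editor's note: illustrative sketch of ``Figure.subfigures`` (not part of the
# original source); the layout and ratios are example values.
import matplotlib.pyplot as plt

fig = plt.figure(layout='constrained')
sf_left, sf_right = fig.subfigures(1, 2, width_ratios=[2, 1])
sf_left.suptitle('Left subfigure')
sf_left.subplots(2, 2)
sf_right.suptitle('Right subfigure')
sf_right.subplots(3, 1)
plt.show()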
def add_subfigure(self, subplotspec, **kwargs):
"""
Add a `.SubFigure` to the figure as part of a subplot arrangement.
Parameters
----------
subplotspec : `.gridspec.SubplotSpec`
Defines the region in a parent gridspec where the subfigure will
be placed.
Returns
-------
`.SubFigure`
Other Parameters
----------------
**kwargs
Are passed to the `.SubFigure` object.
See Also
--------
.Figure.subfigures
"""
sf = SubFigure(self, subplotspec, **kwargs)
self.subfigs += [sf]
sf._remove_method = self.subfigs.remove
sf.stale_callback = _stale_figure_callback
self.stale = True
return sf
def sca(self, a):
"""Set the current Axes to be *a* and return *a*."""
self._axstack.bubble(a)
self._axobservers.process("_axes_change_event", self)
return a
def gca(self):
"""
Get the current Axes.
If there is currently no Axes on this Figure, a new one is created
using `.Figure.add_subplot`. (To test whether there is currently an
Axes on a Figure, check whether ``figure.axes`` is empty. To test
whether there is currently a Figure on the pyplot figure stack, check
whether `.pyplot.get_fignums()` is empty.)
"""
ax = self._axstack.current()
return ax if ax is not None else self.add_subplot()
def _gci(self):
# Helper for `~matplotlib.pyplot.gci`. Do not use elsewhere.
"""
Get the current colorable artist.
Specifically, returns the current `.ScalarMappable` instance (`.Image`
created by `imshow` or `figimage`, `.Collection` created by `pcolor` or
`scatter`, etc.), or *None* if no such instance has been defined.
The current image is an attribute of the current Axes, or the nearest
earlier Axes in the current figure that contains an image.
Notes
-----
Historically, the only colorable artists were images; hence the name
``gci`` (get current image).
"""
# Look first for an image in the current Axes.
ax = self._axstack.current()
if ax is None:
return None
im = ax._gci()
if im is not None:
return im
# If there is no image in the current Axes, search for
# one in a previously created Axes. Whether this makes
# sense is debatable, but it is the documented behavior.
for ax in reversed(self.axes):
im = ax._gci()
if im is not None:
return im
return None
def _process_projection_requirements(self, *, axes_class=None, polar=False,
projection=None, **kwargs):
"""
Handle the args/kwargs to add_axes/add_subplot/gca, returning::
(axes_proj_class, proj_class_kwargs)
which can be used for new Axes initialization/identification.
"""
if axes_class is not None:
if polar or projection is not None:
raise ValueError(
"Cannot combine 'axes_class' and 'projection' or 'polar'")
projection_class = axes_class
else:
if polar:
if projection is not None and projection != 'polar':
raise ValueError(
f"polar={polar}, yet projection={projection!r}. "
"Only one of these arguments should be supplied."
)
projection = 'polar'
if isinstance(projection, str) or projection is None:
projection_class = projections.get_projection_class(projection)
elif hasattr(projection, '_as_mpl_axes'):
projection_class, extra_kwargs = projection._as_mpl_axes()
kwargs.update(**extra_kwargs)
else:
raise TypeError(
f"projection must be a string, None or implement a "
f"_as_mpl_axes method, not {projection!r}")
return projection_class, kwargs
def get_default_bbox_extra_artists(self):
"""
Return a list of Artists typically used in `.Figure.get_tightbbox`.
"""
bbox_artists = [artist for artist in self.get_children()
if (artist.get_visible() and artist.get_in_layout())]
for ax in self.axes:
if ax.get_visible():
bbox_artists.extend(ax.get_default_bbox_extra_artists())
return bbox_artists
def get_tightbbox(self, renderer=None, *, bbox_extra_artists=None):
"""
Return a (tight) bounding box of the figure *in inches*.
Note that `.FigureBase` differs from all other artists, which return
their `.Bbox` in pixels.
Artists that have ``artist.set_in_layout(False)`` are not included
in the bbox.
Parameters
----------
renderer : `.RendererBase` subclass
Renderer that will be used to draw the figures (i.e.
``fig.canvas.get_renderer()``)
bbox_extra_artists : list of `.Artist` or ``None``
List of artists to include in the tight bounding box. If
``None`` (default), then all artist children of each Axes are
included in the tight bounding box.
Returns
-------
`.BboxBase`
containing the bounding box (in figure inches).
"""
if renderer is None:
renderer = self.get_figure(root=True)._get_renderer()
bb = []
if bbox_extra_artists is None:
artists = [artist for artist in self.get_children()
if (artist not in self.axes and artist.get_visible()
and artist.get_in_layout())]
else:
artists = bbox_extra_artists
for a in artists:
bbox = a.get_tightbbox(renderer)
if bbox is not None:
bb.append(bbox)
for ax in self.axes:
if ax.get_visible():
# some Axes don't take the bbox_extra_artists kwarg so we
# need this conditional....
try:
bbox = ax.get_tightbbox(
renderer, bbox_extra_artists=bbox_extra_artists)
except TypeError:
bbox = ax.get_tightbbox(renderer)
bb.append(bbox)
bb = [b for b in bb
if (np.isfinite(b.width) and np.isfinite(b.height)
and (b.width != 0 or b.height != 0))]
isfigure = hasattr(self, 'bbox_inches')
if len(bb) == 0:
if isfigure:
return self.bbox_inches
else:
# subfigures do not have bbox_inches, but do have a bbox
bb = [self.bbox]
_bbox = Bbox.union(bb)
if isfigure:
# transform from pixels to inches...
_bbox = TransformedBbox(_bbox, self.dpi_scale_trans.inverted())
return _bbox
@staticmethod
def _norm_per_subplot_kw(per_subplot_kw):
expanded = {}
for k, v in per_subplot_kw.items():
if isinstance(k, tuple):
for sub_key in k:
if sub_key in expanded:
raise ValueError(f'The key {sub_key!r} appears multiple times.')
expanded[sub_key] = v
else:
if k in expanded:
raise ValueError(f'The key {k!r} appears multiple times.')
expanded[k] = v
return expanded
@staticmethod
def _normalize_grid_string(layout):
if '\n' not in layout:
# single-line string
return [list(ln) for ln in layout.split(';')]
else:
# multi-line string
layout = inspect.cleandoc(layout)
return [list(ln) for ln in layout.strip('\n').split('\n')]
def subplot_mosaic(self, mosaic, *, sharex=False, sharey=False,
width_ratios=None, height_ratios=None,
empty_sentinel='.',
subplot_kw=None, per_subplot_kw=None, gridspec_kw=None):
"""
Build a layout of Axes based on ASCII art or nested lists.
This is a helper function to build complex GridSpec layouts visually.
See :ref:`mosaic`
for an example and full API documentation
Parameters
----------
mosaic : list of list of {hashable or nested} or str
A visual layout of how you want your Axes to be arranged
labeled as strings. For example ::
x = [['A panel', 'A panel', 'edge'],
['C panel', '.', 'edge']]
produces 4 Axes:
- 'A panel' which is 1 row high and spans the first two columns
- 'edge' which is 2 rows high and is on the right edge
- 'C panel' which is 1 row high and 1 column wide in the bottom left
- a blank space 1 row and 1 column wide in the bottom center
Any of the entries in the layout can be a list of lists
of the same form to create nested layouts.
If input is a str, then it can either be a multi-line string of
the form ::
'''
AAE
C.E
'''
where each character is a column and each line is a row. Or it
can be a single-line string where rows are separated by ``;``::
'AB;CC'
The string notation allows only single character Axes labels and
does not support nesting but is very terse.
The Axes identifiers may be `str` or a non-iterable hashable
object (e.g. `tuple` s may not be used).
sharex, sharey : bool, default: False
If True, the x-axis (*sharex*) or y-axis (*sharey*) will be shared
among all subplots. In that case, tick label visibility and axis
units behave as for `subplots`. If False, each subplot's x- or
y-axis will be independent.
width_ratios : array-like of length *ncols*, optional
Defines the relative widths of the columns. Each column gets a
relative width of ``width_ratios[i] / sum(width_ratios)``.
If not given, all columns will have the same width. Equivalent
to ``gridspec_kw={'width_ratios': [...]}``. In the case of nested
layouts, this argument applies only to the outer layout.
height_ratios : array-like of length *nrows*, optional
Defines the relative heights of the rows. Each row gets a
relative height of ``height_ratios[i] / sum(height_ratios)``.
If not given, all rows will have the same height. Equivalent
to ``gridspec_kw={'height_ratios': [...]}``. In the case of nested
layouts, this argument applies only to the outer layout.
subplot_kw : dict, optional
Dictionary with keywords passed to the `.Figure.add_subplot` call
used to create each subplot. These values may be overridden by
values in *per_subplot_kw*.
per_subplot_kw : dict, optional
A dictionary mapping the Axes identifiers or tuples of identifiers
to a dictionary of keyword arguments to be passed to the
`.Figure.add_subplot` call used to create each subplot. The values
in these dictionaries have precedence over the values in
*subplot_kw*.
If *mosaic* is a string, and thus all keys are single characters,
it is possible to use a single string instead of a tuple as keys;
i.e. ``"AB"`` is equivalent to ``("A", "B")``.
.. versionadded:: 3.7
gridspec_kw : dict, optional
Dictionary with keywords passed to the `.GridSpec` constructor used
to create the grid the subplots are placed on. In the case of
nested layouts, this argument applies only to the outer layout.
For more complex layouts, users should use `.Figure.subfigures`
to create the nesting.
empty_sentinel : object, optional
Entry in the layout to mean "leave this space empty". Defaults
to ``'.'``. Note, if *layout* is a string, it is processed via
`inspect.cleandoc` to remove leading white space, which may
interfere with using white-space as the empty sentinel.
Returns
-------
dict[label, Axes]
A dictionary mapping the labels to the Axes objects. The order of
the Axes is left-to-right and top-to-bottom of their position in the
total layout.
"""
subplot_kw = subplot_kw or {}
gridspec_kw = dict(gridspec_kw or {})
per_subplot_kw = per_subplot_kw or {}
if height_ratios is not None:
if 'height_ratios' in gridspec_kw:
raise ValueError("'height_ratios' must not be defined both as "
"parameter and as key in 'gridspec_kw'")
gridspec_kw['height_ratios'] = height_ratios
if width_ratios is not None:
if 'width_ratios' in gridspec_kw:
raise ValueError("'width_ratios' must not be defined both as "
"parameter and as key in 'gridspec_kw'")
gridspec_kw['width_ratios'] = width_ratios
# special-case string input
if isinstance(mosaic, str):
mosaic = self._normalize_grid_string(mosaic)
per_subplot_kw = {
tuple(k): v for k, v in per_subplot_kw.items()
}
per_subplot_kw = self._norm_per_subplot_kw(per_subplot_kw)
# Only accept strict bools to allow a possible future API expansion.
_api.check_isinstance(bool, sharex=sharex, sharey=sharey)
def _make_array(inp):
"""
Convert input into 2D array
We need to have this internal function rather than
``np.asarray(..., dtype=object)`` so that a list of lists
of lists does not get converted to an array of dimension > 2.
Returns
-------
2D object array
"""
r0, *rest = inp
if isinstance(r0, str):
raise ValueError('List mosaic specification must be 2D')
for j, r in enumerate(rest, start=1):
if isinstance(r, str):
raise ValueError('List mosaic specification must be 2D')
if len(r0) != len(r):
raise ValueError(
"All of the rows must be the same length, however "
f"the first row ({r0!r}) has length {len(r0)} "
f"and row {j} ({r!r}) has length {len(r)}."
)
out = np.zeros((len(inp), len(r0)), dtype=object)
for j, r in enumerate(inp):
for k, v in enumerate(r):
out[j, k] = v
return out
def _identify_keys_and_nested(mosaic):
"""
Given a 2D object array, identify unique IDs and nested mosaics
Parameters
----------
mosaic : 2D object array
Returns
-------
unique_ids : tuple
The unique non-sub mosaic entries in this mosaic
nested : dict[tuple[int, int], 2D object array]
"""
# make sure we preserve the user supplied order
unique_ids = cbook._OrderedSet()
nested = {}
for j, row in enumerate(mosaic):
for k, v in enumerate(row):
if v == empty_sentinel:
continue
elif not cbook.is_scalar_or_string(v):
nested[(j, k)] = _make_array(v)
else:
unique_ids.add(v)
return tuple(unique_ids), nested
def _do_layout(gs, mosaic, unique_ids, nested):
"""
Recursively do the mosaic.
Parameters
----------
gs : GridSpec
mosaic : 2D object array
The input converted to a 2D array for this level.
unique_ids : tuple
The identified scalar labels at this level of nesting.
nested : dict[tuple[int, int]], 2D object array
The identified nested mosaics, if any.
Returns
-------
dict[label, Axes]
A flat dict of all of the Axes created.
"""
output = dict()
# we need to merge together the Axes at this level and the Axes
# in the (recursively) nested sub-mosaics so that we can add
# them to the figure in the "natural" order if you were to
# ravel in c-order all of the Axes that will be created
#
# This will stash the upper left index of each object (axes or
# nested mosaic) at this level
this_level = dict()
# go through the unique keys,
for name in unique_ids:
# sort out where each axes starts/ends
index = np.argwhere(mosaic == name)
start_row, start_col = np.min(index, axis=0)
end_row, end_col = np.max(index, axis=0) + 1
# and construct the slice object
slc = (slice(start_row, end_row), slice(start_col, end_col))
# some light error checking
if (mosaic[slc] != name).any():
raise ValueError(
f"While trying to layout\n{mosaic!r}\n"
f"we found that the label {name!r} specifies a "
"non-rectangular or non-contiguous area.")
# and stash this slice for later
this_level[(start_row, start_col)] = (name, slc, 'axes')
# do the same thing for the nested mosaics (simpler because these
# cannot be spans yet!)
for (j, k), nested_mosaic in nested.items():
this_level[(j, k)] = (None, nested_mosaic, 'nested')
# now go through the things in this level and add them
# in order left-to-right top-to-bottom
for key in sorted(this_level):
name, arg, method = this_level[key]
# we are doing some hokey function dispatch here based
# on the 'method' string stashed above to sort out if this
# element is an Axes or a nested mosaic.
if method == 'axes':
slc = arg
# add a single Axes
if name in output:
raise ValueError(f"There are duplicate keys {name} "
f"in the layout\n{mosaic!r}")
ax = self.add_subplot(
gs[slc], **{
'label': str(name),
**subplot_kw,
**per_subplot_kw.get(name, {})
}
)
output[name] = ax
elif method == 'nested':
nested_mosaic = arg
j, k = key
# recursively add the nested mosaic
rows, cols = nested_mosaic.shape
nested_output = _do_layout(
gs[j, k].subgridspec(rows, cols),
nested_mosaic,
*_identify_keys_and_nested(nested_mosaic)
)
overlap = set(output) & set(nested_output)
if overlap:
raise ValueError(
f"There are duplicate keys {overlap} "
f"between the outer layout\n{mosaic!r}\n"
f"and the nested layout\n{nested_mosaic}"
)
output.update(nested_output)
else:
raise RuntimeError("This should never happen")
return output
mosaic = _make_array(mosaic)
rows, cols = mosaic.shape
gs = self.add_gridspec(rows, cols, **gridspec_kw)
ret = _do_layout(gs, mosaic, *_identify_keys_and_nested(mosaic))
ax0 = next(iter(ret.values()))
for ax in ret.values():
if sharex:
ax.sharex(ax0)
ax._label_outer_xaxis(skip_non_rectangular_axes=True)
if sharey:
ax.sharey(ax0)
ax._label_outer_yaxis(skip_non_rectangular_axes=True)
if extra := set(per_subplot_kw) - set(ret):
raise ValueError(
f"The keys {extra} are in *per_subplot_kw* "
"but not in the mosaic."
)
return ret
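# Editor's note: illustrative sketch of ``Figure.subplot_mosaic`` (not part of the
# original source), reusing the 'AAE' / 'C.E' layout from the docstring above; the
# per-subplot kwargs are example values.
import matplotlib.pyplot as plt

fig = plt.figure(layout='constrained')
axd = fig.subplot_mosaic(
    """
    AAE
    C.E
    """,
    height_ratios=[1, 2],
    per_subplot_kw={'E': {'xscale': 'log'}},
)
axd['A'].set_title('A panel')
axd['E'].plot([1, 10, 100], [1, 2, 3])
plt.show()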
def _set_artist_props(self, a):
if a != self:
a.set_figure(self)
a.stale_callback = _stale_figure_callback
a.set_transform(self.transSubfigure)
@_docstring.interpd
| FigureBase |
python | cookiecutter__cookiecutter | tests/conftest.py | {
"start": 5838,
"end": 7562
} | class ____(TypedDict):
cookiecutters_dir: str
replay_dir: str
@pytest.fixture(scope='session')
def user_config_data(user_dir) -> UserConfigData:
"""Fixture that creates 2 Cookiecutter user config dirs.
It will create it in the user's home directory.
* `cookiecutters_dir`
* `cookiecutter_replay`
:returns: Dict with name of both user config dirs
"""
cookiecutters_dir = user_dir.joinpath('cookiecutters')
cookiecutters_dir.mkdir()
replay_dir = user_dir.joinpath('cookiecutter_replay')
replay_dir.mkdir()
return {
'cookiecutters_dir': str(cookiecutters_dir),
'replay_dir': str(replay_dir),
}
@pytest.fixture(scope='session')
def user_config_file(user_dir, user_config_data) -> str:
"""Fixture that creates a config file called `config`.
It will create it in the user's home directory, with YAML from
`user_config_data`.
:param user_dir: Simulated user's home directory
:param user_config_data: Dict of config values
:returns: String of path to config file
"""
config_file = user_dir.joinpath('config')
config_text = USER_CONFIG.format(**user_config_data)
config_file.write_text(config_text)
return str(config_file)
@pytest.fixture
def output_dir(tmp_path) -> str:
"""Fixture to prepare test output directory."""
output_path = tmp_path.joinpath("output")
output_path.mkdir()
return str(output_path)
@pytest.fixture
def clone_dir(tmp_path: Path) -> Path:
"""Simulate creation of a directory called `clone_dir` inside of `tmp_path`. \
    Returns a Path to said directory."""
clone_dir = tmp_path.joinpath("clone_dir")
clone_dir.mkdir()
return clone_dir
| UserConfigData |
python | getsentry__sentry | src/sentry/utils/committers.py | {
"start": 6270,
"end": 6412
} | class ____(TypedDict):
group_owner_id: int
author: Author | None
commits: Sequence[MutableMapping[str, Any]]
| AuthorCommitsSerialized |
python | scipy__scipy | scipy/spatial/tests/test_qhull.py | {
"start": 38273,
"end": 50121
} | class ____:
def assert_unordered_allclose(self, arr1, arr2, rtol=1e-7):
"""Check that every line in arr1 is only once in arr2"""
assert_equal(arr1.shape, arr2.shape)
truths = np.zeros((arr1.shape[0],), dtype=bool)
for l1 in arr1:
indexes = np.nonzero((abs(arr2 - l1) < rtol).all(axis=1))[0]
assert_equal(indexes.shape, (1,))
truths[indexes[0]] = True
assert_(truths.all())
@pytest.mark.parametrize("dt", [np.float64, int])
def test_cube_halfspace_intersection(self, dt):
halfspaces = np.array([[-1, 0, 0],
[0, -1, 0],
[1, 0, -2],
[0, 1, -2]], dtype=dt)
feasible_point = np.array([1, 1], dtype=dt)
points = np.array([[0.0, 0.0], [2.0, 0.0], [0.0, 2.0], [2.0, 2.0]])
hull = qhull.HalfspaceIntersection(halfspaces, feasible_point)
assert_allclose(hull.intersections, points)
def test_self_dual_polytope_intersection(self):
fname = os.path.join(os.path.dirname(__file__), 'data',
'selfdual-4d-polytope.txt')
ineqs = np.genfromtxt(fname)
halfspaces = -np.hstack((ineqs[:, 1:], ineqs[:, :1]))
feas_point = np.array([0., 0., 0., 0.])
hs = qhull.HalfspaceIntersection(halfspaces, feas_point)
assert_equal(hs.intersections.shape, (24, 4))
assert_almost_equal(hs.dual_volume, 32.0)
assert_equal(len(hs.dual_facets), 24)
for facet in hs.dual_facets:
assert_equal(len(facet), 6)
dists = halfspaces[:, -1] + halfspaces[:, :-1].dot(feas_point)
self.assert_unordered_allclose((halfspaces[:, :-1].T/dists).T, hs.dual_points)
points = itertools.permutations([0., 0., 0.5, -0.5])
for point in points:
assert_equal(np.sum((hs.intersections == point).all(axis=1)), 1)
def test_wrong_feasible_point(self):
halfspaces = np.array([[-1.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, -1.0],
[0.0, 1.0, -1.0]])
feasible_point = np.array([0.5, 0.5, 0.5])
#Feasible point is (ndim,) instead of (ndim-1,)
assert_raises(ValueError,
qhull.HalfspaceIntersection, halfspaces, feasible_point)
feasible_point = np.array([[0.5], [0.5]])
#Feasible point is (ndim-1, 1) instead of (ndim-1,)
assert_raises(ValueError,
qhull.HalfspaceIntersection, halfspaces, feasible_point)
feasible_point = np.array([[0.5, 0.5]])
#Feasible point is (1, ndim-1) instead of (ndim-1,)
assert_raises(ValueError,
qhull.HalfspaceIntersection, halfspaces, feasible_point)
feasible_point = np.array([-0.5, -0.5])
#Feasible point is outside feasible region
assert_raises(qhull.QhullError,
qhull.HalfspaceIntersection, halfspaces, feasible_point)
def test_incremental(self):
#Cube
halfspaces = np.array([[0., 0., -1., -0.5],
[0., -1., 0., -0.5],
[-1., 0., 0., -0.5],
[1., 0., 0., -0.5],
[0., 1., 0., -0.5],
[0., 0., 1., -0.5]])
        # Cut off each vertex of the cube
extra_normals = np.array([[1., 1., 1.],
[1., 1., -1.],
[1., -1., 1.],
[1, -1., -1.]])
offsets = np.array([[-1.]]*8)
extra_halfspaces = np.hstack((np.vstack((extra_normals, -extra_normals)),
offsets))
feas_point = np.array([0., 0., 0.])
inc_hs = qhull.HalfspaceIntersection(halfspaces, feas_point, incremental=True)
inc_res_hs = qhull.HalfspaceIntersection(halfspaces, feas_point,
incremental=True)
for i, ehs in enumerate(extra_halfspaces):
inc_hs.add_halfspaces(ehs[np.newaxis, :])
inc_res_hs.add_halfspaces(ehs[np.newaxis, :], restart=True)
total = np.vstack((halfspaces, extra_halfspaces[:i+1, :]))
hs = qhull.HalfspaceIntersection(total, feas_point)
assert_allclose(inc_hs.halfspaces, inc_res_hs.halfspaces)
assert_allclose(inc_hs.halfspaces, hs.halfspaces)
#Direct computation and restart should have points in same order
assert_allclose(hs.intersections, inc_res_hs.intersections)
#Incremental will have points in different order than direct computation
self.assert_unordered_allclose(inc_hs.intersections, hs.intersections)
inc_hs.close()
def test_cube(self):
# Halfspaces of the cube:
halfspaces = np.array([[-1., 0., 0., 0.], # x >= 0
[1., 0., 0., -1.], # x <= 1
[0., -1., 0., 0.], # y >= 0
[0., 1., 0., -1.], # y <= 1
[0., 0., -1., 0.], # z >= 0
[0., 0., 1., -1.]]) # z <= 1
point = np.array([0.5, 0.5, 0.5])
hs = qhull.HalfspaceIntersection(halfspaces, point)
# qhalf H0.5,0.5,0.5 o < input.txt
qhalf_points = np.array([
[-2, 0, 0],
[2, 0, 0],
[0, -2, 0],
[0, 2, 0],
[0, 0, -2],
[0, 0, 2]])
qhalf_facets = [
[2, 4, 0],
[4, 2, 1],
[5, 2, 0],
[2, 5, 1],
[3, 4, 1],
[4, 3, 0],
[5, 3, 1],
[3, 5, 0]]
assert len(qhalf_facets) == len(hs.dual_facets)
for a, b in zip(qhalf_facets, hs.dual_facets):
assert set(a) == set(b) # facet orientation can differ
assert_allclose(hs.dual_points, qhalf_points)
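# Illustrative sketch (standalone, separate from the test class above): the
# dual-point relation checked in test_cube, reproduced in 2D for the unit
# square with an interior point at its centre.
import numpy as np
from scipy.spatial import HalfspaceIntersection
square = np.array([[-1.,  0.,  0.],   # x >= 0
                   [ 1.,  0., -1.],   # x <= 1
                   [ 0., -1.,  0.],   # y >= 0
                   [ 0.,  1., -1.]])  # y <= 1
interior = np.array([0.5, 0.5])
hs = HalfspaceIntersection(square, interior)
dists = square[:, -1] + square[:, :-1] @ interior           # -0.5 for each face
np.testing.assert_allclose(hs.dual_points, square[:, :-1] / -dists[:, None])
print(hs.intersections)                                     # the four corners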
@pytest.mark.parametrize("k", range(1,4))
def test_halfspace_batch(self, k):
# Test that we can add halfspaces a few at a time
big_square = np.array([[ 1., 0., -2.],
[-1., 0., -2.],
[ 0., 1., -2.],
[ 0., -1., -2.]])
small_square = np.array([[ 1., 0., -1.],
[-1., 0., -1.],
[ 0., 1., -1.],
[ 0., -1., -1.]])
hs = qhull.HalfspaceIntersection(big_square,
np.array([0.3141, 0.2718]),
incremental=True)
hs.add_halfspaces(small_square[0:k,:])
hs.add_halfspaces(small_square[k:4,:])
hs.close()
# Check the intersections are correct (they are the corners of the small square)
expected_intersections = np.array([[1., 1.],
[1., -1.],
[-1., 1.],
[-1., -1.]])
actual_intersections = hs.intersections
# They may be in any order, so just check that under some permutation
# expected=actual.
ind1 = np.lexsort((actual_intersections[:, 1], actual_intersections[:, 0]))
ind2 = np.lexsort((expected_intersections[:, 1], expected_intersections[:, 0]))
assert_allclose(actual_intersections[ind1], expected_intersections[ind2])
@pytest.mark.parametrize("halfspaces", [
(np.array([-0.70613882, -0.45589431, 0.04178256])),
(np.array([[-0.70613882, -0.45589431, 0.04178256],
[0.70807342, -0.45464871, -0.45969769],
[0., 0.76515026, -0.35614825]])),
])
def test_gh_19865(self, halfspaces):
# starting off with a feasible interior point and
# adding halfspaces for which it is no longer feasible
# should result in an error rather than a problematic
# intersection polytope
initial_square = np.array(
[[1, 0, -1], [0, 1, -1], [-1, 0, -1], [0, -1, -1]]
)
incremental_intersector = qhull.HalfspaceIntersection(initial_square,
np.zeros(2),
incremental=True)
with pytest.raises(qhull.QhullError, match="feasible.*-0.706.*"):
incremental_intersector.add_halfspaces(halfspaces)
def test_gh_19865_3d(self):
# 3d case where closed half space is enforced for
# feasibility
halfspaces = np.array([[1, 1, 1, -1], # doesn't exclude origin
[-1, -1, -1, -1], # doesn't exclude origin
[1, 0, 0, 0]]) # the origin is on the line
initial_cube = np.array([[1, 0, 0, -1],
[-1, 0, 0, -1],
[0, 1, 0, -1],
[0, -1, 0, -1],
[0, 0, 1, -1],
[0, 0, -1, -1]])
incremental_intersector = qhull.HalfspaceIntersection(initial_cube,
np.zeros(3),
incremental=True)
with pytest.raises(qhull.QhullError, match="feasible.*[1 0 0 0]"):
incremental_intersector.add_halfspaces(halfspaces)
def test_2d_add_halfspace_input(self):
# incrementally added halfspaces should respect the 2D
# array shape requirement
initial_square = np.array(
[[1, 0, -1], [0, 1, -1], [-1, 0, -1], [0, -1, -1]]
)
incremental_intersector = qhull.HalfspaceIntersection(initial_square,
np.zeros(2),
incremental=True)
with pytest.raises(ValueError, match="2D array"):
incremental_intersector.add_halfspaces(np.ones((4, 4, 4)))
def test_1d_add_halfspace_input(self):
# we do allow 1D `halfspaces` input to add_halfspaces()
initial_square = np.array(
[[1, 0, -1], [0, 1, -1], [-1, 0, -1], [0, -1, -1]]
)
incremental_intersector = qhull.HalfspaceIntersection(initial_square,
np.zeros(2),
incremental=True)
assert_allclose(incremental_intersector.dual_vertices, np.arange(4))
incremental_intersector.add_halfspaces(np.array([2, 2, -1]))
assert_allclose(incremental_intersector.dual_vertices, np.arange(5))
@pytest.mark.parametrize("diagram_type", [Voronoi, qhull.Delaunay])
def test_gh_20623(diagram_type):
rng = np.random.default_rng(123)
invalid_data = rng.random((4, 10, 3))
with pytest.raises(ValueError, match="dimensions"):
diagram_type(invalid_data)
def test_gh_21286():
generators = np.array([[0, 0], [0, 1.1], [1, 0], [1, 1]])
tri = qhull.Delaunay(generators)
# verify absence of segfault reported in ticket:
with pytest.raises(IndexError):
tri.find_simplex(1)
with pytest.raises(IndexError):
# strikingly, Delaunay object has shape
# () just like np.asanyarray(1) above
tri.find_simplex(tri)
def test_find_simplex_ndim_err():
generators = np.array([[0, 0], [0, 1.1], [1, 0], [1, 1]])
tri = qhull.Delaunay(generators)
with pytest.raises(ValueError):
tri.find_simplex([2, 2, 2])
| Test_HalfspaceIntersection |
python | great-expectations__great_expectations | tests/integration/data_sources_and_expectations/expectations/test_expect_column_values_to_not_match_like_pattern_list.py | {
"start": 4065,
"end": 5900
} | class ____:
@pytest.mark.parametrize(
"expectation",
[
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePatternList(
column=COL_NAME, like_pattern_list=["bc"]
),
id="one_pattern",
),
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePatternList(
column=COL_NAME, like_pattern_list=["bc", "%de%"]
),
id="multiple_patterns",
),
],
)
@parameterize_batch_for_data_sources(
data_source_configs=[MSSQLDatasourceTestConfig()], data=DATA
)
def test_success(
self,
batch_for_datasource: Batch,
expectation: gxe.ExpectColumnValuesToNotMatchLikePatternList,
) -> None:
result = batch_for_datasource.validate(expectation)
assert result.success
@pytest.mark.parametrize(
"expectation",
[
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePatternList(
column=COL_NAME, like_pattern_list=["%a[b]%"]
),
id="one_pattern",
),
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePatternList(
column=COL_NAME, like_pattern_list=["%[a]%", "not_this"]
),
id="multiple_patterns",
),
],
)
@parameterize_batch_for_data_sources(
data_source_configs=[MSSQLDatasourceTestConfig()], data=DATA
)
def test_failure(
self,
batch_for_datasource: Batch,
expectation: gxe.ExpectColumnValuesToNotMatchLikePatternList,
) -> None:
result = batch_for_datasource.validate(expectation)
assert not result.success
| TestMSSQL |
python | cython__cython | Demos/benchmarks/bm_raytrace.py | {
"start": 8362,
"end": 9678
} | class ____(object):
def __init__(self, **kwargs):
self.baseColour = kwargs.get('baseColour', (1, 1, 1))
self.specularCoefficient = kwargs.get('specularCoefficient', 0.2)
self.lambertCoefficient = kwargs.get('lambertCoefficient', 0.6)
self.ambientCoefficient = 1.0 - self.specularCoefficient - self.lambertCoefficient
def baseColourAt(self, p):
return self.baseColour
def colourAt(self, scene, ray, p, normal):
b = self.baseColourAt(p)
c = (0, 0, 0)
if self.specularCoefficient > 0:
reflectedRay = Ray(p, ray.vector.reflectThrough(normal))
reflectedColour = scene.rayColour(reflectedRay)
c = addColours(c, self.specularCoefficient, reflectedColour)
if self.lambertCoefficient > 0:
lambertAmount = 0
for lightPoint in scene.visibleLights(p):
contribution = (lightPoint - p).normalized().dot(normal)
if contribution > 0:
lambertAmount = lambertAmount + contribution
lambertAmount = min(1, lambertAmount)
c = addColours(c, self.lambertCoefficient * lambertAmount, b)
if self.ambientCoefficient > 0:
c = addColours(c, self.ambientCoefficient, b)
return c
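# Illustrative sketch (separate from the benchmark above; `addColours` is not
# shown there, so this componentwise weighted add is an assumption, and the
# colours/coefficients are made up): how the specular, lambert and ambient
# shares combine into one colour, with ambient = 1 - specular - lambert.
def add_colours_sketch(colour, scale, other):
    # colour + scale * other, per RGB channel
    return tuple(c + scale * o for c, o in zip(colour, other))
base = (1.0, 0.2, 0.2)                            # reddish surface
c = (0.0, 0.0, 0.0)
c = add_colours_sketch(c, 0.2, (0.9, 0.9, 0.9))   # specular: reflected colour
c = add_colours_sketch(c, 0.6 * 0.8, base)        # lambert: 80% of light received
c = add_colours_sketch(c, 0.2, base)              # ambient share
print(c)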
| SimpleSurface |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocolExplicit1.py | {
"start": 1434,
"end": 1602
} | class ____(Protocol7, ABC):
def method1(self):
pass
# This should generate an error because it
# does not implement method1 and is marked final.
@final
| Mixin7 |
python | huggingface__transformers | src/transformers/models/mamba2/modeling_mamba2.py | {
"start": 31994,
"end": 35072
} | class ____(PreTrainedModel):
config: Mamba2Config
base_model_prefix = "backbone"
_no_split_modules = ["Mamba2Block"]
supports_gradient_checkpointing = True
_is_stateful = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights."""
std = self.config.initializer_range
if isinstance(module, Mamba2Mixer):
# S4D real initialization. These are not discretized!
# The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
A = torch.arange(1, self.config.num_heads + 1)
init.copy_(module.A_log, torch.log(A))
init.ones_(module.D)
dt = torch.exp(
torch.rand(self.config.num_heads)
* (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
+ math.log(self.config.time_step_min)
).clamp(min=self.config.time_step_floor)
            # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
inv_dt = dt + torch.log(-torch.expm1(-dt))
init.copy_(module.dt_bias, inv_dt)
init.kaiming_uniform_(module.conv1d.weight, a=math.sqrt(5))
if module.conv1d.bias is not None:
init.zeros_(module.conv1d.bias)
init.kaiming_uniform_(module.out_proj.weight, a=math.sqrt(5))
if self.config.rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
# Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
# We need to reinit p since this code could be called multiple times
# Having just p *= scale would repeatedly scale it down
p = module.out_proj.weight
p /= math.sqrt(self.config.num_hidden_layers)
if isinstance(module, nn.Linear):
init.normal_(module.weight, std=std)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, (Mamba2RMSNorm, MambaRMSNormGated)):
init.ones_(module.weight)
elif isinstance(module, nn.Embedding):
init.normal_(module.weight, std=std)
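# Illustrative sketch (standalone check, separate from the model code above):
# the `inv_dt` expression is the inverse of softplus, so softplus(inv_dt)
# recovers dt.
import torch
import torch.nn.functional as F
dt = torch.tensor([1e-3, 1e-2, 1e-1])
inv_dt = dt + torch.log(-torch.expm1(-dt))
torch.testing.assert_close(F.softplus(inv_dt), dt)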
@dataclass
@auto_docstring(
custom_intro="""
Class for the MAMBA2 model outputs.
"""
)
# Copied from transformers.models.mamba.modeling_mamba.MambaOutput with MAMBA->MAMBA2,Mamba->Mamba2
| Mamba2PreTrainedModel |
python | html5lib__html5lib-python | html5lib/html5parser.py | {
"start": 101328,
"end": 103304
} | class ____(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
__slots__ = tuple()
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
# If we're not in innerHTML mode and the current node is not a
# "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
startTagHandler = _utils.MethodDispatcher([
("html", Phase.startTagHtml),
("frameset", startTagFrameset),
("frame", startTagFrame),
("noframes", startTagNoframes)
])
startTagHandler.default = startTagOther
endTagHandler = _utils.MethodDispatcher([
("frameset", endTagFrameset)
])
endTagHandler.default = endTagOther
| InFramesetPhase |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/input_manager.py | {
"start": 1321,
"end": 1579
} | class ____:
@property
@abstractmethod
def input_config_schema(self) -> IDefinitionConfigSchema:
"""The schema for per-input configuration for inputs that are managed by this
input manager.
"""
@public
| IInputManagerDefinition |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 1608,
"end": 1702
} | class ____(SQLRole):
__slots__ = ()
_role_name = "Literal Python value"
| LiteralValueRole |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/messages/batches.py | {
"start": 13641,
"end": 26354
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncBatchesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return AsyncBatchesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncBatchesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return AsyncBatchesWithStreamingResponse(self)
async def create(
self,
*,
requests: Iterable[batch_create_params.Request],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> MessageBatch:
"""
Send a batch of Message creation requests.
The Message Batches API can be used to process multiple Messages API requests at
once. Once a Message Batch is created, it begins processing immediately. Batches
can take up to 24 hours to complete.
Learn more about the Message Batches API in our
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
Args:
requests: List of requests for prompt completion. Each is an individual request to create
a Message.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
"/v1/messages/batches",
body=await async_maybe_transform({"requests": requests}, batch_create_params.BatchCreateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=MessageBatch,
)
async def retrieve(
self,
message_batch_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> MessageBatch:
"""This endpoint is idempotent and can be used to poll for Message Batch
completion.
To access the results of a Message Batch, make a request to the
`results_url` field in the response.
Learn more about the Message Batches API in our
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
Args:
message_batch_id: ID of the Message Batch.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not message_batch_id:
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
return await self._get(
f"/v1/messages/batches/{message_batch_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=MessageBatch,
)
def list(
self,
*,
after_id: str | Omit = omit,
before_id: str | Omit = omit,
limit: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[MessageBatch, AsyncPage[MessageBatch]]:
"""List all Message Batches within a Workspace.
Most recently created batches are
returned first.
Learn more about the Message Batches API in our
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
Args:
after_id: ID of the object to use as a cursor for pagination. When provided, returns the
page of results immediately after this object.
before_id: ID of the object to use as a cursor for pagination. When provided, returns the
page of results immediately before this object.
limit: Number of items to return per page.
Defaults to `20`. Ranges from `1` to `1000`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/v1/messages/batches",
page=AsyncPage[MessageBatch],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after_id": after_id,
"before_id": before_id,
"limit": limit,
},
batch_list_params.BatchListParams,
),
),
model=MessageBatch,
)
async def delete(
self,
message_batch_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> DeletedMessageBatch:
"""
Delete a Message Batch.
Message Batches can only be deleted once they've finished processing. If you'd
like to delete an in-progress batch, you must first cancel it.
Learn more about the Message Batches API in our
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
Args:
message_batch_id: ID of the Message Batch.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not message_batch_id:
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
return await self._delete(
f"/v1/messages/batches/{message_batch_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=DeletedMessageBatch,
)
async def cancel(
self,
message_batch_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> MessageBatch:
"""Batches may be canceled any time before processing ends.
Once cancellation is
initiated, the batch enters a `canceling` state, at which time the system may
complete any in-progress, non-interruptible requests before finalizing
cancellation.
The number of canceled requests is specified in `request_counts`. To determine
which requests were canceled, check the individual results within the batch.
Note that cancellation may not result in any canceled requests if they were
non-interruptible.
Learn more about the Message Batches API in our
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
Args:
message_batch_id: ID of the Message Batch.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not message_batch_id:
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
return await self._post(
f"/v1/messages/batches/{message_batch_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=MessageBatch,
)
async def results(
self,
message_batch_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncJSONLDecoder[MessageBatchIndividualResponse]:
"""
Streams the results of a Message Batch as a `.jsonl` file.
Each line in the file is a JSON object containing the result of a single request
in the Message Batch. Results are not guaranteed to be in the same order as
requests. Use the `custom_id` field to match results to requests.
Learn more about the Message Batches API in our
[user guide](https://docs.claude.com/en/docs/build-with-claude/batch-processing)
Args:
message_batch_id: ID of the Message Batch.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not message_batch_id:
raise ValueError(f"Expected a non-empty value for `message_batch_id` but received {message_batch_id!r}")
batch = await self.retrieve(message_batch_id=message_batch_id)
if not batch.results_url:
raise AnthropicError(
f"No `results_url` for the given batch; Has it finished processing? {batch.processing_status}"
)
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return await self._get(
batch.results_url,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=AsyncJSONLDecoder[MessageBatchIndividualResponse],
stream=True,
)
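# Illustrative usage sketch (assumptions: ANTHROPIC_API_KEY is set in the
# environment and `message_batch_id` names a batch that has finished
# processing; error handling omitted).
import asyncio
from anthropic import AsyncAnthropic
async def dump_results(message_batch_id: str) -> None:
    client = AsyncAnthropic()
    async for entry in await client.messages.batches.results(message_batch_id):
        # custom_id ties each result back to the request that produced it
        print(entry.custom_id, entry.result.type)
# asyncio.run(dump_results("msgbatch_..."))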
| AsyncBatches |
python | walkccc__LeetCode | solutions/2164. Sort Even and Odd Indices Independently/2164-2.py | {
"start": 0,
"end": 163
} | class ____:
def sortEvenOdd(self, nums: list[int]) -> list[int]:
nums[::2] = sorted(nums[::2])
nums[1::2] = sorted(nums[1::2])[::-1]
return nums
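# Worked example for the class above: even-index values [4, 2] sort ascending
# to [2, 4]; odd-index values [1, 3] sort descending to [3, 1].
print(Solution().sortEvenOdd([4, 1, 2, 3]))  # [2, 3, 4, 1]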
| Solution |
python | realpython__materials | python-menus-toolbars/sample-app.py | {
"start": 825,
"end": 9237
} | class ____(QMainWindow):
"""Main Window."""
def __init__(self, parent=None):
"""Initializer."""
super().__init__(parent)
self.setWindowTitle("Python Menus & Toolbars")
self.resize(400, 200)
self.centralWidget = QLabel("Hello, World")
self.centralWidget.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.setCentralWidget(self.centralWidget)
self._createActions()
self._createMenuBar()
self._createToolBars()
# Uncomment the call to ._createContextMenu() below to create a context
# menu using menu policies. To test this out, you also need to
# comment .contextMenuEvent() and uncomment ._createContextMenu()
# self._createContextMenu()
self._connectActions()
self._createStatusBar()
def _createMenuBar(self):
menuBar = self.menuBar()
# File menu
fileMenu = QMenu("&File", self)
menuBar.addMenu(fileMenu)
fileMenu.addAction(self.newAction)
fileMenu.addAction(self.openAction)
# Open Recent submenu
self.openRecentMenu = fileMenu.addMenu("Open Recent")
fileMenu.addAction(self.saveAction)
# Separator
fileMenu.addSeparator()
fileMenu.addAction(self.exitAction)
# Edit menu
editMenu = menuBar.addMenu("&Edit")
editMenu.addAction(self.copyAction)
editMenu.addAction(self.pasteAction)
editMenu.addAction(self.cutAction)
# Separator
editMenu.addSeparator()
# Find and Replace submenu
findMenu = editMenu.addMenu("Find and Replace")
findMenu.addAction("Find...")
findMenu.addAction("Replace...")
# Help menu
helpMenu = menuBar.addMenu(QIcon(":help-content.svg"), "&Help")
helpMenu.addAction(self.helpContentAction)
helpMenu.addAction(self.aboutAction)
def _createToolBars(self):
# File toolbar
fileToolBar = self.addToolBar("File")
fileToolBar.setMovable(False)
fileToolBar.addAction(self.newAction)
fileToolBar.addAction(self.openAction)
fileToolBar.addAction(self.saveAction)
# Edit toolbar
editToolBar = QToolBar("Edit", self)
self.addToolBar(editToolBar)
editToolBar.addAction(self.copyAction)
editToolBar.addAction(self.pasteAction)
editToolBar.addAction(self.cutAction)
# Widgets
self.fontSizeSpinBox = QSpinBox()
self.fontSizeSpinBox.setFocusPolicy(Qt.NoFocus)
editToolBar.addWidget(self.fontSizeSpinBox)
def _createStatusBar(self):
self.statusbar = self.statusBar()
# Temporary message
self.statusbar.showMessage("Ready", 3000)
# Permanent widget
self.wcLabel = QLabel(f"{self.getWordCount()} Words")
self.statusbar.addPermanentWidget(self.wcLabel)
def _createActions(self):
# File actions
self.newAction = QAction(self)
self.newAction.setText("&New")
self.newAction.setIcon(QIcon(":file-new.svg"))
self.openAction = QAction(QIcon(":file-open.svg"), "&Open...", self)
self.saveAction = QAction(QIcon(":file-save.svg"), "&Save", self)
self.exitAction = QAction("&Exit", self)
# String-based key sequences
self.newAction.setShortcut("Ctrl+N")
self.openAction.setShortcut("Ctrl+O")
self.saveAction.setShortcut("Ctrl+S")
# Help tips
newTip = "Create a new file"
self.newAction.setStatusTip(newTip)
self.newAction.setToolTip(newTip)
self.newAction.setWhatsThis("Create a new and empty text file")
# Edit actions
self.copyAction = QAction(QIcon(":edit-copy.svg"), "&Copy", self)
self.pasteAction = QAction(QIcon(":edit-paste.svg"), "&Paste", self)
self.cutAction = QAction(QIcon(":edit-cut.svg"), "C&ut", self)
# Standard key sequence
self.copyAction.setShortcut(QKeySequence.Copy)
self.pasteAction.setShortcut(QKeySequence.Paste)
self.cutAction.setShortcut(QKeySequence.Cut)
# Help actions
self.helpContentAction = QAction("&Help Content...", self)
self.aboutAction = QAction("&About...", self)
# Uncomment this method to create a context menu using menu policies
# def _createContextMenu(self):
# # Setting contextMenuPolicy
# self.centralWidget.setContextMenuPolicy(Qt.ActionsContextMenu)
# # Populating the widget with actions
# self.centralWidget.addAction(self.newAction)
# self.centralWidget.addAction(self.openAction)
# self.centralWidget.addAction(self.saveAction)
# self.centralWidget.addAction(self.copyAction)
# self.centralWidget.addAction(self.pasteAction)
# self.centralWidget.addAction(self.cutAction)
def contextMenuEvent(self, event):
# Context menu
menu = QMenu(self.centralWidget)
# Populating the menu with actions
menu.addAction(self.newAction)
menu.addAction(self.openAction)
menu.addAction(self.saveAction)
# Separator
separator = QAction(self)
separator.setSeparator(True)
menu.addAction(separator)
menu.addAction(self.copyAction)
menu.addAction(self.pasteAction)
menu.addAction(self.cutAction)
# Launching the menu
menu.exec(event.globalPos())
def _connectActions(self):
# Connect File actions
self.newAction.triggered.connect(self.newFile)
self.openAction.triggered.connect(self.openFile)
self.saveAction.triggered.connect(self.saveFile)
self.exitAction.triggered.connect(self.close)
# Connect Edit actions
self.copyAction.triggered.connect(self.copyContent)
self.pasteAction.triggered.connect(self.pasteContent)
self.cutAction.triggered.connect(self.cutContent)
# Connect Help actions
self.helpContentAction.triggered.connect(self.helpContent)
self.aboutAction.triggered.connect(self.about)
# Connect Open Recent to dynamically populate it
self.openRecentMenu.aboutToShow.connect(self.populateOpenRecent)
# Slots
def newFile(self):
# Logic for creating a new file goes here...
self.centralWidget.setText("<b>File > New</b> clicked")
def openFile(self):
# Logic for opening an existing file goes here...
self.centralWidget.setText("<b>File > Open...</b> clicked")
def saveFile(self):
# Logic for saving a file goes here...
self.centralWidget.setText("<b>File > Save</b> clicked")
def copyContent(self):
# Logic for copying content goes here...
self.centralWidget.setText("<b>Edit > Copy</b> clicked")
def pasteContent(self):
# Logic for pasting content goes here...
self.centralWidget.setText("<b>Edit > Paste</b> clicked")
def cutContent(self):
# Logic for cutting content goes here...
self.centralWidget.setText("<b>Edit > Cut</b> clicked")
def helpContent(self):
# Logic for launching help goes here...
self.centralWidget.setText("<b>Help > Help Content...</b> clicked")
def about(self):
# Logic for showing an about dialog content goes here...
self.centralWidget.setText("<b>Help > About...</b> clicked")
def populateOpenRecent(self):
# Step 1. Remove the old options from the menu
self.openRecentMenu.clear()
# Step 2. Dynamically create the actions
actions = []
filenames = [f"File-{n}" for n in range(5)]
for filename in filenames:
action = QAction(filename, self)
action.triggered.connect(partial(self.openRecentFile, filename))
actions.append(action)
# Step 3. Add the actions to the menu
self.openRecentMenu.addActions(actions)
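    # Aside (not part of the sample app): `partial` is used above instead of a
    # bare lambda because a lambda would close over the loop variable itself,
    # so every menu entry would end up opening the last filename. A sketch of
    # the difference, under that assumption:
    #
    #     action.triggered.connect(lambda: self.openRecentFile(filename))    # late binding
    #     action.triggered.connect(partial(self.openRecentFile, filename))   # value bound now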
def openRecentFile(self, filename):
# Logic for opening a recent file goes here...
self.centralWidget.setText(f"<b>{filename}</b> opened")
def getWordCount(self):
# Logic for computing the word count goes here...
return 42
if __name__ == "__main__":
# Create the application
app = QApplication(sys.argv)
# Create and show the main window
win = Window()
win.show()
# Run the event loop
sys.exit(app.exec_())
| Window |
python | pytorch__pytorch | torch/distributions/lkj_cholesky.py | {
"start": 516,
"end": 6594
} | class ____(Distribution):
r"""
LKJ distribution for lower Cholesky factor of correlation matrices.
The distribution is controlled by ``concentration`` parameter :math:`\eta`
to make the probability of the correlation matrix :math:`M` generated from
a Cholesky factor proportional to :math:`\det(M)^{\eta - 1}`. Because of that,
when ``concentration == 1``, we have a uniform distribution over Cholesky
factors of correlation matrices::
L ~ LKJCholesky(dim, concentration)
X = L @ L' ~ LKJCorr(dim, concentration)
Note that this distribution samples the
Cholesky factor of correlation matrices and not the correlation matrices
themselves and thereby differs slightly from the derivations in [1] for
the `LKJCorr` distribution. For sampling, this uses the Onion method from
[1] Section 3.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> l = LKJCholesky(3, 0.5)
    >>> l.sample() # l @ l.T is a sample of a 3x3 correlation matrix
tensor([[ 1.0000, 0.0000, 0.0000],
[ 0.3516, 0.9361, 0.0000],
[-0.1899, 0.4748, 0.8593]])
Args:
        dim (int): dimension of the matrices
concentration (float or Tensor): concentration/shape parameter of the
distribution (often referred to as eta)
**References**
[1] `Generating random correlation matrices based on vines and extended onion method` (2009),
Daniel Lewandowski, Dorota Kurowicka, Harry Joe.
Journal of Multivariate Analysis. 100. 10.1016/j.jmva.2009.04.008
"""
# pyrefly: ignore [bad-override]
arg_constraints = {"concentration": constraints.positive}
support = constraints.corr_cholesky
def __init__(
self,
dim: int,
concentration: Union[Tensor, float] = 1.0,
validate_args: Optional[bool] = None,
) -> None:
if dim < 2:
raise ValueError(
f"Expected dim to be an integer greater than or equal to 2. Found dim={dim}."
)
self.dim = dim
(self.concentration,) = broadcast_all(concentration)
batch_shape = self.concentration.size()
event_shape = torch.Size((dim, dim))
# This is used to draw vectorized samples from the beta distribution in Sec. 3.2 of [1].
marginal_conc = self.concentration + 0.5 * (self.dim - 2)
offset = torch.arange(
self.dim - 1,
dtype=self.concentration.dtype,
device=self.concentration.device,
)
offset = torch.cat([offset.new_zeros((1,)), offset])
beta_conc1 = offset + 0.5
beta_conc0 = marginal_conc.unsqueeze(-1) - 0.5 * offset
self._beta = Beta(beta_conc1, beta_conc0)
super().__init__(batch_shape, event_shape, validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LKJCholesky, _instance)
batch_shape = torch.Size(batch_shape)
new.dim = self.dim
new.concentration = self.concentration.expand(batch_shape)
new._beta = self._beta.expand(batch_shape + (self.dim,))
super(LKJCholesky, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=torch.Size()):
# This uses the Onion method, but there are a few differences from [1] Sec. 3.2:
# - This vectorizes the for loop and also works for heterogeneous eta.
# - Same algorithm generalizes to n=1.
# - The procedure is simplified since we are sampling the cholesky factor of
# the correlation matrix instead of the correlation matrix itself. As such,
# we only need to generate `w`.
y = self._beta.sample(sample_shape).unsqueeze(-1)
u_normal = torch.randn(
self._extended_shape(sample_shape), dtype=y.dtype, device=y.device
).tril(-1)
u_hypersphere = u_normal / u_normal.norm(dim=-1, keepdim=True)
# Replace NaNs in first row
u_hypersphere[..., 0, :].fill_(0.0)
w = torch.sqrt(y) * u_hypersphere
# Fill diagonal elements; clamp for numerical stability
eps = torch.finfo(w.dtype).tiny
diag_elems = torch.clamp(1 - torch.sum(w**2, dim=-1), min=eps).sqrt()
w += torch.diag_embed(diag_elems)
return w
def log_prob(self, value):
# See: https://mc-stan.org/docs/2_25/functions-reference/cholesky-lkj-correlation-distribution.html
# The probability of a correlation matrix is proportional to
# determinant ** (concentration - 1) = prod(L_ii ^ 2(concentration - 1))
# Additionally, the Jacobian of the transformation from Cholesky factor to
# correlation matrix is:
# prod(L_ii ^ (D - i))
# So the probability of a Cholesky factor is proportional to
# prod(L_ii ^ (2 * concentration - 2 + D - i)) = prod(L_ii ^ order_i)
# with order_i = 2 * concentration - 2 + D - i
if self._validate_args:
self._validate_sample(value)
diag_elems = value.diagonal(dim1=-1, dim2=-2)[..., 1:]
order = torch.arange(2, self.dim + 1, device=self.concentration.device)
order = 2 * (self.concentration - 1).unsqueeze(-1) + self.dim - order
unnormalized_log_pdf = torch.sum(order * diag_elems.log(), dim=-1)
# Compute normalization constant (page 1999 of [1])
dm1 = self.dim - 1
alpha = self.concentration + 0.5 * dm1
denominator = torch.lgamma(alpha) * dm1
numerator = torch.mvlgamma(alpha - 0.5, dm1)
# pi_constant in [1] is D * (D - 1) / 4 * log(pi)
# pi_constant in multigammaln is (D - 1) * (D - 2) / 4 * log(pi)
# hence, we need to add a pi_constant = (D - 1) * log(pi) / 2
pi_constant = 0.5 * dm1 * math.log(math.pi)
normalize_term = pi_constant + numerator - denominator
return unnormalized_log_pdf - normalize_term
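# Illustrative usage sketch (standalone, mirroring the docstring example): rows
# of a sampled Cholesky factor have unit norm, so L @ L.T is a correlation
# matrix with ones on its diagonal.
import torch
from torch.distributions import LKJCholesky
L = LKJCholesky(3, concentration=0.5).sample()
corr = L @ L.T
torch.testing.assert_close(torch.diagonal(corr), torch.ones(3))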
| LKJCholesky |
python | pypa__pip | tests/unit/test_utils.py | {
"start": 1276,
"end": 10333
} | class ____:
"util.egg_link_path_from_location() tests"
def setup_method(self) -> None:
project = "foo"
self.mock_dist = Mock(project_name=project)
self.site_packages = "SITE_PACKAGES"
self.user_site = "USER_SITE"
self.user_site_egglink = os.path.join(self.user_site, f"{project}.egg-link")
self.site_packages_egglink = os.path.join(
self.site_packages,
f"{project}.egg-link",
)
# patches
from pip._internal.utils import egg_link as utils
self.old_site_packages = utils.site_packages
self.mock_site_packages = utils.site_packages = "SITE_PACKAGES"
self.old_running_under_virtualenv = utils.running_under_virtualenv
self.mock_running_under_virtualenv = utils.running_under_virtualenv = Mock()
self.old_virtualenv_no_global = utils.virtualenv_no_global
self.mock_virtualenv_no_global = utils.virtualenv_no_global = Mock()
self.old_user_site = utils.user_site
self.mock_user_site = utils.user_site = self.user_site
from os import path
self.old_isfile = path.isfile
self.mock_isfile = path.isfile = Mock()
def teardown_method(self) -> None:
from pip._internal.utils import egg_link as utils
utils.site_packages = self.old_site_packages
utils.running_under_virtualenv = self.old_running_under_virtualenv
utils.virtualenv_no_global = self.old_virtualenv_no_global
utils.user_site = self.old_user_site
from os import path
path.isfile = self.old_isfile
def eggLinkInUserSite(self, egglink: str) -> bool:
return egglink == self.user_site_egglink
def eggLinkInSitePackages(self, egglink: str) -> bool:
return egglink == self.site_packages_egglink
# ####################### #
# # egglink in usersite # #
# ####################### #
def test_egglink_in_usersite_notvenv(self) -> None:
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = False
self.mock_isfile.side_effect = self.eggLinkInUserSite
assert (
egg_link_path_from_location(self.mock_dist.project_name)
== self.user_site_egglink
)
def test_egglink_in_usersite_venv_noglobal(self) -> None:
self.mock_virtualenv_no_global.return_value = True
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.side_effect = self.eggLinkInUserSite
assert egg_link_path_from_location(self.mock_dist.project_name) is None
def test_egglink_in_usersite_venv_global(self) -> None:
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.side_effect = self.eggLinkInUserSite
assert (
egg_link_path_from_location(self.mock_dist.project_name)
== self.user_site_egglink
)
# ####################### #
# # egglink in sitepkgs # #
# ####################### #
def test_egglink_in_sitepkgs_notvenv(self) -> None:
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = False
self.mock_isfile.side_effect = self.eggLinkInSitePackages
assert (
egg_link_path_from_location(self.mock_dist.project_name)
== self.site_packages_egglink
)
def test_egglink_in_sitepkgs_venv_noglobal(self) -> None:
self.mock_virtualenv_no_global.return_value = True
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.side_effect = self.eggLinkInSitePackages
assert (
egg_link_path_from_location(self.mock_dist.project_name)
== self.site_packages_egglink
)
def test_egglink_in_sitepkgs_venv_global(self) -> None:
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.side_effect = self.eggLinkInSitePackages
assert (
egg_link_path_from_location(self.mock_dist.project_name)
== self.site_packages_egglink
)
# ################################## #
# # egglink in usersite & sitepkgs # #
# ################################## #
def test_egglink_in_both_notvenv(self) -> None:
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = False
self.mock_isfile.return_value = True
assert (
egg_link_path_from_location(self.mock_dist.project_name)
== self.user_site_egglink
)
def test_egglink_in_both_venv_noglobal(self) -> None:
self.mock_virtualenv_no_global.return_value = True
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.return_value = True
assert (
egg_link_path_from_location(self.mock_dist.project_name)
== self.site_packages_egglink
)
def test_egglink_in_both_venv_global(self) -> None:
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.return_value = True
assert (
egg_link_path_from_location(self.mock_dist.project_name)
== self.site_packages_egglink
)
# ############## #
# # no egglink # #
# ############## #
def test_noegglink_in_sitepkgs_notvenv(self) -> None:
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = False
self.mock_isfile.return_value = False
assert egg_link_path_from_location(self.mock_dist.project_name) is None
def test_noegglink_in_sitepkgs_venv_noglobal(self) -> None:
self.mock_virtualenv_no_global.return_value = True
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.return_value = False
assert egg_link_path_from_location(self.mock_dist.project_name) is None
def test_noegglink_in_sitepkgs_venv_global(self) -> None:
self.mock_virtualenv_no_global.return_value = False
self.mock_running_under_virtualenv.return_value = True
self.mock_isfile.return_value = False
assert egg_link_path_from_location(self.mock_dist.project_name) is None
def test_rmtree_errorhandler_nonexistent_directory(tmpdir: Path) -> None:
"""
Test rmtree_errorhandler ignores the given non-existing directory.
"""
nonexistent_path = str(tmpdir / "foo")
mock_func = Mock()
# Argument 3 to "rmtree_errorhandler" has incompatible type "None"; expected
# "Tuple[Type[BaseException], BaseException, TracebackType]"
rmtree_errorhandler(mock_func, nonexistent_path, None) # type: ignore[arg-type]
mock_func.assert_not_called()
def test_rmtree_errorhandler_readonly_directory(tmpdir: Path) -> None:
"""
Test rmtree_errorhandler makes the given read-only directory writable.
"""
# Create read only directory
subdir_path = tmpdir / "subdir"
subdir_path.mkdir()
path = str(subdir_path)
os.chmod(path, stat.S_IREAD)
# Make sure mock_func is called with the given path
mock_func = Mock()
# Argument 3 to "rmtree_errorhandler" has incompatible type "None"; expected
# "Tuple[Type[BaseException], BaseException, TracebackType]"
rmtree_errorhandler(mock_func, path, None) # type: ignore[arg-type]
mock_func.assert_called_with(path)
# Make sure the path is now writable
assert os.stat(path).st_mode & stat.S_IWRITE
def test_rmtree_errorhandler_reraises_error(tmpdir: Path) -> None:
"""
Test rmtree_errorhandler reraises an exception
    for the given unreadable directory.
"""
# Create directory without read permission
path = tmpdir / "subdir"
path.mkdir()
old_mode = path.stat().st_mode
path.chmod(stat.S_IWRITE)
mock_func = Mock()
try:
raise RuntimeError("test message")
except RuntimeError:
# Make sure the handler reraises an exception
with pytest.raises(RuntimeError, match="test message"):
# Argument 3 to "rmtree_errorhandler" has incompatible type
# "Union[Tuple[Type[BaseException], BaseException, TracebackType],
# Tuple[None, None, None]]"; expected "Tuple[Type[BaseException],
# BaseException, TracebackType]"
rmtree_errorhandler(
mock_func, path, sys.exc_info() # type: ignore[arg-type]
)
finally:
# Restore permissions to let pytest to clean up temp dirs
path.chmod(old_mode)
mock_func.assert_not_called()
def test_rmtree_skips_nonexistent_directory() -> None:
"""
Test wrapped rmtree doesn't raise an error
    for the given nonexistent directory.
"""
rmtree.__wrapped__("nonexistent-subdir") # type: ignore[attr-defined]
| Tests_EgglinkPath |
python | django__django | tests/admin_views/models.py | {
"start": 12410,
"end": 12597
} | class ____(models.Model):
post = models.ForeignKey(PrePopulatedPost, models.CASCADE)
subtitle = models.CharField(max_length=100)
subslug = models.SlugField()
| PrePopulatedSubPost |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table35.py | {
"start": 315,
"end": 1508
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table35.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({"num_format": "0.0000"})
data = [
["Foo", 1234, 0, 4321],
["Bar", 1256, 0, 4320],
["Baz", 2234, 0, 4332],
["Bop", 1324, 0, 4333],
]
worksheet.set_column("C:F", 10.288)
worksheet.add_table(
"C2:F6",
{
"data": data,
"columns": [
{},
{},
{},
{"formula": "BASE(0,2)", "format": format1},
],
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-the-guardian-api/components.py | {
"start": 412,
"end": 2149
} | class ____(PaginationStrategy):
"""
    Starts pagination from page 1 instead of the default 0, and stops when currentPage equals totalPages.
"""
config: Config
page_size: Optional[Union[str, int]]
parameters: InitVar[Mapping[str, Any]]
start_from_page: int = 0
inject_on_first_request: bool = False
def __post_init__(self, parameters: Mapping[str, Any]) -> None:
if isinstance(self.page_size, int) or (self.page_size is None):
self._page_size = self.page_size
else:
page_size = InterpolatedString(self.page_size, parameters=parameters).eval(self.config)
if not isinstance(page_size, int):
raise Exception(f"{page_size} is of type {type(page_size)}. Expected {int}")
self._page_size = page_size
@property
def initial_token(self) -> Optional[Any]:
if self.inject_on_first_request:
return self.start_from_page
return None
def next_page_token(
self,
response: requests.Response,
last_page_size: int,
last_record: Optional[Record],
last_page_token_value: Optional[Any],
) -> Optional[Any]:
res = response.json().get("response")
current_page = res.get("currentPage")
total_pages = res.get("pages")
        # The first request to the API does not include the page_token, so it comes in as None when determining whether to paginate
last_page_token_value = last_page_token_value or 0
if current_page < total_pages:
return last_page_token_value + 1
else:
return None
def get_page_size(self) -> Optional[int]:
return self._page_size
| CustomPageIncrement |
python | keras-team__keras | keras/src/ops/numpy_test.py | {
"start": 85599,
"end": 128400
} | class ____(testing.TestCase):
def test_add(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.add(x, y), np.add(x, y))
self.assertAllClose(knp.add(x, z), np.add(x, z))
self.assertAllClose(knp.Add()(x, y), np.add(x, y))
self.assertAllClose(knp.Add()(x, z), np.add(x, z))
def test_heaviside(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [6, 5, 4]])
self.assertAllClose(knp.heaviside(x, y), np.heaviside(x, y))
self.assertAllClose(knp.Heaviside()(x, y), np.heaviside(x, y))
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array(4)
self.assertAllClose(knp.heaviside(x, y), np.heaviside(x, y))
self.assertAllClose(knp.Heaviside()(x, y), np.heaviside(x, y))
def test_hypot(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [6, 5, 4]])
self.assertAllClose(knp.hypot(x, y), np.hypot(x, y))
self.assertAllClose(knp.Hypot()(x, y), np.hypot(x, y))
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array(4)
self.assertAllClose(knp.hypot(x, y), np.hypot(x, y))
self.assertAllClose(knp.Hypot()(x, y), np.hypot(x, y))
def test_subtract(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.subtract(x, y), np.subtract(x, y))
self.assertAllClose(knp.subtract(x, z), np.subtract(x, z))
self.assertAllClose(knp.Subtract()(x, y), np.subtract(x, y))
self.assertAllClose(knp.Subtract()(x, z), np.subtract(x, z))
def test_multiply(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.multiply(x, y), np.multiply(x, y))
self.assertAllClose(knp.multiply(x, z), np.multiply(x, z))
self.assertAllClose(knp.Multiply()(x, y), np.multiply(x, y))
self.assertAllClose(knp.Multiply()(x, z), np.multiply(x, z))
def test_matmul(self):
x = np.ones([2, 3, 4, 5])
y = np.ones([2, 3, 5, 6])
z = np.ones([5, 6])
p = np.ones([4])
self.assertAllClose(knp.matmul(x, y), np.matmul(x, y))
self.assertAllClose(knp.matmul(x, z), np.matmul(x, z))
self.assertAllClose(knp.matmul(p, x), np.matmul(p, x))
self.assertAllClose(knp.Matmul()(x, y), np.matmul(x, y))
self.assertAllClose(knp.Matmul()(x, z), np.matmul(x, z))
self.assertAllClose(knp.Matmul()(p, x), np.matmul(p, x))
@parameterized.named_parameters(
named_product(
(
{
"testcase_name": "rank2",
"x_shape": (5, 3),
"y_shape": (3, 4),
},
{
"testcase_name": "rank3",
"x_shape": (2, 5, 3),
"y_shape": (2, 3, 4),
},
{
"testcase_name": "rank4",
"x_shape": (2, 2, 5, 3),
"y_shape": (2, 2, 3, 4),
},
),
dtype=["float16", "float32", "float64", "int32"],
x_sparse=[False, True],
y_sparse=[False, True],
)
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
@pytest.mark.skipif(testing.tensorflow_uses_gpu(), reason="Segfault")
def test_matmul_sparse(self, dtype, x_shape, y_shape, x_sparse, y_sparse):
if backend.backend() == "tensorflow":
import tensorflow as tf
if x_sparse and y_sparse and dtype in ("float16", "int32"):
pytest.skip(
f"Sparse sparse matmul unsupported for {dtype}"
" with TensorFlow backend"
)
dense_to_sparse = tf.sparse.from_dense
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
dense_to_sparse = functools.partial(
jax_sparse.BCOO.fromdense, n_batch=len(x_shape) - 2
)
rng = np.random.default_rng(0)
x = x_np = (4 * rng.standard_normal(x_shape)).astype(dtype)
if x_sparse:
x_np = np.multiply(x_np, rng.random(x_shape) < 0.7)
x = dense_to_sparse(x_np)
y = y_np = (4 * rng.standard_normal(y_shape)).astype(dtype)
if y_sparse:
y_np = np.multiply(y_np, rng.random(y_shape) < 0.7)
y = dense_to_sparse(y_np)
atol = 0.1 if dtype == "float16" else 1e-4
self.assertAllClose(knp.matmul(x, y), np.matmul(x_np, y_np), atol=atol)
self.assertSparse(knp.matmul(x, y), x_sparse and y_sparse)
def test_power(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.power(x, y), np.power(x, y))
self.assertAllClose(knp.power(x, z), np.power(x, z))
self.assertAllClose(knp.Power()(x, y), np.power(x, y))
self.assertAllClose(knp.Power()(x, z), np.power(x, z))
def test_divide(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.divide(x, y), np.divide(x, y))
self.assertAllClose(knp.divide(x, z), np.divide(x, z))
self.assertAllClose(knp.Divide()(x, y), np.divide(x, y))
self.assertAllClose(knp.Divide()(x, z), np.divide(x, z))
def test_divide_no_nan(self):
x = np.array(
[[2, 1, 0], [np.inf, -np.inf, np.nan], [np.inf, -np.inf, np.nan]]
)
y = np.array([[2, 0, 0], [0, 0, 0], [3, 2, 1]])
expected_result = np.array(
[[1, 0, 0], [0, 0, 0], [np.inf, -np.inf, np.nan]]
)
self.assertAllClose(knp.divide_no_nan(x, y), expected_result)
self.assertAllClose(knp.DivideNoNan()(x, y), expected_result)
def test_true_divide(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]]])
self.assertAllClose(knp.true_divide(x, y), np.true_divide(x, y))
self.assertAllClose(knp.true_divide(x, z), np.true_divide(x, z))
self.assertAllClose(knp.TrueDivide()(x, y), np.true_divide(x, y))
self.assertAllClose(knp.TrueDivide()(x, z), np.true_divide(x, z))
def test_append(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
z = np.array([[[1, 2, 3], [3, 2, 1]], [[4, 5, 6], [3, 2, 1]]])
self.assertAllClose(knp.append(x, y), np.append(x, y))
self.assertAllClose(knp.append(x, y, axis=1), np.append(x, y, axis=1))
self.assertAllClose(knp.append(x, z), np.append(x, z))
self.assertAllClose(knp.Append()(x, y), np.append(x, y))
self.assertAllClose(knp.Append(axis=1)(x, y), np.append(x, y, axis=1))
self.assertAllClose(knp.Append()(x, z), np.append(x, z))
def test_arctan2(self):
x = np.array([[1.0, 2.0, 3.0], [3.0, 2.0, 1.0]])
y = np.array([[4.0, 5.0, 6.0], [3.0, 2.0, 1.0]])
self.assertAllClose(knp.arctan2(x, y), np.arctan2(x, y))
self.assertAllClose(knp.Arctan2()(x, y), np.arctan2(x, y))
a = np.array([0.0, 0.0, 0.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0])
b = np.array([0.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 0.0, 0.0])
self.assertAllClose(knp.arctan2(a, b), np.arctan2(a, b))
self.assertAllClose(knp.Arctan2()(a, b), np.arctan2(a, b))
m = np.array([[3, 4], [7, 8]], dtype=np.int8)
n = np.array([[1, 2], [3, 4]], dtype=float)
self.assertAllClose(knp.arctan2(m, n), np.arctan2(m, n))
self.assertAllClose(knp.Arctan2()(m, n), np.arctan2(m, n))
def test_bitwise_and(self):
x = np.array([2, 5, 255])
y = np.array([3, 14, 16])
self.assertAllClose(knp.bitwise_and(x, y), np.bitwise_and(x, y))
self.assertAllClose(knp.BitwiseAnd()(x, y), np.bitwise_and(x, y))
def test_bitwise_or(self):
x = np.array([2, 5, 255])
y = np.array([3, 14, 16])
self.assertAllClose(knp.bitwise_or(x, y), np.bitwise_or(x, y))
self.assertAllClose(knp.BitwiseOr()(x, y), np.bitwise_or(x, y))
def test_bitwise_xor(self):
x = np.array([2, 5, 255])
y = np.array([3, 14, 16])
self.assertAllClose(knp.bitwise_xor(x, y), np.bitwise_xor(x, y))
self.assertAllClose(knp.BitwiseXor()(x, y), np.bitwise_xor(x, y))
def test_bitwise_left_shift(self):
x = np.array([50, 60, 70])
y = np.array([1, 2, 3])
self.assertAllClose(knp.bitwise_left_shift(x, y), np.left_shift(x, y))
self.assertAllClose(knp.BitwiseLeftShift()(x, y), np.left_shift(x, y))
# left_shift is same as bitwise_left_shift
def test_bitwise_right_shift(self):
x = np.array([5, 6, 7])
y = np.array([1, 2, 3])
self.assertAllClose(knp.bitwise_right_shift(x, y), np.right_shift(x, y))
self.assertAllClose(knp.BitwiseRightShift()(x, y), np.right_shift(x, y))
# right_shift is same as bitwise_right_shift
def test_cross(self):
x1 = np.ones([2, 1, 4, 3])
x2 = np.ones([2, 1, 4, 2])
y1 = np.ones([2, 1, 4, 3])
y2 = np.ones([1, 5, 4, 3])
y3 = np.ones([1, 5, 4, 2])
self.assertAllClose(knp.cross(x1, y1), np.cross(x1, y1))
self.assertAllClose(knp.cross(x1, y2), np.cross(x1, y2))
if backend.backend() != "torch":
# API divergence between `torch.cross` and `np.cross`
# `torch.cross` only allows dim 3, `np.cross` allows dim 2 or 3
self.assertAllClose(knp.cross(x1, y3), np.cross(x1, y3))
self.assertAllClose(knp.cross(x2, y3), np.cross(x2, y3))
self.assertAllClose(knp.Cross()(x1, y1), np.cross(x1, y1))
self.assertAllClose(knp.Cross()(x1, y2), np.cross(x1, y2))
if backend.backend() != "torch":
# API divergence between `torch.cross` and `np.cross`
# `torch.cross` only allows dim 3, `np.cross` allows dim 2 or 3
self.assertAllClose(knp.Cross()(x1, y3), np.cross(x1, y3))
self.assertAllClose(knp.Cross()(x2, y3), np.cross(x2, y3))
# Test axis is not None
self.assertAllClose(
knp.cross(x1, y1, axis=-1), np.cross(x1, y1, axis=-1)
)
self.assertAllClose(
knp.Cross(axis=-1)(x1, y1), np.cross(x1, y1, axis=-1)
)
def test_einsum(self):
x = np.arange(24).reshape([2, 3, 4]).astype("float32")
y = np.arange(24).reshape([2, 4, 3]).astype("float32")
self.assertAllClose(
knp.einsum("ijk,lkj->il", x, y),
np.einsum("ijk,lkj->il", x, y),
)
self.assertAllClose(
knp.einsum("ijk,ikj->i", x, y),
np.einsum("ijk,ikj->i", x, y),
)
self.assertAllClose(
knp.einsum("i...,j...k->...ijk", x, y),
np.einsum("i..., j...k->...ijk", x, y),
)
self.assertAllClose(knp.einsum(",ijk", 5, y), np.einsum(",ijk", 5, y))
self.assertAllClose(
knp.Einsum("ijk,lkj->il")(x, y),
np.einsum("ijk,lkj->il", x, y),
)
self.assertAllClose(
knp.Einsum("ijk,ikj->i")(x, y),
np.einsum("ijk,ikj->i", x, y),
)
self.assertAllClose(
knp.Einsum("i...,j...k->...ijk")(x, y),
np.einsum("i...,j...k->...ijk", x, y),
)
self.assertAllClose(knp.Einsum(",ijk")(5, y), np.einsum(",ijk", 5, y))
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason=f"{backend.backend()} doesn't implement custom ops for einsum.",
)
def test_einsum_custom_ops_for_tensorflow(self):
subscripts = "a,b->ab"
x = np.arange(2).reshape([2]).astype("float32")
y = np.arange(3).reshape([3]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "ab,b->a"
x = np.arange(6).reshape([2, 3]).astype("float32")
y = np.arange(3).reshape([3]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "ab,bc->ac"
x = np.arange(6).reshape([2, 3]).astype("float32")
y = np.arange(12).reshape([3, 4]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "ab,cb->ac"
x = np.arange(6).reshape([2, 3]).astype("float32")
y = np.arange(12).reshape([4, 3]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abc,cd->abd"
x = np.arange(24).reshape([2, 3, 4]).astype("float32")
y = np.arange(20).reshape([4, 5]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abc,cde->abde"
x = np.arange(24).reshape([2, 3, 4]).astype("float32")
y = np.arange(120).reshape([4, 5, 6]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abc,dc->abd"
x = np.arange(24).reshape([2, 3, 4]).astype("float32")
y = np.arange(20).reshape([5, 4]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abc,dce->abde"
x = np.arange(24).reshape([2, 3, 4]).astype("float32")
y = np.arange(120).reshape([5, 4, 6]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abc,dec->abde"
x = np.arange(24).reshape([2, 3, 4]).astype("float32")
y = np.arange(120).reshape([5, 6, 4]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abcd,abde->abce"
x = np.arange(120).reshape([2, 3, 4, 5]).astype("float32")
y = np.arange(180).reshape([2, 3, 5, 6]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abcd,abed->abce"
x = np.arange(120).reshape([2, 3, 4, 5]).astype("float32")
y = np.arange(180).reshape([2, 3, 6, 5]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abcd,acbe->adbe"
x = np.arange(120).reshape([2, 3, 4, 5]).astype("float32")
y = np.arange(144).reshape([2, 4, 3, 6]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abcd,adbe->acbe"
x = np.arange(120).reshape([2, 3, 4, 5]).astype("float32")
y = np.arange(180).reshape([2, 5, 3, 6]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abcd,aecd->acbe"
x = np.arange(120).reshape([2, 3, 4, 5]).astype("float32")
y = np.arange(240).reshape([2, 6, 4, 5]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abcd,aecd->aceb"
x = np.arange(120).reshape([2, 3, 4, 5]).astype("float32")
y = np.arange(240).reshape([2, 6, 4, 5]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abcd,cde->abe"
x = np.arange(120).reshape([2, 3, 4, 5]).astype("float32")
y = np.arange(120).reshape([4, 5, 6]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abcd,ced->abe"
x = np.arange(120).reshape([2, 3, 4, 5]).astype("float32")
y = np.arange(120).reshape([4, 6, 5]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abcd,ecd->abe"
x = np.arange(120).reshape([2, 3, 4, 5]).astype("float32")
y = np.arange(120).reshape([6, 4, 5]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abcde,aebf->adbcf"
x = np.arange(720).reshape([2, 3, 4, 5, 6]).astype("float32")
y = np.arange(252).reshape([2, 6, 3, 7]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
subscripts = "abcde,afce->acdbf"
x = np.arange(720).reshape([2, 3, 4, 5, 6]).astype("float32")
y = np.arange(336).reshape([2, 7, 4, 6]).astype("float32")
self.assertAllClose(
knp.einsum(subscripts, x, y), np.einsum(subscripts, x, y)
)
def test_full_like(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
self.assertAllClose(knp.full_like(x, 2), np.full_like(x, 2))
self.assertAllClose(
knp.full_like(x, 2, dtype="float32"),
np.full_like(x, 2, dtype="float32"),
)
self.assertAllClose(
knp.full_like(x, np.ones([2, 3])),
np.full_like(x, np.ones([2, 3])),
)
self.assertAllClose(knp.FullLike()(x, 2), np.full_like(x, 2))
self.assertAllClose(
knp.FullLike(dtype="float32")(x, 2),
np.full_like(x, 2, dtype="float32"),
)
self.assertAllClose(
knp.FullLike()(x, np.ones([2, 3])),
np.full_like(x, np.ones([2, 3])),
)
def test_gcd(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.gcd(x, y), np.gcd(x, y))
self.assertAllClose(knp.gcd(x, 2), np.gcd(x, 2))
self.assertAllClose(knp.gcd(2, x), np.gcd(2, x))
self.assertAllClose(knp.Gcd()(x, y), np.gcd(x, y))
self.assertAllClose(knp.Gcd()(x, 2), np.gcd(x, 2))
self.assertAllClose(knp.Gcd()(2, x), np.gcd(2, x))
def test_greater(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.greater(x, y), np.greater(x, y))
self.assertAllClose(knp.greater(x, 2), np.greater(x, 2))
self.assertAllClose(knp.greater(2, x), np.greater(2, x))
self.assertAllClose(knp.Greater()(x, y), np.greater(x, y))
self.assertAllClose(knp.Greater()(x, 2), np.greater(x, 2))
self.assertAllClose(knp.Greater()(2, x), np.greater(2, x))
def test_greater_equal(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(
knp.greater_equal(x, y),
np.greater_equal(x, y),
)
self.assertAllClose(
knp.greater_equal(x, 2),
np.greater_equal(x, 2),
)
self.assertAllClose(
knp.greater_equal(2, x),
np.greater_equal(2, x),
)
self.assertAllClose(
knp.GreaterEqual()(x, y),
np.greater_equal(x, y),
)
self.assertAllClose(
knp.GreaterEqual()(x, 2),
np.greater_equal(x, 2),
)
self.assertAllClose(
knp.GreaterEqual()(2, x),
np.greater_equal(2, x),
)
def test_isclose(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.isclose(x, y), np.isclose(x, y))
self.assertAllClose(knp.isclose(x, 2), np.isclose(x, 2))
self.assertAllClose(knp.isclose(2, x), np.isclose(2, x))
self.assertAllClose(knp.Isclose()(x, y), np.isclose(x, y))
self.assertAllClose(knp.Isclose()(x, 2), np.isclose(x, 2))
self.assertAllClose(knp.Isclose()(2, x), np.isclose(2, x))
def test_isin(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.isin(x, y), np.isin(x, y))
self.assertAllClose(knp.isin(x, 2), np.isin(x, 2))
self.assertAllClose(knp.isin(2, x), np.isin(2, x))
self.assertAllClose(
knp.isin(x, y, assume_unique=True),
np.isin(x, y, assume_unique=True),
)
self.assertAllClose(
knp.isin(x, 2, assume_unique=True),
np.isin(x, 2, assume_unique=True),
)
self.assertAllClose(
knp.isin(2, x, assume_unique=True),
np.isin(2, x, assume_unique=True),
)
self.assertAllClose(
knp.isin(x, y, invert=True), np.isin(x, y, invert=True)
)
self.assertAllClose(
knp.isin(x, 2, invert=True), np.isin(x, 2, invert=True)
)
self.assertAllClose(
knp.isin(2, x, invert=True), np.isin(2, x, invert=True)
)
self.assertAllClose(
knp.isin(x, y, assume_unique=True, invert=True),
np.isin(x, y, assume_unique=True, invert=True),
)
self.assertAllClose(
knp.isin(x, 2, assume_unique=True, invert=True),
np.isin(x, 2, assume_unique=True, invert=True),
)
self.assertAllClose(
knp.isin(2, x, assume_unique=True, invert=True),
np.isin(2, x, assume_unique=True, invert=True),
)
self.assertAllClose(knp.IsIn()(x, y), np.isin(x, y))
self.assertAllClose(knp.IsIn()(x, 2), np.isin(x, 2))
self.assertAllClose(knp.IsIn()(2, x), np.isin(2, x))
self.assertAllClose(
knp.IsIn(assume_unique=True)(x, y),
np.isin(x, y, assume_unique=True),
)
self.assertAllClose(
knp.IsIn(invert=True)(x, y),
np.isin(x, y, invert=True),
)
self.assertAllClose(
knp.IsIn(assume_unique=True, invert=True)(x, y),
np.isin(x, y, assume_unique=True, invert=True),
)
def test_kron(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.kron(x, y), np.kron(x, y))
self.assertAllClose(knp.Kron()(x, y), np.kron(x, y))
def test_lcm(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.lcm(x, y), np.lcm(x, y))
self.assertAllClose(knp.Lcm()(x, y), np.lcm(x, y))
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array(4)
self.assertAllClose(knp.lcm(x, y), np.lcm(x, y))
self.assertAllClose(knp.Lcm()(x, y), np.lcm(x, y))
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([4])
self.assertAllClose(knp.lcm(x, y), np.lcm(x, y))
self.assertAllClose(knp.Lcm()(x, y), np.lcm(x, y))
def test_less(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.less(x, y), np.less(x, y))
self.assertAllClose(knp.less(x, 2), np.less(x, 2))
self.assertAllClose(knp.less(2, x), np.less(2, x))
self.assertAllClose(knp.Less()(x, y), np.less(x, y))
self.assertAllClose(knp.Less()(x, 2), np.less(x, 2))
self.assertAllClose(knp.Less()(2, x), np.less(2, x))
def test_less_equal(self):
x = np.array([[1, 2, 3], [3, 2, 1]])
y = np.array([[4, 5, 6], [3, 2, 1]])
self.assertAllClose(knp.less_equal(x, y), np.less_equal(x, y))
self.assertAllClose(knp.less_equal(x, 2), np.less_equal(x, 2))
self.assertAllClose(knp.less_equal(2, x), np.less_equal(2, x))
self.assertAllClose(knp.LessEqual()(x, y), np.less_equal(x, y))
self.assertAllClose(knp.LessEqual()(x, 2), np.less_equal(x, 2))
self.assertAllClose(knp.LessEqual()(2, x), np.less_equal(2, x))
def test_linspace(self):
self.assertAllClose(knp.linspace(0, 10, 5), np.linspace(0, 10, 5))
self.assertAllClose(
knp.linspace(0, 10, 5, endpoint=False),
np.linspace(0, 10, 5, endpoint=False),
)
self.assertAllClose(knp.Linspace(num=5)(0, 10), np.linspace(0, 10, 5))
self.assertAllClose(
knp.Linspace(num=5, endpoint=False)(0, 10),
np.linspace(0, 10, 5, endpoint=False),
)
self.assertAllClose(
knp.Linspace(num=0, endpoint=False)(0, 10),
np.linspace(0, 10, 0, endpoint=False),
)
start = np.zeros([2, 3, 4])
stop = np.ones([2, 3, 4])
self.assertAllClose(
knp.linspace(start, stop, 5, retstep=True)[0],
np.linspace(start, stop, 5, retstep=True)[0],
)
self.assertAllClose(
knp.linspace(start, stop, 5, endpoint=False, retstep=True)[0],
np.linspace(start, stop, 5, endpoint=False, retstep=True)[0],
)
self.assertAllClose(
knp.linspace(
start, stop, 5, endpoint=False, retstep=True, dtype="int32"
)[0],
np.linspace(
start, stop, 5, endpoint=False, retstep=True, dtype="int32"
)[0],
)
self.assertAllClose(
knp.Linspace(5, retstep=True)(start, stop)[0],
np.linspace(start, stop, 5, retstep=True)[0],
)
self.assertAllClose(
knp.Linspace(5, endpoint=False, retstep=True)(start, stop)[0],
np.linspace(start, stop, 5, endpoint=False, retstep=True)[0],
)
self.assertAllClose(
knp.Linspace(5, endpoint=False, retstep=True, dtype="int32")(
start, stop
)[0],
np.linspace(
start, stop, 5, endpoint=False, retstep=True, dtype="int32"
)[0],
)
# Test `num` as a tensor
# https://github.com/keras-team/keras/issues/19772
self.assertAllClose(
knp.linspace(0, 10, backend.convert_to_tensor(5)),
np.linspace(0, 10, 5),
)
self.assertAllClose(
knp.linspace(0, 10, backend.convert_to_tensor(5), endpoint=False),
np.linspace(0, 10, 5, endpoint=False),
)
def test_logical_and(self):
x = np.array([[True, False], [True, True]])
y = np.array([[False, False], [True, False]])
self.assertAllClose(knp.logical_and(x, y), np.logical_and(x, y))
self.assertAllClose(knp.logical_and(x, True), np.logical_and(x, True))
self.assertAllClose(knp.logical_and(True, x), np.logical_and(True, x))
self.assertAllClose(knp.LogicalAnd()(x, y), np.logical_and(x, y))
self.assertAllClose(knp.LogicalAnd()(x, True), np.logical_and(x, True))
self.assertAllClose(knp.LogicalAnd()(True, x), np.logical_and(True, x))
def test_logical_or(self):
x = np.array([[True, False], [True, True]])
y = np.array([[False, False], [True, False]])
self.assertAllClose(knp.logical_or(x, y), np.logical_or(x, y))
self.assertAllClose(knp.logical_or(x, True), np.logical_or(x, True))
self.assertAllClose(knp.logical_or(True, x), np.logical_or(True, x))
self.assertAllClose(knp.LogicalOr()(x, y), np.logical_or(x, y))
self.assertAllClose(knp.LogicalOr()(x, True), np.logical_or(x, True))
self.assertAllClose(knp.LogicalOr()(True, x), np.logical_or(True, x))
def test_logspace(self):
self.assertAllClose(knp.logspace(0, 10, 5), np.logspace(0, 10, 5))
self.assertAllClose(
knp.logspace(0, 10, 5, endpoint=False),
np.logspace(0, 10, 5, endpoint=False),
)
self.assertAllClose(knp.Logspace(num=5)(0, 10), np.logspace(0, 10, 5))
self.assertAllClose(
knp.Logspace(num=5, endpoint=False)(0, 10),
np.logspace(0, 10, 5, endpoint=False),
)
start = np.zeros([2, 3, 4])
stop = np.ones([2, 3, 4])
self.assertAllClose(
knp.logspace(start, stop, 5, base=10),
np.logspace(start, stop, 5, base=10),
)
self.assertAllClose(
knp.logspace(start, stop, 5, endpoint=False, base=10),
np.logspace(start, stop, 5, endpoint=False, base=10),
)
self.assertAllClose(
knp.Logspace(5, base=10)(start, stop),
np.logspace(start, stop, 5, base=10),
)
self.assertAllClose(
knp.Logspace(5, endpoint=False, base=10)(start, stop),
np.logspace(start, stop, 5, endpoint=False, base=10),
)
def test_maximum(self):
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
self.assertAllClose(knp.maximum(x, y), np.maximum(x, y))
self.assertAllClose(knp.maximum(x, 1), np.maximum(x, 1))
self.assertAllClose(knp.maximum(1, x), np.maximum(1, x))
self.assertAllClose(knp.Maximum()(x, y), np.maximum(x, y))
self.assertAllClose(knp.Maximum()(x, 1), np.maximum(x, 1))
self.assertAllClose(knp.Maximum()(1, x), np.maximum(1, x))
def test_minimum(self):
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
self.assertAllClose(knp.minimum(x, y), np.minimum(x, y))
self.assertAllClose(knp.minimum(x, 1), np.minimum(x, 1))
self.assertAllClose(knp.minimum(1, x), np.minimum(1, x))
self.assertAllClose(knp.Minimum()(x, y), np.minimum(x, y))
self.assertAllClose(knp.Minimum()(x, 1), np.minimum(x, 1))
self.assertAllClose(knp.Minimum()(1, x), np.minimum(1, x))
def test_mod(self):
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
self.assertAllClose(knp.mod(x, y), np.mod(x, y))
self.assertAllClose(knp.mod(x, 1), np.mod(x, 1))
self.assertAllClose(knp.mod(1, x), np.mod(1, x))
self.assertAllClose(knp.Mod()(x, y), np.mod(x, y))
self.assertAllClose(knp.Mod()(x, 1), np.mod(x, 1))
self.assertAllClose(knp.Mod()(1, x), np.mod(1, x))
def test_not_equal(self):
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
self.assertAllClose(knp.not_equal(x, y), np.not_equal(x, y))
self.assertAllClose(knp.not_equal(x, 1), np.not_equal(x, 1))
self.assertAllClose(knp.not_equal(1, x), np.not_equal(1, x))
self.assertAllClose(knp.NotEqual()(x, y), np.not_equal(x, y))
self.assertAllClose(knp.NotEqual()(x, 1), np.not_equal(x, 1))
self.assertAllClose(knp.NotEqual()(1, x), np.not_equal(1, x))
def test_outer(self):
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])
self.assertAllClose(knp.outer(x, y), np.outer(x, y))
self.assertAllClose(knp.Outer()(x, y), np.outer(x, y))
x = np.ones([2, 3, 4])
y = np.ones([2, 3, 4, 5, 6])
self.assertAllClose(knp.outer(x, y), np.outer(x, y))
self.assertAllClose(knp.Outer()(x, y), np.outer(x, y))
def test_quantile(self):
x = np.arange(24).reshape([2, 3, 4]).astype("float32")
# q as scalar
q = np.array(0.5, dtype="float32")
self.assertAllClose(knp.quantile(x, q), np.quantile(x, q))
self.assertAllClose(
knp.quantile(x, q, keepdims=True), np.quantile(x, q, keepdims=True)
)
# q as 1D tensor
q = np.array([0.5, 1.0], dtype="float32")
self.assertAllClose(knp.quantile(x, q), np.quantile(x, q))
self.assertAllClose(
knp.quantile(x, q, keepdims=True), np.quantile(x, q, keepdims=True)
)
self.assertAllClose(
knp.quantile(x, q, axis=1), np.quantile(x, q, axis=1)
)
self.assertAllClose(
knp.quantile(x, q, axis=1, keepdims=True),
np.quantile(x, q, axis=1, keepdims=True),
)
# multiple axes
self.assertAllClose(
knp.quantile(x, q, axis=(1, 2)), np.quantile(x, q, axis=(1, 2))
)
# test all supported methods
q = np.array([0.501, 1.0], dtype="float32")
for method in ["linear", "lower", "higher", "midpoint", "nearest"]:
self.assertAllClose(
knp.quantile(x, q, method=method),
np.quantile(x, q, method=method),
)
self.assertAllClose(
knp.quantile(x, q, axis=1, method=method),
np.quantile(x, q, axis=1, method=method),
)
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="Only test tensorflow backend",
)
def test_quantile_in_tf_function(self):
import tensorflow as tf
x = knp.array([[1, 2, 3], [4, 5, 6]])
q = [0.5]
expected_output = np.array([[2, 5]])
@tf.function
def run_quantile(x, q, axis):
return knp.quantile(x, q, axis=axis)
result = run_quantile(x, q, axis=1)
self.assertAllClose(result, expected_output)
def test_take(self):
x = np.arange(24).reshape([1, 2, 3, 4])
indices = np.array([0, 1])
self.assertAllClose(knp.take(x, indices), np.take(x, indices))
self.assertAllClose(knp.take(x, 0), np.take(x, 0))
self.assertAllClose(knp.take(x, 0, axis=1), np.take(x, 0, axis=1))
self.assertAllClose(knp.Take()(x, indices), np.take(x, indices))
self.assertAllClose(knp.Take()(x, 0), np.take(x, 0))
self.assertAllClose(knp.Take(axis=1)(x, 0), np.take(x, 0, axis=1))
# Test with multi-dimensional indices
rng = np.random.default_rng(0)
x = rng.standard_normal((2, 3, 4, 5))
indices = rng.integers(0, 4, (6, 7))
self.assertAllClose(
knp.take(x, indices, axis=2), np.take(x, indices, axis=2)
)
# Test with negative axis
self.assertAllClose(
knp.take(x, indices, axis=-2), np.take(x, indices, axis=-2)
)
# Test with axis=None & x.ndim=2
x = np.array(([1, 2], [3, 4]))
indices = np.array([2, 3])
self.assertAllClose(
knp.take(x, indices, axis=None), np.take(x, indices, axis=None)
)
# Test with negative indices
x = rng.standard_normal((2, 3, 4, 5))
indices = rng.integers(-3, 0, (6, 7))
self.assertAllClose(
knp.take(x, indices, axis=2), np.take(x, indices, axis=2)
)
@parameterized.named_parameters(
named_product(
[
{"testcase_name": "axis_none", "axis": None},
{"testcase_name": "axis_0", "axis": 0},
{"testcase_name": "axis_1", "axis": 1},
{"testcase_name": "axis_minus1", "axis": -1},
],
dtype=[
"float16",
"float32",
"float64",
"uint8",
"int8",
"int16",
"int32",
],
)
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_take_sparse(self, dtype, axis):
rng = np.random.default_rng(0)
x = (4 * rng.standard_normal((3, 4, 5))).astype(dtype)
if backend.backend() == "tensorflow":
import tensorflow as tf
indices = tf.SparseTensor([[0, 0], [1, 2]], [-1, 2], (2, 3))
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
indices = jax_sparse.BCOO(([-1, 2], [[0, 0], [1, 2]]), shape=(2, 3))
self.assertAllClose(
knp.take(x, indices, axis=axis),
np.take(x, backend.convert_to_numpy(indices), axis=axis),
)
@parameterized.named_parameters(
named_product(
[
{"testcase_name": "axis_none", "axis": None},
{"testcase_name": "axis_0", "axis": 0},
{"testcase_name": "axis_1", "axis": 1},
{"testcase_name": "axis_minus1", "axis": -1},
],
dtype=[
"float16",
"float32",
"float64",
"uint8",
"int8",
"int16",
"int32",
],
)
)
@pytest.mark.skipif(
not backend.SUPPORTS_RAGGED_TENSORS,
reason="Backend does not support ragged tensors.",
)
def test_take_ragged(self, dtype, axis):
rng = np.random.default_rng(0)
x = (4 * rng.standard_normal((3, 4, 5))).astype(dtype)
if backend.backend() == "tensorflow":
import tensorflow as tf
indices = tf.ragged.constant([[2], [0, -1, 1]])
mask = backend.convert_to_numpy(tf.ones_like(indices))
if axis == 0:
mask = np.expand_dims(mask, (2, 3))
elif axis == 1:
mask = np.expand_dims(mask, (2,))
self.assertAllClose(
knp.take(x, indices, axis=axis),
np.take(x, backend.convert_to_numpy(indices), axis=axis)
* mask.astype(dtype),
)
def test_take_along_axis(self):
x = np.arange(24).reshape([1, 2, 3, 4])
indices = np.ones([1, 4, 1, 1], dtype=np.int32)
self.assertAllClose(
knp.take_along_axis(x, indices, axis=1),
np.take_along_axis(x, indices, axis=1),
)
self.assertAllClose(
knp.TakeAlongAxis(axis=1)(x, indices),
np.take_along_axis(x, indices, axis=1),
)
x = np.arange(12).reshape([1, 1, 3, 4])
indices = np.ones([1, 4, 1, 1], dtype=np.int32)
self.assertAllClose(
knp.take_along_axis(x, indices, axis=2),
np.take_along_axis(x, indices, axis=2),
)
self.assertAllClose(
knp.TakeAlongAxis(axis=2)(x, indices),
np.take_along_axis(x, indices, axis=2),
)
# Test with axis=None
x = np.arange(12).reshape([1, 1, 3, 4])
indices = np.array([1, 2, 3], dtype=np.int32)
self.assertAllClose(
knp.take_along_axis(x, indices, axis=None),
np.take_along_axis(x, indices, axis=None),
)
self.assertAllClose(
knp.TakeAlongAxis(axis=None)(x, indices),
np.take_along_axis(x, indices, axis=None),
)
# Test with negative indices
x = np.arange(12).reshape([1, 1, 3, 4])
indices = np.full([1, 4, 1, 1], -1, dtype=np.int32)
self.assertAllClose(
knp.take_along_axis(x, indices, axis=2),
np.take_along_axis(x, indices, axis=2),
)
self.assertAllClose(
knp.TakeAlongAxis(axis=2)(x, indices),
np.take_along_axis(x, indices, axis=2),
)
def test_tensordot(self):
x = np.arange(24).reshape([1, 2, 3, 4]).astype("float32")
y = np.arange(24).reshape([3, 4, 1, 2]).astype("float32")
self.assertAllClose(
knp.tensordot(x, y, axes=2), np.tensordot(x, y, axes=2)
)
self.assertAllClose(
knp.tensordot(x, y, axes=([0, 1], [2, 3])),
np.tensordot(x, y, axes=([0, 1], [2, 3])),
)
self.assertAllClose(
knp.Tensordot(axes=2)(x, y),
np.tensordot(x, y, axes=2),
)
self.assertAllClose(
knp.Tensordot(axes=([0, 1], [2, 3]))(x, y),
np.tensordot(x, y, axes=([0, 1], [2, 3])),
)
self.assertAllClose(
knp.Tensordot(axes=[0, 2])(x, y),
np.tensordot(x, y, axes=[0, 2]),
)
def test_vdot(self):
x = np.array([1.0, 2.0, 3.0])
y = np.array([4.0, 5.0, 6.0])
self.assertAllClose(knp.vdot(x, y), np.vdot(x, y))
self.assertAllClose(knp.Vdot()(x, y), np.vdot(x, y))
def test_inner(self):
x = np.array([1.0, 2.0, 3.0])
y = np.array([4.0, 5.0, 6.0])
self.assertAllClose(knp.inner(x, y), np.inner(x, y))
self.assertAllClose(knp.Inner()(x, y), np.inner(x, y))
def test_where(self):
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])
self.assertAllClose(knp.where(x > 1, x, y), np.where(x > 1, x, y))
self.assertAllClose(knp.Where()(x > 1, x, y), np.where(x > 1, x, y))
self.assertAllClose(knp.where(x > 1), np.where(x > 1))
self.assertAllClose(knp.Where()(x > 1), np.where(x > 1))
with self.assertRaisesRegex(
ValueError, "`x1` and `x2` either both should be `None`"
):
knp.where(x > 1, x, None)
def test_digitize(self):
x = np.array([0.0, 1.0, 3.0, 1.6])
bins = np.array([0.0, 3.0, 4.5, 7.0])
self.assertAllClose(knp.digitize(x, bins), np.digitize(x, bins))
self.assertAllClose(knp.Digitize()(x, bins), np.digitize(x, bins))
self.assertTrue(
standardize_dtype(knp.digitize(x, bins).dtype) == "int32"
)
self.assertTrue(
standardize_dtype(knp.Digitize()(x, bins).dtype) == "int32"
)
x = np.array([0.2, 6.4, 3.0, 1.6])
bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
self.assertAllClose(knp.digitize(x, bins), np.digitize(x, bins))
self.assertAllClose(knp.Digitize()(x, bins), np.digitize(x, bins))
self.assertTrue(
standardize_dtype(knp.digitize(x, bins).dtype) == "int32"
)
self.assertTrue(
standardize_dtype(knp.Digitize()(x, bins).dtype) == "int32"
)
x = np.array([1, 4, 10, 15])
bins = np.array([4, 10, 14, 15])
self.assertAllClose(knp.digitize(x, bins), np.digitize(x, bins))
self.assertAllClose(knp.Digitize()(x, bins), np.digitize(x, bins))
self.assertTrue(
standardize_dtype(knp.digitize(x, bins).dtype) == "int32"
)
self.assertTrue(
standardize_dtype(knp.Digitize()(x, bins).dtype) == "int32"
)
| NumpyTwoInputOpsCorrectnessTest |
python | getsentry__sentry | src/sentry/api/serializers/types.py | {
"start": 1199,
"end": 1619
} | class ____(TypedDict, total=False):
id: int
commitCount: int
data: dict[str, Any]
dateCreated: datetime
dateReleased: datetime | None
deployCount: int
ref: str | None
lastCommit: dict[str, Any] | None
lastDeploy: LastDeploy | None
status: str
url: str | None
userAgent: str | None
version: str | None
versionInfo: VersionInfo | None
| GroupEventReleaseSerializerResponse |
python | ansible__ansible | lib/ansible/modules/group.py | {
"start": 13867,
"end": 14107
} | class ____(FreeBsdGroup):
"""
This is a DragonFlyBSD Group manipulation class.
It inherits all behaviors from FreeBsdGroup class.
"""
platform = 'DragonFly'
# ===========================================
| DragonFlyBsdGroup |
python | facebook__pyre-check | tools/generate_taint_models/get_globals.py | {
"start": 646,
"end": 11293
} | class ____(ModelGenerator[Model]):
def __init__(
self,
root: str,
stub_root: Optional[str] = None,
blacklisted_globals: Optional[Set[str]] = None,
blacklisted_global_directories: Optional[Set[str]] = None,
) -> None:
self.root: str = root
self.stub_root: Final[Optional[str]] = stub_root
self.blacklisted_globals: Set[str] = blacklisted_globals or set()
self.blacklisted_global_directories: Set[str] = (
blacklisted_global_directories or set()
)
# flake8 suggests to reduce the complexity of the function, hence the noqa line
def _globals(self, root: str, path: str) -> Iterable[Model]: # noqa: C901
globals = set()
# The parent of the property needs to be stored as well, as we only store the
# module qualifier.
cached_properties: Set[Tuple[Optional[str], FunctionDefinition]] = set()
module = load_module(path)
if not module:
return globals
class NameVisitor(ast.NodeVisitor):
def __init__(self, globals: Set) -> None:
self.globals = globals
self.blacklist: Optional[Set[str]] = None
self.parent: Optional[str] = None
def visit_Name(self, name: ast.Name) -> None:
blacklist = self.blacklist
if blacklist is not None and name.id in blacklist:
return
parent = self.parent
if parent is not None:
name_to_register = f"{parent}.__class__.{name.id}"
else:
name_to_register = name.id
self.globals.add(name_to_register)
# Ensure that we stop recursing when we're in a complex assign, such as
# a.b = ... or a[b] = ... .
def visit_Attribute(self, attribute: ast.Attribute) -> None:
return
def visit_Subscript(self, subscript: ast.Subscript) -> None:
return
visitor: NameVisitor = NameVisitor(globals)
def visit_assignment(target: ast.expr, value: ast.expr) -> None:
if value is not None:
# namedtuples get preprocessed out by Pyre, and shouldn't be added
# as globals.
if isinstance(value, ast.Call):
callee = value.func
if (
isinstance(callee, ast.Attribute)
and callee.attr == "namedtuple"
):
return
if isinstance(callee, ast.Name) and callee.id == "namedtuple":
return
# Omit pure aliases of the form `x = alias`.
if isinstance(value, ast.Name) or isinstance(value, ast.Attribute):
return
# x = lambda: _ can safely be avoided, as the models confuse our taint
# analysis.
if isinstance(value, ast.Lambda):
return
visitor.visit(target)
def should_visit_class(class_definition: ast.ClassDef) -> bool:
# Ensure that we don't visit nested classes for now.
if visitor.parent is not None:
return False
# TypedDicts use top-level attribute declarations to declare attributes.
for base in class_definition.bases:
base_name = None
if isinstance(base, ast.Name):
base_name = base.id
if isinstance(base, ast.Attribute):
base_name = base.attr
if base_name == "TypedDict":
return False
def is_dataclass_decorator(expression: ast.expr) -> bool:
if isinstance(expression, ast.Call):
return is_dataclass_decorator(expression.func)
if isinstance(expression, ast.Name):
return expression.id == "dataclass"
if isinstance(expression, ast.Attribute):
base = expression.value
if isinstance(base, ast.Name) and base.id == "dataclasses":
return expression.attr == "dataclass"
return False
for decorator in class_definition.decorator_list:
# Skip visiting dataclasses, as they use class variables to generate
# instance variables. They can have one of the following forms:
# @dataclass(args), @dataclass, or `@dataclasses.dataclass(args)`.
if is_dataclass_decorator(decorator):
return False
return True
def all_attributes(class_definition: ast.ClassDef) -> Set[str]:
attributes = set()
for statement in class_definition.body:
if not isinstance(statement, ast.FunctionDef):
continue
for assignment in statement.body:
if isinstance(assignment, ast.Assign):
for target in assignment.targets:
attribute = _get_self_attribute(target)
if attribute is not None:
attributes.add(attribute)
elif isinstance(assignment, ast.AnnAssign):
attribute = _get_self_attribute(assignment.target)
if attribute is not None:
attributes.add(attribute)
return attributes
def visit_statement(statement: ast.stmt) -> None:
if isinstance(statement, ast.Assign):
# Omit pure aliases of the form `x = alias`.
for target in statement.targets:
visit_assignment(target, statement.value)
elif isinstance(statement, ast.AugAssign):
visitor.visit(statement.target)
# Don't attempt to register statements of the form `x: int`.
elif isinstance(statement, ast.AnnAssign):
value = statement.value
if value is not None:
visit_assignment(statement.target, value)
elif isinstance(statement, ast.FunctionDef) or isinstance(
statement, ast.AsyncFunctionDef
):
for decorator in statement.decorator_list:
if _is_cached_property_decorator(decorator):
cached_properties.add((visitor.parent, statement))
elif isinstance(statement, ast.ClassDef) and should_visit_class(statement):
visitor.parent = statement.name
visitor.blacklist = all_attributes(statement)
for toplevel_statement in statement.body:
visit_statement(toplevel_statement)
visitor.parent = None
visitor.blacklist = None
for statement in module.body:
visit_statement(statement)
module_qualifier = qualifier(root, path)
models = set()
for target in globals:
if target == "__all__":
continue
qualified_target = f"{module_qualifier}.{target}"
if qualified_target in self.blacklisted_globals:
continue
try:
generated = AssignmentModel(
annotation="TaintSink[Global]", target=qualified_target
)
models.add(generated)
except ValueError:
pass
for (parent, function_definition) in cached_properties:
is_class_property = any(
(
_is_class_property_decorator(decorator)
for decorator in function_definition.decorator_list
)
)
if is_class_property:
returns = "TaintSink[Global, Via[cached_class_property]]"
else:
returns = "TaintSink[Global, Via[cached_property]]"
if parent is not None:
function_qualifier = f"{module_qualifier}.{parent}"
else:
function_qualifier = module_qualifier
try:
function_definition_model = FunctionDefinitionModel(
qualifier=function_qualifier,
definition=function_definition,
returns=returns,
)
models.add(function_definition_model)
except ValueError:
pass
return models
def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
return []
def compute_models(
self, functions_to_model: Iterable[Callable[..., object]]
) -> Iterable[Model]:
sinks: Set[Model] = set()
for path in find_all_paths(self.root):
relative_path = os.path.relpath(path, self.root)
should_skip = any(
(
relative_path.startswith(blacklisted)
for blacklisted in self.blacklisted_global_directories
)
)
if should_skip:
LOG.info("Skipping %s", os.path.relpath(path, self.root))
else:
sinks = sinks.union(self._globals(self.root, path))
stub_root = self.stub_root
if stub_root is not None:
stub_root = os.path.abspath(stub_root)
paths = glob.glob(stub_root + "/**/*.pyi", recursive=True)
for path in paths:
sinks = sinks.union(self._globals(stub_root, path))
return sinks
def _get_self_attribute(target: ast.expr) -> Optional[str]:
if isinstance(target, ast.Attribute):
value = target.value
if isinstance(value, ast.Name) and value.id == "self":
return target.attr
return None
def _is_cached_property_decorator(decorator: ast.expr) -> bool:
if isinstance(decorator, ast.Name):
name = decorator.id
elif isinstance(decorator, ast.Attribute):
name = decorator.attr
else:
name = None
if name is None:
return False
return "cached" in name and "property" in name
def _is_class_property_decorator(decorator: ast.expr) -> bool:
if isinstance(decorator, ast.Name):
name = decorator.id
elif isinstance(decorator, ast.Attribute):
name = decorator.attr
else:
name = None
if name is None:
return False
return "class" in name and "property" in name
| GlobalModelGenerator |
python | huggingface__transformers | tests/models/superpoint/test_modeling_superpoint.py | {
"start": 9483,
"end": 12547
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return AutoImageProcessor.from_pretrained("magic-leap-community/superpoint") if is_vision_available() else None
@slow
def test_inference(self):
model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint").to(torch_device)
preprocessor = self.default_image_processor
images = prepare_imgs()
inputs = preprocessor(images=images, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
expected_number_keypoints_image0 = 568
expected_number_keypoints_image1 = 830
expected_max_number_keypoints = max(expected_number_keypoints_image0, expected_number_keypoints_image1)
expected_keypoints_shape = torch.Size((len(images), expected_max_number_keypoints, 2))
expected_scores_shape = torch.Size(
(
len(images),
expected_max_number_keypoints,
)
)
expected_descriptors_shape = torch.Size((len(images), expected_max_number_keypoints, 256))
# Check output shapes
self.assertEqual(outputs.keypoints.shape, expected_keypoints_shape)
self.assertEqual(outputs.scores.shape, expected_scores_shape)
self.assertEqual(outputs.descriptors.shape, expected_descriptors_shape)
expected_keypoints_image0_values = torch.tensor([[0.75, 0.0188], [0.7719, 0.0188], [0.7641, 0.0333]]).to(
torch_device
)
expected_scores_image0_values = torch.tensor(
[0.0064, 0.0139, 0.0591, 0.0727, 0.5170, 0.0175, 0.1526, 0.2057, 0.0335]
).to(torch_device)
expected_descriptors_image0_value = torch.tensor(-0.1095).to(torch_device)
predicted_keypoints_image0_values = outputs.keypoints[0, :3]
predicted_scores_image0_values = outputs.scores[0, :9]
predicted_descriptors_image0_value = outputs.descriptors[0, 0, 0]
# Check output values
self.assertTrue(
torch.allclose(
predicted_keypoints_image0_values,
expected_keypoints_image0_values,
atol=1e-4,
)
)
torch.testing.assert_close(predicted_scores_image0_values, expected_scores_image0_values, rtol=1e-4, atol=1e-4)
self.assertTrue(
torch.allclose(
predicted_descriptors_image0_value,
expected_descriptors_image0_value,
atol=1e-4,
)
)
# Check mask values
self.assertTrue(outputs.mask[0, expected_number_keypoints_image0 - 1].item() == 1)
self.assertTrue(outputs.mask[0, expected_number_keypoints_image0].item() == 0)
self.assertTrue(torch.all(outputs.mask[0, : expected_number_keypoints_image0 - 1]))
self.assertTrue(torch.all(torch.logical_not(outputs.mask[0, expected_number_keypoints_image0:])))
self.assertTrue(torch.all(outputs.mask[1]))
| SuperPointModelIntegrationTest |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py | {
"start": 1921,
"end": 2882
} | class ____:
r"""
A dummy environment that implements the required subset of the OpenAI gym
interface. It exists only to avoid a dependency on gym for running the
tests in this file. It is designed to run for a set max number of iterations,
returning random states and rewards at each step.
"""
def __init__(self, state_dim=4, num_iters=10, reward_threshold=475.0):
self.state_dim = state_dim
self.num_iters = num_iters
self.iter = 0
self.reward_threshold = reward_threshold
def seed(self, manual_seed):
torch.manual_seed(manual_seed)
def reset(self):
self.iter = 0
return torch.randn(self.state_dim)
def step(self, action):
self.iter += 1
state = torch.randn(self.state_dim)
reward = torch.rand(1).item() * self.reward_threshold
done = self.iter >= self.num_iters
info = {}
return state, reward, done, info
| DummyEnv |
python | pytest-dev__pytest | testing/test_config.py | {
"start": 21959,
"end": 24250
} | class ____:
def test_parsing_again_fails(self, pytester: Pytester) -> None:
config = pytester.parseconfig()
pytest.raises(AssertionError, lambda: config.parse([]))
def test_explicitly_specified_config_file_is_loaded(
self, pytester: Pytester
) -> None:
pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("custom", "")
"""
)
pytester.makeini(
"""
[pytest]
custom = 0
"""
)
pytester.makefile(
".ini",
custom="""
[pytest]
custom = 1
""",
)
config = pytester.parseconfig("-c", "custom.ini")
assert config.getini("custom") == "1"
config = pytester.parseconfig("--config-file", "custom.ini")
assert config.getini("custom") == "1"
pytester.makefile(
".cfg",
custom_tool_pytest_section="""
[tool:pytest]
custom = 1
""",
)
config = pytester.parseconfig("-c", "custom_tool_pytest_section.cfg")
assert config.getini("custom") == "1"
config = pytester.parseconfig("--config-file", "custom_tool_pytest_section.cfg")
assert config.getini("custom") == "1"
pytester.makefile(
".toml",
custom="""
[tool.pytest.ini_options]
custom = 1
value = [
] # this is here on purpose, as it makes this an invalid '.ini' file
""",
)
config = pytester.parseconfig("-c", "custom.toml")
assert config.getini("custom") == "1"
config = pytester.parseconfig("--config-file", "custom.toml")
assert config.getini("custom") == "1"
def test_absolute_win32_path(self, pytester: Pytester) -> None:
temp_ini_file = pytester.makeini("[pytest]")
from os.path import normpath
temp_ini_file_norm = normpath(str(temp_ini_file))
ret = pytest.main(["-c", temp_ini_file_norm])
assert ret == ExitCode.NO_TESTS_COLLECTED
ret = pytest.main(["--config-file", temp_ini_file_norm])
assert ret == ExitCode.NO_TESTS_COLLECTED
| TestConfigCmdlineParsing |