language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-good-caption.py | {
"start": 57,
"end": 1293
} | class ____(object):
def minCostGoodCaption(self, caption):
"""
:type caption: str
:rtype: str
"""
L = 3
n = len(caption)
if n < L:
return ""
dp = [[[0]*2 for _ in xrange(26)] for _ in xrange(n-L+1)]
mn = [[0]*2 for _ in xrange(n-L+1)]
cap = map(lambda x: ord(x)-ord('a'), caption)
for i in reversed(xrange(n-L+1)):
for j in xrange(26):
if i == n-L:
dp[i][j][:] = [sum(abs(cap[k]-j) for k in xrange(i, i+L)), L]
continue
dp[i][j][:] = [dp[i+1][j][0]+abs(cap[i]-j), 1]
if i+L < n-2:
curr, c = mn[i+L]
curr += sum(abs(cap[k]-j) for k in xrange(i, i+L))
if curr < dp[i][j][0] or (curr == dp[i][j][0] and c < j):
dp[i][j][:] = [curr, L]
mn[i] = min([dp[i][j][0], j] for j in xrange(26))
result = []
i, j, l = 0, mn[0][1], 1
while i != n:
if l == L:
j = mn[i][1]
l = dp[i][j][1]
result.append(chr(ord('a')+j)*l)
i += l
return "".join(result)
| Solution |
python | google__pytype | pytype/tests/test_stdlib1.py | {
"start": 116,
"end": 7780
} | class ____(test_base.BaseTest):
"""Tests for files in typeshed/stdlib."""
def test_ast(self):
ty = self.Infer("""
import ast
def f():
return ast.parse("True")
""")
self.assertTypesMatchPytd(
ty,
"""
import ast
def f() -> _ast.Module: ...
""",
)
def test_urllib(self):
ty = self.Infer("""
import urllib
""")
self.assertTypesMatchPytd(
ty,
"""
import urllib
""",
)
def test_traceback(self):
ty = self.Infer("""
import traceback
def f(exc):
return traceback.format_exception(*exc)
""")
self.assertTypesMatchPytd(
ty,
"""
import traceback
from typing import List
def f(exc) -> List[str]: ...
""",
)
def test_os_walk(self):
ty = self.Infer("""
import os
x = list(os.walk("/tmp"))
""")
self.assertTypesMatchPytd(
ty,
"""
import os
from typing import List, Tuple
x = ... # type: List[Tuple[str, List[str], List[str]]]
""",
)
def test_struct(self):
ty = self.Infer("""
import struct
x = struct.Struct("b")
""")
self.assertTypesMatchPytd(
ty,
"""
import struct
x = ... # type: struct.Struct
""",
)
def test_warning(self):
ty = self.Infer("""
import warnings
""")
self.assertTypesMatchPytd(
ty,
"""
import warnings
""",
)
@test_utils.skipOnWin32("os.pathconf is not supported on Windows")
def test_path_conf(self):
self.Check("""
import os
max_len = os.pathconf('directory', 'name')
filename = 'foobar.baz'
r = len(filename) >= max_len - 1
""")
def test_environ(self):
self.Check("""
import os
os.getenv('foobar', 3j)
os.environ['hello'] = 'bar'
x = os.environ['hello']
y = os.environ.get(3.14, None)
z = os.environ.get(3.14, 3j)
del os.environ['hello']
""")
def test_stdlib(self):
self.Check("""
import re
s = "the quick brown fox jumps over the lazy dog"
word = re.compile(r"\\w*")
word.sub(lambda x: '<'+x.group(0)+'>', s)
""")
def test_namedtuple(self):
self.Check("""
import collections
collections.namedtuple(u"_", "")
collections.namedtuple("_", u"")
collections.namedtuple("_", [u"a", "b"])
""")
def test_defaultdict(self):
ty = self.Infer("""
import collections
a = collections.defaultdict(int, one = 1, two = 2)
b = collections.defaultdict(int, {'one': 1, 'two': 2})
c = collections.defaultdict(int, [('one', 1), ('two', 2)])
d = collections.defaultdict(int, {})
e = collections.defaultdict(int)
f = collections.defaultdict(default_factory = int)
""")
self.assertTypesMatchPytd(
ty,
"""
import collections
a = ... # type: collections.defaultdict[str, int]
b = ... # type: collections.defaultdict[str, int]
c = ... # type: collections.defaultdict[str, int]
d = ... # type: collections.defaultdict[nothing, int]
e = ... # type: collections.defaultdict[nothing, int]
f = ... # type: collections.defaultdict[nothing, int]
""",
)
def test_defaultdict_no_factory(self):
ty = self.Infer("""
import collections
a = collections.defaultdict()
b = collections.defaultdict(None)
c = collections.defaultdict(lambda: __any_object__)
d = collections.defaultdict(None, one = 1, two = 2)
e = collections.defaultdict(None, {'one': 1, 'two': 2})
f = collections.defaultdict(None, [('one', 1), ('two', 2)])
g = collections.defaultdict(one = 1, two = 2)
h = collections.defaultdict(default_factory = None)
""")
self.assertTypesMatchPytd(
ty,
"""
import collections
from typing import Any
a = ... # type: collections.defaultdict[nothing, nothing]
b = ... # type: collections.defaultdict[nothing, nothing]
c = ... # type: collections.defaultdict[nothing, Any]
d = ... # type: collections.defaultdict[str, int]
e = ... # type: collections.defaultdict[str, int]
f = ... # type: collections.defaultdict[str, int]
g = ... # type: collections.defaultdict[str, int]
h = ... # type: collections.defaultdict[nothing, nothing]
""",
)
def test_defaultdict_diff_defaults(self):
ty = self.Infer("""
import collections
a = collections.defaultdict(int, one = '1')
b = collections.defaultdict(str, one = 1)
c = collections.defaultdict(None, one = 1)
d = collections.defaultdict(int, {1: 'one'})
""")
self.assertTypesMatchPytd(
ty,
"""
import collections
from typing import Union
a = ... # type: collections.defaultdict[str, Union[int, str]]
b = ... # type: collections.defaultdict[str, Union[int, str]]
c = ... # type: collections.defaultdict[str, int]
d = ... # type: collections.defaultdict[int, Union[int, str]]
""",
)
def test_counter(self):
self.Check("""
import collections
x = collections.Counter()
y = collections.Counter()
(x + y).elements
(x - y).elements
(x & y).elements
(x | y).elements
""")
def test_range(self):
self.Check("""
import random
random.sample(range(10), 5)
""")
def test_xml(self):
self.Check("""
import xml.etree.cElementTree
xml.etree.cElementTree.SubElement
xml.etree.cElementTree.iterparse
""")
def test_csv(self):
self.Check("""
import _csv
import csv
""")
def test_future(self):
self.Check("""
import __future__
""")
def test_sys_version_info(self):
ty = self.Infer("""
import sys
major, minor, micro, releaselevel, serial = sys.version_info
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
major: int
minor: int
micro: int
releaselevel: str
serial: int
""",
)
def test_subprocess(self):
# Sanity check to make sure basic type-checking works in both py2 and py3.
# The subprocess module changed significantly between versions.
self.Check("""
import subprocess
def run(cmd):
proc = subprocess.Popen(cmd)
return proc.communicate()
""")
def test_subprocess_subclass(self):
self.Check("""
import subprocess
class Popen(subprocess.Popen):
def wait(self, *args, **kwargs):
return super(Popen, self).wait(*args, **kwargs)
""")
def test_subprocess_src_and_pyi(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
import subprocess
def f() -> subprocess.Popen: ...
""",
)
self.Check(
"""
import foo
import subprocess
def f():
p = foo.f()
return p.communicate()
def g():
p = subprocess.Popen(__any_object__)
return p.communicate()
""",
pythonpath=[d.path],
)
def test_namedtuple_from_counter(self):
self.Check("""
import collections
import six
Foo = collections.namedtuple('Foo', ('x', 'y'))
def foo(self):
c = collections.Counter()
return [Foo(*x) for x in six.iteritems(c)]
""")
def test_path(self):
self.Check("""
import pkgutil
__path__ = pkgutil.extend_path(__path__, '')
""")
if __name__ == "__main__":
test_base.main()
| StdlibTests |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_distinct_values.py | {
"start": 1133,
"end": 4209
} | class ____(ColumnAggregateMetricProvider):
metric_name = "column.distinct_values"
@column_aggregate_value(engine=PandasExecutionEngine) # type: ignore[misc] # untyped-decorator
def _pandas(cls, column: pd.Series, **kwargs) -> Set[Any]:
return set(column.unique())
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: Dict[str, str],
**kwargs,
) -> Set[Any]:
"""
Past implementations of column.distinct_values depended on column.value_counts.
This was causing performance issues due to the complex query used in column.value_counts and subsequent
in-memory operations.
""" # noqa: E501 # FIXME CoP
selectable: sqlalchemy.Selectable
accessor_domain_kwargs: Dict[str, str]
(
selectable,
_,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(metric_domain_kwargs, MetricDomainTypes.COLUMN)
column_name: str = accessor_domain_kwargs["column"]
column: sqlalchemy.ColumnClause = sa.column(column_name)
distinct_values: List[sqlalchemy.Row]
if hasattr(column, "is_not"):
distinct_values = execution_engine.execute_query( # type: ignore[assignment] # FIXME CoP
sa.select(column).where(column.is_not(None)).distinct().select_from(selectable) # type: ignore[arg-type] # FIXME CoP
).fetchall()
else:
distinct_values = execution_engine.execute_query( # type: ignore[assignment] # FIXME CoP
sa.select(column).where(column.isnot(None)).distinct().select_from(selectable) # type: ignore[arg-type] # FIXME CoP
).fetchall()
# Vectorized operation is not faster here due to overhead of converting to and from numpy array # noqa: E501 # FIXME CoP
return {row[0] for row in distinct_values}
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: Dict[str, str],
**kwargs,
) -> Set[Any]:
"""
Past implementations of column.distinct_values depended on column.value_counts.
This was causing performance issues due to the complex query used in column.value_counts and subsequent
in-memory operations.
""" # noqa: E501 # FIXME CoP
df: pyspark.DataFrame
accessor_domain_kwargs: Dict[str, str]
(
df,
_,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(metric_domain_kwargs, MetricDomainTypes.COLUMN)
column_name: str = accessor_domain_kwargs["column"]
distinct_values: List[pyspark.Row] = (
df.select(F.col(column_name))
.distinct()
.where(F.col(column_name).isNotNull())
.rdd.flatMap(lambda x: x)
.collect()
)
return set(distinct_values)
| ColumnDistinctValues |
python | astropy__astropy | astropy/coordinates/representation/spherical.py | {
"start": 57094,
"end": 60141
} | class ____(BaseDifferential):
"""Differential(s) of 3D spherical coordinates using physics convention.
Parameters
----------
d_phi, d_theta : `~astropy.units.Quantity`
The differential azimuth and inclination.
d_r : `~astropy.units.Quantity`
The differential radial distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = PhysicsSphericalRepresentation
def __init__(self, d_phi, d_theta=None, d_r=None, copy=True):
super().__init__(d_phi, d_theta, d_r, copy=copy)
if not self._d_phi.unit.is_equivalent(self._d_theta.unit):
raise u.UnitsError("d_phi and d_theta should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude. For those, explicitly
# do the equivalent of self._d_lon_coslat in SphericalDifferential.
if issubclass(other_class, SphericalDifferential):
return other_class(self.d_phi, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_phi, -self.d_theta)
elif issubclass(other_class, SphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_r)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat. For that case,
# do the equivalent of cls._d_lon in SphericalDifferential.
if isinstance(representation, SphericalDifferential):
return cls(
representation.d_lon, -representation.d_lat, representation.d_distance
)
elif isinstance(representation, SphericalCosLatDifferential):
cls._check_base(base)
d_phi = representation.d_lon_coslat / np.sin(base.theta)
return cls(d_phi, -representation.d_lat, representation.d_distance)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(self.d_phi, self.d_theta, op(self.d_r, *args))
else:
return super()._scale_operation(op, *args)
| PhysicsSphericalDifferential |
python | kamyu104__LeetCode-Solutions | Python/total-waviness-of-numbers-in-range-ii.py | {
"start": 5082,
"end": 7369
} | class ____(object):
def totalWaviness(self, num1, num2):
"""
:type num1: int
:type num2: int
:rtype: int
"""
def count(x):
def encode(prev, prev2, zero, tight):
key = 0
key = key*(10+1)+(prev+1)
key = key*(10+1)+(prev2+1)
key = key*2+(1 if zero else 0)
key = key*2+(1 if tight else 0)
return key
s = str(x)
state_size = (10+1)*(10+1)*2*2
dp = [None]*state_size
for prev in xrange(-1, 10):
for prev2 in xrange(-1, 10):
for zero in xrange(2):
for tight in xrange(2):
key = encode(prev, prev2, zero, tight)
dp[key] = (1, 0)
for i in reversed(xrange(len(s))):
new_dp = [None]*state_size
for prev in xrange(-1, 10):
for prev2 in xrange(-1, 10):
for zero in xrange(2):
for tight in xrange(2):
cnt = w = 0
mx = int(s[i]) if tight else 9
for d in xrange(mx+1):
new_tight = tight and (d == int(s[i]))
new_zero = zero and (d == 0)
new_prev2 = prev
new_prev = d if not new_zero else -1
key = encode(new_prev, new_prev2, new_zero, new_tight)
if dp[key] is not None:
new_cnt, nw = dp[key]
cnt += new_cnt
if not zero and prev2 != -1 and ((prev2 < prev and prev > d) or (prev2 > prev and prev < d)):
w += new_cnt
w += nw
new_dp[encode(prev, prev2, zero, tight)] = (cnt, w)
dp, new_dp = new_dp, dp
return dp[encode(-1, -1, True, True)][1]
return count(num2)-count(num1-1)
| Solution4 |
python | fsspec__filesystem_spec | fsspec/implementations/tests/test_archive.py | {
"start": 5985,
"end": 13138
} | class ____:
"""
Validate that all filesystem adapter implementations for archive files
will adhere to the same specification.
"""
scenarios = [
scenario_zip,
scenario_tar,
scenario_targz,
scenario_tarbz2,
scenario_tarxz,
scenario_libarchive,
]
def test_repr(self, scenario: ArchiveTestScenario):
with scenario.provider() as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
assert repr(fs).startswith("<Archive-like object")
def test_empty(self, scenario: ArchiveTestScenario):
with scenario.provider() as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
assert fs.find("") == []
assert fs.find("", withdirs=True) == []
with pytest.raises(FileNotFoundError):
fs.info("")
assert fs.ls("") == []
def test_glob(self, scenario: ArchiveTestScenario):
with scenario.provider(archive_data) as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
assert fs.glob("*/*/*th") == ["deeply/nested/path"]
def test_mapping(self, scenario: ArchiveTestScenario):
with scenario.provider(archive_data) as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
m = fs.get_mapper()
assert list(m) == ["a", "b", "deeply/nested/path"]
assert m["b"] == archive_data["b"]
def test_pickle(self, scenario: ArchiveTestScenario):
with scenario.provider(archive_data) as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
fs2 = pickle.loads(pickle.dumps(fs))
assert fs2.cat("b") == b"hello"
def test_all_dirnames(self, scenario: ArchiveTestScenario):
with scenario.provider(archive_data) as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
# fx are files, dx are a directories
assert fs._all_dirnames([]) == set()
assert fs._all_dirnames(["f1"]) == set()
assert fs._all_dirnames(["f1", "f2"]) == set()
assert fs._all_dirnames(["f1", "f2", "d1/f1"]) == {"d1"}
assert fs._all_dirnames(["f1", "d1/f1", "d1/f2"]) == {"d1"}
assert fs._all_dirnames(["f1", "d1/f1", "d2/f1"]) == {"d1", "d2"}
assert fs._all_dirnames(["d1/d1/d1/f1"]) == {"d1", "d1/d1", "d1/d1/d1"}
def test_ls(self, scenario: ArchiveTestScenario):
with scenario.provider(archive_data) as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
assert fs.ls("", detail=False) == ["a", "b", "deeply"]
assert fs.ls("/") == fs.ls("")
assert fs.ls("deeply", detail=False) == ["deeply/nested"]
assert fs.ls("deeply/") == fs.ls("deeply")
assert fs.ls("deeply/nested", detail=False) == ["deeply/nested/path"]
assert fs.ls("deeply/nested/") == fs.ls("deeply/nested")
def test_find(self, scenario: ArchiveTestScenario):
with scenario.provider(archive_data) as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
assert fs.find("") == ["a", "b", "deeply/nested/path"]
assert fs.find("", withdirs=True) == [
"a",
"b",
"deeply",
"deeply/nested",
"deeply/nested/path",
]
assert fs.find("deeply") == ["deeply/nested/path"]
assert fs.find("deeply/") == fs.find("deeply")
@pytest.mark.parametrize("topdown", [True, False])
@pytest.mark.parametrize("prune_nested", [True, False])
def test_walk(self, scenario: ArchiveTestScenario, topdown, prune_nested):
with scenario.provider(archive_data) as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
expected = [
# (dirname, list of subdirs, list of files)
("", ["deeply"], ["a", "b"]),
("deeply", ["nested"], []),
]
if not topdown or not prune_nested:
expected.append(("deeply/nested", [], ["path"]))
if not topdown:
expected.reverse()
result = []
for path, dirs, files in fs.walk("", topdown=topdown):
result.append((path, dirs.copy(), files))
# Bypass the "nested" dir
if prune_nested and "nested" in dirs:
dirs.remove("nested")
# prior py3.10 zip() does not support strict=True, we need
# a manual len check here
assert len(result) == len(expected)
for lhs, rhs in zip(result, expected):
assert lhs[0] == rhs[0]
assert sorted(lhs[1]) == sorted(rhs[1])
assert sorted(lhs[2]) == sorted(rhs[2])
def test_info(self, scenario: ArchiveTestScenario):
# https://github.com/Suor/funcy/blob/1.15/funcy/colls.py#L243-L245
def project(mapping, keys):
"""Leaves only given keys in mapping."""
return {k: mapping[k] for k in keys if k in mapping}
with scenario.provider(archive_data) as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
with pytest.raises(FileNotFoundError):
fs.info("i-do-not-exist")
# Iterate over all directories.
for d in fs._all_dirnames(archive_data.keys()):
lhs = project(fs.info(d), ["name", "size", "type"])
expected = {"name": f"{d}", "size": 0, "type": "directory"}
assert lhs == expected
# Iterate over all files.
for f, v in archive_data.items():
lhs = fs.info(f)
assert lhs["name"] == f
assert lhs["size"] == len(v)
assert lhs["type"] == "file"
@pytest.mark.parametrize("scale", [128, 512, 4096])
def test_isdir_isfile(self, scenario: ArchiveTestScenario, scale: int):
def make_nested_dir(i):
x = f"{i}"
table = x.maketrans("0123456789", "ABCDEFGHIJ")
return "/".join(x.translate(table))
scaled_data = {f"{make_nested_dir(i)}/{i}": b"" for i in range(1, scale + 1)}
with scenario.provider(scaled_data) as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
lhs_dirs, lhs_files = (
fs._all_dirnames(scaled_data.keys()),
scaled_data.keys(),
)
# Warm-up the Cache, this is done in both cases anyways...
fs._get_dirs()
entries = lhs_files | lhs_dirs
assert lhs_dirs == {e for e in entries if fs.isdir(e)}
assert lhs_files == {e for e in entries if fs.isfile(e)}
def test_read_empty_file(self, scenario: ArchiveTestScenario):
with scenario.provider(archive_data) as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
assert fs.open("a").read() == b""
| TestAnyArchive |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 10107,
"end": 10186
} | class ____(MixinForBoringModel, CustomBoringModel):
pass
| BoringModelWithMixin |
python | readthedocs__readthedocs.org | readthedocs/gold/views.py | {
"start": 3591,
"end": 5366
} | class ____(GenericView):
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
try:
user = request.user
schema = "https" if settings.PUBLIC_DOMAIN_USES_HTTPS else "http"
url = reverse_lazy("gold_detail")
url = f"{schema}://{settings.PRODUCTION_DOMAIN}{url}"
price = json.loads(request.body).get("priceId")
log.info(
"Creating Stripe Checkout Session.",
user_username=user.username,
price=price,
)
stripe_client = get_stripe_client()
checkout_session = stripe_client.checkout.sessions.create(
params={
"client_reference_id": user.username,
"customer_email": user.emailaddress_set.filter(verified=True).first()
or user.email,
"payment_method_types": ["card"],
"line_items": [
{
"price": price,
"quantity": 1,
}
],
"mode": "subscription",
# We use the same URL to redirect the user. We only show a different notification.
"success_url": f"{url}?subscribed=true",
"cancel_url": f"{url}?subscribed=false",
}
)
return JsonResponse({"session_id": checkout_session.id})
except: # noqa
log.exception("There was an error connecting to Stripe.")
return JsonResponse(
{"error": "There was an error connecting to Stripe."},
status=500,
)
| GoldCreateCheckoutSession |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/sensor.py | {
"start": 508,
"end": 868
} | class ____(BaseModel):
"""Single sensor metadata model."""
id: str
name: str
status: DgApiSensorStatus
sensor_type: DgApiSensorType
description: Optional[str] = None
repository_origin: Optional[str] = None
next_tick_timestamp: Optional[float] = None # Unix timestamp
class Config:
from_attributes = True
| DgApiSensor |
python | realpython__materials | python-built-in-functions/processors.py | {
"start": 522,
"end": 870
} | class ____:
def __init__(self, filename):
self.filename = filename
def read(self):
with open(self.filename, encoding="utf-8") as file:
return json.load(file)
def write(self, data):
with open(self.filename, mode="w", encoding="utf-8") as file:
json.dump(data, file, indent=2)
| JSONProcessor |
python | numpy__numpy | numpy/random/tests/test_generator_mt19937.py | {
"start": 83886,
"end": 108348
} | class ____:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437, 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean, sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
    def test_rayleigh(self):
        """rayleigh broadcasts a list-valued scale; negative scale raises ValueError."""
        scale = [1]
        bad_scale = [-1]
        desired = np.array(
            [1.1597068009872629,
             0.6539188836253857,
             1.1981526554349398]
        )
        random = Generator(MT19937(self.seed))
        actual = random.rayleigh(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.rayleigh, bad_scale * 3)
    def test_wald(self):
        """wald broadcasts over mean and over scale; non-positive mean/scale raises."""
        mean = [0.5]
        scale = [1]
        bad_mean = [0]
        bad_scale = [-2]
        desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
        random = Generator(MT19937(self.seed))
        actual = random.wald(mean * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.wald, bad_mean * 3, scale)
        assert_raises(ValueError, random.wald, mean * 3, bad_scale)
        random = Generator(MT19937(self.seed))
        actual = random.wald(mean, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.wald, bad_mean, scale * 3)
        assert_raises(ValueError, random.wald, mean, bad_scale * 3)
    def test_triangular(self):
        """triangular broadcasts each of left/mode/right; invalid orderings raise ValueError."""
        left = [1]
        right = [3]
        mode = [2]
        bad_left_one = [3]
        bad_mode_one = [4]
        # Both "two" variants equal `right` (3), violating left < mode and mode <= right.
        bad_left_two, bad_mode_two = right * 2
        desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
        # Broadcast left.
        random = Generator(MT19937(self.seed))
        triangular = random.triangular
        actual = triangular(left * 3, mode, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
        assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
        assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
                      right)
        # Broadcast mode.
        random = Generator(MT19937(self.seed))
        triangular = random.triangular
        actual = triangular(left, mode * 3, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
        assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
                      right)
        # Broadcast right.
        random = Generator(MT19937(self.seed))
        triangular = random.triangular
        actual = triangular(left, mode, right * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
        assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
                      right * 3)
        # Scalar invalid orderings, including the fully degenerate left == mode == right.
        assert_raises(ValueError, triangular, 10., 0., 20.)
        assert_raises(ValueError, triangular, 10., 25., 20.)
        assert_raises(ValueError, triangular, 10., 10., 10.)
    def test_binomial(self):
        """binomial broadcasts over n and over p; negative n or p outside [0, 1] raises."""
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        desired = np.array([0, 0, 1])
        random = Generator(MT19937(self.seed))
        binom = random.binomial
        actual = binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n * 3, p)
        assert_raises(ValueError, binom, n * 3, bad_p_one)
        assert_raises(ValueError, binom, n * 3, bad_p_two)
        random = Generator(MT19937(self.seed))
        actual = random.binomial(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n, p * 3)
        assert_raises(ValueError, binom, n, bad_p_one * 3)
        assert_raises(ValueError, binom, n, bad_p_two * 3)
    def test_negative_binomial(self):
        """negative_binomial broadcasts over n and over p; invalid n or p raises."""
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        desired = np.array([0, 2, 1], dtype=np.int64)
        random = Generator(MT19937(self.seed))
        neg_binom = random.negative_binomial
        actual = neg_binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n * 3, p)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
        random = Generator(MT19937(self.seed))
        neg_binom = random.negative_binomial
        actual = neg_binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n, p * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
    def test_poisson(self):
        """poisson broadcasts lam; negative lam or lam above _poisson_lam_max raises."""
        lam = [1]
        bad_lam_one = [-1]
        desired = np.array([0, 0, 3])
        random = Generator(MT19937(self.seed))
        # Upper bound is an implementation limit exposed on the generator.
        max_lam = random._poisson_lam_max
        bad_lam_two = [max_lam * 2]
        poisson = random.poisson
        actual = poisson(lam * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, poisson, bad_lam_one * 3)
        assert_raises(ValueError, poisson, bad_lam_two * 3)
    def test_zipf(self):
        """zipf broadcasts a; a <= 1 raises, and NaN a raises (scalar or element-wise)."""
        a = [2]
        bad_a = [0]
        desired = np.array([1, 8, 1])
        random = Generator(MT19937(self.seed))
        zipf = random.zipf
        actual = zipf(a * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, zipf, bad_a * 3)
        # NaN comparisons emit invalid-value warnings; silence them for the check.
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, zipf, np.nan)
            assert_raises(ValueError, zipf, [0, 0, np.nan])
    def test_geometric(self):
        """geometric broadcasts p; p outside (0, 1] raises ValueError."""
        p = [0.5]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        desired = np.array([1, 1, 3])
        random = Generator(MT19937(self.seed))
        geometric = random.geometric
        actual = geometric(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, geometric, bad_p_one * 3)
        assert_raises(ValueError, geometric, bad_p_two * 3)
    def test_hypergeometric(self):
        """hypergeometric broadcasts ngood/nbad/nsample; invalid or oversized args raise."""
        ngood = [1]
        nbad = [2]
        nsample = [2]
        bad_ngood = [-1]
        bad_nbad = [-2]
        bad_nsample_one = [-1]
        bad_nsample_two = [4]
        desired = np.array([0, 0, 1])
        # Broadcast ngood.
        random = Generator(MT19937(self.seed))
        actual = random.hypergeometric(ngood * 3, nbad, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
        assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
        assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)  # noqa: E501
        assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)  # noqa: E501
        # Broadcast nbad.
        random = Generator(MT19937(self.seed))
        actual = random.hypergeometric(ngood, nbad * 3, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
        assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
        assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)  # noqa: E501
        assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)  # noqa: E501
        # Broadcast nsample.
        random = Generator(MT19937(self.seed))
        hypergeom = random.hypergeometric
        actual = hypergeom(ngood, nbad, nsample * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
        # Scalar invalid arguments.
        assert_raises(ValueError, hypergeom, -1, 10, 20)
        assert_raises(ValueError, hypergeom, 10, -1, 20)
        assert_raises(ValueError, hypergeom, 10, 10, -1)
        assert_raises(ValueError, hypergeom, 10, 10, 25)
        # ValueError for arguments that are too big.
        assert_raises(ValueError, hypergeom, 2**30, 10, 20)
        assert_raises(ValueError, hypergeom, 999, 2**31, 50)
        assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
    def test_logseries(self):
        """logseries broadcasts p; p outside (0, 1) raises ValueError."""
        p = [0.5]
        bad_p_one = [2]
        bad_p_two = [-1]
        desired = np.array([1, 1, 1])
        random = Generator(MT19937(self.seed))
        logseries = random.logseries
        actual = logseries(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, logseries, bad_p_one * 3)
        assert_raises(ValueError, logseries, bad_p_two * 3)
    def test_multinomial(self):
        """multinomial broadcasts array-valued n against pvals, with and without size."""
        # Explicit size on top of broadcast n.
        random = Generator(MT19937(self.seed))
        actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
        desired = np.array([[[0, 0, 2, 1, 2, 0],
                             [2, 3, 6, 4, 2, 3]],
                            [[1, 0, 1, 0, 2, 1],
                             [7, 2, 2, 1, 4, 4]],
                            [[0, 2, 0, 1, 2, 0],
                             [3, 2, 3, 3, 4, 5]]], dtype=np.int64)
        assert_array_equal(actual, desired)
        # Shape inferred from n alone.
        random = Generator(MT19937(self.seed))
        actual = random.multinomial([5, 20], [1 / 6.] * 6)
        desired = np.array([[0, 0, 2, 1, 2, 0],
                            [2, 3, 6, 4, 2, 3]], dtype=np.int64)
        assert_array_equal(actual, desired)
        # 2-D pvals broadcast elementwise against n; same stream, same result.
        random = Generator(MT19937(self.seed))
        actual = random.multinomial([5, 20], [[1 / 6.] * 6] * 2)
        desired = np.array([[0, 0, 2, 1, 2, 0],
                            [2, 3, 6, 4, 2, 3]], dtype=np.int64)
        assert_array_equal(actual, desired)
        # Column-vector n broadcasts against row of pvals -> 2x2 draws.
        random = Generator(MT19937(self.seed))
        actual = random.multinomial([[5], [20]], [[1 / 6.] * 6] * 2)
        desired = np.array([[[0, 0, 2, 1, 2, 0],
                             [0, 0, 2, 1, 1, 1]],
                            [[4, 2, 3, 3, 5, 3],
                             [7, 2, 2, 1, 4, 4]]], dtype=np.int64)
        assert_array_equal(actual, desired)
    @pytest.mark.parametrize("n", [10,
                                   np.array([10, 10]),
                                   np.array([[[10]], [[10]]])
                                   ]
                             )
    def test_multinomial_pval_broadcast(self, n):
        """Output shape is broadcast(n.shape, pvals.shape[:-1]) + (num_categories,)."""
        random = Generator(MT19937(self.seed))
        pvals = np.array([1 / 4] * 4)
        actual = random.multinomial(n, pvals)
        n_shape = () if isinstance(n, int) else n.shape
        expected_shape = n_shape + (4,)
        assert actual.shape == expected_shape
        # 2-D pvals: broadcast against n.
        pvals = np.vstack([pvals, pvals])
        actual = random.multinomial(n, pvals)
        expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1]) + (4,)
        assert actual.shape == expected_shape
        # 3-D pvals: broadcast again.
        pvals = np.vstack([[pvals], [pvals]])
        actual = random.multinomial(n, pvals)
        expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1])
        assert actual.shape == expected_shape + (4,)
        # Explicit size must be a superset of the broadcast shape.
        actual = random.multinomial(n, pvals, size=(3, 2) + expected_shape)
        assert actual.shape == (3, 2) + expected_shape + (4,)
        with pytest.raises(ValueError):
            # Ensure that size is not broadcast
            actual = random.multinomial(n, pvals, size=(1,) * 6)
def test_invalid_pvals_broadcast(self):
random = Generator(MT19937(self.seed))
pvals = [[1 / 6] * 6, [1 / 4] * 6]
assert_raises(ValueError, random.multinomial, 1, pvals)
assert_raises(ValueError, random.multinomial, 6, 0.5)
    def test_empty_outputs(self):
        """Zero-length n or pvals arrays yield empty outputs of the broadcast shape."""
        random = Generator(MT19937(self.seed))
        actual = random.multinomial(np.empty((10, 0, 6), "i8"), [1 / 6] * 6)
        assert actual.shape == (10, 0, 6, 6)
        actual = random.multinomial(12, np.empty((10, 0, 10)))
        assert actual.shape == (10, 0, 10)
        actual = random.multinomial(np.empty((3, 0, 7), "i8"),
                                    np.empty((3, 0, 7, 4)))
        assert actual.shape == (3, 0, 7, 4)
@pytest.mark.skipif(IS_WASM, reason="can't start thread")
| TestBroadcast |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 23763,
"end": 23969
class ____(Dataset):
    """Dataset whose __getitem__ deliberately crashes the process.

    Used to exercise DataLoader worker-crash handling: reading any index
    dereferences address 0 via ctypes and segfaults the worker.
    """
    def __init__(self, size):
        # Number of (never successfully readable) items.
        self.size = size
    def __getitem__(self, idx):
        # ctypes.string_at(0) reads from the NULL address -> hard crash on purpose.
        return ctypes.string_at(0)
    def __len__(self):
        return self.size
python | huggingface__transformers | tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py | {
"start": 1166,
"end": 4116
class ____:
    """Helper that builds VitPoseBackbone configs and dummy inputs for model tests.

    Holds the hyperparameters of a tiny model and exposes
    ``prepare_config_and_inputs*`` helpers used by the test case class.
    """
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[16 * 8, 12 * 8],
        patch_size=[8, 8],
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        # in VitPoseBackbone, the seq length equals the number of patches
        num_patches = (image_size[0] // patch_size[0]) * (image_size[1] // patch_size[1])
        self.seq_length = num_patches
    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """Build a VitPoseBackboneConfig from the tester's hyperparameters."""
        return VitPoseBackboneConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            num_labels=self.num_labels,
        )
    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
| VitPoseBackboneModelTester |
python | google__jax | tests/pallas/gpu_ops_test.py | {
"start": 10837,
"end": 12433
class ____(PallasBaseTest):
    """GPU-only checks that the Pallas fused layer-norm matches the reference impl."""
    def setUp(self):
        super().setUp()
        # Pallas layer_norm kernels here are written for GPU; skip elsewhere.
        if jtu.test_device_matches(["cpu", "tpu"]):
            self.skipTest("Works only on GPU")
    @parameterized.parameters(*[
        (1, 384, 192),
        (2, 384, 192),
    ])
    def test_fused_layernorm_fwd(self, batch_size, seq_len, embed_dim):
        """Forward output of the fused kernel matches the reference to 1e-5."""
        k1, k2, k3 = random.split(random.key(0), 3)
        x = random.normal(k1, (batch_size, seq_len, embed_dim), dtype=jnp.float32)
        w = jax.random.normal(k2, (embed_dim,), dtype=jnp.float32)
        b = jax.random.normal(k3, (embed_dim,), dtype=jnp.float32)
        o = layer_norm.layer_norm(x, w, b)
        o_ref = layer_norm.layer_norm_reference(x, w, b)
        np.testing.assert_allclose(o, o_ref, atol=1e-5)
    @parameterized.parameters(*[
        (1, 384, 192),
        (2, 384, 192),
    ])
    def test_fused_layernorm_bwd(self, batch_size, seq_len, embed_dim):
        """Gradients w.r.t. x, w, b match the reference (looser tolerance for w/b)."""
        k1, k2, k3 = random.split(random.key(0), 3)
        x = random.normal(k1, (batch_size, seq_len, embed_dim), dtype=jnp.float32)
        w = jax.random.normal(k2, (embed_dim,), dtype=jnp.float32)
        b = jax.random.normal(k3, (embed_dim,), dtype=jnp.float32)
        def f(x, w, b):
            return layer_norm.layer_norm(x, w, b).sum()
        def f_ref(x, w, b):
            return layer_norm.layer_norm_reference(x, w, b).sum()
        dx, dw, db = jax.grad(f, argnums=(0, 1, 2))(x, w, b)
        dx_ref, dw_ref, db_ref = jax.grad(f_ref, argnums=(0, 1, 2))(x, w, b)
        np.testing.assert_allclose(dx, dx_ref, rtol=1e-6, atol=1e-6)
        # dw/db accumulate over batch*seq, so tolerances are relaxed.
        np.testing.assert_allclose(dw, dw_ref, rtol=1e-2, atol=1e-2)
        np.testing.assert_allclose(db, db_ref, rtol=1e-2, atol=1e-2)
python | django__django | django/contrib/syndication/views.py | {
"start": 1011,
"end": 9371
class ____:
    """Base class for syndication feed views.

    Subclasses define attributes/methods (``items``, ``title``, ``link``,
    ``item_*`` hooks, ...) which are resolved dynamically via
    ``_get_dynamic_attr`` and rendered through a feedgenerator class.
    """
    feed_type = feedgenerator.DefaultFeed
    title_template = None
    description_template = None
    language = None
    def __call__(self, request, *args, **kwargs):
        """Render the feed as an HttpResponse; 404 if get_object raises DoesNotExist."""
        try:
            obj = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            raise Http404("Feed object does not exist.")
        feedgen = self.get_feed(obj, request)
        response = HttpResponse(content_type=feedgen.content_type)
        if hasattr(self, "item_pubdate") or hasattr(self, "item_updateddate"):
            # if item_pubdate or item_updateddate is defined for the feed, set
            # header so as ConditionalGetMiddleware can send 304 NOT MODIFIED.
            response.headers["Last-Modified"] = http_date(
                feedgen.latest_post_date().timestamp()
            )
        feedgen.write(response, "utf-8")
        return response
    def item_title(self, item):
        # Titles should be double escaped by default (see #6533)
        return escape(str(item))
    def item_description(self, item):
        # Default description is just the item's string representation.
        return str(item)
    def item_link(self, item):
        """Default item link: the item's get_absolute_url(); clear error otherwise."""
        try:
            return item.get_absolute_url()
        except AttributeError:
            raise ImproperlyConfigured(
                "Give your %s class a get_absolute_url() method, or define an "
                "item_link() method in your Feed class." % item.__class__.__name__
            )
    def item_enclosures(self, item):
        """Build a single-enclosure list from item_enclosure_* hooks, if a URL is set."""
        enc_url = self._get_dynamic_attr("item_enclosure_url", item)
        if enc_url:
            enc = feedgenerator.Enclosure(
                url=str(enc_url),
                length=str(self._get_dynamic_attr("item_enclosure_length", item)),
                mime_type=str(self._get_dynamic_attr("item_enclosure_mime_type", item)),
            )
            return [enc]
        return []
    def _get_dynamic_attr(self, attname, obj, default=None):
        """Resolve ``attname`` as attribute, 1-arg callable, or 0-arg callable."""
        try:
            attr = getattr(self, attname)
        except AttributeError:
            return default
        if callable(attr):
            # Check co_argcount rather than try/excepting the function and
            # catching the TypeError, because something inside the function
            # may raise the TypeError. This technique is more accurate.
            func = unwrap(attr)
            try:
                code = func.__code__
            except AttributeError:
                func = unwrap(attr.__call__)
                code = func.__code__
            # If function doesn't have arguments and it is not a static method,
            # it was decorated without using @functools.wraps.
            if not code.co_argcount and not isinstance(
                getattr_static(self, func.__name__, None), staticmethod
            ):
                raise ImproperlyConfigured(
                    f"Feed method {attname!r} decorated by {func.__name__!r} needs to "
                    f"use @functools.wraps."
                )
            if code.co_argcount == 2:  # one argument is 'self'
                return attr(obj)
            else:
                return attr()
        return attr
    def feed_extra_kwargs(self, obj):
        """
        Return an extra keyword arguments dictionary that is used when
        initializing the feed generator.
        """
        return {}
    def item_extra_kwargs(self, item):
        """
        Return an extra keyword arguments dictionary that is used with
        the `add_item` call of the feed generator.
        """
        return {}
    def get_object(self, request, *args, **kwargs):
        # Hook for subclasses; default feeds are not tied to a particular object.
        return None
    def get_context_data(self, **kwargs):
        """
        Return a dictionary to use as extra context if either
        ``self.description_template`` or ``self.item_template`` are used.
        Default implementation preserves the old behavior
        of using {'obj': item, 'site': current_site} as the context.
        """
        return {"obj": kwargs.get("item"), "site": kwargs.get("site")}
    def get_feed(self, obj, request):
        """
        Return a feedgenerator.DefaultFeed object, fully populated, for
        this feed. Raise FeedDoesNotExist for invalid parameters.
        """
        current_site = get_current_site(request)
        link = self._get_dynamic_attr("link", obj)
        link = add_domain(current_site.domain, link, request.is_secure())
        feed = self.feed_type(
            title=self._get_dynamic_attr("title", obj),
            subtitle=self._get_dynamic_attr("subtitle", obj),
            link=link,
            description=self._get_dynamic_attr("description", obj),
            language=self.language or get_language(),
            feed_url=add_domain(
                current_site.domain,
                self._get_dynamic_attr("feed_url", obj) or request.path,
                request.is_secure(),
            ),
            author_name=self._get_dynamic_attr("author_name", obj),
            author_link=self._get_dynamic_attr("author_link", obj),
            author_email=self._get_dynamic_attr("author_email", obj),
            categories=self._get_dynamic_attr("categories", obj),
            feed_copyright=self._get_dynamic_attr("feed_copyright", obj),
            feed_guid=self._get_dynamic_attr("feed_guid", obj),
            ttl=self._get_dynamic_attr("ttl", obj),
            stylesheets=self._get_dynamic_attr("stylesheets", obj),
            **self.feed_extra_kwargs(obj),
        )
        # Missing templates are tolerated; fall back to item_title/item_description.
        title_tmp = None
        if self.title_template is not None:
            try:
                title_tmp = loader.get_template(self.title_template)
            except TemplateDoesNotExist:
                pass
        description_tmp = None
        if self.description_template is not None:
            try:
                description_tmp = loader.get_template(self.description_template)
            except TemplateDoesNotExist:
                pass
        for item in self._get_dynamic_attr("items", obj):
            context = self.get_context_data(
                item=item, site=current_site, obj=obj, request=request
            )
            if title_tmp is not None:
                title = title_tmp.render(context, request)
            else:
                title = self._get_dynamic_attr("item_title", item)
            if description_tmp is not None:
                description = description_tmp.render(context, request)
            else:
                description = self._get_dynamic_attr("item_description", item)
            link = add_domain(
                current_site.domain,
                self._get_dynamic_attr("item_link", item),
                request.is_secure(),
            )
            enclosures = self._get_dynamic_attr("item_enclosures", item)
            author_name = self._get_dynamic_attr("item_author_name", item)
            if author_name is not None:
                author_email = self._get_dynamic_attr("item_author_email", item)
                author_link = self._get_dynamic_attr("item_author_link", item)
            else:
                author_email = author_link = None
            # Naive datetimes are interpreted in the project's default timezone.
            tz = get_default_timezone()
            pubdate = self._get_dynamic_attr("item_pubdate", item)
            if pubdate and is_naive(pubdate):
                pubdate = make_aware(pubdate, tz)
            updateddate = self._get_dynamic_attr("item_updateddate", item)
            if updateddate and is_naive(updateddate):
                updateddate = make_aware(updateddate, tz)
            feed.add_item(
                title=title,
                link=link,
                description=description,
                unique_id=self._get_dynamic_attr("item_guid", item, link),
                unique_id_is_permalink=self._get_dynamic_attr(
                    "item_guid_is_permalink", item
                ),
                enclosures=enclosures,
                pubdate=pubdate,
                updateddate=updateddate,
                author_name=author_name,
                author_email=author_email,
                author_link=author_link,
                comments=self._get_dynamic_attr("item_comments", item),
                categories=self._get_dynamic_attr("item_categories", item),
                item_copyright=self._get_dynamic_attr("item_copyright", item),
                **self.item_extra_kwargs(item),
            )
        return feed
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/declarative_automation/serialized_objects.py | {
"start": 1734,
"end": 4392
class ____:
    """Serializable indicator that this value was an AllPartitionsSubset at serialization time.

    The concrete partition keys are deliberately not recorded, so the set of
    partitions may have changed between serialization and deserialization.
    """
def _get_maybe_compressed_dynamic_partitions_subset(
    subset: EntitySubset,
) -> SerializableEntitySubset:
    """Serialize *subset*, compressing dynamic-partition subsets to key ranges."""
    # for DefaultPartitionsSubset on a DynamicPartitionsDefinition, we convert this to a
    # KeyRangesPartitionsSubset. this is technically a lossy conversion as it's possible
    # for the set of keys between the start and end of a range to change after serialization
    # (e.g. if a partition is deleted and then re-added). however, accuracy to that degree
    # is not necessary given the space savings here.
    internal_value = subset.get_internal_value()
    if isinstance(internal_value, (DefaultPartitionsSubset, AllPartitionsSubset)) and isinstance(
        subset.partitions_def, DynamicPartitionsDefinition
    ):
        snap = PartitionsSnap.from_def(subset.partitions_def)
        value = KeyRangesPartitionsSubset(
            partitions_snap=snap,
            key_ranges=internal_value.get_partition_key_ranges(subset.partitions_def),
        )
        return SerializableEntitySubset(key=subset.key, value=value)
    else:
        return subset.convert_to_serializable_subset()
def get_serializable_candidate_subset(
    candidate_subset: EntitySubset,
) -> Union[SerializableEntitySubset, HistoricalAllPartitionsSubsetSentinel]:
    """Do not serialize the candidate subset directly if it is an AllPartitionsSubset, compress
    DefaultPartitionsSubset on a DynamicPartitionsDefinition to a KeyRangesPartitionsSubset.
    """
    internal_value = candidate_subset.get_internal_value()
    # for AllPartitionsSubset, we convert this to a HistoricalAllPartitionsSubsetSentinel
    # that will be deserialized as an AllPartitionsSubset. this is a lossy conversion as
    # we are not recording the partitions that were in the AllPartitionsSubset at the time
    # of serialization
    if isinstance(internal_value, AllPartitionsSubset):
        return HistoricalAllPartitionsSubsetSentinel()
    else:
        return _get_maybe_compressed_dynamic_partitions_subset(candidate_subset)
def get_serializable_true_subset(true_subset: EntitySubset) -> SerializableEntitySubset:
    """Compress DefaultPartitionsSubset on a DynamicPartitionsDefinition to a KeyRangesPartitionsSubset."""
    return _get_maybe_compressed_dynamic_partitions_subset(true_subset)
# The boolean operators an automation-condition node can represent.
OperatorType: TypeAlias = Union[Literal["and"], Literal["or"], Literal["not"], Literal["identity"]]
@whitelist_for_serdes(storage_name="AssetConditionSnapshot")
| HistoricalAllPartitionsSubsetSentinel |
python | pyca__cryptography | tests/hazmat/primitives/test_x963kdf.py | {
"start": 405,
"end": 4708
class ____:
    """Tests for the ANSI X9.63 key-derivation function wrapper."""
    def test_length_limit(self):
        """length > digest_size * (2**32 - 1) must be rejected at construction."""
        big_length = hashes.SHA256().digest_size * (2**32 - 1) + 1
        # On 32-bit platforms the multiplication itself overflows first.
        error = OverflowError if sys.maxsize <= 2**31 else ValueError
        with pytest.raises(error):
            X963KDF(hashes.SHA256(), big_length, None, backend)
    def test_already_finalized(self, backend):
        """A KDF instance is single-use; a second derive raises AlreadyFinalized."""
        xkdf = X963KDF(hashes.SHA256(), 16, None, backend)
        xkdf.derive(b"\x01" * 16)
        with pytest.raises(AlreadyFinalized):
            xkdf.derive(b"\x02" * 16)
    def test_derive(self, backend):
        key = binascii.unhexlify(
            b"96c05619d56c328ab95fe84b18264b08725b85e33fd34f08"
        )
        derivedkey = binascii.unhexlify(b"443024c3dae66b95e6f5670601558f71")
        xkdf = X963KDF(hashes.SHA256(), 16, None, backend)
        assert xkdf.derive(key) == derivedkey
    def test_buffer_protocol(self, backend):
        # bytearray (buffer-protocol) input must work the same as bytes.
        key = bytearray(
            binascii.unhexlify(
                b"96c05619d56c328ab95fe84b18264b08725b85e33fd34f08"
            )
        )
        derivedkey = binascii.unhexlify(b"443024c3dae66b95e6f5670601558f71")
        xkdf = X963KDF(hashes.SHA256(), 16, None, backend)
        assert xkdf.derive(key) == derivedkey
    def test_verify(self, backend):
        key = binascii.unhexlify(
            b"22518b10e70f2a3f243810ae3254139efbee04aa57c7af7d"
        )
        sharedinfo = binascii.unhexlify(b"75eef81aa3041e33b80971203d2c0c52")
        derivedkey = binascii.unhexlify(
            b"c498af77161cc59f2962b9a713e2b215152d139766ce34a776df11866a69bf2e"
            b"52a13d9c7c6fc878c50c5ea0bc7b00e0da2447cfd874f6cf92f30d0097111485"
            b"500c90c3af8b487872d04685d14c8d1dc8d7fa08beb0ce0ababc11f0bd496269"
            b"142d43525a78e5bc79a17f59676a5706dc54d54d4d1f0bd7e386128ec26afc21"
        )
        xkdf = X963KDF(hashes.SHA256(), 128, sharedinfo, backend)
        xkdf.verify(key, derivedkey)
    def test_invalid_verify(self, backend):
        key = binascii.unhexlify(
            b"96c05619d56c328ab95fe84b18264b08725b85e33fd34f08"
        )
        xkdf = X963KDF(hashes.SHA256(), 16, None, backend)
        with pytest.raises(InvalidKey):
            xkdf.verify(key, b"wrong derived key")
    def test_unicode_typeerror(self, backend):
        # str where bytes are required raises TypeError for every input slot.
        with pytest.raises(TypeError):
            X963KDF(
                hashes.SHA256(),
                16,
                sharedinfo="foo",  # type: ignore[arg-type]
                backend=backend,
            )
        with pytest.raises(TypeError):
            xkdf = X963KDF(
                hashes.SHA256(), 16, sharedinfo=None, backend=backend
            )
            xkdf.derive("foo")  # type: ignore[arg-type]
        with pytest.raises(TypeError):
            xkdf = X963KDF(
                hashes.SHA256(), 16, sharedinfo=None, backend=backend
            )
            xkdf.verify("foo", b"bar")  # type: ignore[arg-type]
        with pytest.raises(TypeError):
            xkdf = X963KDF(
                hashes.SHA256(), 16, sharedinfo=None, backend=backend
            )
            xkdf.verify(b"foo", "bar")  # type: ignore[arg-type]
    def test_derive_into(self, backend):
        """derive_into fills the buffer and returns the byte count; output matches derive."""
        key = binascii.unhexlify(
            b"96c05619d56c328ab95fe84b18264b08725b85e33fd34f08"
        )
        xkdf = X963KDF(hashes.SHA256(), 16, None, backend)
        buf = bytearray(16)
        n = xkdf.derive_into(key, buf)
        assert n == 16
        # Verify the output matches what derive would produce
        xkdf2 = X963KDF(hashes.SHA256(), 16, None, backend)
        expected = xkdf2.derive(key)
        assert buf == expected
    @pytest.mark.parametrize(
        ("buflen", "outlen"), [(15, 16), (17, 16), (8, 16), (32, 16)]
    )
    def test_derive_into_buffer_incorrect_size(self, buflen, outlen, backend):
        # Buffer must be exactly the requested output length -- no more, no less.
        xkdf = X963KDF(hashes.SHA256(), outlen, None, backend)
        buf = bytearray(buflen)
        with pytest.raises(ValueError, match="buffer must be"):
            xkdf.derive_into(b"key", buf)
    def test_derive_into_already_finalized(self, backend):
        xkdf = X963KDF(hashes.SHA256(), 16, None, backend)
        buf = bytearray(16)
        xkdf.derive_into(b"key", buf)
        with pytest.raises(AlreadyFinalized):
            xkdf.derive_into(b"key", buf)
python | Netflix__metaflow | metaflow/includefile.py | {
"start": 1317,
"end": 2701
class ____(object):
    # Thin wrapper to indicate to the MF client that this object is special
    # and should be handled as an IncludedFile when returning it (ie: fetching
    # the actual content)
    # @tracefunc
    def __init__(self, descriptor: Dict[str, Any]):
        # `descriptor` describes where/how the file was stored; its "type" key
        # selects the matching uploader from UPLOADERS.
        self._descriptor = descriptor
        # Size is computed lazily on first access and memoized here.
        self._cached_size = None
    @property
    def descriptor(self) -> Dict[str, Any]:
        """Raw descriptor dictionary for this included file."""
        return self._descriptor
    @property
    # @tracefunc
    def size(self):
        """Size of the included file, resolved (once) via the matching uploader."""
        if self._cached_size is not None:
            return self._cached_size
        handler = UPLOADERS.get(self.descriptor.get("type", None), None)
        if handler is None:
            raise MetaflowException(
                "Could not interpret size of IncludedFile: %s"
                % json.dumps(self.descriptor)
            )
        self._cached_size = handler.size(self._descriptor)
        return self._cached_size
    # @tracefunc
    def decode(self, name, var_type="Artifact"):
        """Load the actual file content via the uploader named in the descriptor.

        `name`/`var_type` are only used to produce a helpful error message.
        """
        # We look for the uploader for it and decode it
        handler = UPLOADERS.get(self.descriptor.get("type", None), None)
        if handler is None:
            raise MetaflowException(
                "%s '%s' could not be loaded (IncludedFile) because no handler found: %s"
                % (var_type, name, json.dumps(self.descriptor))
            )
        return handler.load(self._descriptor)
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 4001,
"end": 4127
class ____(OAuth2Error):
    """Raised when an OAuth 2 request is made over a non-HTTPS transport."""
    error = 'insecure_transport'
    description = 'OAuth 2 MUST utilize https.'
python | django__django | tests/backends/sqlite/tests.py | {
"start": 10343,
"end": 11136
class ____(TransactionTestCase):
    available_apps = ["backends"]
    def test_database_sharing_in_threads(self):
        """Rows created from a second thread land in the same database as the main thread."""
        # Connections opened inside the worker thread, collected so we can close them.
        thread_connections = []
        def create_object():
            Object.objects.create()
            thread_connections.append(connections[DEFAULT_DB_ALIAS].connection)
        main_connection = connections[DEFAULT_DB_ALIAS].connection
        try:
            create_object()
            thread = threading.Thread(target=create_object)
            thread.start()
            thread.join()
            # Both inserts (main thread + worker thread) must be visible.
            self.assertEqual(Object.objects.count(), 2)
        finally:
            # Close per-thread connections to avoid leaking them; keep the main one open.
            for conn in thread_connections:
                if conn is not main_connection:
                    conn.close()
python | astropy__astropy | astropy/nddata/nddata_base.py | {
"start": 186,
"end": 1994
class ____(metaclass=ABCMeta):
    """Base metaclass that defines the interface for N-dimensional datasets
    with associated meta information used in ``astropy``.
    All properties and ``__init__`` have to be overridden in subclasses. See
    `NDData` for a subclass that defines this interface on `numpy.ndarray`-like
    ``data``.
    See also: https://docs.astropy.org/en/stable/nddata/
    """
    @abstractmethod
    def __init__(self):
        pass
    @property
    @abstractmethod
    def data(self):
        """The stored dataset."""
    @property
    @abstractmethod
    def mask(self):
        """Mask for the dataset.
        Masks should follow the ``numpy`` convention that **valid** data points
        are marked by ``False`` and **invalid** ones with ``True``.
        """
        return None
    @property
    @abstractmethod
    def unit(self):
        """Unit for the dataset."""
        return None
    @property
    @abstractmethod
    def wcs(self):
        """World coordinate system (WCS) for the dataset."""
        return None
    # psf is concrete to avoid introducing a breaking change in release 5.2.
    @property
    def psf(self):
        """Image representation of the PSF for the dataset.
        Should be `ndarray`-like.
        """
        return None
    @property
    @abstractmethod
    def meta(self):
        """Additional meta information about the dataset.
        Should be `dict`-like.
        """
        return None
    @property
    @abstractmethod
    def uncertainty(self):
        """Uncertainty in the dataset.
        Should have an attribute ``uncertainty_type`` that defines what kind of
        uncertainty is stored, such as ``"std"`` for standard deviation or
        ``"var"`` for variance.
        """
        return None
python | pytest-dev__pytest | src/_pytest/tmpdir.py | {
"start": 1146,
"end": 11387
} | class ____:
"""Factory for temporary directories under the common base temp directory,
as discussed at :ref:`temporary directory location and retention`.
"""
_given_basetemp: Path | None
# pluggy TagTracerSub, not currently exposed, so Any.
_trace: Any
_basetemp: Path | None
_retention_count: int
_retention_policy: RetentionType
    def __init__(
        self,
        given_basetemp: Path | None,
        retention_count: int,
        retention_policy: RetentionType,
        trace,
        basetemp: Path | None = None,
        *,
        _ispytest: bool = False,
    ) -> None:
        """Internal constructor (guarded by ``check_ispytest``); use :meth:`from_config`."""
        check_ispytest(_ispytest)
        if given_basetemp is None:
            self._given_basetemp = None
        else:
            # Use os.path.abspath() to get absolute path instead of resolve() as it
            # does not work the same in all platforms (see #4427).
            # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012).
            self._given_basetemp = Path(os.path.abspath(str(given_basetemp)))
        self._trace = trace
        self._retention_count = retention_count
        self._retention_policy = retention_policy
        self._basetemp = basetemp
    @classmethod
    def from_config(
        cls,
        config: Config,
        *,
        _ispytest: bool = False,
    ) -> TempPathFactory:
        """Create a factory according to pytest configuration.
        Validates the ``tmp_path_retention_count`` / ``tmp_path_retention_policy``
        ini options before constructing the factory.
        :meta private:
        """
        check_ispytest(_ispytest)
        count = int(config.getini("tmp_path_retention_count"))
        if count < 0:
            raise ValueError(
                f"tmp_path_retention_count must be >= 0. Current input: {count}."
            )
        policy = config.getini("tmp_path_retention_policy")
        if policy not in ("all", "failed", "none"):
            raise ValueError(
                f"tmp_path_retention_policy must be either all, failed, none. Current input: {policy}."
            )
        return cls(
            given_basetemp=config.option.basetemp,
            trace=config.trace.get("tmpdir"),
            retention_count=count,
            retention_policy=policy,
            _ispytest=True,
        )
def _ensure_relative_to_basetemp(self, basename: str) -> str:
basename = os.path.normpath(basename)
if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp():
raise ValueError(f"{basename} is not a normalized and relative path")
return basename
def mktemp(self, basename: str, numbered: bool = True) -> Path:
"""Create a new temporary directory managed by the factory.
:param basename:
Directory base name, must be a relative path.
:param numbered:
If ``True``, ensure the directory is unique by adding a numbered
suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True``
means that this function will create directories named ``"foo-0"``,
``"foo-1"``, ``"foo-2"`` and so on.
:returns:
The path to the new directory.
"""
basename = self._ensure_relative_to_basetemp(basename)
if not numbered:
p = self.getbasetemp().joinpath(basename)
p.mkdir(mode=0o700)
else:
p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700)
self._trace("mktemp", p)
return p
def getbasetemp(self) -> Path:
"""Return the base temporary directory, creating it if needed.
:returns:
The base temporary directory.
"""
if self._basetemp is not None:
return self._basetemp
if self._given_basetemp is not None:
basetemp = self._given_basetemp
if basetemp.exists():
rm_rf(basetemp)
basetemp.mkdir(mode=0o700)
basetemp = basetemp.resolve()
else:
from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT")
temproot = Path(from_env or tempfile.gettempdir()).resolve()
user = get_user() or "unknown"
# use a sub-directory in the temproot to speed-up
# make_numbered_dir() call
rootdir = temproot.joinpath(f"pytest-of-{user}")
try:
rootdir.mkdir(mode=0o700, exist_ok=True)
except OSError:
# getuser() likely returned illegal characters for the platform, use unknown back off mechanism
rootdir = temproot.joinpath("pytest-of-unknown")
rootdir.mkdir(mode=0o700, exist_ok=True)
# Because we use exist_ok=True with a predictable name, make sure
# we are the owners, to prevent any funny business (on unix, where
# temproot is usually shared).
# Also, to keep things private, fixup any world-readable temp
# rootdir's permissions. Historically 0o755 was used, so we can't
# just error out on this, at least for a while.
uid = get_user_id()
if uid is not None:
rootdir_stat = rootdir.stat()
if rootdir_stat.st_uid != uid:
raise OSError(
f"The temporary directory {rootdir} is not owned by the current user. "
"Fix this and try again."
)
if (rootdir_stat.st_mode & 0o077) != 0:
os.chmod(rootdir, rootdir_stat.st_mode & ~0o077)
keep = self._retention_count
if self._retention_policy == "none":
keep = 0
basetemp = make_numbered_dir_with_cleanup(
prefix="pytest-",
root=rootdir,
keep=keep,
lock_timeout=LOCK_TIMEOUT,
mode=0o700,
)
assert basetemp is not None, basetemp
self._basetemp = basetemp
self._trace("new basetemp", basetemp)
return basetemp
def get_user() -> str | None:
"""Return the current user name, or None if getuser() does not work
in the current environment (see #1010)."""
try:
# In some exotic environments, getpass may not be importable.
import getpass
return getpass.getuser()
except (ImportError, OSError, KeyError):
return None
def pytest_configure(config: Config) -> None:
"""Create a TempPathFactory and attach it to the config object.
This is to comply with existing plugins which expect the handler to be
available at pytest_configure time, but ideally should be moved entirely
to the tmp_path_factory session fixture.
"""
mp = MonkeyPatch()
config.add_cleanup(mp.undo)
_tmp_path_factory = TempPathFactory.from_config(config, _ispytest=True)
mp.setattr(config, "_tmp_path_factory", _tmp_path_factory, raising=False)
def pytest_addoption(parser: Parser) -> None:
parser.addini(
"tmp_path_retention_count",
help="How many sessions should we keep the `tmp_path` directories, according to `tmp_path_retention_policy`.",
default="3",
# NOTE: Would have been better as an `int` but can't change it now.
type="string",
)
parser.addini(
"tmp_path_retention_policy",
help="Controls which directories created by the `tmp_path` fixture are kept around, based on test outcome. "
"(all/failed/none)",
type="string",
default="all",
)
@fixture(scope="session")
def tmp_path_factory(request: FixtureRequest) -> TempPathFactory:
"""Return a :class:`pytest.TempPathFactory` instance for the test session."""
# Set dynamically by pytest_configure() above.
return request.config._tmp_path_factory # type: ignore
def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path:
name = request.node.name
name = re.sub(r"[\W]", "_", name)
MAXVAL = 30
name = name[:MAXVAL]
return factory.mktemp(name, numbered=True)
@fixture
def tmp_path(
request: FixtureRequest, tmp_path_factory: TempPathFactory
) -> Generator[Path]:
"""Return a temporary directory (as :class:`pathlib.Path` object)
which is unique to each test function invocation.
The temporary directory is created as a subdirectory
of the base temporary directory, with configurable retention,
as discussed in :ref:`temporary directory location and retention`.
"""
path = _mk_tmp(request, tmp_path_factory)
yield path
# Remove the tmpdir if the policy is "failed" and the test passed.
policy = tmp_path_factory._retention_policy
result_dict = request.node.stash[tmppath_result_key]
if policy == "failed" and result_dict.get("call", True):
# We do a "best effort" to remove files, but it might not be possible due to some leaked resource,
# permissions, etc, in which case we ignore it.
rmtree(path, ignore_errors=True)
del request.node.stash[tmppath_result_key]
def pytest_sessionfinish(session, exitstatus: int | ExitCode):
"""After each session, remove base directory if all the tests passed,
the policy is "failed", and the basetemp is not specified by a user.
"""
tmp_path_factory: TempPathFactory = session.config._tmp_path_factory
basetemp = tmp_path_factory._basetemp
if basetemp is None:
return
policy = tmp_path_factory._retention_policy
if (
exitstatus == 0
and policy == "failed"
and tmp_path_factory._given_basetemp is None
):
if basetemp.is_dir():
# We do a "best effort" to remove files, but it might not be possible due to some leaked resource,
# permissions, etc, in which case we ignore it.
rmtree(basetemp, ignore_errors=True)
# Remove dead symlinks.
if basetemp.is_dir():
cleanup_dead_symlinks(basetemp)
@hookimpl(wrapper=True, tryfirst=True)
def pytest_runtest_makereport(
item: Item, call
) -> Generator[None, TestReport, TestReport]:
rep = yield
assert rep.when is not None
empty: dict[str, bool] = {}
item.stash.setdefault(tmppath_result_key, empty)[rep.when] = rep.passed
return rep
| TempPathFactory |
python | huggingface__transformers | src/transformers/models/granite/modular_granite.py | {
"start": 1497,
"end": 4988
} | class ____(LlamaDecoderLayer):
def __init__(self, config: GraniteConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.residual_multiplier = config.residual_multiplier
self.self_attn = GraniteAttention(config=config, layer_idx=layer_idx)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Cache`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
into the model
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states * self.residual_multiplier
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states * self.residual_multiplier # main diff with Llama
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
| GraniteDecoderLayer |
python | run-llama__llama_index | llama-index-core/tests/agent/workflow/test_multi_agent_workflow.py | {
"start": 790,
"end": 18432
} | class ____(MockLLM):
def __init__(self, responses: List[ChatMessage]):
super().__init__()
self._responses = responses
self._response_index = 0
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(is_function_calling_model=True)
async def astream_chat(
self, messages: List[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
response_msg = None
if self._responses:
response_msg = self._responses[self._response_index]
self._response_index = (self._response_index + 1) % len(self._responses)
async def _gen():
if response_msg:
yield ChatResponse(
message=response_msg,
delta=response_msg.content,
raw={"content": response_msg.content},
)
return _gen()
async def astream_chat_with_tools(
self, tools: List[Any], chat_history: List[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
response_msg = None
if self._responses:
response_msg = self._responses[self._response_index]
self._response_index = (self._response_index + 1) % len(self._responses)
async def _gen():
if response_msg:
yield ChatResponse(
message=response_msg,
delta=response_msg.content,
raw={"content": response_msg.content},
)
return _gen()
def get_tool_calls_from_response(
self, response: ChatResponse, **kwargs: Any
) -> List[ToolSelection]:
return response.message.additional_kwargs.get("tool_calls", [])
def add(a: int, b: int) -> int:
"""Add two numbers."""
return a + b
def subtract(a: int, b: int) -> int:
"""Subtract two numbers."""
return a - b
@pytest.fixture()
def calculator_agent():
return ReActAgent(
name="calculator",
description="Performs basic arithmetic operations",
system_prompt="You are a calculator assistant.",
tools=[
FunctionTool.from_defaults(fn=add),
FunctionTool.from_defaults(fn=subtract),
],
llm=MockLLM(
responses=[
ChatMessage(
role=MessageRole.ASSISTANT,
content='Thought: I need to add these numbers\nAction: add\nAction Input: {"a": 5, "b": 3}\n',
),
ChatMessage(
role=MessageRole.ASSISTANT,
content=r"Thought: The result is 8\Answer: The sum is 8",
),
]
),
)
@pytest.fixture()
def empty_calculator_agent():
return ReActAgent(
name="calculator",
description="Performs basic arithmetic operations",
system_prompt="You are a calculator assistant.",
tools=[
FunctionTool.from_defaults(fn=add),
FunctionTool.from_defaults(fn=subtract),
],
llm=MockLLM(responses=[]),
)
@pytest.fixture()
def retriever_agent():
return FunctionAgent(
name="retriever",
description="Manages data retrieval",
system_prompt="You are a retrieval assistant.",
llm=MockLLM(
responses=[
ChatMessage(
role=MessageRole.ASSISTANT,
content="Let me help you with that calculation. I'll hand this off to the calculator.",
additional_kwargs={
"tool_calls": [
ToolSelection(
tool_id="one",
tool_name="handoff",
tool_kwargs={
"to_agent": "calculator",
"reason": "This requires arithmetic operations.",
},
)
]
},
),
],
),
)
@pytest.fixture()
def empty_retriever_agent():
return FunctionAgent(
name="retriever",
description="Manages data retrieval",
system_prompt="You are a retrieval assistant.",
llm=MockLLM(
responses=[],
),
)
@pytest.mark.asyncio
async def test_basic_workflow(calculator_agent, retriever_agent):
"""Test basic workflow initialization and validation."""
workflow = AgentWorkflow(
agents=[calculator_agent, retriever_agent],
root_agent="retriever",
)
assert workflow.root_agent == retriever_agent.name
assert len(workflow.agents) == 2
assert "calculator" in workflow.agents
assert "retriever" in workflow.agents
@pytest.mark.asyncio
async def test_workflow_requires_root_agent():
"""Test that workflow requires exactly one root agent."""
with pytest.raises(ValueError, match="Exactly one root agent must be provided"):
AgentWorkflow(
agents=[
FunctionAgent(
name="agent1",
description="test",
llm=MockLLM(
responses=[
ChatMessage(role=MessageRole.ASSISTANT, content="test"),
]
),
),
ReActAgent(
name="agent2",
description="test",
llm=MockLLM(
responses=[
ChatMessage(role=MessageRole.ASSISTANT, content="test"),
]
),
),
]
)
@pytest.mark.asyncio
async def test_workflow_execution(calculator_agent, retriever_agent):
"""Test basic workflow execution with agent handoff."""
workflow = AgentWorkflow(
agents=[calculator_agent, retriever_agent],
root_agent="retriever",
)
memory = ChatMemoryBuffer.from_defaults()
handler = workflow.run(user_msg="Can you add 5 and 3?", memory=memory)
events = []
async for event in handler.stream_events():
events.append(event)
response = await handler
# Verify we got events indicating handoff and calculation
assert any(
ev.current_agent_name == "retriever"
if hasattr(ev, "current_agent_name")
else False
for ev in events
)
assert any(
ev.current_agent_name == "calculator"
if hasattr(ev, "current_agent_name")
else False
for ev in events
)
assert "8" in str(response.response)
@pytest.mark.asyncio
async def test_workflow_execution_empty(empty_calculator_agent, retriever_agent):
"""Test basic workflow execution with agent handoff."""
workflow = AgentWorkflow(
agents=[empty_calculator_agent, retriever_agent],
root_agent="retriever",
)
memory = ChatMemoryBuffer.from_defaults()
handler = workflow.run(user_msg="Can you add 5 and 3?", memory=memory)
events = []
async for event in handler.stream_events():
events.append(event)
with pytest.raises(ValueError, match="Got empty message"):
await handler
@pytest.mark.asyncio
async def test_workflow_handoff_empty(calculator_agent, empty_retriever_agent):
"""Test basic workflow execution with agent handoff."""
workflow = AgentWorkflow(
agents=[calculator_agent, empty_retriever_agent],
root_agent="retriever",
)
memory = ChatMemoryBuffer.from_defaults()
handler = workflow.run(user_msg="Can you add 5 and 3?", memory=memory)
events = []
async for event in handler.stream_events():
events.append(event)
response = await handler
assert response.response.content is None
@pytest.mark.asyncio
async def test_invalid_handoff():
"""Test handling of invalid agent handoff."""
agent1 = FunctionAgent(
name="agent1",
description="test",
llm=MockLLM(
responses=[
ChatMessage(
role=MessageRole.ASSISTANT,
content="handoff invalid_agent Because reasons",
additional_kwargs={
"tool_calls": [
ToolSelection(
tool_id="one",
tool_name="handoff",
tool_kwargs={
"to_agent": "invalid_agent",
"reason": "Because reasons",
},
)
]
},
),
ChatMessage(role=MessageRole.ASSISTANT, content="guess im stuck here"),
],
),
)
agent2 = FunctionAgent(
**agent1.model_dump(exclude={"llm"}), llm=MockLLM(responses=[])
)
agent2.name = "agent2"
workflow = AgentWorkflow(
agents=[agent1, agent2],
root_agent="agent1",
)
handler = workflow.run(user_msg="test")
events = []
async for event in handler.stream_events():
events.append(event)
response = await handler
assert "Agent invalid_agent not found" in str(events)
@pytest.mark.asyncio
async def test_workflow_with_state():
"""Test workflow with state management."""
async def modify_state(random_arg: str, ctx_val: Context):
state = await ctx_val.store.get("state")
state["counter"] += 1
await ctx_val.store.set("state", state)
return f"State updated to {state}"
agent = FunctionAgent(
name="agent",
description="test",
tools=[modify_state],
llm=MockLLM(
responses=[
ChatMessage(
role=MessageRole.ASSISTANT,
content="handing off",
additional_kwargs={
"tool_calls": [
ToolSelection(
tool_id="one",
tool_name="modify_state",
tool_kwargs={"random_arg": "hello"},
)
]
},
),
ChatMessage(
role=MessageRole.ASSISTANT, content="Current state processed"
),
],
),
)
workflow = AgentWorkflow(
agents=[agent],
initial_state={"counter": 0},
state_prompt="Current state: {state}. User message: {msg}",
)
handler = workflow.run(user_msg="test")
async for ev in handler.stream_events():
if isinstance(ev, AgentInput):
for msg in ev.input:
if msg.role == MessageRole.USER:
# ensure we've only formatted the input once
assert len(msg.content.split("Current state:")) == 2
response = await handler
assert response is not None
state = await handler.ctx.store.get("state")
assert state["counter"] == 1
@pytest.mark.asyncio
async def test_agent_with_hitl():
"""Test agent with hitl."""
async def hitl(ctx: Context):
resp = await ctx.wait_for_event(
HumanResponseEvent,
waiter_event=InputRequiredEvent(prefix="What is your name?"),
)
return f"Your name is {resp.response}"
agent = FunctionAgent(
name="agent",
description="test",
tools=[hitl],
llm=MockLLM(
responses=[
ChatMessage(
role=MessageRole.ASSISTANT,
content="handing off",
additional_kwargs={
"tool_calls": [
ToolSelection(
tool_id="one",
tool_name="hitl",
tool_kwargs={},
)
]
},
),
ChatMessage(role=MessageRole.ASSISTANT, content="HITL successful"),
],
),
)
workflow = AgentWorkflow(
agents=[agent],
root_agent="agent",
)
handler = workflow.run(user_msg="test")
ctx_dict = None
async for ev in handler.stream_events():
if isinstance(ev, InputRequiredEvent):
ctx_dict = handler.ctx.to_dict()
await handler.cancel_run()
break
new_ctx = Context.from_dict(workflow, ctx_dict)
handler = workflow.run(user_msg="test", ctx=new_ctx)
handler.ctx.send_event(HumanResponseEvent(response="John Doe"))
response = await handler
assert response is not None
assert "HITL successful" in str(response)
@pytest.mark.asyncio
async def test_max_iterations():
"""Test max iterations."""
def random_tool() -> str:
return "random"
agent = FunctionAgent(
name="agent",
description="test",
tools=[random_tool],
llm=MockLLM(
responses=[
ChatMessage(
role=MessageRole.ASSISTANT,
content="handing off",
additional_kwargs={
"tool_calls": [
ToolSelection(
tool_id="one",
tool_name="random_tool",
tool_kwargs={},
)
]
},
),
]
* 100
),
)
workflow = AgentWorkflow(
agents=[agent],
)
# Default max iterations is 20
with pytest.raises(WorkflowRuntimeError, match="Either something went wrong"):
_ = await workflow.run(user_msg="test")
# Set max iterations to 101 to avoid error
_ = workflow.run(user_msg="test", max_iterations=101)
@pytest.mark.asyncio
async def test_workflow_pickle_serialize_and_resume():
"""Pause workflow, pickle-serialize context, and resume from serialized context."""
async def hitl(ctx: Context):
resp = await ctx.wait_for_event(
HumanResponseEvent,
waiter_event=InputRequiredEvent(prefix="Provide your name to continue"),
)
return f"Your name is {resp.response}"
agent = FunctionAgent(
name="agent",
description="test",
tools=[hitl],
llm=MockLLM(
responses=[
ChatMessage(
role=MessageRole.ASSISTANT,
content="handing off",
additional_kwargs={
"tool_calls": [
ToolSelection(
tool_id="one",
tool_name="hitl",
tool_kwargs={},
)
]
},
),
ChatMessage(role=MessageRole.ASSISTANT, content="HITL successful"),
],
),
)
workflow = AgentWorkflow(
agents=[agent],
root_agent="agent",
)
handler = workflow.run(user_msg="test")
# Wait for pause point, then serialize context with pickle serializer via to_dict
ctx_dict = None
async for ev in handler.stream_events():
if isinstance(ev, InputRequiredEvent):
serializer = PickleSerializer()
ctx_dict = handler.ctx.to_dict(serializer=serializer)
await handler.cancel_run()
break
assert ctx_dict is not None
# Deserialize context and resume workflow using from_dict with serializer
serializer = PickleSerializer()
new_ctx = Context.from_dict(workflow, ctx_dict, serializer=serializer)
handler = workflow.run(user_msg="test", ctx=new_ctx)
handler.ctx.send_event(HumanResponseEvent(response="Jane Doe"))
response = await handler
assert response is not None
assert "HITL successful" in str(response)
@pytest.mark.asyncio
async def test_retry():
"""Test retry."""
def add_tool(a: int, b: int) -> int:
return a + b
agent = ReActAgent(
name="agent",
description="test",
tools=[add_tool],
llm=MockLLM(
responses=[
ChatMessage(
role=MessageRole.ASSISTANT,
content='Thought: I need to add these numbers\nAction: add\n{"a": 5 "b": 3}\n',
),
ChatMessage(
role=MessageRole.ASSISTANT,
content='Thought: I need to add these numbers\nAction: add\nAction Input: {"a": 5, "b": 3}\n',
),
ChatMessage(
role=MessageRole.ASSISTANT,
content=r"Thought: The result is 8\Answer: The sum is 8",
),
]
),
)
workflow = AgentWorkflow(
agents=[agent],
)
memory = ChatMemoryBuffer.from_defaults()
handler = workflow.run(user_msg="Can you add 5 and 3?", memory=memory)
events = []
contains_error_message = False
async for event in handler.stream_events():
events.append(event)
if isinstance(event, AgentInput):
if "Error while parsing the output" in event.input[-1].content:
contains_error_message = True
assert contains_error_message
response = await handler
assert "8" in str(response.response)
| MockLLM |
python | huggingface__transformers | src/transformers/models/dots1/modular_dots1.py | {
"start": 3049,
"end": 4506
} | class ____(Qwen3ForCausalLM):
def forward(
self,
**super_kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Dots1ForCausalLM
>>> model = Dots1ForCausalLM.from_pretrained("rednote-hilab/dots1.llm1.inst")
>>> tokenizer = AutoTokenizer.from_pretrained("rednote-hilab/dots1.llm1.inst")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
return super().forward(**super_kwargs)
__all__ = [
"Dots1PreTrainedModel",
"Dots1Model",
"Dots1ForCausalLM",
]
| Dots1ForCausalLM |
python | bokeh__bokeh | src/bokeh/application/handlers/function.py | {
"start": 2250,
"end": 5005
} | class ____(Handler):
''' A Handler that accepts a plain python function to use for modifying
Bokeh Documents.
For example, the following code configures a handler with a function that
adds an empty plot to a Document:
.. code-block:: python
def add_empty_plot(doc: Document):
p = figure(x_range=(0, 10), y_range=(0, 10))
doc.add_root(p)
return doc
handler = FunctionHandler(add_empty_plot)
This handler could be configured on an Application, and the Application
would run this function every time a new session is created.
'''
_func: ModifyDoc
_trap_exceptions: bool
_safe_to_fork: bool
def __init__(self, func: ModifyDoc, *, trap_exceptions: bool = False) -> None:
'''
Args:
func (callable) : a function to modify and return a Bokeh Document.
The function should have the form:
.. code-block:: python
def func(doc: Document):
# modify doc
return doc
and it should return the passed-in document after making any
modifications in-place.
trap_exceptions (bool) : should exceptions in `func` be caught and
logged or allowed to propagate
'''
super().__init__()
_check_callback(func, ('doc',))
self._func = func
self._trap_exceptions = trap_exceptions
self._safe_to_fork = True
# Properties --------------------------------------------------------------
@property
def safe_to_fork(self) -> bool:
''' Whether it is still safe for the Bokeh server to fork new workers.
``False`` if ``modify_doc`` has already been called.
'''
return self._safe_to_fork
# Public methods ----------------------------------------------------------
def modify_document(self, doc: Document) -> None:
''' Execute the configured ``func`` to modify the document.
After this method is first executed, ``safe_to_fork`` will return
``False``.
'''
try:
self._func(doc)
except Exception as e:
if self._trap_exceptions:
handle_exception(self, e)
else:
raise
finally:
self._safe_to_fork = False
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| FunctionHandler |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 14113,
"end": 14470
} | class ____(HTTPSuccessful):
"""
subclass of :class:`~HTTPSuccessful`
This indicates that the server has fulfilled the request but does
not need to return an entity-body, and might want to return updated
metainformation.
code: 204, title: No Content
"""
code = 204
title = 'No Content'
empty_body = True
| HTTPNoContent |
python | Pylons__pyramid | tests/test_util.py | {
"start": 29920,
"end": 36093
} | class ____(unittest.TestCase):
def _callFUT(self, view, attr=None, argname=None):
from pyramid.util import takes_one_arg
return takes_one_arg(view, attr=attr, argname=argname)
def test_requestonly_newstyle_class_no_init(self):
class foo:
""" """
self.assertFalse(self._callFUT(foo))
def test_requestonly_newstyle_class_init_toomanyargs(self):
class foo:
def __init__(self, context, request):
""" """
self.assertFalse(self._callFUT(foo))
def test_requestonly_newstyle_class_init_onearg_named_request(self):
class foo:
def __init__(self, request):
""" """
self.assertTrue(self._callFUT(foo))
def test_newstyle_class_init_onearg_named_somethingelse(self):
class foo:
def __init__(self, req):
""" """
self.assertTrue(self._callFUT(foo))
def test_newstyle_class_init_defaultargs_firstname_not_request(self):
class foo:
def __init__(self, context, request=None):
""" """
self.assertFalse(self._callFUT(foo))
def test_newstyle_class_init_defaultargs_firstname_request(self):
class foo:
def __init__(self, request, foo=1, bar=2):
""" """
self.assertTrue(self._callFUT(foo, argname='request'))
def test_newstyle_class_init_firstname_request_with_secondname(self):
class foo:
def __init__(self, request, two):
""" """
self.assertFalse(self._callFUT(foo))
def test_newstyle_class_init_noargs(self):
class foo:
def __init__():
""" """
self.assertFalse(self._callFUT(foo))
def test_oldstyle_class_no_init(self):
class foo:
""" """
self.assertFalse(self._callFUT(foo))
def test_oldstyle_class_init_toomanyargs(self):
class foo:
def __init__(self, context, request):
""" """
self.assertFalse(self._callFUT(foo))
def test_oldstyle_class_init_onearg_named_request(self):
class foo:
def __init__(self, request):
""" """
self.assertTrue(self._callFUT(foo))
def test_oldstyle_class_init_onearg_named_somethingelse(self):
class foo:
def __init__(self, req):
""" """
self.assertTrue(self._callFUT(foo))
def test_oldstyle_class_init_defaultargs_firstname_not_request(self):
class foo:
def __init__(self, context, request=None):
""" """
self.assertFalse(self._callFUT(foo))
def test_oldstyle_class_init_defaultargs_firstname_request(self):
class foo:
def __init__(self, request, foo=1, bar=2):
""" """
self.assertTrue(self._callFUT(foo, argname='request'), True)
def test_oldstyle_class_init_noargs(self):
class foo:
def __init__():
""" """
self.assertFalse(self._callFUT(foo))
def test_function_toomanyargs(self):
def foo(context, request):
""" """
self.assertFalse(self._callFUT(foo))
def test_function_with_attr_false(self):
def bar(context, request):
""" """
def foo(context, request):
""" """
foo.bar = bar
self.assertFalse(self._callFUT(foo, 'bar'))
def test_function_with_attr_true(self):
def bar(context, request):
""" """
def foo(request):
""" """
foo.bar = bar
self.assertTrue(self._callFUT(foo, 'bar'))
def test_function_onearg_named_request(self):
def foo(request):
""" """
self.assertTrue(self._callFUT(foo))
def test_function_onearg_named_somethingelse(self):
def foo(req):
""" """
self.assertTrue(self._callFUT(foo))
def test_function_defaultargs_firstname_not_request(self):
def foo(context, request=None):
""" """
self.assertFalse(self._callFUT(foo))
def test_function_defaultargs_firstname_request(self):
def foo(request, foo=1, bar=2):
""" """
self.assertTrue(self._callFUT(foo, argname='request'))
def test_function_noargs(self):
def foo():
""" """
self.assertFalse(self._callFUT(foo))
def test_instance_toomanyargs(self):
class Foo:
def __call__(self, context, request):
""" """
foo = Foo()
self.assertFalse(self._callFUT(foo))
def test_instance_defaultargs_onearg_named_request(self):
class Foo:
def __call__(self, request):
""" """
foo = Foo()
self.assertTrue(self._callFUT(foo))
def test_instance_defaultargs_onearg_named_somethingelse(self):
class Foo:
def __call__(self, req):
""" """
foo = Foo()
self.assertTrue(self._callFUT(foo))
def test_instance_defaultargs_firstname_not_request(self):
class Foo:
def __call__(self, context, request=None):
""" """
foo = Foo()
self.assertFalse(self._callFUT(foo))
def test_instance_defaultargs_firstname_request(self):
class Foo:
def __call__(self, request, foo=1, bar=2):
""" """
foo = Foo()
self.assertTrue(self._callFUT(foo, argname='request'), True)
def test_instance_nocall(self):
class Foo:
pass
foo = Foo()
self.assertFalse(self._callFUT(foo))
def test_method_onearg_named_request(self):
class Foo:
def method(self, request):
""" """
foo = Foo()
self.assertTrue(self._callFUT(foo.method))
def test_function_annotations(self):
def foo(bar):
""" """
# avoid SyntaxErrors in python2, this if effectively nop
getattr(foo, '__annotations__', {}).update({'bar': 'baz'})
self.assertTrue(self._callFUT(foo))
| Test_takes_one_arg |
python | ray-project__ray | python/ray/serve/tests/test_config_files/grpc_deployment.py | {
"start": 3107,
"end": 3430
} | class ____:
def __init__(self):
self.price = 3.0
def __call__(self, num_oranges: int):
return num_oranges * self.price
orange_stand = OrangeStand.bind()
apple_stand = AppleStand.bind()
g2 = FruitMarket.options(name="grpc-deployment-model-composition").bind(
orange_stand, apple_stand
)
| AppleStand |
python | mitmproxy__pdoc | test/testdata/misc.py | {
"start": 7094,
"end": 7764
} | class ____:
"""https://github.com/mitmproxy/pdoc/issues/519"""
def dynamically_modify_docstring1():
"""this should **not** be the docstring."""
def dynamically_modify_docstring2():
pass
dynamically_modify_docstring1.__doc__ = "https://github.com/mitmproxy/pdoc/issues/536"
dynamically_modify_docstring2.__doc__ = "https://github.com/mitmproxy/pdoc/issues/536"
def _docstring_modifier(fn):
fn.__doc__ = "https://github.com/mitmproxy/pdoc/issues/536"
return fn
@_docstring_modifier
def dynamically_modify_docstring3():
"""This should **not** be the docstring."""
@_docstring_modifier
def dynamically_modify_docstring4():
pass
| __init__ |
python | pandas-dev__pandas | pandas/io/sql.py | {
"start": 52006,
"end": 53957
} | class ____(PandasObject, ABC):
"""
Subclasses Should define read_query and to_sql.
"""
def __enter__(self) -> Self:
return self
def __exit__(self, *args) -> None:
pass
def read_table(
self,
table_name: str,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
parse_dates=None,
columns=None,
schema: str | None = None,
chunksize: int | None = None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> DataFrame | Iterator[DataFrame]:
raise NotImplementedError
@abstractmethod
def read_query(
self,
sql: str,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
parse_dates=None,
params=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> DataFrame | Iterator[DataFrame]:
pass
@abstractmethod
def to_sql(
self,
frame,
name: str,
if_exists: Literal["fail", "replace", "append", "delete_rows"] = "fail",
index: bool = True,
index_label=None,
schema=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
method: Literal["multi"] | Callable | None = None,
engine: str = "auto",
**engine_kwargs,
) -> int | None:
pass
@abstractmethod
def execute(self, sql: str | Select | TextClause, params=None):
pass
@abstractmethod
def has_table(self, name: str, schema: str | None = None) -> bool:
pass
@abstractmethod
def _create_sql_schema(
self,
frame: DataFrame,
table_name: str,
keys: list[str] | None = None,
dtype: DtypeArg | None = None,
schema: str | None = None,
) -> str:
pass
| PandasSQL |
python | doocs__leetcode | lcof/面试题31. 栈的压入、弹出序列/Solution.py | {
"start": 0,
"end": 303
} | class ____:
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
j, stk = 0, []
for v in pushed:
stk.append(v)
while stk and stk[-1] == popped[j]:
stk.pop()
j += 1
return j == len(pushed)
| Solution |
python | keras-team__keras | keras/src/export/onnx_test.py | {
"start": 511,
"end": 2485
} | class ____(models.Model):
def __init__(self, layer_list):
super().__init__()
self.layer_list = layer_list
def call(self, input):
output = input
for layer in self.layer_list:
output = layer(output)
return output
def get_model(type="sequential", input_shape=(10,), layer_list=None):
layer_list = layer_list or [
layers.Dense(10, activation="relu"),
layers.BatchNormalization(),
layers.Dense(1, activation="sigmoid"),
]
if type == "sequential":
return models.Sequential(layer_list)
elif type == "functional":
input = output = tree.map_shape_structure(layers.Input, input_shape)
for layer in layer_list:
output = layer(output)
return models.Model(inputs=input, outputs=output)
elif type == "subclass":
return CustomModel(layer_list)
elif type == "lstm":
# https://github.com/keras-team/keras/issues/21390
inputs = layers.Input((4, 10))
x = layers.Bidirectional(
layers.LSTM(
10,
kernel_initializer="he_normal",
return_sequences=True,
kernel_regularizer=None,
),
merge_mode="sum",
)(inputs)
outputs = layers.Bidirectional(
layers.LSTM(
10,
kernel_initializer="he_normal",
return_sequences=True,
kernel_regularizer=None,
),
merge_mode="concat",
)(x)
return models.Model(inputs=inputs, outputs=outputs)
@pytest.mark.skipif(
backend.backend() not in ("tensorflow", "jax", "torch"),
reason=(
"`export_onnx` only currently supports the tensorflow, jax and torch "
"backends."
),
)
@pytest.mark.skipif(
testing.jax_uses_gpu()
or testing.tensorflow_uses_gpu()
or testing.torch_uses_gpu(),
reason="Fails on GPU",
)
| CustomModel |
python | fluentpython__example-code | 18-asyncio/charfinder/charfinder.py | {
"start": 2527,
"end": 6354
} | class ____:
def __init__(self, chars=None):
self.load(chars)
def load(self, chars=None):
self.index = None
if chars is None:
try:
with open(INDEX_NAME, 'rb') as fp:
self.index = pickle.load(fp)
except OSError:
pass
if self.index is None:
self.build_index(chars)
if len(self.index) > MINIMUM_SAVE_LEN:
try:
self.save()
except OSError as exc:
warnings.warn('Could not save {!r}: {}'
.format(INDEX_NAME, exc))
def save(self):
with open(INDEX_NAME, 'wb') as fp:
pickle.dump(self.index, fp)
def build_index(self, chars=None):
if chars is None:
chars = (chr(i) for i in range(32, sys.maxunicode))
index = {}
for char in chars:
try:
name = unicodedata.name(char)
except ValueError:
continue
if name.startswith(CJK_UNI_PREFIX):
name = CJK_UNI_PREFIX
elif name.startswith(CJK_CMP_PREFIX):
name = CJK_CMP_PREFIX
for word in tokenize(name):
index.setdefault(word, set()).add(char)
self.index = index
def word_rank(self, top=None):
res = [(len(self.index[key]), key) for key in self.index]
res.sort(key=lambda item: (-item[0], item[1]))
if top is not None:
res = res[:top]
return res
def word_report(self, top=None):
for postings, key in self.word_rank(top):
print('{:5} {}'.format(postings, key))
def find_chars(self, query, start=0, stop=None):
stop = sys.maxsize if stop is None else stop
result_sets = []
for word in tokenize(query):
chars = self.index.get(word)
if chars is None: # shortcut: no such word
result_sets = []
break
result_sets.append(chars)
if not result_sets:
return QueryResult(0, ())
result = functools.reduce(set.intersection, result_sets)
result = sorted(result) # must sort to support start, stop
result_iter = itertools.islice(result, start, stop)
return QueryResult(len(result),
(char for char in result_iter))
def describe(self, char):
code_str = 'U+{:04X}'.format(ord(char))
name = unicodedata.name(char)
return CharDescription(code_str, char, name)
def find_descriptions(self, query, start=0, stop=None):
for char in self.find_chars(query, start, stop).items:
yield self.describe(char)
def get_descriptions(self, chars):
for char in chars:
yield self.describe(char)
def describe_str(self, char):
return '{:7}\t{}\t{}'.format(*self.describe(char))
def find_description_strs(self, query, start=0, stop=None):
for char in self.find_chars(query, start, stop).items:
yield self.describe_str(char)
@staticmethod # not an instance method due to concurrency
def status(query, counter):
if counter == 0:
msg = 'No match'
elif counter == 1:
msg = '1 match'
else:
msg = '{} matches'.format(counter)
return '{} for {!r}'.format(msg, query)
def main(*args):
index = UnicodeNameIndex()
query = ' '.join(args)
n = 0
for n, line in enumerate(index.find_description_strs(query), 1):
print(line)
print('({})'.format(index.status(query, n)))
if __name__ == '__main__':
if len(sys.argv) > 1:
main(*sys.argv[1:])
else:
print('Usage: {} word1 [word2]...'.format(sys.argv[0]))
| UnicodeNameIndex |
python | pydantic__pydantic | pydantic/experimental/pipeline.py | {
"start": 1270,
"end": 1349
} | class ____:
func: Callable[[Any], Any]
@dataclass(**_slots_frozen)
| _Transform |
python | google__pytype | pytype/overlays/attr_overlay.py | {
"start": 14122,
"end": 21393
} | class ____(classgen.FieldConstructor):
"""Implements attr.ib/attrs.field."""
@classmethod
def make(cls, ctx, module):
return super().make("ib" if module == "attr" else "field", ctx, module)
def _match_and_discard_args(self, node, funcb, args):
"""Discard invalid args so that we can still construct an attrib."""
func = funcb.data
args, errors = function.match_all_args(self.ctx, node, func, args)
# Raise the error, but continue constructing the attrib, so that if we
# have something like
# x = attr.ib(<bad args>)
# we still proceed with the rest of the analysis with x in the list of
# attribs, and with our best guess for typing information.
for e, name, _ in errors:
self.ctx.errorlog.invalid_function_call(self.ctx.vm.stack(func), e)
# The presence of 'factory' or 'default' means we need to preserve the
# fact that the attrib was intended to have a default, otherwise we might
# get an invalid sig for __init__ with a non-default param following a
# default param.
if name != "default":
args = args.delete_namedarg(name)
if name == "factory":
args = args.replace_namedarg("default", self.ctx.new_unsolvable(node))
return args
def call(self, node, func, args, alias_map=None):
"""Returns a type corresponding to an attr."""
args = args.simplify(node, self.ctx)
args = self._match_and_discard_args(node, func, args)
node, default_var = self._get_default_var(node, args)
type_var = args.namedargs.get("type")
init = self.get_kwarg(args, "init", True)
kw_only = self.get_kwarg(args, "kw_only", False)
conv_in, conv_out = self._get_converter_types(node, args)
if type_var:
type_source = TypeSource.TYPE
typ = self.ctx.annotation_utils.extract_annotation(
node,
type_var,
"attr.ib",
self.ctx.vm.simple_stack(),
allowed_type_params=self.ctx.vm.frame.type_params,
)
elif default_var:
type_source = TypeSource.DEFAULT
typ = get_type_from_default(default_var, self.ctx)
else:
type_source = None
typ = self.ctx.convert.unsolvable
if conv_out:
# If a converter’s first argument has a type annotation, that type will
# appear in the signature for __init__. When setting the field type, treat
# args as type > converter > default
init_type = conv_in or self.ctx.convert.unsolvable
if type_source == TypeSource.TYPE:
msg = (
"The type annotation and assignment are set by the "
"'type' and 'converter' args respectively."
)
self.ctx.check_annotation_type_mismatch(
node,
"attr.ib",
typ,
conv_out.instantiate(node),
self.ctx.vm.simple_stack(),
allow_none=True,
details=msg,
)
else:
type_source = TypeSource.CONVERTER
typ = conv_out
else:
init_type = None
ret = AttribInstance(
self.ctx, typ, type_source, init, init_type, kw_only, default_var
).to_variable(node)
return node, ret
@property
def sig(self):
return self.signatures[0].signature
def _get_converter_sig(self, converter, args):
"""Return the first signature with a single argument."""
def valid_arity(sig):
return sig.mandatory_param_count() <= 1 and (
sig.maximum_param_count() is None or sig.maximum_param_count() >= 1
)
sigs = function.get_signatures(converter)
valid_sigs = list(filter(valid_arity, sigs))
if not valid_sigs:
anyt = self.ctx.convert.unsolvable
wanted_type = abstract.CallableClass(
self.ctx.convert.lookup_value("typing", "Callable"),
{0: anyt, abstract_utils.ARGS: anyt, abstract_utils.RET: anyt},
self.ctx,
)
bad_param = error_types.BadType("converter", wanted_type)
raise error_types.WrongArgTypes(self.sig, args, self.ctx, bad_param)
return valid_sigs[0]
def _call_converter_function(self, node, converter_var, args):
"""Run converter and return the input and return types."""
binding = converter_var.bindings[0]
fn = binding.data
sig = self._get_converter_sig(fn, args)
if sig.param_names and sig.param_names[0] in sig.annotations:
input_type = sig.annotations[sig.param_names[0]]
else:
input_type = self.ctx.convert.unsolvable
if sig.has_return_annotation:
return_type = sig.annotations["return"]
else:
fn_args = function.Args(posargs=(input_type.instantiate(node),))
node, ret_var = fn.call(node, binding, fn_args)
return_type = self.ctx.convert.merge_classes(ret_var.data)
return input_type, return_type
def _get_converter_types(self, node, args):
converter_var = args.namedargs.get("converter")
if not converter_var:
return None, None
converter = converter_var.data[0]
if isinstance(converter, abstract.Class):
# If the converter is a class, set the field type to the class and the
# init type to Any.
# TODO(b/135553563): Check that converter.__init__ takes one argument and
# get its type.
return self.ctx.convert.unsolvable, converter
elif abstract_utils.is_callable(converter):
return self._call_converter_function(node, converter_var, args)
else:
return None, None
def _get_default_var(self, node, args):
if "default" in args.namedargs and "factory" in args.namedargs:
# attr.ib(factory=x) is syntactic sugar for attr.ib(default=Factory(x)).
raise error_types.DuplicateKeyword(self.sig, args, self.ctx, "default")
elif "default" in args.namedargs:
default_var = args.namedargs["default"]
elif "factory" in args.namedargs:
mod = self.ctx.vm.import_module("attr", "attr", 0)
node, attr = self.ctx.attribute_handler.get_attribute(
node, mod, "Factory"
)
# We know there is only one value because Factory is in the overlay.
(factory,) = attr.data
factory_args = function.Args(posargs=(args.namedargs["factory"],))
node, default_var = factory.call(node, attr.bindings[0], factory_args)
else:
default_var = None
return node, default_var
def _ordering_for_auto_attrib(auto_attrib):
return (
classgen.Ordering.FIRST_ANNOTATE
if auto_attrib
else classgen.Ordering.LAST_ASSIGN
)
def is_attrib(var):
return var and isinstance(var.data[0], AttribInstance)
def match_classvar(typ):
"""Unpack the type parameter from ClassVar[T]."""
return abstract_utils.match_type_container(typ, "typing.ClassVar")
def get_type_from_default(default_var, ctx):
"""Get the type of an attribute from its default value."""
if default_var.data == [ctx.convert.none]:
# A default of None doesn't give us any information about the actual type.
return ctx.convert.unsolvable
typ = ctx.convert.merge_classes(default_var.data)
if typ == ctx.convert.empty:
return ctx.convert.unsolvable
elif isinstance(typ, abstract.TupleClass) and not typ.tuple_length:
# The type of an attribute whose default is an empty tuple should be
# Tuple[Any, ...], not Tuple[()].
return ctx.convert.tuple_type
return typ
| Attrib |
python | getsentry__sentry | src/sentry/notifications/platform/types.py | {
"start": 5467,
"end": 5795
} | class ____(StrEnum):
"""
The type of formatting to be applied to the encapsulated blocks.
"""
PARAGRAPH = "paragraph"
"""
A block of text with a line break before.
"""
CODE_BLOCK = "code_block"
"""
A new section of code with a line break before.
"""
| NotificationBodyFormattingBlockType |
python | Textualize__textual | src/textual/_sleep.py | {
"start": 156,
"end": 1620
} | class ____(Thread):
def __init__(
self,
) -> None:
self._exit = False
self._sleep_time = 0.0
self._event = Event()
self.future: Future | None = None
self._loop = get_running_loop()
super().__init__(daemon=True)
def run(self):
while True:
self._event.wait()
if self._exit:
break
sleep(self._sleep_time)
self._event.clear()
# self.future.set_result(None)
assert self.future is not None
self._loop.call_soon_threadsafe(self.future.set_result, None)
async def sleep(self, sleep_time: float) -> None:
future = self.future = self._loop.create_future()
self._sleep_time = sleep_time
self._event.set()
await future
async def check_sleeps() -> None:
sleeper = Sleeper()
sleeper.start()
async def profile_sleep(sleep_for: float) -> float:
start = perf_counter()
while perf_counter() - start < sleep_for:
sleep(0)
elapsed = perf_counter() - start
return elapsed
for t in range(15, 120, 5):
sleep_time = 1 / t
elapsed = await profile_sleep(sleep_time)
difference = (elapsed / sleep_time * 100) - 100
print(
f"sleep={sleep_time*1000:.01f}ms clock={elapsed*1000:.01f}ms diff={difference:.02f}%"
)
from asyncio import run
run(check_sleeps())
| Sleeper |
python | django__django | django/forms/models.py | {
"start": 8521,
"end": 9219
} | class ____:
def __init__(self, options=None):
self.model = getattr(options, "model", None)
self.fields = getattr(options, "fields", None)
self.exclude = getattr(options, "exclude", None)
self.widgets = getattr(options, "widgets", None)
self.localized_fields = getattr(options, "localized_fields", None)
self.labels = getattr(options, "labels", None)
self.help_texts = getattr(options, "help_texts", None)
self.error_messages = getattr(options, "error_messages", None)
self.field_classes = getattr(options, "field_classes", None)
self.formfield_callback = getattr(options, "formfield_callback", None)
| ModelFormOptions |
python | ray-project__ray | python/ray/data/_internal/logical/operators/all_to_all_operator.py | {
"start": 1701,
"end": 2819
} | class ____(AbstractAllToAll, LogicalOperatorSupportsPredicatePassThrough):
"""Logical operator for randomize_block_order."""
def __init__(
self,
input_op: LogicalOperator,
seed: Optional[int] = None,
):
super().__init__(
"RandomizeBlockOrder",
input_op,
)
self._seed = seed
def infer_metadata(self) -> "BlockMetadata":
assert len(self._input_dependencies) == 1, len(self._input_dependencies)
assert isinstance(self._input_dependencies[0], LogicalOperator)
return self._input_dependencies[0].infer_metadata()
def infer_schema(
self,
) -> Optional["Schema"]:
assert len(self._input_dependencies) == 1, len(self._input_dependencies)
assert isinstance(self._input_dependencies[0], LogicalOperator)
return self._input_dependencies[0].infer_schema()
def predicate_passthrough_behavior(self) -> PredicatePassThroughBehavior:
# Randomizing block order doesn't affect filtering correctness
return PredicatePassThroughBehavior.PASSTHROUGH
| RandomizeBlocks |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_blocks/test_legacy_tab.py | {
"start": 1225,
"end": 2402
} | class ____(util.MdCase):
"""Test legacy tab slug separator cases."""
extension = ['pymdownx.blocks.tab', 'toc']
extension_configs = {
'pymdownx.blocks.tab': {'slugify': slugify(case='lower'), 'separator': '_'}
}
MD = r"""
### Here is some text
/// tab | Here is some text
content
///
/// tab | Here is some text
content
///
"""
def test_slug_with_separator(self):
"""Test tab slugs with separator."""
self.check_markdown(
self.MD,
'''
<h3 id="here-is-some-text">Here is some text</h3>
<div class="tabbed-set" data-tabs="1:2"><input checked="checked" id="here_is_some_text" name="__tabbed_1" type="radio" /><label for="here_is_some_text">Here is some text</label><div class="tabbed-content">
<p>content</p>
</div>
<input id="here_is_some_text_1" name="__tabbed_1" type="radio" /><label for="here_is_some_text_1">Here is some text</label><div class="tabbed-content">
<p>content</p>
</div>
</div>
''', # noqa: E501
True
)
| TestLegacyTabSlugsSep |
python | huggingface__transformers | tests/models/prophetnet/test_modeling_prophetnet.py | {
"start": 41873,
"end": 42382
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (ProphetNetEncoder,) if is_torch_available() else ()
test_resize_embeddings = False
is_encoder_decoder = False
def setUp(self):
self.model_tester = ProphetNetStandaloneEncoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=ProphetNetConfig)
def test_config(self):
self.config_tester.run_common_tests()
@require_torch
| ProphetNetStandaloneEncoderModelTest |
python | huggingface__transformers | src/transformers/models/gemma/modeling_gemma.py | {
"start": 12999,
"end": 14781
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: GemmaConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = GemmaAttention(config=config, layer_idx=layer_idx)
self.mlp = GemmaMLP(config)
self.input_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| GemmaDecoderLayer |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 99676,
"end": 100522
} | class ____(Response):
"""
Response of datasets.delete_version endpoint.
:param deleted:
:type deleted: bool
"""
_service = "datasets"
_action = "delete_version"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {"deleted": {"type": ["boolean", "null"]}},
"type": "object",
}
def __init__(self, deleted=None, **kwargs):
super(DeleteVersionResponse, self).__init__(**kwargs)
self.deleted = deleted
@schema_property("deleted")
def deleted(self):
return self._property_deleted
@deleted.setter
def deleted(self, value):
if value is None:
self._property_deleted = None
return
self.assert_isinstance(value, "deleted", (bool,))
self._property_deleted = value
| DeleteVersionResponse |
python | getsentry__sentry | tests/sentry/utils/test_committers.py | {
"start": 5818,
"end": 8820
} | class ____(CommitTestCase):
def test_simple(self) -> None:
commit = self.create_commit()
file_change = self.create_commitfilechange(filename="hello/app.py", type="A", commit=commit)
file_changes = [
file_change,
self.create_commitfilechange(filename="goodbye/app.js", type="A"),
]
assert [(commit, 2)] == _match_commits_paths(file_changes, {"hello/app.py"})["hello/app.py"]
def test_skip_one_score_match_longer_than_one_token(self) -> None:
file_changes = [
self.create_commitfilechange(filename="hello/app.py", type="A"),
self.create_commitfilechange(filename="hello/world/app.py", type="A"),
self.create_commitfilechange(filename="hello/world/template/app.py", type="A"),
]
assert [] == _match_commits_paths(file_changes, {"app.py"})["app.py"]
def test_similar_paths(self) -> None:
commits = [self.create_commit(), self.create_commit(), self.create_commit()]
file_changes = [
self.create_commitfilechange(filename="hello/app.py", type="A", commit=commits[0]),
self.create_commitfilechange(
filename="world/hello/app.py", type="A", commit=commits[1]
),
self.create_commitfilechange(
filename="template/hello/app.py", type="A", commit=commits[2]
),
]
assert [(c, 2) for c in commits] == sorted(
_match_commits_paths(file_changes, {"hello/app.py"})["hello/app.py"],
key=lambda fc: fc[0].id,
)
def test_path_shorter_than_filechange(self) -> None:
commit_1 = self.create_commit()
commit_2 = self.create_commit()
file_changes = [
self.create_commitfilechange(filename="app.py", type="A"),
self.create_commitfilechange(filename="c/d/e/f/g/h/app.py", type="A", commit=commit_1),
self.create_commitfilechange(filename="c/d/e/f/g/h/app.py", type="M", commit=commit_2),
]
assert set(
map(
lambda x: x[0],
_match_commits_paths(file_changes, {"e/f/g/h/app.py"})["e/f/g/h/app.py"],
)
) == {
commit_1,
commit_2,
}
def test_path_longer_than_filechange(self) -> None:
commit_1 = self.create_commit()
commit_2 = self.create_commit()
file_changes = [
self.create_commitfilechange(filename="app.py", type="A"),
self.create_commitfilechange(filename="c/d/e/f/g/h/app.py", type="A", commit=commit_1),
self.create_commitfilechange(filename="c/d/e/f/g/h/app.py", type="M", commit=commit_2),
]
assert set(
map(
lambda x: x[0],
_match_commits_paths(file_changes, {"/a/b/c/d/e/f/g/h/app.py"})[
"/a/b/c/d/e/f/g/h/app.py"
],
)
) == {commit_1, commit_2}
| MatchCommitsPathTestCase |
python | pytorch__pytorch | test/distributed/test_c10d_nccl.py | {
"start": 242190,
"end": 244290
} | class ____(NCCLTraceTestDumpOnTimeoutBase):
@requires_nccl()
@skip_if_lt_x_gpu(2)
@parametrize("timing_enabled", [True, False])
def test_timeout_dumps(self, timing_enabled):
# dump on heartbeatmonitor thread
os.environ["TORCH_NCCL_COORD_CHECK_MILSEC"] = "1000"
# need rank0 to crash before looking for its output file
os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "1"
if self.rank == self.MAIN_PROCESS_RANK:
# wait for rank0 to crash before looking for its output file
# we rely on rank0 holding off its abort long enough to dump the debug info
self.assertEqual(self._wait_process(0, timeout=90), -6)
with open(self._trace_name(rank=0), "rb") as f:
t = pickle.load(f)
t = t["entries"]
self.assertEqual(len(t), 2)
self.assertEqual(t[0]["collective_seq_id"], 1)
self.assertEqual(t[0]["state"], "completed")
self.assertEqual(t[1]["collective_seq_id"], 2)
self.assertEqual(
t[1]["state"], self.started_or_scheduled(timing_enabled)
)
self.assertFalse(os.path.exists(self._trace_name(rank=1)))
return
pg = self._create_process_group_nccl()
if timing_enabled:
# we force disabled timing in setup, since there is no 'disable' function
pg._enable_collectives_timing()
device = self.local_device
with torch.cuda.device(device):
a = torch.full((3, 4), float(self.rank), device=device)
pg.allreduce(a).wait()
if self.rank == 0:
pg.allreduce(a).wait()
# rank 0 will crash before it passes the sync, but rank1 will exit quickly and cleanly
torch.cuda.synchronize(device=device)
instantiate_parametrized_tests(ProcessGroupNCCLGroupTest)
instantiate_parametrized_tests(NCCLTraceTestDumpOnTimeout)
instantiate_parametrized_tests(NCCLTraceTest)
@skip_but_pass_in_sandcastle
| NCCLTraceTestDumpOnTimeout |
python | doocs__leetcode | solution/2400-2499/2461.Maximum Sum of Distinct Subarrays With Length K/Solution.py | {
"start": 0,
"end": 479
} | class ____:
def maximumSubarraySum(self, nums: List[int], k: int) -> int:
cnt = Counter(nums[:k])
s = sum(nums[:k])
ans = s if len(cnt) == k else 0
for i in range(k, len(nums)):
cnt[nums[i]] += 1
cnt[nums[i - k]] -= 1
if cnt[nums[i - k]] == 0:
cnt.pop(nums[i - k])
s += nums[i] - nums[i - k]
if len(cnt) == k:
ans = max(ans, s)
return ans
| Solution |
python | getsentry__sentry | tests/sentry/management/commands/test_generate_controlsilo_urls.py | {
"start": 192,
"end": 2516
} | class ____(TestCase):
def call_command(self, *args, **kwargs):
out = StringIO()
call_command("generate_controlsilo_urls", *args, stdout=out, stderr=StringIO(), **kwargs)
return out.getvalue()
def test_skip_includes(self) -> None:
result = self.call_command(format="js")
# Shouldn't contain patterns for urls
# that include more urls.
assert "new RegExp('^api/0/$')" not in result
def test_render_text(self) -> None:
result = self.call_command(format="text")
assert "^api/0/users/$" in result
def test_render_code(self) -> None:
result = self.call_command(format="js")
assert "new RegExp('^api/0/users/$')," in result
assert "new RegExp('^api/0/internal/integration-proxy/$')," in result
assert "const patterns" in result
assert "export default patterns;" in result
def test_write_file(self) -> None:
with tempfile.NamedTemporaryFile() as tf:
self.call_command(format="js", output=tf.name)
tf.seek(0)
result = tf.read().decode("utf8")
assert "This is generated code" in result
assert "new RegExp('^api/0/users/$')," in result
assert "const patterns" in result
assert "export default patterns;" in result
def test_no_missing_urls(self) -> None:
pattern_file = "static/app/data/controlsiloUrlPatterns.ts"
project_root = os.path.dirname(os.path.dirname(MODULE_ROOT))
pattern_filepath = os.path.join(project_root, pattern_file)
with open(pattern_filepath) as f:
current_state = f.read()
result = self.call_command(format="js")
for line in result.splitlines():
msg = f"""
New control silo URL patterns detected!
The pattern: {line}
Does not exist in the current pattern inventory. You should regenerate
the pattern inventory with:
cd ../getsentry
getsentry django generate_controlsilo_urls --format=js --output=../sentry/{pattern_file}
This command needs to be run in a getsentry environment
in order to not lose patterns that are important for sentry.io
"""
assert line in current_state, msg
| TestGenerateControlsiloUrls |
python | django-compressor__django-compressor | compressor/tests/test_utils.py | {
"start": 604,
"end": 1734
} | class ____(TestCase):
def test_has_finders_from_staticfiles(self):
self.assertTrue(
compressor.utils.staticfiles.finders is django.contrib.staticfiles.finders
)
def test_has_finders_from_staticfiles_if_configured_per_appconfig(self):
apps = get_apps_with_staticfiles_using_appconfig(settings.INSTALLED_APPS)
try:
with override_settings(INSTALLED_APPS=apps):
reload(compressor.utils.staticfiles)
self.assertTrue(
compressor.utils.staticfiles.finders
is django.contrib.staticfiles.finders
)
finally:
reload(compressor.utils.staticfiles)
def test_finders_is_none_if_staticfiles_is_not_installed(self):
apps = get_apps_without_staticfiles(settings.INSTALLED_APPS)
try:
with override_settings(INSTALLED_APPS=apps):
reload(compressor.utils.staticfiles)
self.assertTrue(compressor.utils.staticfiles.finders is None)
finally:
reload(compressor.utils.staticfiles)
| StaticFilesTestCase |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/keras_tensor.py | {
"start": 20845,
"end": 21675
} | class ____(type_spec_module.TypeSpec):
"""TypeSpec to represent user-registered symbolic objects."""
def __init__(self, shape, dtype):
self.shape = shape
self._dtype = dtype
self.dtype = dtype
def _component_specs(self):
raise NotImplementedError
def _from_components(self, components):
raise NotImplementedError
def _serialize(self):
raise NotImplementedError
def _to_components(self, value):
raise NotImplementedError
def value_type(self):
raise NotImplementedError
# TODO(b/161487382):
# Special-case user-registered symbolic objects (registered by the
# private `register_symbolic_tensor_type` method) by passing them between
# scratch graphs directly.
# This is needed to not break Tensorflow probability
# while they finish migrating to composite tensors.
| UserRegisteredSpec |
python | apache__airflow | airflow-core/tests/unit/models/test_xcom_arg.py | {
"start": 5280,
"end": 7995
} | class ____:
def test_xcom_pass_to_op(self, dag_maker):
with dag_maker(dag_id="test_xcom_pass_to_op") as dag:
operator = PythonOperator(
python_callable=lambda: VALUE,
task_id="return_value_1",
do_xcom_push=True,
)
xarg = XComArg(operator)
operator2 = PythonOperator(
python_callable=assert_is_value,
op_args=[xarg],
task_id="assert_is_value_1",
)
operator >> operator2
dag.test()
def test_xcom_push_and_pass(self, dag_maker):
def push_xcom_value(key, value, **context):
ti = context["task_instance"]
ti.xcom_push(key, value)
with dag_maker(dag_id="test_xcom_push_and_pass") as dag:
op1 = PythonOperator(
python_callable=push_xcom_value,
task_id="push_xcom_value",
op_args=["my_key", VALUE],
)
xarg = XComArg(op1, key="my_key")
op2 = PythonOperator(
python_callable=assert_is_value,
task_id="assert_is_value_1",
op_args=[xarg],
)
op1 >> op2
dag.test()
@pytest.mark.parametrize(
("fillvalue", "expected_results"),
[
(NOTSET, {("a", 1), ("b", 2), ("c", 3)}),
(None, {("a", 1), ("b", 2), ("c", 3), (None, 4)}),
],
)
def test_xcom_zip(dag_maker, session, fillvalue, expected_results):
results = set()
with dag_maker(session=session, serialized=True) as dag:
@dag.task
def push_letters():
return ["a", "b", "c"]
@dag.task
def push_numbers():
return [1, 2, 3, 4]
@dag.task
def pull(value):
results.add(value)
pull.expand(value=push_letters().zip(push_numbers(), fillvalue=fillvalue))
dr = dag_maker.create_dagrun()
# Run "push_letters" and "push_numbers".
decision = dr.task_instance_scheduling_decisions(session=session)
assert sorted(ti.task_id for ti in decision.schedulable_tis) == ["push_letters", "push_numbers"]
for ti in decision.schedulable_tis:
dag_maker.run_ti(task_id=ti.task_id, map_index=ti.map_index, dag_run=dr, session=session)
session.commit()
# Run "pull".
decision = dr.task_instance_scheduling_decisions(session=session)
assert sorted(ti.task_id for ti in decision.schedulable_tis) == ["pull"] * len(expected_results)
for ti in decision.schedulable_tis:
dag_maker.run_ti(task_id=ti.task_id, map_index=ti.map_index, dag_run=dr, session=session)
assert results == expected_results
| TestXComArgRuntime |
python | jazzband__django-formtools | tests/wizard/namedwizardtests/tests.py | {
"start": 14056,
"end": 15006
} | class ____(NamedWizardTests, TestCase):
wizard_urlname = 'nwiz_cookie'
wizard_step_1_data = {
'cookie_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'cookie_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'cookie_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form4',
}
)
| NamedCookieWizardTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/test_post_comment_votes.py | {
"start": 1132,
"end": 8084
} | class ____(TestCase):
@property
def _config(self):
return (
ConfigBuilder()
.with_basic_auth_credentials("user@example.com", "password")
.with_subdomain("d3v-airbyte")
.with_start_date(ab_datetime_now().subtract(timedelta(weeks=104)))
.build()
)
def get_authenticator(self, config):
return ApiTokenAuthenticator(email=config["credentials"]["email"], password=config["credentials"]["api_token"])
@HttpMocker()
def test_given_one_page_when_read_posts_comments_votes_then_return_records(self, http_mocker):
"""
A normal full refresh sync without pagination
"""
api_token_authenticator = self.get_authenticator(self._config)
# todo: Add this back once the CDK supports conditional streams on an endpoint
# _ = given_ticket_forms(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
posts_record_builder = given_posts(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
post = posts_record_builder.build()
posts_comments_record_builder = given_post_comments(
http_mocker, string_to_datetime(self._config["start_date"]), post["id"], api_token_authenticator
)
post_comment = posts_comments_record_builder.build()
http_mocker.get(
PostCommentVotesRequestBuilder.post_comment_votes_endpoint(api_token_authenticator, post["id"], post_comment["id"])
.with_start_time(self._config["start_date"])
.with_page_size(100)
.build(),
PostCommentVotesResponseBuilder.post_comment_votes_response()
.with_record(PostCommentVotesRecordBuilder.post_commetn_votes_record())
.build(),
)
output = read_stream("post_comment_votes", SyncMode.full_refresh, self._config)
assert len(output.records) == 1
@HttpMocker()
def test_given_403_error_when_read_posts_comments_then_skip_stream(self, http_mocker):
"""
Get a 403 error and then skip the stream
"""
api_token_authenticator = self.get_authenticator(self._config)
# todo: Add this back once the CDK supports conditional streams on an endpoint
# _ = given_ticket_forms(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
posts_record_builder = given_posts(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
post = posts_record_builder.build()
posts_comments_record_builder = given_post_comments(
http_mocker, string_to_datetime(self._config["start_date"]), post["id"], api_token_authenticator
)
post_comment = posts_comments_record_builder.build()
http_mocker.get(
PostCommentVotesRequestBuilder.post_comment_votes_endpoint(api_token_authenticator, post["id"], post_comment["id"])
.with_start_time(self._config["start_date"])
.with_page_size(100)
.build(),
ErrorResponseBuilder.response_with_status(403).build(),
)
output = read_stream("post_comment_votes", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
assert output.get_stream_statuses("post_comment_votes")[-1] == AirbyteStreamStatus.INCOMPLETE
assert any(
[
"failed with status code '403' and error message" in error
for error in get_log_messages_by_log_level(output.logs, LogLevel.ERROR)
]
)
@HttpMocker()
def test_given_404_error_when_read_posts_comments_then_skip_stream(self, http_mocker):
"""
Get a 404 error and then skip the stream
"""
api_token_authenticator = self.get_authenticator(self._config)
# todo: Add this back once the CDK supports conditional streams on an endpoint
# _ = given_ticket_forms(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
posts_record_builder = given_posts(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
post = posts_record_builder.build()
posts_comments_record_builder = given_post_comments(
http_mocker, string_to_datetime(self._config["start_date"]), post["id"], api_token_authenticator
)
post_comment = posts_comments_record_builder.build()
http_mocker.get(
PostCommentVotesRequestBuilder.post_comment_votes_endpoint(api_token_authenticator, post["id"], post_comment["id"])
.with_start_time(self._config["start_date"])
.with_page_size(100)
.build(),
ErrorResponseBuilder.response_with_status(404).build(),
)
output = read_stream("post_comment_votes", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
assert output.get_stream_statuses("post_comment_votes")[-1] == AirbyteStreamStatus.INCOMPLETE
assert any(
[
"failed with status code '404' and error message" in error
for error in get_log_messages_by_log_level(output.logs, LogLevel.ERROR)
]
)
@HttpMocker()
def test_given_500_error_when_read_posts_comments_then_stop_syncing(self, http_mocker):
"""
Get a 500 error and then stop syncing
"""
api_token_authenticator = self.get_authenticator(self._config)
# todo: Add this back once the CDK supports conditional streams on an endpoint
# _ = given_ticket_forms(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
posts_record_builder = given_posts(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
post = posts_record_builder.build()
posts_comments_record_builder = given_post_comments(
http_mocker, string_to_datetime(self._config["start_date"]), post["id"], api_token_authenticator
)
post_comment = posts_comments_record_builder.build()
http_mocker.get(
PostCommentVotesRequestBuilder.post_comment_votes_endpoint(api_token_authenticator, post["id"], post_comment["id"])
.with_start_time(self._config["start_date"])
.with_page_size(100)
.build(),
ErrorResponseBuilder.response_with_status(500).build(),
)
with patch("time.sleep", return_value=None):
output = read_stream("post_comment_votes", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
error_logs = get_log_messages_by_log_level(output.logs, LogLevel.ERROR)
assert any(["Internal server error" in error for error in error_logs])
@freezegun.freeze_time(_NOW.isoformat())
| TestPostsCommentVotesStreamFullRefresh |
python | pytorch__pytorch | torch/onnx/_internal/exporter/_capture_strategies.py | {
"start": 4447,
"end": 6765
} | class ____(CaptureStrategy):
def _capture(
self, model, args, kwargs, dynamic_shapes
) -> torch.export.ExportedProgram:
with (
_patch_dynamo_unsupported_functions(),
# Support the dynamism with 0/1 input dim
torch.fx.experimental._config.patch(backed_size_oblivious=True), # type: ignore[attr-defined]
):
try:
return torch.export.export(
model,
args,
kwargs=kwargs,
dynamic_shapes=dynamic_shapes,
strict=True,
prefer_deferred_runtime_asserts_over_guards=_flags.PREFER_DEFERRED_RUNTIME_ASSERTS_OVER_GUARDS,
)
except torch._dynamo.exc.UserError as exc:
# Refine the dynamic shapes based on the suggested fixes.
try:
new_shapes = torch.export.dynamic_shapes.refine_dynamic_shapes_from_suggested_fixes(
exc.msg, dynamic_shapes
)
except Exception:
# If the dynamic shapes cannot be refined, re-raise the exception.
raise exc from None
return torch.export.export(
model,
args,
kwargs=kwargs,
dynamic_shapes=new_shapes,
strict=True,
prefer_deferred_runtime_asserts_over_guards=_flags.PREFER_DEFERRED_RUNTIME_ASSERTS_OVER_GUARDS,
)
def _enter(self, model) -> None:
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=True)`..."
)
def _success(self, model) -> None:
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=True)`... ✅"
)
def _failure(self, model, e) -> None:
del e # Unused
model_repr = _take_first_line(repr(model))
self._verbose_print(
f"Obtain model graph for `{model_repr}` with `torch.export.export(..., strict=True)`... ❌"
)
| TorchExportStrictStrategy |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_column02.py | {
"start": 315,
"end": 1369
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_column02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column", "subtype": "stacked"})
chart.axis_ids = [49388544, 69387008]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | mlflow__mlflow | mlflow/gateway/config.py | {
"start": 10410,
"end": 10503
} | class ____(LimitModel):
calls: int
key: str | None = None
renewal_period: str
| Limit |
python | spack__spack | lib/spack/spack/test/concretization/core.py | {
"start": 10208,
"end": 117774
} | class ____:
def test_concretize(self, spec):
check_concretize(spec)
def test_concretize_mention_build_dep(self):
spec = check_concretize("cmake-client ^cmake@=3.21.3")
# Check parent's perspective of child
to_dependencies = spec.edges_to_dependencies(name="cmake")
assert len(to_dependencies) == 1
assert to_dependencies[0].depflag == dt.BUILD
# Check child's perspective of parent
cmake = spec["cmake"]
from_dependents = cmake.edges_from_dependents(name="cmake-client")
assert len(from_dependents) == 1
assert from_dependents[0].depflag == dt.BUILD
def test_concretize_preferred_version(self):
spec = check_concretize("python")
assert spec.version == ver("=2.7.11")
spec = check_concretize("python@3.5.1")
assert spec.version == ver("=3.5.1")
def test_concretize_with_restricted_virtual(self):
check_concretize("mpileaks ^mpich2")
concrete = check_concretize("mpileaks ^mpich2@1.1")
assert concrete["mpich2"].satisfies("mpich2@1.1")
concrete = check_concretize("mpileaks ^mpich2@1.2")
assert concrete["mpich2"].satisfies("mpich2@1.2")
concrete = check_concretize("mpileaks ^mpich2@:1.5")
assert concrete["mpich2"].satisfies("mpich2@:1.5")
concrete = check_concretize("mpileaks ^mpich2@:1.3")
assert concrete["mpich2"].satisfies("mpich2@:1.3")
concrete = check_concretize("mpileaks ^mpich2@:1.2")
assert concrete["mpich2"].satisfies("mpich2@:1.2")
concrete = check_concretize("mpileaks ^mpich2@:1.1")
assert concrete["mpich2"].satisfies("mpich2@:1.1")
concrete = check_concretize("mpileaks ^mpich2@1.1:")
assert concrete["mpich2"].satisfies("mpich2@1.1:")
concrete = check_concretize("mpileaks ^mpich2@1.5:")
assert concrete["mpich2"].satisfies("mpich2@1.5:")
concrete = check_concretize("mpileaks ^mpich2@1.3.1:1.4")
assert concrete["mpich2"].satisfies("mpich2@1.3.1:1.4")
def test_concretize_with_provides_when(self):
"""Make sure insufficient versions of MPI are not in providers list when
we ask for some advanced version.
"""
repo = spack.repo.PATH
assert not any(s.intersects("mpich2@:1.0") for s in repo.providers_for("mpi@2.1"))
assert not any(s.intersects("mpich2@:1.1") for s in repo.providers_for("mpi@2.2"))
assert not any(s.intersects("mpich@:1") for s in repo.providers_for("mpi@2"))
assert not any(s.intersects("mpich@:1") for s in repo.providers_for("mpi@3"))
assert not any(s.intersects("mpich2") for s in repo.providers_for("mpi@3"))
def test_provides_handles_multiple_providers_of_same_version(self):
""" """
providers = spack.repo.PATH.providers_for("mpi@3.0")
# Note that providers are repo-specific, so we don't misinterpret
# providers, but vdeps are not namespace-specific, so we can
# associate vdeps across repos.
assert Spec("builtin_mock.multi-provider-mpi@1.10.3") in providers
assert Spec("builtin_mock.multi-provider-mpi@1.10.2") in providers
assert Spec("builtin_mock.multi-provider-mpi@1.10.1") in providers
assert Spec("builtin_mock.multi-provider-mpi@1.10.0") in providers
assert Spec("builtin_mock.multi-provider-mpi@1.8.8") in providers
def test_different_compilers_get_different_flags(
self, mutable_config, clang12_with_flags, gcc11_with_flags
):
"""Tests that nodes get the flags of the associated compiler."""
mutable_config.set(
"packages",
{
"llvm": {"externals": [clang12_with_flags]},
"gcc": {"externals": [gcc11_with_flags]},
},
)
t = spack.vendor.archspec.cpu.host().family
client = spack.concretize.concretize_one(
Spec(
f"cmake-client platform=test os=redhat6 target={t} %gcc@11.1.0"
f" ^cmake platform=test os=redhat6 target={t} %clang@12.2.0"
)
)
cmake = client["cmake"]
assert set(client.compiler_flags["cflags"]) == {"-O0", "-g"}
assert set(cmake.compiler_flags["cflags"]) == {"-O3"}
assert set(client.compiler_flags["fflags"]) == {"-O0", "-g"}
assert not set(cmake.compiler_flags["fflags"])
@pytest.mark.regression("9908")
def test_spec_flags_maintain_order(self, mutable_config, gcc11_with_flags):
"""Tests that Spack assembles flags in a consistent way (i.e. with the same ordering),
for successive concretizations.
"""
mutable_config.set("packages", {"gcc": {"externals": [gcc11_with_flags]}})
spec_str = "libelf os=redhat6 %gcc@11.1.0"
for _ in range(3):
s = spack.concretize.concretize_one(spec_str)
assert all(
s.compiler_flags[x] == ["-O0", "-g"] for x in ("cflags", "cxxflags", "fflags")
)
@pytest.mark.parametrize(
"spec_str,expected,not_expected",
[
# Simple flag propagation from the root
("hypre cflags=='-g' ^openblas", ["hypre cflags='-g'", "^openblas cflags='-g'"], []),
(
"hypre cflags='-g' ^openblas",
["hypre cflags='-g'", "^openblas"],
["^openblas cflags='-g'"],
),
# Setting a flag overrides propagation
(
"hypre cflags=='-g' ^openblas cflags='-O3'",
["hypre cflags='-g'", "^openblas cflags='-O3'"],
["^openblas cflags='-g'"],
),
# Propagation doesn't go across build dependencies
(
"cmake-client cflags=='-O2 -g'",
["cmake-client cflags=='-O2 -g'", "^cmake"],
["cmake cflags=='-O2 -g'"],
),
],
)
def test_compiler_flag_propagation(self, spec_str, expected, not_expected):
root = spack.concretize.concretize_one(spec_str)
for constraint in expected:
assert root.satisfies(constraint)
for constraint in not_expected:
assert not root.satisfies(constraint)
def test_mixing_compilers_only_affects_subdag(self):
"""Tests that, when we mix compilers, the one with lower penalty is used for nodes
where the compiler is not forced.
"""
spec = spack.concretize.concretize_one("dt-diamond%clang ^dt-diamond-bottom%gcc")
# This is intended to traverse the "root" unification set, and check compilers
# on the nodes in the set
for x in spec.traverse(deptype=("link", "run")):
if "c" not in x or not x.name.startswith("dt-diamond"):
continue
expected_gcc = x.name != "dt-diamond"
assert bool(x.dependencies(name="llvm", deptype="build")) is not expected_gcc, x.tree()
assert bool(x.dependencies(name="gcc", deptype="build")) is expected_gcc
assert x.satisfies("%clang") is not expected_gcc
assert x.satisfies("%gcc") is expected_gcc
def test_disable_mixing_prevents_mixing(self):
with spack.config.override("concretizer", {"compiler_mixing": False}):
with pytest.raises(spack.error.UnsatisfiableSpecError):
spack.concretize.concretize_one("dt-diamond%clang ^dt-diamond-bottom%gcc")
def test_disable_mixing_override_by_package(self):
with spack.config.override("concretizer", {"compiler_mixing": ["dt-diamond-bottom"]}):
root = spack.concretize.concretize_one("dt-diamond%clang ^dt-diamond-bottom%gcc")
assert root.satisfies("%clang")
assert root["dt-diamond-bottom"].satisfies("%gcc")
assert root["dt-diamond-left"].satisfies("%clang")
with pytest.raises(spack.error.UnsatisfiableSpecError):
spack.concretize.concretize_one("dt-diamond%clang ^dt-diamond-left%gcc")
def test_disable_mixing_reuse(self, fake_db_install):
# Install a spec
left = spack.concretize.concretize_one("dt-diamond-left %gcc")
fake_db_install(left)
assert left.satisfies("%c=gcc")
lefthash = left.dag_hash()[:7]
# Check if mixing works when it's allowed
spack.concretize.concretize_one(f"dt-diamond%clang ^/{lefthash}")
# Now try to use it with compiler mixing disabled
with spack.config.override("concretizer", {"compiler_mixing": False}):
with pytest.raises(spack.error.UnsatisfiableSpecError):
spack.concretize.concretize_one(f"dt-diamond%clang ^/{lefthash}")
# Should be able to reuse if the compilers match
spack.concretize.concretize_one(f"dt-diamond%gcc ^/{lefthash}")
def test_disable_mixing_reuse_and_built(self, fake_db_install):
r"""In this case we have
x
|\
y z
Where y is a link dependency and z is a build dependency.
We install y with a compiler c1, and we make sure we cannot
ask for `x%c2 ^z%c1 ^/y
This looks similar to `test_disable_mixing_reuse`. But the
compiler nodes are handled differently in this case: this
is the only test that explicitly exercises compiler unmixing
rule #2.
"""
dep1 = spack.concretize.concretize_one("libdwarf %gcc")
fake_db_install(dep1)
assert dep1.satisfies("%c=gcc")
dep1hash = dep1.dag_hash()[:7]
spack.concretize.concretize_one(f"mixing-parent%clang ^cmake%gcc ^/{dep1hash}")
with spack.config.override("concretizer", {"compiler_mixing": False}):
with pytest.raises(spack.error.UnsatisfiableSpecError, match="mixing is disabled"):
spack.concretize.concretize_one(f"mixing-parent%clang ^cmake%gcc ^/{dep1hash}")
def test_disable_mixing_allow_compiler_link(self):
"""Check if we can use a compiler when mixing is disabled, and
still depend on a separate compiler package (in the latter case
not using it as a compiler but rather for some utility it
provides).
"""
with spack.config.override("concretizer", {"compiler_mixing": False}):
x = spack.concretize.concretize_one("llvm-client%gcc")
assert x.satisfies("%cxx=gcc")
assert x.satisfies("%c=gcc")
assert "llvm" in x
def test_disable_mixing_env(
self, mutable_mock_env_path, tmp_path: pathlib.Path, mock_packages, mutable_config
):
spack_yaml = tmp_path / ev.manifest_name
spack_yaml.write_text(
"""\
spack:
specs:
- dt-diamond%gcc
- dt-diamond%clang
concretizer:
compiler_mixing: false
unify: when_possible
"""
)
with ev.Environment(tmp_path) as e:
e.concretize()
for root in e.roots():
if root.satisfies("%gcc"):
assert root["dt-diamond-left"].satisfies("%gcc")
assert root["dt-diamond-bottom"].satisfies("%gcc")
else:
assert root["dt-diamond-left"].satisfies("%llvm")
assert root["dt-diamond-bottom"].satisfies("%llvm")
def test_compiler_inherited_upwards(self):
spec = spack.concretize.concretize_one("dt-diamond ^dt-diamond-bottom%clang")
for x in spec.traverse(deptype=("link", "run")):
if "c" not in x:
continue
assert x.satisfies("%clang")
def test_architecture_deep_inheritance(self, mock_targets, compiler_factory):
"""Make sure that indirect dependencies receive architecture
information from the root even when partial architecture information
is provided by an intermediate dependency.
"""
cnl_compiler = compiler_factory(
spec="gcc@4.5.0 os=CNL languages:=c,c++,fortran target=nocona"
)
with spack.config.override("packages", {"gcc": {"externals": [cnl_compiler]}}):
spec_str = "mpileaks os=CNL target=nocona %gcc@4.5.0 ^dyninst os=CNL ^callpath os=CNL"
spec = spack.concretize.concretize_one(spec_str)
for s in spec.traverse(root=False, deptype=("link", "run")):
if s.external:
continue
assert s.architecture.target == spec.architecture.target
def test_compiler_flags_from_user_are_grouped(self):
spec = Spec('pkg-a cflags="-O -foo-flag foo-val" platform=test %gcc')
spec = spack.concretize.concretize_one(spec)
cflags = spec.compiler_flags["cflags"]
assert any(x == "-foo-flag foo-val" for x in cflags)
def concretize_multi_provider(self):
s = Spec("mpileaks ^multi-provider-mpi@3.0")
s = spack.concretize.concretize_one(s)
assert s["mpi"].version == ver("1.10.3")
def test_concretize_dependent_with_singlevalued_variant_type(self):
s = Spec("singlevalue-variant-dependent-type")
s = spack.concretize.concretize_one(s)
@pytest.mark.parametrize("spec,version", [("dealii", "develop"), ("xsdk", "0.4.0")])
def concretize_difficult_packages(self, a, b):
"""Test a couple of large packages that are often broken due
to current limitations in the concretizer"""
s = Spec(a + "@" + b)
s = spack.concretize.concretize_one(s)
assert s[a].version == ver(b)
def test_concretize_two_virtuals(self):
"""Test a package with multiple virtual dependencies."""
spack.concretize.concretize_one("hypre")
def test_concretize_two_virtuals_with_one_bound(self, mutable_mock_repo):
"""Test a package with multiple virtual dependencies and one preset."""
spack.concretize.concretize_one("hypre ^openblas")
def test_concretize_two_virtuals_with_two_bound(self):
"""Test a package with multiple virtual deps and two of them preset."""
spack.concretize.concretize_one("hypre ^netlib-lapack")
def test_concretize_two_virtuals_with_dual_provider(self):
"""Test a package with multiple virtual dependencies and force a provider
that provides both.
"""
spack.concretize.concretize_one("hypre ^openblas-with-lapack")
def test_concretize_two_virtuals_with_dual_provider_and_a_conflict(self):
"""Test a package with multiple virtual dependencies and force a
provider that provides both, and another conflicting package that
provides one.
"""
s = Spec("hypre ^openblas-with-lapack ^netlib-lapack")
with pytest.raises(spack.error.SpackError):
spack.concretize.concretize_one(s)
@pytest.mark.parametrize(
"spec_str,expected_propagation",
[
# Propagates past a node that doesn't have the variant
("hypre~~shared ^openblas", [("hypre", "~shared"), ("openblas", "~shared")]),
# Propagates from root node to all nodes
(
"ascent~~shared +adios2",
[("ascent", "~shared"), ("adios2", "~shared"), ("bzip2", "~shared")],
),
# Propagate from a node that is not the root node
(
"ascent +adios2 ^adios2~~shared",
[("ascent", "+shared"), ("adios2", "~shared"), ("bzip2", "~shared")],
),
],
)
def test_concretize_propagate_disabled_variant(self, spec_str, expected_propagation):
"""Tests various patterns of boolean variant propagation"""
spec = spack.concretize.concretize_one(spec_str)
for key, expected_satisfies in expected_propagation:
spec[key].satisfies(expected_satisfies)
def test_concretize_propagate_variant_not_dependencies(self):
"""Test that when propagating a variant it is not propagated to dependencies that
do not have that variant"""
spec = Spec("quantum-espresso~~invino")
spec = spack.concretize.concretize_one(spec)
for dep in spec.traverse(root=False):
assert "invino" not in dep.variants.keys()
def test_concretize_propagate_variant_exclude_dependency_fail(self):
"""Tests that a propagating variant cannot be allowed to be excluded by any of
the source package's dependencies"""
spec = Spec("hypre ~~shared ^openblas +shared")
with pytest.raises(spack.error.UnsatisfiableSpecError):
spec = spack.concretize.concretize_one(spec)
def test_concretize_propagate_same_variant_from_direct_dep_fail(self):
"""Test that when propagating a variant from the source package and a direct
dependency also propagates the same variant with a different value. Raises error"""
spec = Spec("ascent +adios2 ++shared ^adios2 ~~shared")
with pytest.raises(spack.error.UnsatisfiableSpecError):
spec = spack.concretize.concretize_one(spec)
def test_concretize_propagate_same_variant_in_dependency_fail(self):
"""Test that when propagating a variant from the source package, none of it's
dependencies can propagate that variant with a different value. Raises error."""
spec = Spec("ascent +adios2 ++shared ^bzip2 ~~shared")
with pytest.raises(spack.error.UnsatisfiableSpecError):
spec = spack.concretize.concretize_one(spec)
def test_concretize_propagate_same_variant_virtual_dependency_fail(self):
"""Test that when propagating a variant from the source package and a direct
dependency (that is a virtual pkg) also propagates the same variant with a
different value. Raises error"""
spec = Spec("hypre ++shared ^openblas ~~shared")
with pytest.raises(spack.error.UnsatisfiableSpecError):
spec = spack.concretize.concretize_one(spec)
def test_concretize_propagate_same_variant_multiple_sources_diamond_dep_fail(self):
"""Test that fails when propagating the same variant with different values from multiple
sources that share a dependency"""
spec = Spec("parent-foo-bar ^dependency-foo-bar++bar ^direct-dep-foo-bar~~bar")
with pytest.raises(spack.error.UnsatisfiableSpecError):
spec = spack.concretize.concretize_one(spec)
def test_concretize_propagate_specified_variant(self):
"""Test that only the specified variant is propagated to the dependencies"""
spec = Spec("parent-foo-bar ~~foo")
spec = spack.concretize.concretize_one(spec)
assert spec.satisfies("^dependency-foo-bar~foo")
assert spec.satisfies("^second-dependency-foo-bar-fee~foo")
assert spec.satisfies("^direct-dep-foo-bar~foo")
assert not spec.satisfies("^dependency-foo-bar+bar")
assert not spec.satisfies("^second-dependency-foo-bar-fee+bar")
assert not spec.satisfies("^direct-dep-foo-bar+bar")
def test_concretize_propagate_one_variant(self):
"""Test that you can specify to propagate one variant and not all"""
spec = Spec("parent-foo-bar ++bar ~foo")
spec = spack.concretize.concretize_one(spec)
assert spec.satisfies("~foo") and not spec.satisfies("^dependency-foo-bar~foo")
assert spec.satisfies("+bar") and spec.satisfies("^dependency-foo-bar+bar")
def test_concretize_propagate_through_first_level_deps(self):
"""Test that boolean valued variants can be propagated past first level
dependecies even if the first level dependency does have the variant"""
spec = Spec("parent-foo-bar-fee ++fee")
spec = spack.concretize.concretize_one(spec)
assert spec.satisfies("+fee") and not spec.satisfies("dependency-foo-bar+fee")
assert spec.satisfies("^second-dependency-foo-bar-fee+fee")
def test_concretize_propagate_multiple_variants(self):
"""Test that multiple boolean valued variants can be propagated from
the same source package"""
spec = Spec("parent-foo-bar-fee ~~foo ++bar")
spec = spack.concretize.concretize_one(spec)
assert spec.satisfies("~foo") and spec.satisfies("+bar")
assert spec.satisfies("^dependency-foo-bar ~foo +bar")
assert spec.satisfies("^second-dependency-foo-bar-fee ~foo +bar")
def test_concretize_propagate_multiple_variants_mulitple_sources(self):
"""Test the propagates multiple different variants for multiple sources
in a diamond dependency"""
spec = Spec("parent-foo-bar ^dependency-foo-bar++bar ^direct-dep-foo-bar~~foo")
spec = spack.concretize.concretize_one(spec)
assert spec.satisfies("^second-dependency-foo-bar-fee+bar")
assert spec.satisfies("^second-dependency-foo-bar-fee~foo")
assert not spec.satisfies("^dependency-foo-bar~foo")
assert not spec.satisfies("^direct-dep-foo-bar+bar")
def test_concretize_propagate_single_valued_variant(self):
"""Test propagation for single valued variants"""
spec = Spec("multivalue-variant libs==static")
spec = spack.concretize.concretize_one(spec)
assert spec.satisfies("libs=static")
assert spec.satisfies("^pkg-a libs=static")
def test_concretize_propagate_multivalue_variant(self):
"""Test that multivalue variants are propagating the specified value(s)
to their dependecies. The dependencies should not have the default value"""
spec = Spec("multivalue-variant foo==baz,fee")
spec = spack.concretize.concretize_one(spec)
assert spec.satisfies("^pkg-a foo=baz,fee")
assert spec.satisfies("^pkg-b foo=baz,fee")
assert not spec.satisfies("^pkg-a foo=bar")
assert not spec.satisfies("^pkg-b foo=bar")
def test_concretize_propagate_multiple_multivalue_variant(self):
"""Tests propagating the same mulitvalued variant from different sources allows
the dependents to accept all propagated values"""
spec = Spec("multivalue-variant foo==bar ^pkg-a foo==baz")
spec = spack.concretize.concretize_one(spec)
assert spec.satisfies("multivalue-variant foo=bar")
assert spec.satisfies("^pkg-a foo=bar,baz")
assert spec.satisfies("^pkg-b foo=bar,baz")
def test_concretize_propagate_variant_not_in_source(self):
"""Test that variant is still propagated even if the source pkg
doesn't have the variant"""
spec = Spec("callpath++debug")
spec = spack.concretize.concretize_one(spec)
assert spec.satisfies("^mpich+debug")
assert not spec.satisfies("callpath+debug")
assert not spec.satisfies("^dyninst+debug")
def test_concretize_propagate_variant_multiple_deps_not_in_source(self):
"""Test that a variant can be propagated to multiple dependencies
when the variant is not in the source package"""
spec = Spec("netlib-lapack++shared")
spec = spack.concretize.concretize_one(spec)
assert spec.satisfies("^openblas+shared")
assert spec.satisfies("^perl+shared")
assert not spec.satisfies("netlib-lapack+shared")
def test_concretize_propagate_variant_second_level_dep_not_in_source(self):
"""Test that a variant can be propagated past first level dependencies
when the variant is not in the source package or any of the first level
dependencies"""
spec = Spec("parent-foo-bar ++fee")
spec = spack.concretize.concretize_one(spec)
assert spec.satisfies("^second-dependency-foo-bar-fee +fee")
assert not spec.satisfies("parent-foo-bar +fee")
def test_no_matching_compiler_specs(self):
s = Spec("pkg-a %gcc@0.0.0")
with pytest.raises(spack.solver.asp.UnsatisfiableSpecError):
spack.concretize.concretize_one(s)
def test_no_compilers_for_arch(self):
s = Spec("pkg-a arch=linux-rhel0-x86_64")
with pytest.raises(spack.error.SpackError):
s = spack.concretize.concretize_one(s)
def test_virtual_is_fully_expanded_for_callpath(self):
# force dependence on fake "zmpi" by asking for MPI 10.0
spec = Spec("callpath ^mpi@10.0")
assert len(spec.dependencies(name="mpi")) == 1
assert "fake" not in spec
spec = spack.concretize.concretize_one(spec)
assert len(spec.dependencies(name="zmpi")) == 1
assert all(not d.dependencies(name="mpi") for d in spec.traverse())
assert all(x in spec for x in ("zmpi", "mpi"))
edges_to_zmpi = spec.edges_to_dependencies(name="zmpi")
assert len(edges_to_zmpi) == 1
assert "fake" in edges_to_zmpi[0].spec
def test_virtual_is_fully_expanded_for_mpileaks(self):
spec = Spec("mpileaks ^mpi@10.0")
assert len(spec.dependencies(name="mpi")) == 1
assert "fake" not in spec
spec = spack.concretize.concretize_one(spec)
assert len(spec.dependencies(name="zmpi")) == 1
assert len(spec.dependencies(name="callpath")) == 1
callpath = spec.dependencies(name="callpath")[0]
assert len(callpath.dependencies(name="zmpi")) == 1
zmpi = callpath.dependencies(name="zmpi")[0]
assert len(zmpi.dependencies(name="fake")) == 1
assert all(not d.dependencies(name="mpi") for d in spec.traverse())
assert all(x in spec for x in ("zmpi", "mpi"))
@pytest.mark.parametrize(
    "spec_str,expected,not_expected",
    [
        # clang (llvm~flang) only provides C, and C++ compilers, while gcc has also fortran
        #
        # If we ask mpileaks%clang, then %gcc must be used for fortran, and since
        # %gcc is preferred to clang in config, it will be used for most nodes
        (
            "mpileaks %clang",
            {"mpileaks": "%clang", "libdwarf": "%gcc", "libelf": "%gcc"},
            {"libdwarf": "%clang", "libelf": "%clang"},
        ),
        (
            "mpileaks %clang@:15.0.0",
            {"mpileaks": "%clang", "libdwarf": "%gcc", "libelf": "%gcc"},
            {"libdwarf": "%clang", "libelf": "%clang"},
        ),
        (
            "mpileaks %gcc",
            {"mpileaks": "%gcc", "libdwarf": "%gcc", "libelf": "%gcc"},
            {"mpileaks": "%clang", "libdwarf": "%clang", "libelf": "%clang"},
        ),
        (
            "mpileaks %gcc@10.2.1",
            {"mpileaks": "%gcc", "libdwarf": "%gcc", "libelf": "%gcc"},
            {"mpileaks": "%clang", "libdwarf": "%clang", "libelf": "%clang"},
        ),
        # dyninst doesn't require fortran, so %clang is propagated
        (
            "dyninst %clang",
            {"dyninst": "%clang", "libdwarf": "%clang", "libelf": "%clang"},
            {"libdwarf": "%gcc", "libelf": "%gcc"},
        ),
    ],
)
def test_compiler_inheritance(self, spec_str, expected, not_expected):
    """Spack tries to propagate compilers as much as possible, but prefers using a single
    toolchain on a node, rather than mixing them.
    """
    spec = spack.concretize.concretize_one(spec_str)
    # each node must satisfy the expected compiler constraint...
    for name, constraint in expected.items():
        assert spec[name].satisfies(constraint)
    # ...and must not satisfy the toolchain it should have avoided
    for name, constraint in not_expected.items():
        assert not spec[name].satisfies(constraint)
def test_external_package(self):
    """Tests that an external is preferred, if present, and that it does not
    have dependencies.
    """
    spec = spack.concretize.concretize_one("externaltool")
    # the external prefix from packages.yaml is used verbatim
    assert spec.external_path == os.path.sep + os.path.join("path", "to", "external_tool")
    # externals are opaque: no dependency nodes are attached
    assert not spec.dependencies()
def test_nobuild_package(self):
    """Test that a non-buildable package raise an error if no specs
    in packages.yaml are compatible with the request.
    """
    spec = Spec("externaltool%clang")
    with pytest.raises(spack.error.SpecError):
        spec = spack.concretize.concretize_one(spec)
def test_external_and_virtual(self, mutable_config):
    """Both a direct external dependency and an external virtual provider
    get their configured prefixes when the virtual is made non-buildable."""
    mutable_config.set("packages:stuff", {"buildable": False})
    spec = spack.concretize.concretize_one("externaltest")
    assert spec["externaltool"].external_path == os.path.sep + os.path.join(
        "path", "to", "external_tool"
    )
    # "stuff" is a virtual provided by externalvirtual
    assert spec["stuff"].external_path == os.path.sep + os.path.join(
        "path", "to", "external_virtual_clang"
    )
def test_compiler_child(self):
    """A per-dependency compiler constraint (^dyninst%gcc) overrides the
    root's compiler (%clang) only for that dependency."""
    s = Spec("mpileaks target=x86_64 %clang ^dyninst%gcc")
    s = spack.concretize.concretize_one(s)
    assert s["mpileaks"].satisfies("%clang")
    assert s["dyninst"].satisfies("%gcc")
def test_conflicts_in_spec(self):
    """Specs hitting a package-declared conflict fail to concretize.

    ``conflict_spec`` is a fixture parametrized over conflicting spec strings.
    """
    s = Spec(conflict_spec)
    with pytest.raises(spack.error.SpackError):
        s = spack.concretize.concretize_one(s)
def test_conflicts_show_cores(self, conflict_spec, monkeypatch):
    """The error raised for a conflicting spec mentions the conflict."""
    s = Spec(conflict_spec)
    with pytest.raises(spack.error.SpackError) as e:
        s = spack.concretize.concretize_one(s)
    assert "conflict" in e.value.message
def test_conflict_in_all_directives_true(self):
    """A conflict guarded by a when=True directive always triggers."""
    s = Spec("when-directives-true")
    with pytest.raises(spack.error.SpackError):
        s = spack.concretize.concretize_one(s)
@pytest.mark.parametrize("spec_str", ["unsat-provider@1.0+foo"])
def test_no_conflict_in_external_specs(self, spec_str):
    """Conflicts declared in package.py are not enforced on externals."""
    # Modify the configuration to have the spec with conflict
    # registered as an external
    ext = Spec(spec_str)
    data = {"externals": [{"spec": spec_str, "prefix": "/fake/path"}]}
    spack.config.set("packages::{0}".format(ext.name), data)
    ext = spack.concretize.concretize_one(ext)  # failure raises exception
def test_regression_issue_4492(self):
    """Clearing dependencies and resetting caches makes a spec abstract again."""
    # Constructing a spec which has no dependencies, but is otherwise
    # concrete is kind of difficult. What we will do is to concretize
    # a spec, and then modify it to have no dependency and reset the
    # cache values.
    s = Spec("mpileaks")
    s = spack.concretize.concretize_one(s)

    # Check that now the Spec is concrete, store the hash
    assert s.concrete

    # Remove the dependencies and reset caches
    s.clear_dependencies()
    s._concrete = False

    assert not s.concrete
@pytest.mark.regression("7239")
def test_regression_issue_7239(self):
    """Wrapping a spec in the build interface must not corrupt the MRO."""
    # Constructing a SpecBuildInterface from another SpecBuildInterface
    # results in an inconsistent MRO

    # Normal Spec: no wrapper class in its MRO
    s = Spec("mpileaks")
    s = spack.concretize.concretize_one(s)

    assert spack.llnl.util.lang.ObjectWrapper not in s.__class__.__mro__

    # Spec wrapped in a build interface
    build_interface = s["mpileaks"]
    assert spack.llnl.util.lang.ObjectWrapper in build_interface.__class__.__mro__

    # Mimics asking the build interface from a build interface
    build_interface = s["mpileaks"]["mpileaks"]
    assert spack.llnl.util.lang.ObjectWrapper in build_interface.__class__.__mro__
@pytest.mark.regression("7705")
def test_regression_issue_7705(self):
    """package.provides() must honor conditional provides on concrete specs."""
    # spec.package.provides(name) doesn't account for conditional
    # constraints in the concretized spec
    s = Spec("simple-inheritance~openblas")
    s = spack.concretize.concretize_one(s)

    assert not s.package.provides("lapack")
@pytest.mark.regression("7941")
def test_regression_issue_7941(self):
    """str(spec) must round-trip: parsing it back yields the same DAG hash."""
    # The string representation of a spec containing
    # an explicit multi-valued variant and a dependency
    # might be parsed differently than the originating spec
    s = Spec("pkg-a foobar=bar ^pkg-b")
    t = Spec(str(s))

    s = spack.concretize.concretize_one(s)
    t = spack.concretize.concretize_one(t)

    assert s.dag_hash() == t.dag_hash()
@pytest.mark.parametrize(
    "abstract_specs",
    [
        # Establish a baseline - concretize a single spec
        ("mpileaks",),
        # When concretized together with older version of callpath
        # and dyninst it uses those older versions
        ("mpileaks", "callpath@0.9", "dyninst@8.1.1"),
        # Handle recursive syntax within specs
        ("mpileaks", "callpath@0.9 ^dyninst@8.1.1", "dyninst"),
        # Test specs that have overlapping dependencies, where neither
        # spec is a dependency of the other
        ("mpileaks", "direct-mpich"),
    ],
)
def test_simultaneous_concretization_of_specs(self, abstract_specs):
    """Concretizing several specs together yields one shared configuration
    per package that satisfies every input request."""
    abstract_specs = [Spec(x) for x in abstract_specs]
    concrete_specs = spack.concretize._concretize_specs_together(abstract_specs)

    # Check there's only one configuration of each package in the DAG
    names = set(
        dep.name for spec in concrete_specs for dep in spec.traverse(deptype=("link", "run"))
    )
    for name in names:
        name_specs = set(spec[name] for spec in concrete_specs if name in spec)
        assert len(name_specs) == 1

    # Check that there's at least one Spec that satisfies the
    # initial abstract request
    for aspec in abstract_specs:
        assert any(cspec.satisfies(aspec) for cspec in concrete_specs)

    # Make sure the concrete spec are top-level specs with no dependents
    for spec in concrete_specs:
        assert not spec.dependents()
@pytest.mark.parametrize("spec", ["noversion", "noversion-bundle"])
def test_noversion_pkg(self, spec):
    """Test concretization failures for no-version packages."""
    with pytest.raises(spack.error.SpackError):
        spack.concretize.concretize_one(spec)
@pytest.mark.not_on_windows("Not supported on Windows (yet)")
@pytest.mark.parametrize(
    "spec,compiler_spec,best_achievable",
    [
        (
            "mpileaks%gcc@=4.4.7 ^dyninst@=10.2.1 target=x86_64:",
            "gcc@4.4.7 languages=c,c++,fortran",
            "core2",
        ),
        ("mpileaks target=x86_64: %gcc@=4.8", "gcc@4.8 languages=c,c++,fortran", "haswell"),
        (
            "mpileaks target=x86_64: %gcc@=5.3.0",
            "gcc@5.3.0 languages=c,c++,fortran",
            "broadwell",
        ),
    ],
)
@pytest.mark.regression("13361", "20537")
@pytest.mark.usefixtures("mock_targets")
def test_adjusting_default_target_based_on_compiler(
    self, spec, compiler_spec, best_achievable, current_host, compiler_factory, mutable_config
):
    """The concretized target is capped at what the compiler supports:
    the best target achievable by the compiler, or the host if older."""
    best_achievable = spack.vendor.archspec.cpu.TARGETS[best_achievable]
    # the host microarchitecture is a hard upper bound
    expected = best_achievable if best_achievable < current_host else current_host
    mutable_config.set(
        "packages", {"gcc": {"externals": [compiler_factory(spec=f"{compiler_spec}")]}}
    )
    s = spack.concretize.concretize_one(spec)
    assert str(s.architecture.target) == str(expected)
@pytest.mark.parametrize(
    "constraint,expected", [("%gcc@10.2", "@=10.2.1"), ("%gcc@10.2:", "@=10.2.1")]
)
def test_compiler_version_matches_any_entry_in_packages_yaml(self, constraint, expected):
    """An abstract compiler constraint resolves to a configured compiler."""
    # The behavior here has changed since #8735 / #14730. Now %gcc@10.2 is an abstract
    # compiler spec, and it should first find a matching compiler gcc@=10.2.1
    s = spack.concretize.concretize_one(f"mpileaks {constraint}")
    gcc_deps = s.dependencies(name="gcc", deptype="build")
    assert len(gcc_deps) == 1
    assert gcc_deps[0].satisfies(expected)
def test_concretize_anonymous(self):
    """An anonymous spec (no package name, only a variant) cannot concretize."""
    anonymous = Spec("+variant")
    with pytest.raises(spack.error.SpackError):
        spack.concretize.concretize_one(anonymous)
@pytest.mark.parametrize("spec_str", ["mpileaks ^%gcc", "mpileaks ^cflags=-g"])
def test_concretize_anonymous_dep(self, spec_str):
    """Anonymous dependency constraints are rejected at concretization."""
    with pytest.raises(spack.error.SpackError):
        s = Spec(spec_str)
        s = spack.concretize.concretize_one(s)
@pytest.mark.parametrize(
    "spec_str,expected_str",
    [
        # Unconstrained versions select default compiler (gcc@10.2.1)
        ("bowtie@1.4.0", "%gcc@10.2.1"),
        # Version with conflicts and no valid gcc select another compiler
        ("bowtie@1.3.0", "%clang@15.0.0"),
        # If a higher gcc is available, with a worse os, still prefer that,
        # assuming the two operating systems are compatible
        ("bowtie@1.2.2 %gcc", "%gcc@11.1.0"),
    ],
)
def test_compiler_conflicts_in_package_py(
    self, spec_str, expected_str, gcc11_with_flags, mutable_config
):
    """Compiler conflicts in package.py steer compiler selection."""
    # allow cross-OS reuse of compilers between debian6 and redhat6
    mutable_config.set(
        "concretizer:os_compatible", {"debian6": ["redhat6"], "redhat6": ["debian6"]}
    )
    with spack.config.override("packages", {"gcc": {"externals": [gcc11_with_flags]}}):
        s = spack.concretize.concretize_one(spec_str)
    assert s.satisfies(expected_str)
@pytest.mark.parametrize(
    "spec_str,expected,unexpected",
    [
        ("conditional-variant-pkg@1.0", ["two_whens"], ["version_based", "variant_based"]),
        ("conditional-variant-pkg@2.0", ["version_based", "variant_based"], ["two_whens"]),
        (
            "conditional-variant-pkg@2.0~version_based",
            ["version_based"],
            ["variant_based", "two_whens"],
        ),
        (
            "conditional-variant-pkg@2.0+version_based+variant_based",
            ["version_based", "variant_based", "two_whens"],
            [],
        ),
    ],
)
def test_conditional_variants(self, spec_str, expected, unexpected):
    """Conditional variants appear only when their 'when=' condition holds."""
    s = spack.concretize.concretize_one(spec_str)
    # '<name>=*' is satisfied iff the variant exists on the concrete spec
    for var in expected:
        assert s.satisfies("%s=*" % var)
    for var in unexpected:
        assert not s.satisfies("%s=*" % var)
@pytest.mark.parametrize(
    "bad_spec",
    [
        "@1.0~version_based",
        "@1.0+version_based",
        "@2.0~version_based+variant_based",
        "@2.0+version_based~variant_based+two_whens",
    ],
)
def test_conditional_variants_fail(self, bad_spec):
    """Setting a conditional variant whose condition is false is an error."""
    with pytest.raises(
        (spack.error.UnsatisfiableSpecError, spack.spec.InvalidVariantForSpecError)
    ):
        _ = spack.concretize.concretize_one("conditional-variant-pkg" + bad_spec)
@pytest.mark.parametrize(
    "spec_str,expected,unexpected",
    [
        ("py-extension3 ^python@3.5.1", [], ["py-extension1"]),
        ("py-extension3 ^python@2.7.11", ["py-extension1"], []),
        ("py-extension3@1.0 ^python@2.7.11", ["patchelf@0.9"], []),
        ("py-extension3@1.1 ^python@2.7.11", ["patchelf@0.9"], []),
        ("py-extension3@1.0 ^python@3.5.1", ["patchelf@0.10"], []),
    ],
)
def test_conditional_dependencies(self, spec_str, expected, unexpected, fuzz_dep_order):
    """Tests that conditional dependencies are correctly attached.

    The original concretizer can be sensitive to the iteration order over the dependencies of
    a package, so we use a fuzzer function to test concretization with dependencies iterated
    forwards and backwards.
    """
    fuzz_dep_order("py-extension3")  # test forwards and backwards

    s = spack.concretize.concretize_one(spec_str)

    for dep in expected:
        msg = '"{0}" is not in "{1}" and was expected'
        assert dep in s, msg.format(dep, spec_str)

    for dep in unexpected:
        msg = '"{0}" is in "{1}" but was unexpected'
        assert dep not in s, msg.format(dep, spec_str)
@pytest.mark.parametrize(
    "spec_str,patched_deps",
    [
        ("patch-several-dependencies", [("libelf", 1), ("fake", 2)]),
        ("patch-several-dependencies@1.0", [("libelf", 1), ("fake", 2), ("libdwarf", 1)]),
        (
            "patch-several-dependencies@1.0 ^libdwarf@20111030",
            [("libelf", 1), ("fake", 2), ("libdwarf", 2)],
        ),
        ("patch-several-dependencies ^libelf@0.8.10", [("libelf", 2), ("fake", 2)]),
        ("patch-several-dependencies +foo", [("libelf", 2), ("fake", 2)]),
    ],
)
def test_patching_dependencies(self, spec_str, patched_deps):
    """Patches declared on dependencies show up in their 'patches' variant,
    with one entry per applied patch."""
    s = spack.concretize.concretize_one(spec_str)

    for dep, num_patches in patched_deps:
        assert s[dep].satisfies("patches=*")
        assert len(s[dep].variants["patches"].value) == num_patches
@pytest.mark.regression("267,303,1781,2310,2632,3628")
@pytest.mark.parametrize(
    "spec_str, expected",
    [
        # Need to understand that this configuration is possible
        # only if we use the +mpi variant, which is not the default
        ("fftw ^mpich", ["+mpi"]),
        # This spec imposes two orthogonal constraints on a dependency,
        # one of which is conditional. The original concretizer fail since
        # when it applies the first constraint, it sets the unknown variants
        # of the dependency to their default values
        ("quantum-espresso", ["^fftw@1.0+mpi"]),
        # This triggers a conditional dependency on ^fftw@1.0
        ("quantum-espresso", ["^openblas"]),
        # This constructs a constraint for a dependency of the type
        # @x.y:x.z where the lower bound is unconditional, the upper bound
        # is conditional to having a variant set
        ("quantum-espresso", ["^libelf@0.8.12"]),
        ("quantum-espresso~veritas", ["^libelf@0.8.13"]),
    ],
)
def test_working_around_conflicting_defaults(self, spec_str, expected):
    """The solver can move variants off their defaults to satisfy constraints."""
    s = spack.concretize.concretize_one(spec_str)

    assert s.concrete
    for constraint in expected:
        assert s.satisfies(constraint)
@pytest.mark.regression("5651")
def test_package_with_constraint_not_met_by_external(self):
    """Check that if we have an external package A at version X.Y in
    packages.yaml, but our spec doesn't allow X.Y as a version, then
    a new version of A is built that meets the requirements.
    """
    packages_yaml = {"libelf": {"externals": [{"spec": "libelf@0.8.13", "prefix": "/usr"}]}}
    spack.config.set("packages", packages_yaml)

    # quantum-espresso+veritas requires libelf@:0.8.12
    s = spack.concretize.concretize_one("quantum-espresso+veritas")
    assert s.satisfies("^libelf@0.8.12")
    # a built libelf is used instead of the (incompatible) external one
    assert not s["libelf"].external
@pytest.mark.regression("9744")
def test_cumulative_version_ranges_with_different_length(self):
    """Version ranges of different lengths accumulate correctly on a dep."""
    concrete = spack.concretize.concretize_one("cumulative-vrange-root")
    assert concrete.concrete
    assert concrete.satisfies("^cumulative-vrange-bottom@2.2")
@pytest.mark.regression("9937")
def test_dependency_conditional_on_another_dependency_state(self):
    """A dependency gated on another dependency's variant follows that
    variant: +x keeps it (as does the default), ~x drops it."""
    root_str = "variant-on-dependency-condition-root"
    dep_str = "variant-on-dependency-condition-a"
    spec_str = "{0} ^{1}".format(root_str, dep_str)

    # default: the conditional dependency is present
    s = spack.concretize.concretize_one(spec_str)
    assert s.concrete
    assert s.satisfies("^variant-on-dependency-condition-b")

    s = spack.concretize.concretize_one(spec_str + "+x")
    assert s.concrete
    assert s.satisfies("^variant-on-dependency-condition-b")

    s = spack.concretize.concretize_one(spec_str + "~x")
    assert s.concrete
    assert not s.satisfies("^variant-on-dependency-condition-b")
def test_external_that_would_require_a_virtual_dependency(self):
    """An external satisfies the request without pulling in the virtual
    dependency the built package would need."""
    s = spack.concretize.concretize_one("requires-virtual")

    assert s.external
    assert "stuff" not in s
def test_transitive_conditional_virtual_dependency(self, mutable_config):
    """Test that an external is used as provider if the virtual is non-buildable"""
    mutable_config.set("packages:stuff", {"buildable": False})
    s = spack.concretize.concretize_one("transitive-conditional-virtual-dependency")

    # Test that the default +stuff~mpi is maintained, and the right provider is selected
    assert s.satisfies("^conditional-virtual-dependency +stuff~mpi")
    assert s.satisfies("^[virtuals=stuff] externalvirtual")
@pytest.mark.regression("20040")
def test_conditional_provides_or_depends_on(self):
    """A package may either provide a virtual or depend on it depending on
    a variant value; +disable-v1 turns the provider into a consumer."""
    # Check that we can concretize correctly a spec that can either
    # provide a virtual or depend on it based on the value of a variant
    s = spack.concretize.concretize_one("v1-consumer ^conditional-provider +disable-v1")

    assert "v1-provider" in s
    assert s["v1"].name == "v1-provider"
    assert s["v2"].name == "conditional-provider"
@pytest.mark.regression("20079")
@pytest.mark.parametrize(
    "spec_str,tests_arg,with_dep,without_dep",
    [
        # Check that True is treated correctly and attaches test deps
        # to all nodes in the DAG
        ("pkg-a", True, ["pkg-a"], []),
        ("pkg-a foobar=bar", True, ["pkg-a", "pkg-b"], []),
        # Check that a list of names activates the dependency only for
        # packages in that list
        ("pkg-a foobar=bar", ["pkg-a"], ["pkg-a"], ["pkg-b"]),
        ("pkg-a foobar=bar", ["pkg-b"], ["pkg-b"], ["pkg-a"]),
        # Check that False disregard test dependencies
        ("pkg-a foobar=bar", False, [], ["pkg-a", "pkg-b"]),
    ],
)
def test_activating_test_dependencies(self, spec_str, tests_arg, with_dep, without_dep):
    """The 'tests' argument controls which nodes get test dependencies."""
    s = spack.concretize.concretize_one(spec_str, tests=tests_arg)

    for pkg_name in with_dep:
        msg = "Cannot find test dependency in package '{0}'"
        node = s[pkg_name]
        assert node.dependencies(deptype="test"), msg.format(pkg_name)

    for pkg_name in without_dep:
        msg = "Test dependency in package '{0}' is unexpected"
        node = s[pkg_name]
        assert not node.dependencies(deptype="test"), msg.format(pkg_name)
@pytest.mark.regression("19981")
def test_target_ranges_in_conflicts(self):
    """Conflicts expressed over target ranges are enforced."""
    with pytest.raises(spack.error.SpackError):
        spack.concretize.concretize_one("impossible-concretization")
def test_target_compatibility(self):
    """A dependency cannot target a newer microarchitecture than its parent."""
    with pytest.raises(spack.error.SpackError):
        spack.concretize.concretize_one(
            Spec("libdwarf target=x86_64 ^libelf target=x86_64_v2")
        )
@pytest.mark.regression("20040")
def test_variant_not_default(self):
    """Non-default variant values requested by a dependent are imposed on
    the dependency."""
    s = spack.concretize.concretize_one("ecp-viz-sdk")

    # Check default variant value for the package
    assert "+dep" in s["conditional-constrained-dependencies"]

    # Check that non-default variant values are forced on the dependency
    d = s["dep-with-variants"]
    assert "+foo+bar+baz" in d
def test_all_patches_applied(self):
    """Both a UUID-named patch and a local patch end up, in order, in the
    dependency's 'patches' variant."""
    # patch checksum differs by platform (line endings on Windows)
    uuidpatch = (
        "a60a42b73e03f207433c5579de207c6ed61d58e4d12dd3b5142eb525728d89ea"
        if sys.platform != "win32"
        else "d0df7988457ec999c148a4a2af25ce831bfaad13954ba18a4446374cb0aef55e"
    )
    localpatch = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
    spec = Spec("conditionally-patch-dependency+jasper")
    spec = spack.concretize.concretize_one(spec)
    assert (uuidpatch, localpatch) == spec["libelf"].variants["patches"].value
def test_dont_select_version_that_brings_more_variants_in(self):
    """Prefer the dependency version that does not introduce extra variants."""
    root = spack.concretize.concretize_one("dep-with-variants-if-develop-root")
    assert root["dep-with-variants-if-develop"].satisfies("@1.0")
@pytest.mark.regression("20244,20736")
@pytest.mark.parametrize(
    "spec_str,is_external,expected",
    [
        # These are all externals, and 0_8 is a version not in package.py
        ("externaltool@1.0", True, "@1.0"),
        ("externaltool@0.9", True, "@0.9"),
        ("externaltool@0_8", True, "@0_8"),
        # This external package is buildable, has a custom version
        # in packages.yaml that is greater than the ones in package.py
        # and specifies a variant
        ("external-buildable-with-variant +baz", True, "@1.1.special +baz"),
        ("external-buildable-with-variant ~baz", False, "@1.0 ~baz"),
        ("external-buildable-with-variant@1.0: ~baz", False, "@1.0 ~baz"),
        # This uses an external version that meets the condition for
        # having an additional dependency, but the dependency shouldn't
        # appear in the answer set
        ("external-buildable-with-variant@0.9 +baz", True, "@0.9"),
        # This package has an external version declared that would be
        # the least preferred if Spack had to build it
        ("old-external", True, "@1.0.0"),
    ],
)
def test_external_package_versions(self, spec_str, is_external, expected):
    """Externals may use versions unknown to package.py, and the choice
    between the external and a built package depends on the constraints."""
    s = spack.concretize.concretize_one(spec_str)
    assert s.external == is_external
    assert s.satisfies(expected)
@pytest.mark.parametrize("dev_first", [True, False])
@pytest.mark.parametrize(
    "spec", ["dev-build-test-install", "dev-build-test-dependent ^dev-build-test-install"]
)
@pytest.mark.parametrize("mock_db", [True, False])
def test_reuse_does_not_overwrite_dev_specs(
    self, dev_first, spec, mock_db, tmp_path: pathlib.Path, temporary_store, monkeypatch
):
    """Test that reuse does not mix dev specs with non-dev specs.

    Tests for either order (dev specs are not reused for non-dev, and
    non-dev specs are not reused for dev specs)

    Tests for a spec in which the root is developed and a spec in
    which a dep is developed.

    Tests for both reuse from database and reuse from buildcache"""
    # dev and non-dev specs that are otherwise identical
    spec = Spec(spec)
    dev_spec = spec.copy()
    dev_spec["dev-build-test-install"].constrain(f"dev_path={tmp_path}")

    # run the test in both orders
    first_spec = dev_spec if dev_first else spec
    second_spec = spec if dev_first else dev_spec

    # concretize and setup spack to reuse in the appropriate manner
    first_spec = spack.concretize.concretize_one(first_spec)

    def mock_fn(*args, **kwargs):
        # stand-in for the buildcache query: only the first spec exists
        return [first_spec]

    if mock_db:
        temporary_store.db.add(first_spec)
    else:
        monkeypatch.setattr(spack.binary_distribution, "update_cache_and_get_specs", mock_fn)

    # concretize and ensure we did not reuse
    with spack.config.override("concretizer:reuse", True):
        second_spec = spack.concretize.concretize_one(second_spec)
    assert first_spec.dag_hash() != second_spec.dag_hash()
@pytest.mark.regression("20292")
@pytest.mark.parametrize(
    "context",
    [
        {"add_variant": True, "delete_variant": False},
        {"add_variant": False, "delete_variant": True},
        {"add_variant": True, "delete_variant": True},
    ],
)
def test_reuse_installed_packages_when_package_def_changes(
    self, context, mutable_database, repo_with_changing_recipe
):
    """Pinning an installed dependency by hash reuses it even after its
    package.py recipe changed; concretizing from scratch does not."""
    # test applies only with reuse turned off in concretizer
    spack.config.set("concretizer:reuse", False)

    # Install a spec
    root = spack.concretize.concretize_one("root")
    dependency = root["changing"].copy()
    PackageInstaller([root.package], fake=True, explicit=True).install()

    # Modify package.py
    repo_with_changing_recipe.change(context)

    # Try to concretize with the spec installed previously
    new_root_with_reuse = spack.concretize.concretize_one(
        Spec("root ^/{0}".format(dependency.dag_hash()))
    )

    new_root_without_reuse = spack.concretize.concretize_one("root")

    # validate that the graphs are the same with reuse, but not without
    assert ht.build_hash(root) == ht.build_hash(new_root_with_reuse)
    assert ht.build_hash(root) != ht.build_hash(new_root_without_reuse)

    # DAG hash should be the same with reuse since only the dependency changed
    assert root.dag_hash() == new_root_with_reuse.dag_hash()

    # Structure and package hash will be different without reuse
    assert root.dag_hash() != new_root_without_reuse.dag_hash()
@pytest.mark.regression("43663")
def test_no_reuse_when_variant_condition_does_not_hold(self, mutable_database, mock_packages):
    """An installed spec is not reused when a requested conditional variant
    cannot hold on it."""
    spack.config.set("concretizer:reuse", True)

    # Install a spec for which the `version_based` variant condition does not hold
    old = spack.concretize.concretize_one("conditional-variant-pkg @1")
    PackageInstaller([old.package], fake=True, explicit=True).install()

    # Then explicitly require a spec with `+version_based`, which shouldn't reuse previous spec
    new1 = spack.concretize.concretize_one("conditional-variant-pkg +version_based")
    assert new1.satisfies("@2 +version_based")

    new2 = spack.concretize.concretize_one("conditional-variant-pkg +two_whens")
    assert new2.satisfies("@2 +two_whens +version_based")
def test_reuse_with_flags(self, mutable_database, mutable_config):
    """An installed spec with matching compiler flags is reused when a
    subset of those flags is requested."""
    spack.config.set("concretizer:reuse", True)
    spec = spack.concretize.concretize_one("pkg-a cflags=-g cxxflags=-g")
    PackageInstaller([spec.package], fake=True, explicit=True).install()

    testspec = spack.concretize.concretize_one("pkg-a cflags=-g")
    assert testspec == spec, testspec.tree()
@pytest.mark.regression("20784")
def test_concretization_of_test_dependencies(self):
    """A dependency that only imposes conditions must not be pulled in."""
    # With clingo we emit dependency_conditions regardless of the type
    # of the dependency. We need to ensure that there's at least one
    # dependency type declared to infer that the dependency holds.
    s = spack.concretize.concretize_one("test-dep-with-imposed-conditions")
    assert "c" not in s
@pytest.mark.parametrize(
    "spec_str", ["wrong-variant-in-conflicts", "wrong-variant-in-depends-on"]
)
def test_error_message_for_inconsistent_variants(self, spec_str):
    """A directive referencing an unknown variant raises UnknownVariantError."""
    s = Spec(spec_str)
    with pytest.raises(vt.UnknownVariantError):
        s = spack.concretize.concretize_one(s)
@pytest.mark.regression("22533")
@pytest.mark.parametrize(
    "spec_str,variant_name,expected_values",
    [
        # Test the default value 'auto'
        ("mvapich2", "file_systems", ("auto",)),
        # Test setting a single value from the disjoint set
        ("mvapich2 file_systems=lustre", "file_systems", ("lustre",)),
        # Test setting multiple values from the disjoint set
        ("mvapich2 file_systems=lustre,gpfs", "file_systems", ("lustre", "gpfs")),
    ],
)
def test_mv_variants_disjoint_sets_from_spec(self, spec_str, variant_name, expected_values):
    """Multi-valued variants built from disjoint sets accept values from
    a single set at a time."""
    s = spack.concretize.concretize_one(spec_str)
    assert set(expected_values) == set(s.variants[variant_name].value)
@pytest.mark.regression("22533")
def test_mv_variants_disjoint_sets_from_packages_yaml(self):
    """Disjoint-set multi-valued variants set on an external in
    packages.yaml are carried over to the concrete spec."""
    external_mvapich2 = {
        "mvapich2": {
            "buildable": False,
            "externals": [{"spec": "mvapich2@2.3.1 file_systems=nfs,ufs", "prefix": "/usr"}],
        }
    }
    spack.config.set("packages", external_mvapich2)

    s = spack.concretize.concretize_one("mvapich2")
    assert set(s.variants["file_systems"].value) == set(["ufs", "nfs"])
@pytest.mark.regression("22596")
def test_external_with_non_default_variant_as_dependency(self):
    """An external with non-default variant values is selected as a
    dependency instead of building a default-variant version."""
    # This package depends on another that is registered as an external
    # with 'buildable: true' and a variant with a non-default value set
    s = spack.concretize.concretize_one("trigger-external-non-default-variant")

    assert "~foo" in s["external-non-default-variant"]
    assert "~bar" in s["external-non-default-variant"]
    assert s["external-non-default-variant"].external
@pytest.mark.regression("22718")
@pytest.mark.parametrize(
    "spec_str,expected_compiler",
    [("mpileaks", "%gcc@10.2.1"), ("mpileaks ^mpich%clang@15.0.0", "%clang@15.0.0")],
)
def test_compiler_is_unique(self, spec_str, expected_compiler):
    """Every C-compiling node in the DAG uses the same compiler."""
    s = spack.concretize.concretize_one(spec_str)

    for node in s.traverse():
        # nodes without a C dependency have no compiler to check
        if not node.satisfies("^ c"):
            continue
        assert node.satisfies(expected_compiler)
@pytest.mark.parametrize(
    "spec_str,expected_dict",
    [
        # Check the defaults from the package (libs=shared)
        ("multivalue-variant", {"libs=shared": True, "libs=static": False}),
        # Check that libs=static doesn't extend the default
        ("multivalue-variant libs=static", {"libs=shared": False, "libs=static": True}),
    ],
)
def test_multivalued_variants_from_cli(self, spec_str, expected_dict):
    """A multi-valued variant set on the CLI replaces the default value
    rather than being merged with it."""
    s = spack.concretize.concretize_one(spec_str)

    for constraint, value in expected_dict.items():
        assert s.satisfies(constraint) == value
@pytest.mark.regression("22351")
@pytest.mark.parametrize(
    "spec_str,expected",
    [
        # Version 1.1.0 is deprecated and should not be selected, unless we
        # explicitly asked for that
        ("deprecated-versions", "deprecated-versions@1.0.0"),
        ("deprecated-versions@=1.1.0", "deprecated-versions@1.1.0"),
    ],
)
def test_deprecated_versions_not_selected(self, spec_str, expected):
    """Deprecated versions are skipped by the concretizer unless the user
    pins them explicitly (``@=``)."""
    with spack.config.override("config:deprecated", True):
        s = spack.concretize.concretize_one(spec_str)
        # Bug fix: the original discarded the result of satisfies(), so
        # this test never actually checked the selected version.
        assert s.satisfies(expected)
@pytest.mark.regression("24196")
def test_version_badness_more_important_than_default_mv_variants(self):
    """Newer dependency versions win over avoiding non-default MV variants."""
    # If a dependency had an old version that for some reason pulls in
    # a transitive dependency with a multi-valued variant, that old
    # version was preferred because of the order of our optimization
    # criteria.
    s = spack.concretize.concretize_one("root")
    assert s["gmt"].satisfies("@2.0")
@pytest.mark.regression("24205")
def test_provider_must_meet_requirements(self):
    """A provider not meeting the virtual's requirements is unusable."""
    # A package can be a provider of a virtual only if the underlying
    # requirements are met.
    s = Spec("unsat-virtual-dependency")
    with pytest.raises((RuntimeError, spack.error.UnsatisfiableSpecError)):
        s = spack.concretize.concretize_one(s)
@pytest.mark.regression("23951")
def test_newer_dependency_adds_a_transitive_virtual(self):
    """Prefer the newer dependency even if it adds a transitive virtual."""
    # Ensure that a package doesn't concretize any of its transitive
    # dependencies to an old version because newer versions pull in
    # a new virtual dependency. The possible concretizations here are:
    #
    # root@1.0 <- middle@1.0 <- leaf@2.0 <- blas
    # root@1.0 <- middle@1.0 <- leaf@1.0
    #
    # and "blas" is pulled in only by newer versions of "leaf"
    s = spack.concretize.concretize_one("root-adds-virtual")
    assert s["leaf-adds-virtual"].satisfies("@2.0")
    assert "blas" in s
@pytest.mark.regression("26718")
def test_versions_in_virtual_dependencies(self):
    """A version constraint on a virtual picks a matching provider version."""
    # Ensure that a package that needs a given version of a virtual
    # package doesn't end up using a later implementation
    s = spack.concretize.concretize_one("hpcviewer@2019.02")
    assert s["java"].satisfies("virtual-with-versions@1.8.0")
@pytest.mark.regression("26866")
def test_non_default_provider_of_multiple_virtuals(self):
    """An explicitly requested provider serves all the virtuals it
    provides; no other provider sneaks into the DAG."""
    s = spack.concretize.concretize_one("many-virtual-consumer ^low-priority-provider")
    assert s["mpi"].name == "low-priority-provider"
    assert s["lapack"].name == "low-priority-provider"

    for virtual_pkg in ("mpi", "lapack"):
        for pkg in spack.repo.PATH.providers_for(virtual_pkg):
            if pkg.name == "low-priority-provider":
                continue
            assert pkg not in s
@pytest.mark.regression("27237")
@pytest.mark.parametrize(
    "spec_str,expect_installed",
    [("mpich", True), ("mpich+debug", False), ("mpich~debug", True)],
)
def test_concrete_specs_are_not_modified_on_reuse(
    self, mutable_database, spec_str, expect_installed
):
    """Reused specs come back unmodified and still satisfy the request."""
    # Test the internal consistency of solve + DAG reconstruction
    # when reused specs are added to the mix. This prevents things
    # like additional constraints being added to concrete specs in
    # the answer set produced by clingo.
    with spack.config.override("concretizer:reuse", True):
        s = spack.concretize.concretize_one(spec_str)
    assert s.installed is expect_installed
    assert s.satisfies(spec_str)
@pytest.mark.regression("26721,19736")
def test_sticky_variant_in_package(self):
    """The solver may not flip a sticky variant away from its default."""
    # Here we test that a sticky variant cannot be changed from its default value
    # by the ASP solver if not set explicitly. The package used in the test needs
    # to have +allow-gcc set to be concretized with %gcc and clingo is not allowed
    # to change the default ~allow-gcc
    with pytest.raises(spack.error.SpackError):
        spack.concretize.concretize_one("sticky-variant %gcc")

    s = spack.concretize.concretize_one("sticky-variant+allow-gcc %gcc")
    assert s.satisfies("%gcc") and s.satisfies("+allow-gcc")

    s = spack.concretize.concretize_one("sticky-variant %clang")
    assert s.satisfies("%clang") and s.satisfies("~allow-gcc")
@pytest.mark.regression("42172")
@pytest.mark.parametrize(
    "spec,allow_gcc",
    [
        ("sticky-variant@1.0+allow-gcc", True),
        ("sticky-variant@1.0~allow-gcc", False),
        # FIXME (externals as concrete) ("sticky-variant@1.0", False),
    ],
)
def test_sticky_variant_in_external(self, spec, allow_gcc):
    """Sticky-variant constraints also apply to external packages."""
    # setup external for sticky-variant+allow-gcc
    config = {"externals": [{"spec": spec, "prefix": "/fake/path"}], "buildable": False}
    spack.config.set("packages:sticky-variant", config)

    # only the +allow-gcc external may be used with %gcc
    maybe = spack.llnl.util.lang.nullcontext if allow_gcc else pytest.raises
    with maybe(spack.error.SpackError):
        s = spack.concretize.concretize_one("sticky-variant-dependent%gcc")

    if allow_gcc:
        assert s.satisfies("%gcc")
        assert s["sticky-variant"].satisfies("+allow-gcc")
        assert s["sticky-variant"].external
def test_do_not_invent_new_concrete_versions_unless_necessary(self):
    """Prefer a known version satisfying the request; invent one only when
    no known version satisfies it."""
    # ensure we select a known satisfying version rather than creating
    # a new '2.7' version.
    assert ver("=2.7.11") == spack.concretize.concretize_one("python@2.7").version

    # Here there is no known satisfying version - use the one on the spec.
    assert ver("=2.7.21") == spack.concretize.concretize_one("python@=2.7.21").version
@pytest.mark.parametrize(
    "spec_str,valid",
    [
        ("conditional-values-in-variant@1.62.0 cxxstd=17", False),
        ("conditional-values-in-variant@1.62.0 cxxstd=2a", False),
        ("conditional-values-in-variant@1.72.0 cxxstd=2a", False),
        # Ensure disjoint set of values work too
        ("conditional-values-in-variant@1.72.0 staging=flexpath", False),
        # Ensure conditional values set False fail too
        ("conditional-values-in-variant foo=bar", False),
        ("conditional-values-in-variant foo=foo", True),
    ],
)
def test_conditional_values_in_variants(self, spec_str, valid):
    """Variant values guarded by a condition are only accepted when the
    condition holds."""
    s = Spec(spec_str)
    raises = pytest.raises((RuntimeError, spack.error.UnsatisfiableSpecError))
    with spack.llnl.util.lang.nullcontext() if valid else raises:
        s = spack.concretize.concretize_one(s)
def test_conditional_values_in_conditional_variant(self):
"""Test that conditional variants play well with conditional possible values"""
s = spack.concretize.concretize_one("conditional-values-in-variant@1.50.0")
assert "cxxstd" not in s.variants
s = spack.concretize.concretize_one("conditional-values-in-variant@1.60.0")
assert "cxxstd" in s.variants
    def test_target_granularity(self):
        # The test architecture uses core2 as the default target. Check that when
        # we configure Spack for "generic" granularity we concretize for x86_64
        default_target = spack.platforms.test.Test.default
        generic_target = spack.vendor.archspec.cpu.TARGETS[default_target].generic.name
        s = Spec("python")
        # Default granularity: the specific default microarchitecture is used...
        assert spack.concretize.concretize_one(s).satisfies("target=%s" % default_target)
        # ...while "generic" granularity falls back to the architecture family
        with spack.config.override("concretizer:targets", {"granularity": "generic"}):
            assert spack.concretize.concretize_one(s).satisfies("target=%s" % generic_target)
    def test_host_compatible_concretization(self):
        # Check that after setting "host_compatible" to false we cannot concretize.
        # Here we use "k10" to set a target non-compatible with the current host
        # to avoid a lot of boilerplate when mocking the test platform. The issue
        # is that the defaults for the test platform are very old, so there's no
        # compiler supporting e.g. icelake etc.
        s = Spec("python target=k10")
        # Without the restriction, the non-host target concretizes fine...
        assert spack.concretize.concretize_one(s)
        # ...but with host_compatible=True it must be rejected
        with spack.config.override("concretizer:targets", {"host_compatible": True}):
            with pytest.raises(spack.error.SpackError):
                spack.concretize.concretize_one(s)
def test_add_microarchitectures_on_explicit_request(self):
# Check that if we consider only "generic" targets, we can still solve for
# specific microarchitectures on explicit requests
with spack.config.override("concretizer:targets", {"granularity": "generic"}):
s = spack.concretize.concretize_one("python target=k10")
assert s.satisfies("target=k10")
    @pytest.mark.regression("29201")
    def test_delete_version_and_reuse(self, mutable_database, repo_with_changing_recipe):
        """Test that we can reuse installed specs with versions not
        declared in package.py
        """
        root = spack.concretize.concretize_one("root")
        PackageInstaller([root.package], fake=True, explicit=True).install()
        # Remove the installed version from the recipe; reuse must still find the spec
        repo_with_changing_recipe.change({"delete_version": True})
        with spack.config.override("concretizer:reuse", True):
            new_root = spack.concretize.concretize_one("root")
        # Same DAG hash means the installed spec was reused as-is
        assert root.dag_hash() == new_root.dag_hash()
    @pytest.mark.regression("29201")
    def test_installed_version_is_selected_only_for_reuse(
        self, mutable_database, repo_with_changing_recipe
    ):
        """Test that a version coming from an installed spec is a possible
        version only for reuse
        """
        # Install a dependency that cannot be reused with "root"
        # because of a conflict in a variant, then delete its version
        dependency = spack.concretize.concretize_one("changing@1.0~foo")
        PackageInstaller([dependency.package], fake=True, explicit=True).install()
        repo_with_changing_recipe.change({"delete_version": True})
        with spack.config.override("concretizer:reuse", True):
            new_root = spack.concretize.concretize_one("root")
        # The deleted version must not leak into a freshly-built (non-reused) dependency
        assert not new_root["changing"].satisfies("@1.0")
    @pytest.mark.regression("28259")
    def test_reuse_with_unknown_namespace_dont_raise(
        self, temporary_store, mock_custom_repository
    ):
        """Reusing a spec installed from a repository namespace that is no longer in
        the active repo stack must not raise."""
        with spack.repo.use_repositories(mock_custom_repository, override=False):
            s = spack.concretize.concretize_one("pkg-c")
            assert s.namespace != "builtin_mock"
            PackageInstaller([s.package], fake=True, explicit=True).install()
        # Outside the custom repo, reuse concretization falls back to builtin_mock
        with spack.config.override("concretizer:reuse", True):
            s = spack.concretize.concretize_one("pkg-c")
        assert s.namespace == "builtin_mock"
    @pytest.mark.regression("45538")
    def test_reuse_from_other_namespace_no_raise(
        self, temporary_store, monkeypatch, repo_builder: RepoBuilder
    ):
        """Reusing a spec whose package lives in a custom namespace must keep that
        namespace on the concrete result instead of raising."""
        repo_builder.add_package("zlib")
        builtin = spack.concretize.concretize_one("zlib")
        PackageInstaller([builtin.package], fake=True, explicit=True).install()
        with spack.repo.use_repositories(repo_builder.root, override=False):
            with spack.config.override("concretizer:reuse", True):
                # Fully-qualified request: the custom namespace must be honored
                zlib = spack.concretize.concretize_one(f"{repo_builder.namespace}.zlib")
        assert zlib.namespace == repo_builder.namespace
    @pytest.mark.regression("28259")
    def test_reuse_with_unknown_package_dont_raise(
        self, temporary_store, monkeypatch, repo_builder: RepoBuilder
    ):
        """Reusing an installed spec whose package was later removed from its repo must
        not raise: concretization falls back to another repo providing the package."""
        repo_builder.add_package("pkg-c")
        with spack.repo.use_repositories(repo_builder.root, override=False):
            s = spack.concretize.concretize_one("pkg-c")
            assert s.namespace == repo_builder.namespace
            PackageInstaller([s.package], fake=True, explicit=True).install()
        # Remove the package from the custom repo, including its cached Python module
        del sys.modules[f"spack_repo.{repo_builder.namespace}.packages.pkg_c"]
        repo_builder.remove("pkg-c")
        with spack.repo.use_repositories(repo_builder.root, override=False) as repos:
            # Invalidate the package checker cache so the removal is noticed
            repos.repos[0]._pkg_checker.invalidate()
            with spack.config.override("concretizer:reuse", True):
                s = spack.concretize.concretize_one("pkg-c")
            assert s.namespace == "builtin_mock"
    @pytest.mark.parametrize(
        "specs,checks",
        [
            (["libelf", "libelf@0.8.10"], {"libelf": 1}),
            (["libdwarf%gcc", "libelf%clang"], {"libdwarf": 1, "libelf": 1}),
            (["libdwarf%gcc", "libdwarf%clang"], {"libdwarf": 2, "libelf": 1}),
            (["libdwarf^libelf@0.8.12", "libdwarf^libelf@0.8.13"], {"libdwarf": 2, "libelf": 2}),
            (["hdf5", "zmpi"], {"zmpi": 1, "fake": 1}),
            (["hdf5", "mpich"], {"mpich": 1}),
            (["hdf5^zmpi", "mpich"], {"mpi": 2, "mpich": 1, "zmpi": 1, "fake": 1}),
            (["mpi", "zmpi"], {"mpi": 1, "mpich": 0, "zmpi": 1, "fake": 1}),
            (["mpi", "mpich"], {"mpi": 1, "mpich": 1, "zmpi": 0}),
        ],
    )
    def test_best_effort_coconcretize(self, specs, checks):
        """Solving in rounds (without reuse) must produce, across all rounds, the
        expected number of concrete specs matching each constraint in ``checks``."""
        specs = [Spec(s) for s in specs]
        solver = spack.solver.asp.Solver()
        solver.reuse = False
        # Collect every node of every solved DAG across all rounds
        concrete_specs = set()
        for result in solver.solve_in_rounds(specs):
            for s in result.specs:
                concrete_specs.update(s.traverse())
        for matching_spec, expected_count in checks.items():
            matches = [x for x in concrete_specs if x.satisfies(matching_spec)]
            assert len(matches) == expected_count
    @pytest.mark.parametrize(
        "specs,expected_spec,occurances",
        [
            # The algorithm is greedy, and it might decide to solve the "best"
            # spec early in which case reuse is suboptimal. In this case the most
            # recent version of libdwarf is selected and concretized to libelf@0.8.13
            (
                [
                    "libdwarf@20111030^libelf@0.8.10",
                    "libdwarf@20130207^libelf@0.8.12",
                    "libdwarf@20130729",
                ],
                "libelf@0.8.12",
                1,
            ),
            # Check we reuse the best libelf in the environment
            (
                [
                    "libdwarf@20130729^libelf@0.8.10",
                    "libdwarf@20130207^libelf@0.8.12",
                    "libdwarf@20111030",
                ],
                "libelf@0.8.12",
                2,
            ),
            (["libdwarf@20130729", "libdwarf@20130207", "libdwarf@20111030"], "libelf@0.8.13", 3),
            # We need to solve in 2 rounds and we expect mpich to be preferred to zmpi
            (["hdf5+mpi", "zmpi", "mpich"], "mpich", 2),
        ],
    )
    def test_best_effort_coconcretize_preferences(self, specs, expected_spec, occurances):
        """Test package preferences during coconcretization."""
        specs = [Spec(s) for s in specs]
        solver = spack.solver.asp.Solver()
        solver.reuse = False
        concrete_specs = {}
        for result in solver.solve_in_rounds(specs):
            concrete_specs.update(result.specs_by_input)
        # Count how many of the solved roots contain the expected dependency
        counter = 0
        for spec in concrete_specs.values():
            if expected_spec in spec:
                counter += 1
        assert counter == occurances, concrete_specs
    def test_solve_in_rounds_all_unsolved(self, monkeypatch, mock_packages):
        """If every input remains unsolved after a round, ``solve_in_rounds`` must raise
        instead of looping forever."""
        specs = [Spec(x) for x in ["libdwarf%gcc", "libdwarf%clang"]]
        solver = spack.solver.asp.Solver()
        solver.reuse = False
        # Forge a Result that claims all inputs are still unsolved and nothing was solved
        simulate_unsolved_property = list((x, None) for x in specs)
        monkeypatch.setattr(spack.solver.asp.Result, "unsolved_specs", simulate_unsolved_property)
        monkeypatch.setattr(spack.solver.asp.Result, "specs", list())
        with pytest.raises(spack.solver.asp.OutputDoesNotSatisfyInputError):
            list(solver.solve_in_rounds(specs))
    def test_coconcretize_reuse_and_virtuals(self):
        """When co-concretizing with reuse, the zmpi provider (an mpi virtual) must end
        up in every solved spec."""
        # Make two full DAGs (one using mpich, one using zmpi) available for reuse
        reusable_specs = []
        for s in ["mpileaks ^mpich", "zmpi"]:
            reusable_specs.extend(spack.concretize.concretize_one(s).traverse(root=True))
        root_specs = [Spec("mpileaks"), Spec("zmpi")]
        with spack.config.override("concretizer:reuse", True):
            solver = spack.solver.asp.Solver()
            setup = spack.solver.asp.SpackSolverSetup()
            result, _, _ = solver.driver.solve(setup, root_specs, reuse=reusable_specs)
        for spec in result.specs:
            assert "zmpi" in spec
    @pytest.mark.regression("30864")
    def test_misleading_error_message_on_version(self, mutable_database):
        # For this bug to be triggered we need a reusable dependency
        # that is not optimal in terms of optimization scores.
        # We pick an old version of "b"
        reusable_specs = [spack.concretize.concretize_one("non-existing-conditional-dep@1.0")]
        root_spec = Spec("non-existing-conditional-dep@2.0")
        with spack.config.override("concretizer:reuse", True):
            solver = spack.solver.asp.Solver()
            setup = spack.solver.asp.SpackSolverSetup()
            # The error must clearly state unsatisfiability rather than a misleading
            # message about versions
            with pytest.raises(spack.solver.asp.UnsatisfiableSpecError, match="Cannot satisfy"):
                solver.driver.solve(setup, [root_spec], reuse=reusable_specs)
    @pytest.mark.regression("31148")
    def test_version_weight_and_provenance(self, mutable_config):
        """Test package preferences during concretization.

        The newest reusable version (pkg-b@1.0) must be selected, and the "version
        badness" criterion must only penalize reused (concrete) specs, not built ones.
        """
        reusable_specs = [
            spack.concretize.concretize_one(spec_str) for spec_str in ("pkg-b@0.9", "pkg-b@1.0")
        ]
        root_spec = Spec("pkg-a foobar=bar")
        external_specs = SpecFilter.from_packages_yaml(
            mutable_config, include=[], exclude=[]
        ).selected_specs()
        with spack.config.override("concretizer:reuse", True):
            solver = spack.solver.asp.Solver()
            setup = spack.solver.asp.SpackSolverSetup()
            result, _, _ = solver.driver.solve(
                setup, [root_spec], reuse=reusable_specs + external_specs
            )
            # Version badness should be > 0 only for reused specs. For instance, for pkg-b
            # the version provenance is:
            #
            # pkg_fact("pkg-b", version_declared("1.0", 0)).
            # pkg_fact("pkg-b", version_origin("1.0", "installed")).
            # pkg_fact("pkg-b", version_origin("1.0", "package_py")).
            # pkg_fact("pkg-b", version_declared("0.9", 1)).
            # pkg_fact("pkg-b", version_origin("0.9", "installed")).
            # pkg_fact("pkg-b", version_origin("0.9", "package_py")).
            weights = {}
            for x in [x for x in result.criteria if x.name == "version badness (non roots)"]:
                if x.kind == spack.solver.asp.OptimizationKind.CONCRETE:
                    weights["reused"] = x.value
                else:
                    weights["built"] = x.value
            assert weights["reused"] == 1 and weights["built"] == 0
            result_spec = result.specs[0]
            assert result_spec.satisfies("^pkg-b@1.0")
            assert result_spec["pkg-b"].dag_hash() == reusable_specs[1].dag_hash()
    @pytest.mark.regression("51267")
    @pytest.mark.parametrize(
        "packages_config,expected",
        [
            # Two preferences on different virtuals
            (
                """
                packages:
                  c:
                    prefer:
                    - clang
                  mpi:
                    prefer:
                    - mpich2
                """,
                [
                    'provider_weight_from_config("mpi","mpich2",0).',
                    'provider_weight_from_config("c","clang",0).',
                ],
            ),
            # A requirement and a preference on the same virtual
            (
                """
                packages:
                  c:
                    require:
                    - gcc
                    prefer:
                    - clang
                """,
                [
                    'provider_weight_from_config("c","gcc",0).',
                    'provider_weight_from_config("c","clang",1).',
                ],
            ),
            (
                """
                packages:
                  c:
                    require:
                    - clang
                    prefer:
                    - gcc
                """,
                [
                    'provider_weight_from_config("c","gcc",1).',
                    'provider_weight_from_config("c","clang",0).',
                ],
            ),
            # Multiple requirements with priorities
            (
                """
                packages:
                  all:
                    providers:
                      mpi: [low-priority-mpi]
                  mpi:
                    require:
                    - any_of: [mpich2, zmpi]
                    prefer:
                    - mpich
                """,
                [
                    'provider_weight_from_config("mpi","mpich2",0).',
                    'provider_weight_from_config("mpi","zmpi",1).',
                    'provider_weight_from_config("mpi","mpich",2).',
                    'provider_weight_from_config("mpi","low-priority-mpi",3).',
                ],
            ),
            # Configuration with conflicts
            (
                """
                packages:
                  all:
                    providers:
                      mpi: [mpich, low-priority-mpi]
                  mpi:
                    require:
                    - mpich2
                    conflict:
                    - mpich
                """,
                [
                    'provider_weight_from_config("mpi","mpich2",0).',
                    'provider_weight_from_config("mpi","low-priority-mpi",1).',
                ],
            ),
            (
                """
                packages:
                  all:
                    providers:
                      mpi: [mpich, low-priority-mpi]
                  mpi:
                    require:
                    - mpich2
                    conflict:
                    - mpich@1
                """,
                [
                    'provider_weight_from_config("mpi","mpich2",0).',
                    'provider_weight_from_config("mpi","mpich",1).',
                    'provider_weight_from_config("mpi","low-priority-mpi",2).',
                ],
            ),
        ],
    )
    def test_requirements_and_weights(self, packages_config, expected, mutable_config):
        """Checks that requirements and strong preferences on virtual packages influence the
        weights for providers, even if "package preferences" are not set consistently.
        """
        packages_yaml = syaml.load_config(packages_config)
        mutable_config.set("packages", packages_yaml["packages"])
        setup = spack.solver.asp.SpackSolverSetup()
        # Build the ASP program and check the expected provider-weight facts are emitted
        asp_problem = setup.setup([Spec("mpileaks")], reuse=[], allow_deprecated=False).asp_problem
        assert all(x in asp_problem for x in expected)
    def test_reuse_succeeds_with_config_compatible_os(self):
        """A spec built for an OS declared compatible via ``concretizer:os_compatible``
        must be reusable when solving for the current OS."""
        root_spec = Spec("pkg-b")
        s = spack.concretize.concretize_one(root_spec)
        # Forge a copy of the spec that pretends to be built on another OS
        other_os = s.copy()
        mock_os = "ubuntu2204"
        other_os.architecture = spack.spec.ArchSpec(
            "test-{os}-{target}".format(os=mock_os, target=str(s.architecture.target))
        )
        reusable_specs = [other_os]
        # Declare mock_os compatible with the current OS, and enable reuse
        overrides = {"concretizer": {"reuse": True, "os_compatible": {s.os: [mock_os]}}}
        custom_scope = spack.config.InternalConfigScope("concretize_override", overrides)
        with spack.config.override(custom_scope):
            solver = spack.solver.asp.Solver()
            setup = spack.solver.asp.SpackSolverSetup()
            result, _, _ = solver.driver.solve(setup, [root_spec], reuse=reusable_specs)
        concrete_spec = result.specs[0]
        assert concrete_spec.satisfies("os={}".format(other_os.architecture.os))
def test_git_hash_assigned_version_is_preferred(self):
hash = "a" * 40
s = Spec("develop-branch-version@%s=develop" % hash)
c = spack.concretize.concretize_one(s)
assert hash in str(c)
@pytest.mark.parametrize("git_ref", ("a" * 40, "0.2.15", "main"))
def test_git_ref_version_is_equivalent_to_specified_version(self, git_ref):
s = Spec("develop-branch-version@git.%s=develop" % git_ref)
c = spack.concretize.concretize_one(s)
assert git_ref in str(c)
assert s.satisfies("@develop")
assert s.satisfies("@0.1:")
@pytest.mark.parametrize("git_ref", ("a" * 40, "0.2.15", "fbranch"))
def test_git_ref_version_succeeds_with_unknown_version(self, git_ref):
# main is not defined in the package.py for this file
s = Spec("develop-branch-version@git.%s=main" % git_ref)
s = spack.concretize.concretize_one(s)
assert s.satisfies("develop-branch-version@main")
    @pytest.mark.regression("31484")
    def test_installed_externals_are_reused(
        self, mutable_database, repo_with_changing_recipe, tmp_path: pathlib.Path
    ):
        """Tests that external specs that are in the DB can be reused, if they result in a
        better optimization score.
        """
        external_conf = {
            "changing": {
                "buildable": False,
                "externals": [{"spec": "changing@1.0", "prefix": str(tmp_path)}],
            }
        }
        spack.config.set("packages", external_conf)
        # Install the external spec
        middle_pkg = spack.concretize.concretize_one("middle")
        PackageInstaller([middle_pkg.package], fake=True, explicit=True).install()
        assert middle_pkg["changing"].external
        changing_external = middle_pkg["changing"]
        # Modify the package.py file so a fresh concretization differs from the DB entry
        repo_with_changing_recipe.change({"delete_variant": True})
        # Try to concretize the external without reuse and confirm the hash changed
        with spack.config.override("concretizer:reuse", False):
            root_no_reuse = spack.concretize.concretize_one("root")
        assert root_no_reuse["changing"].dag_hash() != changing_external.dag_hash()
        # ... while with reuse we have the same hash
        with spack.config.override("concretizer:reuse", True):
            root_with_reuse = spack.concretize.concretize_one("root")
        assert root_with_reuse["changing"].dag_hash() == changing_external.dag_hash()
    @pytest.mark.regression("31484")
    def test_user_can_select_externals_with_require(
        self, mutable_database, tmp_path: pathlib.Path
    ):
        """Test that users have means to select an external even in presence of reusable specs."""
        external_conf: Dict[str, Any] = {
            "mpi": {"buildable": False},
            "multi-provider-mpi": {
                "externals": [{"spec": "multi-provider-mpi@2.0.0", "prefix": str(tmp_path)}]
            },
        }
        spack.config.set("packages", external_conf)
        # mpich and others are installed, so check that
        # fresh use the external, reuse does not
        with spack.config.override("concretizer:reuse", False):
            mpi_spec = spack.concretize.concretize_one("mpi")
            assert mpi_spec.name == "multi-provider-mpi"
        with spack.config.override("concretizer:reuse", True):
            mpi_spec = spack.concretize.concretize_one("mpi")
            assert mpi_spec.name != "multi-provider-mpi"
        # An explicit requirement on the virtual must win over reuse
        external_conf["mpi"]["require"] = "multi-provider-mpi"
        spack.config.set("packages", external_conf)
        with spack.config.override("concretizer:reuse", True):
            mpi_spec = spack.concretize.concretize_one("mpi")
            assert mpi_spec.name == "multi-provider-mpi"
    @pytest.mark.regression("31484")
    def test_installed_specs_disregard_conflicts(self, mutable_database, monkeypatch):
        """Test that installed specs do not trigger conflicts. This covers the rare case
        where a conflict is added to a package after a spec matching the conflict was
        installed.
        """
        # Add a conflict to "mpich" that match an already installed "mpich~debug"
        pkg_cls = spack.repo.PATH.get_pkg_class("mpich")
        monkeypatch.setitem(pkg_cls.conflicts, Spec(), [(Spec("~debug"), None)])
        # If we concretize with --fresh the conflict is taken into account
        with spack.config.override("concretizer:reuse", False):
            s = spack.concretize.concretize_one("mpich")
            assert s.satisfies("+debug")
        # If we concretize with --reuse it is not, since "mpich~debug" was already installed
        with spack.config.override("concretizer:reuse", True):
            s = spack.concretize.concretize_one("mpich")
            assert s.installed
            assert s.satisfies("~debug"), s
@pytest.mark.regression("32471")
def test_require_targets_are_allowed(self, mutable_config, mutable_database):
"""Test that users can set target constraints under the require attribute."""
# Configuration to be added to packages.yaml
required_target = spack.vendor.archspec.cpu.TARGETS[
spack.platforms.test.Test.default
].family
external_conf = {"all": {"require": f"target={required_target}"}}
mutable_config.set("packages", external_conf)
with spack.config.override("concretizer:reuse", False):
spec = spack.concretize.concretize_one("mpich")
for s in spec.traverse(deptype=("link", "run")):
assert s.satisfies(f"target={required_target}")
target = spack.platforms.test.Test.default
    def test_external_python_extension_find_dependency_from_config(self, mutable_config, tmp_path):
        """Tests that an external Python extension gets a dependency on Python."""
        # Both the extension and python itself are declared external; the extension's
        # concrete spec must still carry an edge to the external python
        packages_yaml = f"""
        packages:
          py-extension1:
            buildable: false
            externals:
            - spec: py-extension1@2.0
              prefix: {tmp_path / "py-extension1"}
          python:
            externals:
            - spec: python@3.8.13
              prefix: {tmp_path / "python"}
        """
        configuration = syaml.load_config(packages_yaml)
        mutable_config.set("packages", configuration["packages"])
        py_extension = spack.concretize.concretize_one("py-extension1")
        assert py_extension.external
        assert py_extension["python"].external
        assert py_extension["python"].prefix == str(tmp_path / "python")
    @pytest.mark.regression("36190")
    @pytest.mark.parametrize(
        "specs",
        [
            ["mpileaks^ callpath ^dyninst@8.1.1:8 ^mpich2@1.3:1"],
            ["multivalue-variant ^pkg-a@2:2"],
            ["v1-consumer ^conditional-provider@1:1 +disable-v1"],
        ],
    )
    def test_result_specs_is_not_empty(self, mutable_config, specs):
        """Check that the implementation of "result.specs" is correct in cases where we
        know a concretization exists.
        """
        specs = [Spec(s) for s in specs]
        external_specs = SpecFilter.from_packages_yaml(
            mutable_config, include=[], exclude=[]
        ).selected_specs()
        solver = spack.solver.asp.Solver()
        setup = spack.solver.asp.SpackSolverSetup()
        result, _, _ = solver.driver.solve(setup, specs, reuse=external_specs)
        # A satisfiable input must yield at least one concrete spec in the result
        assert result.specs
    @pytest.mark.regression("38664")
    def test_unsolved_specs_raises_error(self, monkeypatch, mock_packages):
        """Check that the solver raises an exception when input specs are not
        satisfied.
        """
        specs = [Spec("zlib")]
        solver = spack.solver.asp.Solver()
        setup = spack.solver.asp.SpackSolverSetup()
        # Pretend the solve completed but left every input unsolved
        simulate_unsolved_property = list((x, None) for x in specs)
        monkeypatch.setattr(spack.solver.asp.Result, "unsolved_specs", simulate_unsolved_property)
        with pytest.raises(
            spack.solver.asp.InternalConcretizerError,
            match="the solver completed but produced specs",
        ):
            solver.driver.solve(setup, specs, reuse=[])
    @pytest.mark.regression("43141")
    @pytest.mark.parametrize(
        "spec_str,expected_match",
        [
            # A package does not exist
            ("pkg-a ^foo", "since 'foo' does not exist"),
            # Request a compiler for a package that doesn't need it
            ("pkg-c %gcc", "cannot depend on gcc"),
        ],
    )
    def test_errors_on_statically_checked_preconditions(self, spec_str, expected_match):
        """Tests that the solver can report a case where the compiler cannot be set"""
        # These preconditions are rejected before the full solve, with a clear message
        with pytest.raises(spack.error.UnsatisfiableSpecError, match=expected_match):
            spack.concretize.concretize_one(spec_str)
    @pytest.mark.regression("36339")
    @pytest.mark.parametrize(
        "compiler_str,expected",
        [
            ("gcc@:9", "@=9.4.0"),
            ("gcc@:10", "@=10.2.1"),
            ("gcc@10", "@=10.2.1"),
            ("gcc@10:", "@=10.2.1"),
        ],
    )
    def test_compiler_match_constraints_when_selected(self, compiler_str, expected):
        """Test that, when multiple compilers with the same name are in the configuration
        we ensure that the selected one matches all the required constraints.
        """
        s = spack.concretize.concretize_one(f"pkg-a %{compiler_str}")
        # The chosen gcc node must satisfy the version constraint from the request
        assert s["gcc"].satisfies(expected)
@pytest.mark.parametrize("spec_str", ["mpileaks", "mpileaks ^mpich"])
def test_virtuals_are_annotated_on_edges(self, spec_str):
"""Tests that information on virtuals is annotated on DAG edges"""
spec = spack.concretize.concretize_one(spec_str)
mpi_provider = spec["mpi"].name
edges = spec.edges_to_dependencies(name=mpi_provider)
assert len(edges) == 1 and edges[0].virtuals == ("mpi",)
edges = spec.edges_to_dependencies(name="callpath")
assert len(edges) == 1 and edges[0].virtuals == ()
    @pytest.mark.parametrize("transitive", [True, False])
    def test_explicit_splices(
        self, mutable_config, database_mutable_config, mock_packages, transitive, capfd
    ):
        """An explicit splice config must replace the mpi provider (zmpi) with the
        configured installed mpich, keep the original build spec, and warn the user."""
        mpich_spec = database_mutable_config.query("mpich")[0]
        splice_info = {
            "target": "mpi",
            "replacement": f"/{mpich_spec.dag_hash()}",
            "transitive": transitive,
        }
        spack.config.CONFIG.set("concretizer", {"splice": {"explicit": [splice_info]}})
        spec = spack.concretize.concretize_one("hdf5 ^zmpi")
        # The spliced spec links against mpich, while its build spec still records zmpi
        assert spec.satisfies(f"^mpich@{mpich_spec.version}")
        assert spec.build_spec.dependencies(name="zmpi", deptype="link")
        assert spec["mpi"].build_spec.satisfies(mpich_spec)
        assert not spec.build_spec.satisfies(f"^mpich/{mpich_spec.dag_hash()}")
        assert not spec.dependencies(name="zmpi", deptype="link")
        # Splicing must be surfaced to the user as a warning on stderr
        captured = capfd.readouterr()
        assert "Warning: explicit splice configuration has caused" in captured.err
        assert "hdf5 ^zmpi" in captured.err
        assert str(spec) in captured.err
def test_explicit_splice_fails_nonexistent(mutable_config, mock_packages, mock_store):
splice_info = {"target": "mpi", "replacement": "mpich/doesnotexist"}
spack.config.CONFIG.set("concretizer", {"splice": {"explicit": [splice_info]}})
with pytest.raises(spack.spec.InvalidHashError):
_ = spack.concretize.concretize_one("hdf5^zmpi")
def test_explicit_splice_fails_no_hash(mutable_config, mock_packages, mock_store):
splice_info = {"target": "mpi", "replacement": "mpich"}
spack.config.CONFIG.set("concretizer", {"splice": {"explicit": [splice_info]}})
with pytest.raises(spack.solver.asp.InvalidSpliceError, match="must be specified by hash"):
_ = spack.concretize.concretize_one("hdf5^zmpi")
def test_explicit_splice_non_match_nonexistent_succeeds(
mutable_config, mock_packages, mock_store
):
"""When we have a nonexistent splice configured but are not using it, don't fail."""
splice_info = {"target": "will_not_match", "replacement": "nonexistent/doesnotexist"}
spack.config.CONFIG.set("concretizer", {"splice": {"explicit": [splice_info]}})
spec = spack.concretize.concretize_one("zlib")
# the main test is that it does not raise
assert not spec.spliced
    @pytest.mark.db
    @pytest.mark.parametrize(
        "spec_str,mpi_name",
        [("mpileaks", "mpich"), ("mpileaks ^mpich2", "mpich2"), ("mpileaks ^zmpi", "zmpi")],
    )
    def test_virtuals_are_reconstructed_on_reuse(self, spec_str, mpi_name, mutable_database):
        """Tests that when we reuse a spec, virtual on edges are reconstructed correctly"""
        with spack.config.override("concretizer:reuse", True):
            spec = spack.concretize.concretize_one(spec_str)
            assert spec.installed
        # The reused DAG must still annotate the "mpi" virtual on the provider edge
        mpi_edges = spec.edges_to_dependencies(mpi_name)
        assert len(mpi_edges) == 1
        assert "mpi" in mpi_edges[0].virtuals
    def test_dont_define_new_version_from_input_if_checksum_required(self, working_env):
        """With checksums required, the concretizer must refuse to invent an undeclared
        (and thus unchecksummed) version from the input spec."""
        os.environ["SPACK_CONCRETIZER_REQUIRE_CHECKSUM"] = "yes"
        with pytest.raises(spack.error.UnsatisfiableSpecError):
            # normally spack concretizes to @=3.0 if it's not defined in package.py, except
            # when checksums are required
            spack.concretize.concretize_one("pkg-a@=3.0")
    @pytest.mark.regression("39570")
    @pytest.mark.db
    def test_reuse_python_from_cli_and_extension_from_db(self, mutable_database):
        """Tests that reusing python with and explicit request on the command line, when the spec
        also reuses a python extension from the DB, doesn't fail.
        """
        s = spack.concretize.concretize_one("py-extension1")
        python_hash = s["python"].dag_hash()
        PackageInstaller([s.package], fake=True, explicit=True).install()
        # Request python explicitly by hash while reusing the extension from the DB
        with spack.config.override("concretizer:reuse", True):
            with_reuse = spack.concretize.concretize_one(f"py-extension2 ^/{python_hash}")
        with spack.config.override("concretizer:reuse", False):
            without_reuse = spack.concretize.concretize_one("py-extension2")
        # Reusing by hash must not change the solution
        assert with_reuse.dag_hash() == without_reuse.dag_hash()
    @pytest.mark.regression("35536")
    @pytest.mark.parametrize(
        "spec_str,expected_namespaces",
        [
            # Single node with fully qualified namespace
            ("builtin_mock.gmake", {"gmake": "builtin_mock"}),
            # Dependency with fully qualified namespace
            ("hdf5 ^builtin_mock.gmake", {"gmake": "builtin_mock", "hdf5": "duplicates_test"}),
            ("hdf5 ^gmake", {"gmake": "duplicates_test", "hdf5": "duplicates_test"}),
        ],
    )
    def test_select_lower_priority_package_from_repository_stack(
        self, spec_str, expected_namespaces
    ):
        """Tests that a user can explicitly select a lower priority, fully qualified dependency
        from cli.
        """
        # 'builtin_mock" and "duplicates_test" share a 'gmake' package
        additional_repo = os.path.join(
            spack.paths.test_repos_path, "spack_repo", "duplicates_test"
        )
        with spack.repo.use_repositories(additional_repo, override=False):
            s = spack.concretize.concretize_one(spec_str)
        # Every node must be concrete and carry the expected namespace
        for name, namespace in expected_namespaces.items():
            assert s[name].concrete
            assert s[name].namespace == namespace
    def test_reuse_specs_from_non_available_compilers(self, mutable_config, mutable_database):
        """Tests that we can reuse specs with compilers that are not configured locally."""
        # All the specs in the mutable DB have been compiled with %gcc@10.2.1
        mpileaks = [s for s in mutable_database.query_local() if s.name == "mpileaks"]
        # Remove gcc@10.2.1 from the local compiler configuration
        remover = spack.compilers.config.CompilerRemover(mutable_config)
        remover.mark_compilers(match="gcc@=10.2.1")
        remover.flush()
        mutable_config.set("concretizer:reuse", True)
        # mpileaks is in the database, it will be reused with gcc@=10.2.1
        root = spack.concretize.concretize_one("mpileaks")
        assert root.satisfies("%gcc@10.2.1")
        assert any(root.dag_hash() == x.dag_hash() for x in mpileaks)
        # fftw is not in the database, therefore it will be compiled with gcc@=9.4.0
        root = spack.concretize.concretize_one("fftw~mpi")
        assert root.satisfies("%gcc@9.4.0")
@pytest.mark.regression("43406")
def test_externals_with_platform_explicitly_set(self, tmp_path: pathlib.Path):
"""Tests that users can specify platform=xxx in an external spec"""
external_conf = {
"mpich": {
"buildable": False,
"externals": [{"spec": "mpich@=2.0.0 platform=test", "prefix": str(tmp_path)}],
}
}
spack.config.set("packages", external_conf)
s = spack.concretize.concretize_one("mpich")
assert s.external
    @pytest.mark.regression("43267")
    def test_spec_with_build_dep_from_json(self, tmp_path: pathlib.Path):
        """Tests that we can correctly concretize a spec, when we express its dependency as a
        concrete spec to be read from JSON.

        The bug was triggered by missing virtuals on edges that were trimmed from pure build
        dependencies.
        """
        build_dep = spack.concretize.concretize_one("dttop")
        json_file = tmp_path / "build.json"
        json_file.write_text(build_dep.to_json())
        # Reference the on-disk concrete spec directly from the input
        s = spack.concretize.concretize_one(f"dtuse ^{str(json_file)}")
        assert s["dttop"].dag_hash() == build_dep.dag_hash()
    @pytest.mark.regression("44040")
    def test_exclude_specs_from_reuse(self, monkeypatch):
        r"""Tests that we can exclude a spec from reuse when concretizing, and that the spec
        is not added back to the solve as a dependency of another reusable spec.
        The expected spec is:
         o callpath@1.0
        |\
        o | mpich@3.0.4
        |\ \
        | |\ \
        | | | o dyninst@8.2
        | |_|/|
        |/| |/|
        | |/|/|
        | | | |\
        | | | | o libdwarf@20130729
        | |_|_|/|
        |/| |_|/|
        | |/| |/|
        | | |/|/
        | | | o libelf@0.8.13
        | |_|/|
        |/| |/|
        | |/|/
        | o | gcc-runtime@10.5.0
        |/| |
        | |/
        o | glibc@2.31
         /
        o gcc@10.5.0
        """
        # Prepare a mock mirror that returns an old version of dyninst
        request_str = "callpath ^mpich"
        reused = spack.concretize.concretize_one(f"{request_str} ^dyninst@8.1.1")
        monkeypatch.setattr(spack.solver.reuse, "_specs_from_mirror", lambda: [reused])
        # Exclude dyninst from reuse, so we expect that the old version is not taken into account
        with spack.config.override(
            "concretizer:reuse",
            {"from": [{"type": "buildcache", "exclude": ["dyninst"]}, {"type": "external"}]},
        ):
            result = spack.concretize.concretize_one(request_str)
        assert result.dag_hash() != reused.dag_hash()
        assert result["mpich"].dag_hash() == reused["mpich"].dag_hash()
        assert result["dyninst"].dag_hash() != reused["dyninst"].dag_hash()
        assert result["dyninst"].satisfies("@=8.2")
        # Everything below the rebuilt dyninst is still reused node-for-node
        for dep in result["dyninst"].traverse(root=False):
            assert dep.dag_hash() == reused[dep.name].dag_hash()
    @pytest.mark.regression("44091")
    @pytest.mark.parametrize(
        "included_externals",
        [
            ["deprecated-versions"],
            # Try the empty list, to ensure that in that case everything will be included
            # since filtering should happen only when the list is non-empty
            [],
        ],
    )
    def test_include_specs_from_externals_and_libcs(
        self, included_externals, mutable_config, tmp_path: pathlib.Path
    ):
        """Tests that when we include specs from externals, we always include libcs."""
        mutable_config.set(
            "packages",
            {
                "deprecated-versions": {
                    "externals": [{"spec": "deprecated-versions@1.1.0", "prefix": str(tmp_path)}]
                }
            },
        )
        request_str = "deprecated-client"
        # When using the external the version is selected even if deprecated
        with spack.config.override(
            "concretizer:reuse", {"from": [{"type": "external", "include": included_externals}]}
        ):
            result = spack.concretize.concretize_one(request_str)
        assert result["deprecated-versions"].satisfies("@1.1.0")
        # When excluding it, we pick the non-deprecated version
        with spack.config.override(
            "concretizer:reuse",
            {"from": [{"type": "external", "exclude": ["deprecated-versions"]}]},
        ):
            result = spack.concretize.concretize_one(request_str)
        assert result["deprecated-versions"].satisfies("@1.0.0")
    @pytest.mark.regression("44085")
    def test_can_reuse_concrete_externals_for_dependents(self, mutable_config):
        """Test that external specs that are in the DB can be reused. This means they are
        preferred to concretizing another external from packages.yaml
        """
        packages_yaml = {
            "externaltool": {"externals": [{"spec": "externaltool@0.9", "prefix": "/fake/path"}]}
        }
        mutable_config.set("packages", packages_yaml)
        # Concretize with v0.9 to get a suboptimal spec, since we have gcc@10 available
        external_spec = spack.concretize.concretize_one("externaltool@0.9")
        assert external_spec.external
        root_specs = [Spec("sombrero")]
        with spack.config.override("concretizer:reuse", True):
            solver = spack.solver.asp.Solver()
            setup = spack.solver.asp.SpackSolverSetup()
            result, _, _ = solver.driver.solve(setup, root_specs, reuse=[external_spec])
        assert len(result.specs) == 1
        sombrero = result.specs[0]
        # The dependent must reuse the exact external node (same hash), not a new one
        assert sombrero["externaltool"].dag_hash() == external_spec.dag_hash()
def test_cannot_reuse_host_incompatible_libc(self):
"""Test whether reuse concretization correctly fails to reuse a spec with a host
incompatible libc."""
if not spack.solver.core.using_libc_compatibility():
pytest.skip("This test requires libc nodes")
# We install b@1 ^glibc@2.30, and b@0 ^glibc@2.28. The former is not host compatible, the
# latter is.
fst = spack.concretize.concretize_one("pkg-b@1")
fst._mark_concrete(False)
fst.dependencies("glibc")[0].versions = VersionList(["=2.30"])
fst._mark_concrete(True)
snd = spack.concretize.concretize_one("pkg-b@0")
# The spec b@1 ^glibc@2.30 is "more optimal" than b@0 ^glibc@2.28, but due to glibc
# incompatibility, it should not be reused.
solver = spack.solver.asp.Solver()
setup = spack.solver.asp.SpackSolverSetup()
result, _, _ = solver.driver.solve(setup, [Spec("pkg-b")], reuse=[fst, snd])
assert len(result.specs) == 1
assert result.specs[0] == snd
@pytest.mark.regression("45321")
@pytest.mark.parametrize(
"corrupted_str",
[
"cmake@3.4.3 foo=bar", # cmake has no variant "foo"
"mvdefaults@1.0 foo=a,d", # variant "foo" has no value "d"
"cmake %gcc", # spec has no version
],
)
def test_corrupted_external_does_not_halt_concretization(self, corrupted_str, mutable_config):
"""Tests that having a wrong variant in an external spec doesn't stop concretization"""
corrupted_spec = Spec(corrupted_str)
packages_yaml = {
f"{corrupted_spec.name}": {
"externals": [{"spec": corrupted_str, "prefix": "/dev/null"}]
}
}
mutable_config.set("packages", packages_yaml)
# Assert we don't raise due to the corrupted external entry above
s = spack.concretize.concretize_one("pkg-a")
assert s.concrete
@pytest.mark.regression("44828")
@pytest.mark.not_on_windows("Tests use linux paths")
def test_correct_external_is_selected_from_packages_yaml(self, mutable_config):
"""Tests that when filtering external specs, the correct external is selected to
reconstruct the prefix, and other external attributes.
"""
packages_yaml = {
"mpileaks": {
"externals": [
{"spec": "mpileaks@2.3 +opt", "prefix": "/tmp/prefix1"},
{"spec": "mpileaks@2.3 ~opt", "prefix": "/tmp/prefix2"},
]
}
}
concretizer_yaml = {
"reuse": {"roots": True, "from": [{"type": "external", "exclude": ["+opt"]}]}
}
mutable_config.set("packages", packages_yaml)
mutable_config.set("concretizer", concretizer_yaml)
s = spack.concretize.concretize_one("mpileaks")
# Check that we got the properties from the right external
assert s.external
assert s.satisfies("~opt")
assert s.prefix == "/tmp/prefix2"
def test_git_based_version_must_exist_to_use_ref(self):
# gmake should fail, only has sha256
with pytest.raises(spack.error.UnsatisfiableSpecError) as e:
spack.concretize.concretize_one(f"gmake commit={'a' * 40}")
assert "Cannot use commit variant with" in e.value.message
@pytest.fixture()
def duplicates_test_repository():
repository_path = os.path.join(spack.paths.test_repos_path, "spack_repo", "duplicates_test")
with spack.repo.use_repositories(repository_path) as mock_repo:
yield mock_repo
@pytest.mark.usefixtures("mutable_config", "duplicates_test_repository")
| TestConcretize |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/tests/config.py | {
"start": 1457,
"end": 4017
} | class ____(object):
def __init__(self):
self.values = {}
self.meta = {}
self.FLAGS = NameSpace(self.read)
self.use_absl = False
def update(self, name, val):
if self.use_absl:
setattr(self.absl_flags.FLAGS, name, val)
else:
self.check_exists(name)
if name not in self.values:
raise Exception("Unrecognized config option: {}".format(name))
self.values[name] = val
def read(self, name):
if self.use_absl:
return getattr(self.absl_flags.FLAGS, name)
else:
self.check_exists(name)
return self.values[name]
def add_option(self, name, default, opt_type, meta_args, meta_kwargs):
if name in self.values:
raise Exception("Config option {} already defined".format(name))
self.values[name] = default
self.meta[name] = (opt_type, meta_args, meta_kwargs)
def check_exists(self, name):
if name not in self.values:
raise Exception("Unrecognized config option: {}".format(name))
def DEFINE_bool(self, name, default, *args, **kwargs):
self.add_option(name, default, bool, args, kwargs)
def DEFINE_integer(self, name, default, *args, **kwargs):
self.add_option(name, default, int, args, kwargs)
def DEFINE_string(self, name, default, *args, **kwargs):
self.add_option(name, default, str, args, kwargs)
def DEFINE_enum(self, name, default, *args, **kwargs):
self.add_option(name, default, 'enum', args, kwargs)
def config_with_absl(self):
# Run this before calling `app.run(main)` etc
import absl.flags as absl_FLAGS
from absl import app, flags as absl_flags
self.use_absl = True
self.absl_flags = absl_flags
absl_defs = { bool: absl_flags.DEFINE_bool,
int: absl_flags.DEFINE_integer,
str: absl_flags.DEFINE_string,
'enum': absl_flags.DEFINE_enum }
for name, val in self.values.items():
flag_type, meta_args, meta_kwargs = self.meta[name]
absl_defs[flag_type](name, val, *meta_args, **meta_kwargs)
app.call_after_init(lambda: self.complete_absl_config(absl_flags))
def complete_absl_config(self, absl_flags):
for name, _ in self.values.items():
self.update(name, getattr(absl_flags.FLAGS, name))
def parse_flags_with_absl(self):
global already_configured_with_absl
if not already_configured_with_absl:
import absl.flags
self.config_with_absl()
absl.flags.FLAGS(sys.argv, known_only=True)
self.complete_absl_config(absl.flags)
already_configured_with_absl = True
| Config |
python | pypa__packaging | src/packaging/pylock.py | {
"start": 8402,
"end": 9812
} | class ____:
type: str
url: str | None = None
path: str | None = None
requested_revision: str | None = None
commit_id: str # type: ignore[misc]
subdirectory: str | None = None
def __init__(
self,
*,
type: str,
url: str | None = None,
path: str | None = None,
requested_revision: str | None = None,
commit_id: str,
subdirectory: str | None = None,
) -> None:
# In Python 3.10+ make dataclass kw_only=True and remove __init__
object.__setattr__(self, "type", type)
object.__setattr__(self, "url", url)
object.__setattr__(self, "path", path)
object.__setattr__(self, "requested_revision", requested_revision)
object.__setattr__(self, "commit_id", commit_id)
object.__setattr__(self, "subdirectory", subdirectory)
@classmethod
def _from_dict(cls, d: Mapping[str, Any]) -> Self:
package_vcs = cls(
type=_get_required(d, str, "type"),
url=_get(d, str, "url"),
path=_get(d, str, "path"),
requested_revision=_get(d, str, "requested-revision"),
commit_id=_get_required(d, str, "commit-id"),
subdirectory=_get(d, str, "subdirectory"),
)
_validate_path_url(package_vcs.path, package_vcs.url)
return package_vcs
@dataclass(frozen=True, init=False)
| PackageVcs |
python | getsentry__sentry | tests/sentry/charts/test_chartcuterie.py | {
"start": 460,
"end": 4398
} | class ____(TestCase):
def test_enabled(self) -> None:
assert not charts.is_enabled()
with self.options({"chart-rendering.enabled": True}):
assert charts.is_enabled()
@responses.activate
@patch("sentry.charts.chartcuterie.uuid4")
def test_simple(self, mock_uuid: MagicMock) -> None:
mock_uuid.return_value = self.get_mock_uuid()
chart_data = {
"seriesName": "Discover total period",
"series": [
[1616168400, [{"count": 0}]],
[1616168700, [{"count": 12}]],
[1616169000, [{"count": 13}]],
],
}
service_url = "http://chartcuterie"
image_data = b"this is png data"
responses.add(
method=responses.POST,
url=f"{service_url}/render",
status=200,
content_type="image/png",
body=image_data,
)
options = {
"chart-rendering.enabled": True,
"chart-rendering.chartcuterie": {"url": service_url},
}
# Test the image can be uploaded and we get a URL back
with self.options(options):
url = charts.generate_chart(ChartType.SLACK_DISCOVER_TOTAL_PERIOD, chart_data)
assert url == absolute_uri(reverse("sentry-serve-media", args=["abc123.png"]))
request = responses.calls[0].request
payload = json.loads(request.body)
assert payload == {
"requestId": "abc123",
"style": ChartType.SLACK_DISCOVER_TOTAL_PERIOD.value,
"data": chart_data,
}
resp = self.client.get(url)
assert close_streaming_response(resp) == image_data
@responses.activate
def test_failed(self) -> None:
chart_data = {"seriesName": "Discover total period", "series": []}
service_url = "http://chartcuterie"
responses.add(
method=responses.POST, url=f"{service_url}/render", status=500, body="Service down"
)
options = {
"chart-rendering.enabled": True,
"chart-rendering.chartcuterie": {"url": service_url},
}
with (
self.options(options),
pytest.raises(RuntimeError, match="Chartcuterie responded with 500: Service down"),
):
charts.generate_chart(ChartType.SLACK_DISCOVER_TOTAL_PERIOD, chart_data)
@responses.activate
@patch("sentry.charts.chartcuterie.uuid4")
def test_custom_size(self, mock_uuid: MagicMock) -> None:
mock_uuid.return_value = self.get_mock_uuid()
chart_data = {
"seriesName": "Discover total period",
"series": [
[1616168400, [{"count": 0}]],
[1616168700, [{"count": 12}]],
[1616169000, [{"count": 13}]],
],
}
service_url = "http://chartcuterie"
image_data = b"this is png data"
responses.add(
method=responses.POST,
url=f"{service_url}/render",
status=200,
content_type="image/png",
body=image_data,
)
options = {
"chart-rendering.enabled": True,
"chart-rendering.chartcuterie": {"url": service_url},
}
with self.options(options):
url = charts.generate_chart(
ChartType.SLACK_DISCOVER_TOTAL_PERIOD,
chart_data,
size={"width": 1000, "height": 200},
)
request = responses.calls[0].request
payload = json.loads(request.body)
assert payload == {
"requestId": "abc123",
"style": ChartType.SLACK_DISCOVER_TOTAL_PERIOD.value,
"data": chart_data,
"width": 1000,
"height": 200,
}
resp = self.client.get(url)
assert close_streaming_response(resp) == image_data
| ChartcuterieTest |
python | wandb__wandb | wandb/sdk/artifacts/_generated/run_input_artifacts.py | {
"start": 325,
"end": 417
} | class ____(GQLResult):
run: Optional[RunInputArtifactsProjectRun]
| RunInputArtifactsProject |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_collection_aliases.py | {
"start": 501,
"end": 740
} | class ____(GQLResult):
typename__: Typename[
Literal["ArtifactCollection", "ArtifactPortfolio", "ArtifactSequence"]
]
aliases: ArtifactCollectionAliasesArtifactCollectionAliases
| ArtifactCollectionAliasesArtifactCollection |
python | django__django | tests/model_indexes/models.py | {
"start": 31,
"end": 652
} | class ____(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=50)
pages = models.IntegerField(db_column="page_count")
shortcut = models.CharField(max_length=50, db_tablespace="idx_tbls")
isbn = models.CharField(max_length=50, db_tablespace="idx_tbls")
barcode = models.CharField(max_length=31)
class Meta:
indexes = [
models.Index(fields=["title"]),
models.Index(fields=["isbn", "id"]),
models.Index(
fields=["barcode"], name="%(app_label)s_%(class)s_barcode_idx"
),
]
| Book |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/tf_function_test.py | {
"start": 1835,
"end": 14775
} | class ____(trt_test.TfTrtIntegrationTestBase):
def __init__(self, methodName): # pylint: disable=invalid-name
super(TfFunctionTest, self).__init__(methodName)
self._profile_strategy = "Range"
self._trt_engine_op_count_offset = 0
self._test_conversion_params = {
"_tftrt_convert_function": True,
"_tftrt_trt_logger_name": "DefaultLogger",
"_tftrt_max_batch_size": 10,
"_tftrt_max_workspace_size_bytes":
(trt_convert.DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES),
"_tftrt_precision_mode": "FP16",
"_tftrt_minimum_segment_size": 2,
"_tftrt_is_dyn_op": True,
"_tftrt_max_cached_engines": 1,
"_tftrt_use_calibration": False,
"_tftrt_use_implicit_batch": True,
"_tftrt_profile_strategy": self._profile_strategy,
"_tftrt_allow_build_at_runtime": False
}
self._is_v2 = False
def ShouldRunTest(self, run_params):
should_run, reason_for_skipping = (
trt_test.TfTrtIntegrationTestBase.ShouldRunTest(self, run_params))
if not should_run:
return should_run, reason_for_skipping
else:
# TODO(kyungtaek): Calibration currently does not run for nodes
# nested within functions. If this gets fixed, this method should not
# override the parent method.
return (not IsQuantizationWithCalibration(run_params),
"calibration is not supported for tf.functions")
def _copy_test_attr_to_func_def(self, func_def, param_name, attr_value_type):
test_value = self._test_conversion_params[param_name]
if attr_value_type == "s":
byte_value = compat.as_bytes(test_value)
func_def.attr[param_name].CopyFrom(attr_value_pb2.AttrValue(s=byte_value))
elif attr_value_type == "b":
func_def.attr[param_name].CopyFrom(attr_value_pb2.AttrValue(b=test_value))
elif attr_value_type == "i":
func_def.attr[param_name].CopyFrom(attr_value_pb2.AttrValue(i=test_value))
else:
logging.info("Attr_value type %s is not supported", attr_value_type)
def _ChainAllNodes(self, graph_def):
return itertools.chain(
graph_def.node,
itertools.chain(
*[func.node_def for func in graph_def.library.function]))
def _VerifyTestAttrs(self, function_protos):
if self._test_conversion_params["_tftrt_convert_function"]:
for func_def in function_protos:
if not func_def.signature.name.startswith("TRTEngine"):
for key, value in self._test_conversion_params.items():
self.assertIn(key, func_def.attr,
"key %s not found in func_def.attr" % key)
if isinstance(value, str):
self.assertEqual(func_def.attr[key].s, compat.as_bytes(value))
elif isinstance(value, bool):
self.assertEqual(func_def.attr[key].b, value)
elif isinstance(value, int):
self.assertEqual(func_def.attr[key].i, value)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None, 32, 32, 2], dtype=dtypes.float32)
])
def _conv_and_pool_0(self, inp):
dtype = inp.dtype
conv_filter = constant_op.constant([[[[1., 0.5, 4.], [1., 0.5, 1.]]]],
name="weights",
dtype=dtype)
conv = nn.conv2d(
input=inp,
filter=conv_filter,
strides=[1, 2, 2, 1],
padding="SAME",
name="conv")
bias = constant_op.constant([4., 1.5, 2.], name="bias", dtype=dtype)
added = nn.bias_add(conv, bias, name="bias_add")
relu = nn.relu(added, "relu")
identity = array_ops.identity(relu, "identity")
pool = nn_ops.max_pool(
identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
return array_ops.squeeze(pool)
def GraphFn(self, x):
x = self._conv_and_pool_0(x)
return array_ops.identity(x, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[10, 32, 32, 2]],
[[10, 8, 8, 3]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_000": [
"weights", "conv", "bias", "bias_add", "relu", "identity",
"max_pool"
]
}
def _copy_test_attributes_to_func_def(self, func_def):
self._copy_test_attr_to_func_def(
func_def=func_def,
param_name="_tftrt_convert_function",
attr_value_type="b")
self._copy_test_attr_to_func_def(
func_def=func_def,
param_name="_tftrt_trt_logger_name",
attr_value_type="s")
self._copy_test_attr_to_func_def(
func_def=func_def,
param_name="_tftrt_max_batch_size",
attr_value_type="i")
self._copy_test_attr_to_func_def(
func_def=func_def,
param_name="_tftrt_max_workspace_size_bytes",
attr_value_type="i")
self._copy_test_attr_to_func_def(
func_def=func_def,
param_name="_tftrt_precision_mode",
attr_value_type="s")
self._copy_test_attr_to_func_def(
func_def=func_def,
param_name="_tftrt_minimum_segment_size",
attr_value_type="i")
self._copy_test_attr_to_func_def(
func_def=func_def, param_name="_tftrt_is_dyn_op", attr_value_type="b")
self._copy_test_attr_to_func_def(
func_def=func_def,
param_name="_tftrt_max_cached_engines",
attr_value_type="i")
self._copy_test_attr_to_func_def(
func_def=func_def,
param_name="_tftrt_use_calibration",
attr_value_type="b")
self._copy_test_attr_to_func_def(
func_def=func_def,
param_name="_tftrt_use_implicit_batch",
attr_value_type="b")
self._copy_test_attr_to_func_def(
func_def=func_def,
param_name="_tftrt_profile_strategy",
attr_value_type="s")
self._copy_test_attr_to_func_def(
func_def=func_def,
param_name="_tftrt_allow_build_at_runtime",
attr_value_type="b")
def _MakeSavedModelV1(self, run_params):
"""Write the saved model as an input for testing.
In addition to creating a SavedModel like its parent method, this method
replaces this SavedModel by adding TF-TRT conversion parameters as function
attributes to each function in the SavedModel.
Args:
run_params: The current test run parameters.
Returns:
The directory of the saved model.
"""
saved_model_dir = trt_test.TfTrtIntegrationTestBase._MakeSavedModelV1(
self, run_params)
saved_model_proto = loader_impl.parse_saved_model(saved_model_dir)
new_saved_model = saved_model_pb2.SavedModel()
new_saved_model.CopyFrom(saved_model_proto)
new_meta_graph_def = new_saved_model.meta_graphs[0]
for func_def in new_meta_graph_def.graph_def.library.function:
# Disable function inlining.
func_def.attr["_noinline"].CopyFrom(attr_value_pb2.AttrValue(b=True))
self._copy_test_attributes_to_func_def(func_def)
old_saved_model_file = os.path.join(saved_model_dir,
constants.SAVED_MODEL_FILENAME_PB)
if os.path.exists(old_saved_model_file):
os.remove(old_saved_model_file)
path = os.path.join(
compat.as_bytes(saved_model_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
file_io.write_string_to_file(
path, new_saved_model.SerializeToString(deterministic=True))
return saved_model_dir
def _MakeSavedModelV2(self, run_params):
"""Write the saved model as an input for testing.
In addition to creating a SavedModel like its parent method, this method
replaces this SavedModel by adding TF-TRT conversion parameters as function
attributes to each function in the SavedModel.
Args:
run_params: The current test run parameters.
Returns:
The directory of the saved model.
"""
saved_model_dir = trt_test.TfTrtIntegrationTestBase._MakeSavedModelV2(
self, run_params)
saved_model_proto = loader_impl.parse_saved_model(saved_model_dir)
new_saved_model = saved_model_pb2.SavedModel()
new_saved_model.CopyFrom(saved_model_proto)
new_meta_graph_def = new_saved_model.meta_graphs[0]
prefix_len = len("__inference_")
for func_def in new_meta_graph_def.graph_def.library.function:
logging.info("_MakeSavedModelV2, func_def name: %s",
func_def.signature.name)
func_name_without_prefix = func_def.signature.name[prefix_len:]
if func_name_without_prefix.startswith(
("_conv_and_pool_0")):
func_def.attr["_noinline"].CopyFrom(attr_value_pb2.AttrValue(b=True))
self._copy_test_attributes_to_func_def(func_def)
old_saved_model_file = os.path.join(saved_model_dir,
constants.SAVED_MODEL_FILENAME_PB)
if os.path.exists(old_saved_model_file):
os.remove(old_saved_model_file)
path = os.path.join(
compat.as_bytes(saved_model_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
file_io.write_string_to_file(
path, new_saved_model.SerializeToString(deterministic=True))
return saved_model_dir
def _VerifyGraphDefV1(self, run_params, original_gdef, gdef_to_verify,
graph_state):
expected_engines = self.ExpectedEnginesToBuild(run_params)
num_engines = 0
functions = [f.signature.name for f in gdef_to_verify.library.function]
all_nodes = list(self._ChainAllNodes(gdef_to_verify))
all_nodes.sort(key=lambda x: x.name)
for node in all_nodes:
if node.op == "TRTEngineOp":
logging.info("Found TRTEngineOp: " + node.name)
num_engines += 1
segment_funcdef_name = node.attr["segment_func"].func.name
function_name = node.name + "_native_segment"
is_dynamic_engine = not node.attr["static_engine"].b
self.assertNotEmpty(segment_funcdef_name, node.name)
self.assertIn(function_name, functions)
if (not IsQuantizationWithCalibration(run_params) and
not is_dynamic_engine):
self.assertTrue(len(node.attr["serialized_segment"].s), node.name)
self.assertIn(
self._RemoveGraphSequenceNumber(node.name), expected_engines)
self.assertEqual(
self._ToBytes(run_params.precision_mode),
node.attr["precision_mode"].s, node.name)
self.assertEqual(run_params.dynamic_engine, is_dynamic_engine,
node.name)
self.assertEqual(node.attr["use_calibration"].b,
run_params.use_calibration, node.name)
has_calibration_data = len(node.attr["calibration_data"].s)
if (IsQuantizationWithCalibration(run_params) and
graph_state == GraphState.INFERENCE):
self.assertTrue(has_calibration_data, node.name)
else:
self.assertFalse(has_calibration_data, node.name)
if graph_state == GraphState.ORIGINAL:
self.assertEqual(0, num_engines)
self._VerifyTestAttrs(function_protos=gdef_to_verify.library.function)
else:
self.assertEqual(num_engines, len(expected_engines))
expected_connections = self.ExpectedConnections(run_params)
if expected_connections:
self._VerifyConnections(expected_engines, expected_connections,
original_gdef, gdef_to_verify)
self._VerifyMaxBatchSizeAnnotations(
expected_engines=expected_engines,
original_gdef=original_gdef,
converted_gdef=gdef_to_verify,
expected_max_batch_sizes=self.ExpectedMaxBatchSizes(run_params),
default_max_batch_size=self.GetMaxBatchSize(run_params))
self._VerifyTestAttrs(function_protos=gdef_to_verify.library.function)
def _ShouldConverterBuild(self, run_params):
return (run_params.is_v2 and not run_params.convert_online and
run_params.dynamic_engine)
def RunTest(self, run_params):
self._test_conversion_params["_tftrt_precision_mode"] = (
run_params.precision_mode)
self._test_conversion_params["_tftrt_use_calibration"] = (
run_params.use_calibration)
self._test_conversion_params["_tftrt_is_dyn_op"] = (
run_params.dynamic_engine)
# When running with V1, using dynamic_engine and
# allow_build_at_runtime==False at the same time do not work.
if run_params.is_v2:
self._test_conversion_params["_tftrt_allow_build_at_runtime"] = True
self._is_v2 = True
else:
self._test_conversion_params["_tftrt_allow_build_at_runtime"] = (
run_params.convert_online or run_params.dynamic_engine)
self._test_conversion_params["_tftrt_use_implicit_batch"] = \
not run_params.dynamic_shape
self.DisableNonTrtOptimizers()
trt_test.TfTrtIntegrationTestBase.RunTest(self, run_params)
if __name__ == "__main__":
test.main()
| TfFunctionTest |
python | catalyst-team__catalyst | catalyst/metrics/_accumulative.py | {
"start": 161,
"end": 3291
} | class ____(ICallbackLoaderMetric):
"""This metric accumulates all the input data along loader
Args:
keys: list of keys to accumulate data from batch
compute_on_call: if True, allows compute metric's value on call
prefix: metric prefix
suffix: metric suffix
"""
def __init__(
self,
keys: Iterable[str] = None,
compute_on_call: bool = True,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
) -> None:
"""Init AccumulativeMetric"""
super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
self.keys = keys or ()
self.storage = None
self.num_samples = None
self.collected_batches = None
self.collected_samples = None
def reset(self, num_batches: int, num_samples: int) -> None:
"""
Reset metrics fields
Args:
num_batches: expected number of batches
num_samples: expected number of samples to accumulate
"""
self.num_samples = num_samples
self.collected_batches = 0
self.collected_samples = 0
self.storage = None
def _allocate_memory(self, shape_type_dict: Dict[str, Any]) -> None:
"""
Allocate memory for data accumulation
Args:
shape_type_dict: dict that contains information about shape of each tensor
and it's dtype
"""
self.storage = defaultdict(torch.Tensor)
for key in shape_type_dict:
self.storage[key] = torch.empty(
size=shape_type_dict[key]["shape"], dtype=shape_type_dict[key]["dtype"]
)
def update(self, **kwargs) -> None:
"""
Update accumulated data with new batch
Args:
**kwargs: tensors that should be accumulates
"""
if self.collected_batches == 0:
shape_type_dict = {}
for field_name in self.keys:
shape_type_dict[field_name] = {}
shape_type_dict[field_name]["shape"] = (
self.num_samples,
*(kwargs[field_name].shape[1:]),
)
shape_type_dict[field_name]["dtype"] = kwargs[field_name].dtype
self._allocate_memory(shape_type_dict=shape_type_dict)
bs = 0
for field_name in self.keys:
bs = kwargs[field_name].shape[0]
self.storage[field_name][
self.collected_samples : self.collected_samples + bs, ...
] = (kwargs[field_name].detach().cpu())
self.collected_samples += bs
self.collected_batches += 1
def compute(self) -> Dict[str, torch.Tensor]:
"""
Return accumulated data
Returns:
dict of accumulated data
"""
return self.storage
def compute_key_value(self) -> Dict[str, torch.Tensor]:
"""
Return accumulated data
Returns:
dict of accumulated data
"""
return self.compute()
__all__ = ["AccumulativeMetric"]
| AccumulativeMetric |
python | kubernetes-client__python | kubernetes/client/models/v1alpha1_param_ref.py | {
"start": 383,
"end": 8400
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'namespace': 'str',
'parameter_not_found_action': 'str',
'selector': 'V1LabelSelector'
}
attribute_map = {
'name': 'name',
'namespace': 'namespace',
'parameter_not_found_action': 'parameterNotFoundAction',
'selector': 'selector'
}
def __init__(self, name=None, namespace=None, parameter_not_found_action=None, selector=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1ParamRef - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._namespace = None
self._parameter_not_found_action = None
self._selector = None
self.discriminator = None
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
if parameter_not_found_action is not None:
self.parameter_not_found_action = parameter_not_found_action
if selector is not None:
self.selector = selector
@property
def name(self):
"""Gets the name of this V1alpha1ParamRef. # noqa: E501
`name` is the name of the resource being referenced. `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset. # noqa: E501
:return: The name of this V1alpha1ParamRef. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1alpha1ParamRef.
`name` is the name of the resource being referenced. `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset. # noqa: E501
:param name: The name of this V1alpha1ParamRef. # noqa: E501
:type: str
"""
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1alpha1ParamRef. # noqa: E501
namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields. A per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty. - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error. - If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error. # noqa: E501
:return: The namespace of this V1alpha1ParamRef. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1alpha1ParamRef.
namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields. A per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty. - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error. - If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error. # noqa: E501
:param namespace: The namespace of this V1alpha1ParamRef. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def parameter_not_found_action(self):
"""Gets the parameter_not_found_action of this V1alpha1ParamRef. # noqa: E501
`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy. Allowed values are `Allow` or `Deny` Default to `Deny` # noqa: E501
:return: The parameter_not_found_action of this V1alpha1ParamRef. # noqa: E501
:rtype: str
"""
return self._parameter_not_found_action
@parameter_not_found_action.setter
def parameter_not_found_action(self, parameter_not_found_action):
"""Sets the parameter_not_found_action of this V1alpha1ParamRef.
`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy. Allowed values are `Allow` or `Deny` Default to `Deny` # noqa: E501
:param parameter_not_found_action: The parameter_not_found_action of this V1alpha1ParamRef. # noqa: E501
:type: str
"""
self._parameter_not_found_action = parameter_not_found_action
@property
def selector(self):
"""Gets the selector of this V1alpha1ParamRef. # noqa: E501
:return: The selector of this V1alpha1ParamRef. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1alpha1ParamRef.
:param selector: The selector of this V1alpha1ParamRef. # noqa: E501
:type: V1LabelSelector
"""
self._selector = selector
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1ParamRef):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1ParamRef):
return True
return self.to_dict() != other.to_dict()
| V1alpha1ParamRef |
python | numpy__numpy | benchmarks/benchmarks/bench_ma.py | {
"start": 4206,
"end": 4902
} | class ____(Benchmark):
param_names = ['method', 'msize']
params = [['ravel', 'transpose', 'compressed', 'conjugate'],
['small', 'big']]
def setup(self, method, msize):
xs = np.random.uniform(-1, 1, 6).reshape(2, 3)
m1 = [[True, False, False], [False, False, True]]
xl = np.random.uniform(-1, 1, 100 * 100).reshape(100, 100)
maskx = xl > 0.8
self.nmxs = np.ma.array(xs, mask=m1)
self.nmxl = np.ma.array(xl, mask=maskx)
def time_methods_0v(self, method, msize):
if msize == 'small':
mdat = self.nmxs
elif msize == 'big':
mdat = self.nmxl
getattr(mdat, method)()
| MAMethod0v |
python | zarr-developers__zarr-python | src/zarr/codecs/gzip.py | {
"start": 781,
"end": 2069
} | class ____(BytesBytesCodec):
"""gzip codec"""
is_fixed_size = False
level: int = 5
def __init__(self, *, level: int = 5) -> None:
level_parsed = parse_gzip_level(level)
object.__setattr__(self, "level", level_parsed)
@classmethod
def from_dict(cls, data: dict[str, JSON]) -> Self:
_, configuration_parsed = parse_named_configuration(data, "gzip")
return cls(**configuration_parsed) # type: ignore[arg-type]
def to_dict(self) -> dict[str, JSON]:
return {"name": "gzip", "configuration": {"level": self.level}}
async def _decode_single(
self,
chunk_bytes: Buffer,
chunk_spec: ArraySpec,
) -> Buffer:
return await asyncio.to_thread(
as_numpy_array_wrapper, GZip(self.level).decode, chunk_bytes, chunk_spec.prototype
)
async def _encode_single(
self,
chunk_bytes: Buffer,
chunk_spec: ArraySpec,
) -> Buffer | None:
return await asyncio.to_thread(
as_numpy_array_wrapper, GZip(self.level).encode, chunk_bytes, chunk_spec.prototype
)
def compute_encoded_size(
self,
_input_byte_length: int,
_chunk_spec: ArraySpec,
) -> int:
raise NotImplementedError
| GzipCodec |
python | huggingface__transformers | tests/models/maskformer/test_modeling_maskformer.py | {
"start": 20687,
"end": 29983
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
if is_vision_available()
else None
)
def test_inference_no_head(self):
model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device)
inputs_shape = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(inputs_shape, (1, 3, 800, 1088))
with torch.no_grad():
outputs = model(**inputs)
expected_slice_hidden_state = torch.tensor(
[
[-0.0482, 0.9228, 0.4951],
[-0.2547, 0.8017, 0.8527],
[-0.0069, 0.3385, -0.0089],
]
).to(torch_device)
torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE) # fmt: skip
expectations = Expectations(
{
(None, None): [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]],
("cuda", 8): [
[-0.8422, -0.8435, -0.9717],
[-1.0145, -0.5564, -0.4195],
[-1.0040, -0.4486, -0.1962],
],
}
)
expected_slice_hidden_state = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE,rtol=TOLERANCE) # fmt: skip
expectations = Expectations(
{
(None, None): [
[0.2852, -0.0159, 0.9735],
[0.6254, 0.1858, 0.8529],
[-0.0680, -0.4116, 1.8413],
],
("cuda", 8): [
[0.2853, -0.0162, 0.9736],
[0.6256, 0.1856, 0.8530],
[-0.0679, -0.4118, 1.8416],
],
}
)
expected_slice_hidden_state = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE) # fmt: skip
def test_inference_instance_segmentation_head(self):
model = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
.to(torch_device)
.eval()
)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device)
inputs_shape = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(inputs_shape, (1, 3, 800, 1088))
with torch.no_grad():
outputs = model(**inputs)
# masks_queries_logits
masks_queries_logits = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape,
(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
)
expectations = Expectations(
{
(None, None): [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
],
("cuda", 8): [
[-1.3737, -1.7727, -1.9367],
[-1.5979, -1.9871, -2.1527],
[-1.5797, -1.9271, -2.0941],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
# class_queries_logits
class_queries_logits = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
)
expectations = Expectations(
{
(None, None): [
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
],
("cuda", 8): [
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6163e-02, -5.9025e00, -2.9313e00],
[1.1681e-04, -7.7631e00, -5.1263e00],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(
outputs.class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE
)
def test_inference_instance_segmentation_head_resnet_backbone(self):
model = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
.to(torch_device)
.eval()
)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device)
inputs_shape = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(inputs_shape, (1, 3, 800, 1088))
with torch.no_grad():
outputs = model(**inputs)
# masks_queries_logits
masks_queries_logits = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape,
(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
)
expectations = Expectations(
{
(None, None): [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]],
("cuda", 8): [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
# class_queries_logits
class_queries_logits = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
)
expectations = Expectations(
{
(None, None): [
[4.7188, -3.2585, -2.8857],
[6.6871, -2.9181, -1.2487],
[7.2449, -2.2764, -2.1874],
],
("cuda", 8): [
[4.7188, -3.2585, -2.8857],
[6.6871, -2.9181, -1.2487],
[7.2449, -2.2764, -2.1874],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(
outputs.class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE
)
@require_torch_accelerator
@require_torch_fp16
def test_inference_fp16(self):
model = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
.to(torch_device, dtype=torch.float16)
.eval()
)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device, dtype=torch.float16)
with torch.no_grad():
_ = model(**inputs)
def test_with_segmentation_maps_and_loss(self):
model = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
.to(torch_device)
.eval()
)
image_processor = self.default_image_processor
inputs = image_processor(
[np.zeros((3, 400, 333)), np.zeros((3, 400, 333))],
segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
return_tensors="pt",
)
inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
with torch.no_grad():
outputs = model(**inputs)
self.assertTrue(outputs.loss is not None)
| MaskFormerModelIntegrationTest |
python | pypa__setuptools | setuptools/command/install_lib.py | {
"start": 205,
"end": 4319
} | class ____(orig.install_lib):
"""Don't add compiled flags to filenames of non-Python files"""
distribution: Distribution # override distutils.dist.Distribution with setuptools.dist.Distribution
def run(self) -> None:
self.build()
outfiles = self.install()
if outfiles is not None:
# always compile, in case we have any extension stubs to deal with
self.byte_compile(outfiles)
def get_exclusions(self):
"""
Return a collections.Sized collections.Container of paths to be
excluded for single_version_externally_managed installations.
"""
all_packages = (
pkg
for ns_pkg in self._get_SVEM_NSPs()
for pkg in self._all_packages(ns_pkg)
)
excl_specs = product(all_packages, self._gen_exclusion_paths())
return set(starmap(self._exclude_pkg_path, excl_specs))
def _exclude_pkg_path(self, pkg, exclusion_path):
"""
Given a package name and exclusion path within that package,
compute the full exclusion path.
"""
parts = pkg.split('.') + [exclusion_path]
return os.path.join(self.install_dir, *parts)
@staticmethod
def _all_packages(pkg_name):
"""
>>> list(install_lib._all_packages('foo.bar.baz'))
['foo.bar.baz', 'foo.bar', 'foo']
"""
while pkg_name:
yield pkg_name
pkg_name, _sep, _child = pkg_name.rpartition('.')
def _get_SVEM_NSPs(self):
"""
Get namespace packages (list) but only for
single_version_externally_managed installations and empty otherwise.
"""
# TODO: is it necessary to short-circuit here? i.e. what's the cost
# if get_finalized_command is called even when namespace_packages is
# False?
if not self.distribution.namespace_packages:
return []
install_cmd = self.get_finalized_command('install')
svem = install_cmd.single_version_externally_managed
return self.distribution.namespace_packages if svem else []
@staticmethod
def _gen_exclusion_paths():
"""
Generate file paths to be excluded for namespace packages (bytecode
cache files).
"""
# always exclude the package module itself
yield '__init__.py'
yield '__init__.pyc'
yield '__init__.pyo'
if not hasattr(sys, 'implementation'):
return
base = os.path.join('__pycache__', '__init__.' + sys.implementation.cache_tag)
yield base + '.pyc'
yield base + '.pyo'
yield base + '.opt-1.pyc'
yield base + '.opt-2.pyc'
def copy_tree(
self,
infile: StrPath,
outfile: str,
# override: Using actual booleans
preserve_mode: bool = True, # type: ignore[override]
preserve_times: bool = True, # type: ignore[override]
preserve_symlinks: bool = False, # type: ignore[override]
level: object = 1,
) -> list[str]:
assert preserve_mode
assert preserve_times
assert not preserve_symlinks
exclude = self.get_exclusions()
if not exclude:
return orig.install_lib.copy_tree(self, infile, outfile)
# Exclude namespace package __init__.py* files from the output
from setuptools.archive_util import unpack_directory
from distutils import log
outfiles: list[str] = []
def pf(src: str, dst: str):
if dst in exclude:
log.warn("Skipping installation of %s (namespace package)", dst)
return False
log.info("copying %s -> %s", src, os.path.dirname(dst))
outfiles.append(dst)
return dst
unpack_directory(infile, outfile, pf)
return outfiles
def get_outputs(self):
outputs = orig.install_lib.get_outputs(self)
exclude = self.get_exclusions()
if exclude:
return [f for f in outputs if f not in exclude]
return outputs
| install_lib |
python | kamyu104__LeetCode-Solutions | Python/largest-number-at-least-twice-of-others.py | {
"start": 29,
"end": 286
} | class ____(object):
def dominantIndex(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
m = max(nums)
if all(m >= 2*x for x in nums if x != m):
return nums.index(m)
return -1
| Solution |
python | tensorflow__tensorflow | tensorflow/python/summary/writer/event_file_writer.py | {
"start": 8225,
"end": 10289
} | class ____:
"""Stripped-down fork of the standard library Queue that is closeable."""
def __init__(self, maxsize=0):
"""Create a queue object with a given maximum size.
Args:
maxsize: int size of queue. If <= 0, the queue size is infinite.
"""
self._maxsize = maxsize
self._queue = collections.deque()
self._closed = False
# Mutex must be held whenever queue is mutating; shared by conditions.
self._mutex = threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self._not_empty = threading.Condition(self._mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self._not_full = threading.Condition(self._mutex)
def get(self):
"""Remove and return an item from the queue.
If the queue is empty, blocks until an item is available.
Returns:
an item from the queue
"""
with self._not_empty:
while not self._queue:
self._not_empty.wait()
item = self._queue.popleft()
self._not_full.notify()
return item
def put(self, item):
"""Put an item into the queue.
If the queue is closed, fails immediately.
If the queue is full, blocks until space is available or until the queue
is closed by a call to close(), at which point this call fails.
Args:
item: an item to add to the queue
Raises:
QueueClosedError: if insertion failed because the queue is closed
"""
with self._not_full:
if self._closed:
raise QueueClosedError()
if self._maxsize > 0:
while len(self._queue) == self._maxsize:
self._not_full.wait()
if self._closed:
raise QueueClosedError()
self._queue.append(item)
self._not_empty.notify()
def close(self):
"""Closes the queue, causing any pending or future `put()` calls to fail."""
with self._not_full:
self._closed = True
self._not_full.notify_all()
| CloseableQueue |
python | huggingface__transformers | tests/models/csm/test_modeling_csm.py | {
"start": 11773,
"end": 38812
} | class ____(unittest.TestCase):
def setUp(self):
# TODO: @eustlb, update with correct sesame's repo
self.model_checkpoint = "sesame/csm-1b"
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def _load_conversation(self):
ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
ds = ds.filter(lambda x: x["conversation_id"] == 0)
ds = ds.sort("turn_id")
return ds[0]
@slow
@require_torch_accelerator
def test_1b_model_integration_generate(self):
"""
Tests the generated tokens match the ones from the original model implementation.
Such tokens are to be retrieved using https://gist.github.com/eustlb/d25577a357ddcf8f4a8cd0d00baca551, which is a script that infers the original model.
"""
processor = AutoProcessor.from_pretrained(self.model_checkpoint)
prompt = "<|begin_of_text|>[0]What are you working on?<|end_of_text|><|AUDIO|><|audio_eos|><|begin_of_text|>[1]I'm figuring out my budget.<|end_of_text|>"
ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
audio = ds[0]["audio"]["array"]
inputs = processor(text=prompt, audio=audio, return_tensors="pt").to(torch_device)
model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device)
output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False)
# fmt: off
EXPECTED_OUTPUT_TOKENS = torch.tensor([[
[1140, 1818, 86, 1072, 1029, 1010, 796, 577, 1523, 1599, 902, 1308, 817, 232, 1860, 56, 327, 1399, 1069, 1014, 1980, 53, 407, 1841, 1559, 928, 972, 1432, 832, 1007, 1325, 371],
[955, 1390, 1503, 861, 265, 1753, 91, 1690, 389, 1025, 1086, 495, 1192, 1334, 773, 1277, 957, 1388, 513, 1110, 539, 349, 1865, 1515, 806, 1514, 237, 1424, 1783, 1928, 523, 1925],
[1925, 190, 654, 1538, 19, 37, 1923, 100, 1909, 1156, 1847, 1901, 975, 982, 2002, 544, 1933, 311, 79, 850, 238, 1034, 428, 1231, 764, 313, 973, 269, 1669, 1058, 1641, 891],
[1721, 92, 1298, 989, 1868, 154, 386, 1115, 347, 384, 853, 1439, 970, 1369, 238, 1279, 268, 595, 2010, 1861, 723, 999, 578, 1612, 69, 121, 306, 1647, 1609, 1185, 1786, 1268],
[1356, 1419, 1199, 1575, 418, 53, 1140, 805, 355, 324, 633, 199, 343, 1176, 784, 41, 268, 366, 1478, 466, 1591, 305, 1298, 1335, 1866, 1563, 1503, 1558, 1468, 852, 1244, 312],
[1860, 1603, 546, 1805, 607, 160, 1528, 191, 1867, 1830, 861, 661, 1740, 1276, 218, 954, 1286, 1216, 1727, 1637, 983, 597, 1857, 65, 797, 947, 427, 476, 739, 978, 107, 1394],
[1165, 1775, 177, 823, 100, 370, 521, 200, 2007, 434, 1444, 1205, 819, 1278, 31, 912, 150, 1546, 2035, 1147, 559, 1995, 639, 35, 1812, 56, 1485, 2003, 1573, 1693, 1762, 1313],
[1932, 704, 907, 897, 56, 1587, 990, 1905, 2007, 256, 671, 868, 282, 1731, 460, 1055, 1309, 1880, 584, 1849, 1643, 1198, 310, 361, 789, 1657, 905, 1564, 1354, 110, 915, 1011],
[1437, 1958, 1483, 313, 79, 28, 859, 397, 1783, 1693, 633, 1424, 1128, 1831, 605, 1123, 1496, 739, 1177, 498, 781, 1756, 1288, 890, 224, 1875, 279, 800, 1999, 1740, 348, 1420],
[724, 870, 1344, 861, 429, 522, 1877, 1689, 771, 1468, 1952, 156, 856, 462, 18, 834, 33, 840, 1136, 2012, 1766, 1891, 2034, 1731, 624, 108, 1469, 653, 1344, 1682, 407, 515],
[355, 26, 36, 1700, 1032, 293, 1799, 978, 944, 296, 1333, 1377, 664, 1249, 421, 516, 1178, 531, 1587, 899, 1, 1449, 934, 942, 1604, 1208, 1889, 710, 825, 2012, 1563, 1299],
[629, 15, 551, 861, 310, 918, 149, 1689, 1464, 1950, 1900, 1502, 1503, 615, 477, 1090, 1556, 1393, 1143, 1112, 1934, 416, 1604, 1470, 1501, 1594, 903, 1400, 972, 199, 1075, 1643],
[1281, 106, 1162, 1313, 115, 429, 1792, 1379, 1535, 1311, 743, 484, 333, 498, 547, 699, 1075, 1861, 1038, 1352, 166, 622, 759, 1398, 241, 138, 1330, 481, 1254, 1365, 985, 423],
[9, 520, 323, 25, 1873, 716, 1414, 1413, 266, 1449, 1265, 290, 1341, 836, 674, 411, 913, 911, 637, 1038, 1097, 1158, 1009, 803, 737, 154, 1388, 938, 466, 725, 1216, 1549],
[1944, 15, 62, 332, 540, 689, 106, 1805, 1303, 1787, 1724, 1011, 1515, 1442, 1197, 496, 2026, 1820, 906, 372, 322, 1413, 1305, 1674, 443, 1733, 828, 905, 1116, 1850, 1870, 786],
[221, 220, 1093, 1790, 759, 1266, 1169, 1379, 572, 1859, 1155, 596, 1398, 412, 1788, 1963, 167, 89, 1011, 1489, 714, 73, 486, 780, 1136, 254, 983, 138, 386, 800, 1819, 1857],
[1178, 1939, 107, 1605, 582, 1256, 420, 637, 648, 1023, 1809, 978, 1703, 278, 1668, 2044, 1599, 1321, 1670, 1716, 1155, 56, 602, 877, 886, 220, 910, 797, 1028, 1226, 869, 811],
[1432, 1926, 1197, 1687, 540, 1815, 658, 1080, 1162, 192, 315, 1713, 422, 586, 65, 947, 493, 1536, 13, 505, 1269, 456, 1042, 645, 512, 1394, 1124, 590, 1058, 1896, 1055, 1537],
[905, 564, 1739, 1594, 1201, 1773, 738, 994, 239, 1686, 1528, 368, 1791, 1924, 607, 44, 1320, 552, 1862, 1578, 591, 1434, 330, 1576, 1946, 1233, 113, 445, 669, 2041, 1242, 1406],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]])
# fmt: on
torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS)
@slow
@require_torch_accelerator
def test_1b_model_integration_generate_no_audio(self):
"""
Tests the generated tokens match the ones from the original model implementation.
Such tokens are to be retrieved using https://gist.github.com/eustlb/aed822f765e928b9612e01b0d8836d69, which is a script that infers the original model.
"""
processor = AutoProcessor.from_pretrained(self.model_checkpoint)
conversation = [
{"role": "0", "content": [{"type": "text", "text": "The past is just a story we tell ourselves."}]},
]
inputs = processor.apply_chat_template(conversation, tokenize=True, return_dict=True).to(torch_device)
model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device)
output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False)
print(output_tokens)
# fmt: off
EXPECTED_OUTPUT_TOKENS = torch.tensor([[
[1656, 629, 723, 1785, 206, 1873, 1059, 1190, 1833, 240, 618, 350, 156, 109, 2010, 452, 435, 1764, 77, 654, 1133, 908, 1095, 74, 804, 494, 1760, 1343, 1312, 1464, 1657, 324],
[366, 1532, 1945, 21, 145, 1428, 1417, 1987, 1793, 1444, 356, 1491, 849, 333, 788, 426, 1423, 1004, 414, 1823, 1169, 257, 1892, 696, 1572, 998, 1098, 523, 390, 1977, 546, 1692],
[1343, 1382, 1288, 1744, 1685, 1154, 1837, 1156, 1680, 1641, 1479, 1548, 632, 824, 694, 2010, 671, 1251, 1822, 343, 638, 1372, 696, 1272, 144, 125, 1332, 579, 936, 77, 159, 357],
[456, 1534, 349, 274, 1956, 1502, 1268, 1038, 1911, 523, 1360, 1159, 761, 293, 718, 1143, 63, 705, 168, 550, 413, 1372, 1771, 787, 631, 693, 784, 1789, 2039, 1131, 1601, 918],
[456, 829, 2026, 1108, 1649, 207, 1308, 1440, 1192, 1394, 426, 546, 590, 36, 1682, 1827, 1387, 1425, 1909, 1500, 1438, 1297, 5, 888, 948, 1745, 1304, 1364, 1692, 131, 300, 1908],
[2027, 1431, 1037, 1789, 1296, 1264, 1331, 1787, 1235, 1902, 1161, 1591, 590, 561, 1633, 1218, 510, 148, 1962, 118, 212, 608, 565, 1869, 583, 598, 532, 658, 1416, 9, 1172, 493],
[1215, 460, 1722, 317, 1423, 716, 1589, 1177, 1927, 1860, 1756, 1552, 1674, 643, 74, 1256, 587, 1742, 771, 2028, 469, 1070, 1683, 1614, 699, 494, 2020, 139, 1365, 1171, 171, 904],
[1615, 339, 323, 317, 469, 714, 104, 2015, 1407, 278, 468, 77, 2007, 650, 1630, 269, 168, 934, 1544, 58, 1487, 1373, 705, 874, 1252, 2031, 1995, 254, 1334, 1171, 1911, 1607],
[1259, 693, 666, 1700, 1115, 607, 982, 769, 1106, 1500, 101, 88, 1698, 1864, 1358, 1594, 192, 153, 1868, 1654, 604, 1948, 526, 778, 172, 1664, 1966, 99, 1334, 1030, 1349, 1209],
[1211, 579, 1369, 492, 1725, 203, 1125, 778, 701, 1982, 1420, 155, 736, 1145, 2018, 609, 658, 561, 1147, 923, 1794, 1753, 116, 1374, 612, 956, 1587, 392, 1062, 2047, 901, 1931],
[460, 1093, 1346, 1917, 1223, 470, 271, 390, 547, 112, 143, 1633, 1030, 643, 96, 1759, 920, 1959, 75, 1280, 1630, 999, 333, 853, 1110, 1291, 1911, 57, 171, 1658, 1704, 1508],
[908, 500, 393, 184, 1437, 482, 2008, 1834, 356, 1435, 1550, 1407, 1236, 109, 1167, 452, 1141, 934, 207, 957, 660, 670, 28, 1066, 1252, 1932, 669, 906, 1904, 1820, 2043, 881],
[1599, 1031, 1474, 336, 1540, 571, 437, 1440, 1616, 1365, 1412, 1246, 400, 405, 1776, 96, 296, 38, 1597, 466, 1630, 1256, 1940, 887, 1769, 294, 285, 842, 1756, 1619, 451, 1529],
[1615, 339, 1722, 525, 942, 105, 1365, 670, 785, 1316, 465, 1860, 438, 968, 547, 1938, 1816, 1429, 1065, 1942, 660, 1446, 1093, 1066, 931, 121, 688, 1033, 1178, 754, 1783, 94],
[912, 1354, 598, 254, 341, 1980, 1166, 585, 1302, 473, 554, 242, 174, 2030, 2011, 325, 978, 1690, 258, 396, 1831, 1768, 1291, 1699, 2001, 433, 1414, 2012, 1045, 511, 533, 1104],
[80, 1791, 1062, 1136, 391, 568, 1651, 101, 959, 2043, 1683, 760, 794, 181, 570, 540, 1599, 20, 1017, 973, 1654, 396, 586, 778, 2044, 1664, 1911, 929, 66, 897, 510, 643],
[1161, 1093, 161, 1296, 589, 54, 906, 981, 1927, 605, 516, 1731, 1461, 1204, 1902, 920, 1488, 177, 805, 1402, 610, 1446, 1154, 1067, 2025, 645, 762, 1715, 415, 1658, 1713, 1607],
[374, 1444, 1577, 792, 1450, 628, 604, 1729, 322, 514, 1725, 540, 1070, 575, 653, 800, 250, 187, 569, 349, 354, 1573, 176, 793, 897, 359, 536, 276, 1224, 23, 145, 1287],
[1184, 415, 1644, 1737, 1788, 385, 784, 1861, 1172, 1118, 367, 1156, 234, 1946, 1742, 981, 828, 1798, 1821, 361, 1148, 670, 518, 1288, 761, 1050, 1642, 1006, 1747, 840, 1599, 720],
[1141, 1731, 1670, 1542, 1347, 1907, 683, 753, 1347, 68, 2031, 153, 556, 719, 736, 1759, 1131, 1073, 1747, 1730, 1487, 1137, 1869, 1624, 699, 1900, 748, 49, 1312, 735, 726, 1268],
[1141, 1383, 405, 1033, 490, 488, 1102, 471, 713, 1630, 447, 703, 1495, 1001, 1855, 354, 456, 411, 786, 853, 168, 407, 116, 699, 605, 128, 532, 1076, 208, 447, 1448, 1071],
[345, 1013, 948, 1728, 1837, 337, 930, 1226, 1643, 1729, 983, 1688, 2009, 435, 1358, 721, 42, 1779, 1332, 1077, 1873, 128, 1327, 125, 1226, 1704, 705, 1459, 1449, 862, 155, 1870],
[336, 904, 684, 184, 1542, 714, 1752, 1180, 1373, 1816, 504, 1716, 1066, 1086, 1212, 530, 1413, 1278, 75, 1347, 82, 1623, 1307, 1717, 1861, 494, 888, 1589, 670, 1999, 905, 1430],
[578, 554, 14, 523, 1016, 300, 1589, 1017, 356, 1583, 1654, 414, 449, 376, 1413, 58, 706, 963, 388, 1626, 131, 352, 1024, 1054, 2025, 1561, 77, 1589, 1486, 431, 1249, 1508],
[184, 2043, 169, 1673, 580, 162, 1752, 397, 1119, 2009, 697, 150, 1475, 157, 1523, 1402, 575, 86, 1373, 1230, 1564, 1308, 626, 1093, 1603, 1446, 1390, 1543, 1778, 1142, 1357, 1831],
[1484, 1987, 932, 1728, 1504, 1618, 291, 1865, 1151, 460, 1792, 141, 234, 2043, 829, 513, 435, 791, 1037, 1541, 65, 424, 1589, 1711, 312, 1306, 212, 686, 673, 984, 1914, 1549],
[513, 1536, 1844, 1319, 572, 1069, 121, 735, 1949, 1211, 1362, 1027, 105, 1379, 315, 1782, 706, 1658, 1510, 1989, 1443, 1690, 822, 1614, 1194, 1460, 992, 2040, 1178, 1474, 1110, 1326],
[1858, 194, 1594, 1935, 1622, 1892, 1577, 137, 1907, 2015, 757, 414, 1823, 836, 496, 530, 1385, 1503, 1065, 1554, 664, 525, 1031, 433, 69, 466, 1016, 1846, 1609, 1658, 911, 94],
[1134, 1744, 323, 691, 1837, 347, 1871, 172, 811, 91, 1883, 436, 1912, 23, 1336, 1684, 519, 1612, 1219, 1402, 728, 1953, 1658, 641, 27, 1340, 436, 139, 2008, 1030, 159, 324],
[1270, 1536, 1639, 414, 1387, 1170, 1067, 1701, 1414, 505, 1122, 36, 1731, 350, 1552, 1214, 1444, 30, 107, 172, 480, 1858, 655, 168, 1107, 691, 1272, 797, 1656, 548, 1407, 1375],
[1270, 286, 1371, 1552, 1622, 1739, 1348, 2018, 345, 1537, 1941, 2024, 1423, 740, 284, 513, 91, 1228, 2015, 385, 992, 39, 813, 803, 2025, 497, 663, 462, 1609, 334, 927, 1470],
[1718, 994, 265, 1421, 1622, 1098, 845, 1868, 832, 459, 447, 619, 1970, 929, 513, 63, 1448, 1509, 1219, 1942, 285, 1373, 1259, 1004, 11, 1040, 1984, 57, 188, 1687, 1475, 805],
[1157, 832, 480, 1225, 1019, 347, 326, 999, 125, 1542, 118, 1383, 1343, 1077, 1821, 1602, 1978, 1642, 618, 808, 692, 1953, 1353, 963, 619, 1291, 1016, 1458, 1995, 1688, 1872, 1718],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]])
# fmt: on
torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS)
@slow
@require_torch_accelerator
def test_1b_model_integration_generate_multiple_audio(self):
"""
Test the generated tokens match the ones from the original model implementation.
Such tokens are to be retrieved using https://gist.github.com/eustlb/0c94de002e1325abb61d32217f74c0f8, which is a script that infers the original model.
"""
processor = AutoProcessor.from_pretrained(self.model_checkpoint)
ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
conversation = []
# context
for text, audio, speaker_id in zip(ds[:4]["text"], ds[:4]["audio"], ds[:4]["speaker_id"]):
conversation.append(
{
"role": f"{speaker_id}",
"content": [{"type": "text", "text": text}, {"type": "audio", "path": audio["array"]}],
}
)
# text prompt
conversation.append({"role": f"{ds[4]['speaker_id']}", "content": [{"type": "text", "text": ds[4]["text"]}]})
inputs = processor.apply_chat_template(
conversation,
tokenize=True,
return_dict=True,
).to(torch_device)
model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device)
output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False)
# fmt: off
EXPECTED_OUTPUT_TOKENS = torch.tensor([[
[420, 1189, 1311, 318, 359, 694, 1550, 1044, 1614, 1437, 1978, 537, 554, 1681, 147, 1225, 422, 1357, 1681, 1619, 165, 641, 1132, 1975, 1568, 406, 756, 503, 1673, 1428, 762, 781],
[1848, 1412, 957, 1656, 871, 540, 1999, 175, 711, 1383, 1814, 104, 742, 1285, 733, 1251, 1165, 1915, 1392, 645, 1804, 913, 1772, 632, 376, 1507, 1132, 725, 716, 1121, 1769, 1509],
[429, 1138, 895, 1018, 1099, 257, 1395, 1015, 576, 1599, 497, 19, 1858, 1437, 282, 357, 1143, 828, 1481, 70, 985, 551, 935, 278, 1102, 1453, 1902, 755, 526, 498, 1441, 1733],
[546, 343, 1547, 879, 2039, 692, 1999, 1150, 1969, 1866, 1178, 199, 1913, 1738, 1530, 1728, 1193, 74, 695, 612, 1095, 1597, 1381, 683, 1385, 2045, 1069, 865, 438, 70, 1437, 318],
[1741, 1621, 733, 1580, 1006, 482, 1508, 1722, 1529, 1822, 745, 552, 142, 1568, 704, 480, 214, 552, 321, 1858, 1902, 1042, 1249, 1328, 1730, 1218, 1755, 597, 670, 738, 1056, 762],
[1264, 1561, 1307, 730, 1403, 688, 212, 949, 1871, 994, 1174, 674, 858, 293, 1577, 1221, 1024, 1535, 1224, 872, 509, 1971, 46, 440, 1531, 1100, 1466, 732, 964, 381, 1933, 1612],
[1407, 982, 1665, 1247, 1636, 1546, 939, 882, 1999, 618, 484, 1632, 66, 430, 290, 327, 351, 1236, 687, 504, 1973, 1073, 1233, 1972, 82, 1655, 361, 1612, 861, 1085, 880, 1407],
[584, 637, 304, 1805, 1683, 1381, 404, 862, 1278, 916, 1695, 370, 316, 1049, 237, 1187, 1389, 300, 680, 135, 1068, 1368, 810, 1392, 103, 1459, 1051, 644, 38, 1517, 790, 646],
[471, 1984, 1333, 553, 193, 319, 1604, 1546, 153, 513, 990, 839, 1714, 1998, 984, 1882, 1055, 476, 1821, 1476, 1522, 1817, 949, 1923, 1416, 1885, 1832, 1368, 1782, 1229, 436, 918],
[28, 1238, 489, 1580, 596, 1232, 840, 835, 297, 762, 474, 1106, 1761, 483, 1165, 923, 1184, 1181, 1724, 398, 1484, 860, 1945, 665, 1925, 14, 67, 1693, 1853, 1283, 1822, 1973],
[20, 637, 253, 1254, 738, 188, 593, 1239, 1768, 1047, 1703, 1512, 1398, 464, 13, 161, 651, 1844, 666, 210, 1510, 1798, 614, 1649, 1751, 341, 808, 915, 1965, 840, 778, 950],
[1879, 2028, 1405, 694, 432, 2036, 612, 387, 1843, 1204, 1044, 8, 1538, 542, 1198, 598, 1131, 760, 1217, 901, 800, 1046, 136, 639, 1320, 618, 606, 707, 574, 1288, 1254, 198],
[1874, 937, 1063, 1341, 254, 13, 359, 888, 1837, 1246, 980, 818, 2046, 1258, 1290, 1470, 2028, 1701, 228, 1766, 51, 93, 296, 991, 1094, 1694, 156, 1207, 401, 967, 867, 211],
[1762, 426, 1749, 2004, 314, 903, 1254, 220, 1330, 1813, 534, 102, 658, 1460, 603, 1046, 402, 2005, 783, 973, 1764, 210, 1458, 803, 605, 369, 669, 352, 1964, 1549, 632, 1375],
[1577, 386, 503, 1492, 604, 405, 1329, 349, 180, 875, 329, 196, 514, 1854, 925, 159, 1428, 1300, 1510, 329, 76, 1682, 1036, 854, 695, 1097, 816, 382, 1417, 697, 1693, 194],
[1109, 848, 1385, 126, 1136, 979, 687, 130, 2045, 140, 562, 361, 921, 1706, 1060, 1723, 165, 1304, 203, 1067, 158, 692, 980, 313, 1896, 1812, 839, 837, 985, 116, 866, 1049],
[1810, 1092, 1534, 1730, 773, 2044, 1098, 1326, 85, 249, 455, 1728, 860, 443, 1841, 1885, 1698, 864, 1747, 1083, 1591, 1785, 1577, 1001, 1025, 1837, 1504, 1839, 1900, 1932, 230, 968],
[1547, 1465, 896, 794, 613, 1383, 1806, 1984, 526, 671, 100, 519, 2037, 1631, 1724, 633, 824, 994, 893, 1448, 1793, 1237, 1855, 699, 349, 143, 270, 535, 1550, 101, 22, 1311],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]])
# fmt: on
torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS)
@slow
@require_torch_accelerator
def test_1b_model_integration_generate_batched(self):
"""
Test the generated tokens match the ones from the original model implementation.
Such tokens are to be retrieved using https://gist.github.com/eustlb/bcc532b53161bc31da3d66cb07ae193f, which is a script that infers the original model.
"""
processor = AutoProcessor.from_pretrained(self.model_checkpoint)
ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
conversation = [
[
{
"role": f"{ds[0]['speaker_id']}",
"content": [
{"type": "text", "text": ds[0]["text"]},
{"type": "audio", "path": ds[0]["audio"]["array"]},
],
},
{
"role": f"{ds[1]['speaker_id']}",
"content": [
{"type": "text", "text": ds[1]["text"]},
],
},
],
[
{
"role": f"{ds[0]['speaker_id']}",
"content": [
{"type": "text", "text": ds[0]["text"]},
],
}
],
]
inputs = processor.apply_chat_template(
conversation,
tokenize=True,
return_dict=True,
).to(torch_device)
model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device)
output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False)
# fmt: off
EXPECTED_OUTPUT_TOKENS = torch.tensor([
[
[1140, 1818, 1713, 1072, 1029, 1185, 697, 358, 220, 481, 1127, 1779, 817, 891, 958, 1058, 672, 495, 426, 1135, 236, 1440, 829, 2023, 1097, 94, 926, 1830, 114, 307, 235, 1190],
[955, 968, 696, 676, 52, 618, 0, 1818, 1285, 143, 1733, 1268, 1317, 1510, 1027, 2033, 1276, 1744, 790, 638, 1179, 1125, 650, 266, 1180, 364, 1015, 1604, 1152, 154, 178, 284],
[1925, 274, 433, 273, 1391, 1528, 1683, 1120, 976, 944, 357, 1681, 847, 1783, 546, 857, 1662, 1695, 40, 152, 2039, 1076, 994, 1743, 265, 1751, 602, 981, 483, 981, 538, 1381],
[1908, 1625, 1975, 729, 1067, 1844, 837, 1849, 224, 1223, 1037, 1188, 1428, 1977, 317, 530, 990, 1670, 766, 1411, 811, 154, 433, 1645, 1565, 1291, 1390, 49, 1160, 1464, 1911, 1961],
[1908, 566, 175, 1387, 1437, 1873, 1785, 1536, 961, 414, 406, 1753, 835, 284, 764, 1522, 1889, 1816, 840, 440, 756, 860, 1753, 516, 601, 1498, 280, 1425, 1904, 1540, 1074, 314],
[1860, 296, 1766, 361, 1155, 1675, 528, 1975, 1286, 113, 1656, 237, 372, 580, 1571, 1958, 502, 893, 1300, 261, 313, 455, 693, 1658, 654, 1585, 1723, 721, 178, 679, 908, 1077],
[1165, 1787, 1877, 1904, 85, 609, 1007, 1724, 1959, 245, 645, 463, 1321, 1695, 192, 711, 1892, 1193, 302, 1835, 69, 940, 148, 913, 110, 108, 1244, 1510, 165, 726, 745, 1746],
[1405, 1410, 186, 1569, 1214, 1920, 1946, 1907, 990, 1152, 1401, 1713, 541, 115, 423, 616, 1191, 1149, 1122, 9, 303, 195, 906, 566, 1718, 668, 1637, 1975, 51, 2005, 1260, 1672],
[1932, 780, 143, 110, 286, 1460, 1136, 1366, 1788, 446, 645, 587, 1708, 189, 1295, 526, 1667, 735, 707, 1215, 27, 834, 1865, 182, 1776, 1130, 528, 1523, 1156, 316, 492, 1666],
[1437, 364, 314, 432, 575, 1640, 529, 1128, 973, 789, 1820, 808, 1317, 1681, 347, 471, 737, 1626, 1386, 75, 433, 517, 365, 1982, 1434, 1378, 1059, 56, 1475, 653, 1507, 861],
[724, 538, 1140, 1853, 76, 402, 0, 397, 330, 1787, 1382, 682, 1134, 296, 377, 997, 705, 627, 1700, 17, 1791, 1000, 1271, 1019, 1552, 1521, 668, 534, 433, 344, 1007, 1046],
[925, 1297, 1017, 1785, 1403, 520, 1603, 1908, 665, 1827, 951, 1588, 1526, 414, 1945, 1153, 1933, 1571, 1821, 104, 179, 769, 619, 117, 56, 790, 721, 992, 1284, 1495, 1459, 823],
[629, 1208, 689, 924, 1617, 1100, 1028, 1231, 1708, 1582, 200, 2011, 1611, 1966, 1153, 1326, 2036, 1515, 884, 1790, 581, 549, 1491, 701, 973, 836, 2031, 1249, 1411, 365, 1946, 1552],
[1281, 1305, 610, 1666, 676, 544, 1788, 315, 159, 809, 1333, 1785, 1159, 1084, 1356, 318, 1933, 854, 475, 638, 1616, 1801, 1816, 1921, 283, 1745, 814, 974, 1056, 1316, 1509, 2031],
[9, 212, 1590, 163, 1289, 923, 2046, 1620, 632, 127, 963, 405, 850, 471, 1430, 108, 1845, 1196, 1928, 143, 1717, 1054, 1288, 1351, 1340, 1294, 831, 480, 1562, 2004, 483, 1776],
[221, 142, 1555, 1434, 1481, 1371, 1873, 1607, 207, 631, 1042, 1084, 472, 465, 1772, 1002, 1761, 1912, 1298, 1918, 685, 1053, 1635, 1536, 497, 55, 1432, 1394, 1512, 365, 2026, 1210],
[1741, 1923, 930, 1423, 1258, 1227, 879, 1217, 1999, 422, 420, 1832, 1660, 1542, 92, 2000, 1790, 1909, 56, 695, 704, 1752, 371, 792, 625, 328, 567, 1397, 1557, 390, 1424, 14],
[1178, 812, 577, 895, 1386, 339, 1467, 844, 235, 703, 551, 2021, 1592, 1042, 353, 621, 1672, 653, 2029, 103, 766, 182, 2016, 1921, 556, 1092, 1579, 626, 1950, 70, 1467, 850],
[1352, 472, 577, 351, 1126, 1943, 52, 2028, 430, 1017, 1136, 645, 820, 2028, 723, 1385, 1922, 323, 106, 267, 438, 1064, 202, 1249, 244, 1962, 625, 1380, 476, 924, 1221, 1854],
[905, 811, 374, 2021, 1067, 675, 927, 427, 416, 1521, 663, 77, 457, 1849, 1362, 262, 1669, 1238, 286, 102, 555, 1809, 1585, 1918, 972, 1446, 688, 523, 1904, 943, 17, 904],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
[
[1375, 203, 265, 164, 200, 1867, 976, 924, 1972, 1637, 1048, 271, 1912, 1430, 853, 1942, 260, 1642, 400, 57, 1376, 1626, 1821, 1163, 619, 777, 1076, 951, 389, 1820, 84, 1417],
[914, 527, 286, 968, 305, 1314, 805, 1703, 87, 559, 1980, 1124, 1726, 36, 1139, 618, 1628, 519, 1943, 781, 400, 1265, 438, 113, 87, 856, 465, 162, 1099, 352, 1141, 274],
[1408, 6, 126, 2009, 90, 996, 934, 134, 1857, 126, 602, 876, 1092, 1962, 1205, 828, 707, 1063, 393, 1533, 123, 1086, 1749, 1324, 1, 1763, 1707, 1191, 34, 1323, 1017, 1787],
[1000, 683, 1630, 703, 1574, 587, 25, 1049, 213, 1270, 1641, 1072, 1892, 1634, 1603, 90, 867, 2037, 1021, 715, 206, 507, 1138, 959, 1822, 1785, 280, 1100, 1660, 251, 1903, 988],
[1657, 1981, 246, 1048, 1952, 451, 305, 423, 2000, 416, 756, 1748, 7, 748, 1866, 1795, 1682, 1832, 338, 212, 1685, 518, 154, 1407, 416, 765, 776, 25, 55, 458, 612, 262],
[1034, 564, 667, 1474, 1212, 350, 712, 941, 1151, 1182, 1280, 640, 924, 1722, 1816, 458, 226, 359, 1518, 102, 1203, 459, 676, 1788, 1110, 393, 1974, 1721, 795, 1459, 798, 1723],
[742, 1616, 119, 653, 441, 679, 246, 1432, 486, 1615, 1191, 500, 650, 223, 687, 1765, 1875, 963, 1385, 863, 151, 1771, 458, 1170, 737, 1932, 785, 1954, 1067, 16, 1986, 2029],
[1437, 1078, 1767, 1452, 1392, 45, 2010, 1664, 245, 2015, 1416, 1055, 457, 985, 740, 1594, 1562, 1838, 258, 1431, 701, 604, 1813, 352, 792, 632, 21, 895, 70, 609, 850, 1599],
[983, 1961, 54, 135, 846, 711, 473, 1630, 1373, 1094, 251, 525, 632, 1014, 1594, 1594, 1752, 398, 1266, 1357, 942, 1680, 191, 874, 483, 1291, 381, 1873, 1964, 1278, 1477, 122],
[1663, 1969, 1887, 113, 145, 251, 1133, 156, 245, 1641, 209, 1322, 2037, 836, 539, 667, 940, 797, 1758, 1357, 191, 1137, 587, 1699, 27, 701, 395, 99, 1682, 876, 762, 839],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
])
# fmt: on
torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS)
| CsmForConditionalGenerationIntegrationTest |
python | jschneier__django-storages | tests/test_gcloud.py | {
"start": 963,
"end": 22752
} | class ____(GCloudTestCase):
def test_open_read(self):
"""
Test opening a file and reading from it
"""
data = b"This is some test read data."
with self.storage.open(self.filename) as f:
self.storage._client.bucket.assert_called_with(self.bucket_name)
self.storage._bucket.get_blob.assert_called_with(
self.filename, chunk_size=None
)
f.blob.download_to_file = lambda tmpfile, **kwargs: tmpfile.write(data)
self.assertEqual(f.read(), data)
def test_open_read_num_bytes(self):
data = b"This is some test read data."
num_bytes = 10
with self.storage.open(self.filename) as f:
self.storage._client.bucket.assert_called_with(self.bucket_name)
self.storage._bucket.get_blob.assert_called_with(
self.filename, chunk_size=None
)
f.blob.download_to_file = lambda tmpfile, **kwargs: tmpfile.write(data)
self.assertEqual(f.read(num_bytes), data[0:num_bytes])
def test_open_read_nonexistent(self):
self.storage._bucket = mock.MagicMock()
self.storage._bucket.get_blob.return_value = None
self.assertRaises(FileNotFoundError, self.storage.open, self.filename)
self.storage._bucket.get_blob.assert_called_with(self.filename, chunk_size=None)
def test_open_read_nonexistent_unicode(self):
filename = "ủⓝï℅ⅆℇ.txt"
self.storage._bucket = mock.MagicMock()
self.storage._bucket.get_blob.return_value = None
self.assertRaises(FileNotFoundError, self.storage.open, filename)
@mock.patch("storages.backends.gcloud.Blob")
def test_open_write(self, MockBlob):
"""
Test opening a file and writing to it
"""
data = "This is some test write data."
# Simulate the file not existing before the write
self.storage._bucket = mock.MagicMock()
self.storage._bucket.get_blob.return_value = None
self.storage.default_acl = "projectPrivate"
f = self.storage.open(self.filename, "wb")
MockBlob.assert_called_with(
self.filename, self.storage._bucket, chunk_size=None
)
f.write(data)
tmpfile = f._file
# File data is not actually written until close(), so do that.
f.close()
MockBlob().upload_from_file.assert_called_with(
tmpfile,
rewind=True,
content_type=mimetypes.guess_type(self.filename)[0],
retry=DEFAULT_RETRY,
predefined_acl="projectPrivate",
)
def test_save(self):
data = "This is some test content."
content = ContentFile(data)
self.storage.save(self.filename, content)
self.storage._client.bucket.assert_called_with(self.bucket_name)
self.storage._bucket.get_blob().upload_from_file.assert_called_with(
content,
rewind=True,
retry=DEFAULT_RETRY,
size=len(data),
content_type=mimetypes.guess_type(self.filename)[0],
predefined_acl=None,
)
def test_save2(self):
data = "This is some test ủⓝï℅ⅆℇ content."
filename = "ủⓝï℅ⅆℇ.txt"
content = ContentFile(data)
self.storage.save(filename, content)
self.storage._client.bucket.assert_called_with(self.bucket_name)
self.storage._bucket.get_blob().upload_from_file.assert_called_with(
content,
rewind=True,
retry=DEFAULT_RETRY,
size=len(data),
content_type=mimetypes.guess_type(filename)[0],
predefined_acl=None,
)
def test_save_with_default_acl(self):
data = "This is some test ủⓝï℅ⅆℇ content."
filename = "ủⓝï℅ⅆℇ.txt"
content = ContentFile(data)
# ACL Options
# 'projectPrivate', 'bucketOwnerRead', 'bucketOwnerFullControl',
# 'private', 'authenticatedRead', 'publicRead', 'publicReadWrite'
self.storage.default_acl = "publicRead"
self.storage.save(filename, content)
self.storage._client.bucket.assert_called_with(self.bucket_name)
self.storage._bucket.get_blob().upload_from_file.assert_called_with(
content,
rewind=True,
retry=DEFAULT_RETRY,
size=len(data),
content_type=mimetypes.guess_type(filename)[0],
predefined_acl="publicRead",
)
def test_delete(self):
self.storage.delete(self.filename)
self.storage._client.bucket.assert_called_with(self.bucket_name)
self.storage._bucket.delete_blob.assert_called_with(
self.filename, retry=DEFAULT_RETRY
)
def test_exists(self):
self.storage._bucket = mock.MagicMock()
self.assertTrue(self.storage.exists(self.filename))
self.storage._bucket.get_blob.assert_called_with(self.filename)
self.storage._bucket.reset_mock()
self.storage._bucket.get_blob.return_value = None
self.assertFalse(self.storage.exists(self.filename))
self.storage._bucket.get_blob.assert_called_with(self.filename)
def test_exists_no_bucket(self):
# exists('') should return False if the bucket doesn't exist
self.storage._client = mock.MagicMock()
self.storage._client.get_bucket.side_effect = NotFound("dang")
self.assertFalse(self.storage.exists(""))
def test_exists_bucket(self):
# exists('') should return True if the bucket exists
self.assertTrue(self.storage.exists(""))
def test_listdir(self):
file_names = ["some/path/1.txt", "2.txt", "other/path/3.txt", "4.txt"]
subdir = ""
self.storage._bucket = mock.MagicMock()
blobs, prefixes = [], []
for name in file_names:
directory = name.rsplit("/", 1)[0] + "/" if "/" in name else ""
if directory == subdir:
blob = mock.MagicMock(spec=Blob)
blob.name = name.split("/")[-1]
blobs.append(blob)
else:
prefixes.append(directory.split("/")[0] + "/")
return_value = mock.MagicMock()
return_value.__iter__ = mock.MagicMock(return_value=iter(blobs))
return_value.prefixes = prefixes
self.storage._bucket.list_blobs.return_value = return_value
dirs, files = self.storage.listdir("")
self.assertEqual(len(dirs), 2)
for directory in ["some", "other"]:
self.assertTrue(
directory in dirs,
""" "{}" not in directory list "{}".""".format(directory, dirs),
)
self.assertEqual(len(files), 2)
for filename in ["2.txt", "4.txt"]:
self.assertTrue(
filename in files,
""" "{}" not in file list "{}".""".format(filename, files),
)
def test_listdir_subdir(self):
file_names = ["some/path/1.txt", "some/2.txt"]
subdir = "some/"
self.storage._bucket = mock.MagicMock()
blobs, prefixes = [], []
for name in file_names:
directory = name.rsplit("/", 1)[0] + "/"
if directory == subdir:
blob = mock.MagicMock(spec=Blob)
blob.name = name.split("/")[-1]
blobs.append(blob)
else:
prefixes.append(directory.split(subdir)[1])
return_value = mock.MagicMock()
return_value.__iter__ = mock.MagicMock(return_value=iter(blobs))
return_value.prefixes = prefixes
self.storage._bucket.list_blobs.return_value = return_value
dirs, files = self.storage.listdir(subdir)
self.assertEqual(len(dirs), 1)
self.assertTrue(
"path" in dirs, """ "path" not in directory list "{}".""".format(dirs)
)
self.assertEqual(len(files), 1)
self.assertTrue(
"2.txt" in files, """ "2.txt" not in files list "{}".""".format(files)
)
def test_size(self):
size = 1234
self.storage._bucket = mock.MagicMock()
blob = mock.MagicMock()
blob.size = size
self.storage._bucket.get_blob.return_value = blob
self.assertEqual(self.storage.size(self.filename), size)
self.storage._bucket.get_blob.assert_called_with(self.filename)
def test_size_no_file(self):
self.storage._bucket = mock.MagicMock()
self.storage._bucket.get_blob.return_value = None
self.assertRaises(NotFound, self.storage.size, self.filename)
def test_get_modified_time(self):
naive_date = datetime.datetime(2017, 1, 2, 3, 4, 5, 678)
aware_date = timezone.make_aware(naive_date, datetime.timezone.utc)
self.storage._bucket = mock.MagicMock()
blob = mock.MagicMock()
blob.updated = aware_date
self.storage._bucket.get_blob.return_value = blob
with self.settings(TIME_ZONE="America/Montreal", USE_TZ=False):
mt = self.storage.get_modified_time(self.filename)
self.assertTrue(timezone.is_naive(mt))
naive_date_montreal = timezone.make_naive(aware_date)
self.assertEqual(mt, naive_date_montreal)
self.storage._bucket.get_blob.assert_called_with(self.filename)
with self.settings(TIME_ZONE="America/Montreal", USE_TZ=True):
mt = self.storage.get_modified_time(self.filename)
self.assertTrue(timezone.is_aware(mt))
self.assertEqual(mt, aware_date)
self.storage._bucket.get_blob.assert_called_with(self.filename)
def test_get_created_time(self):
naive_date = datetime.datetime(2017, 1, 2, 3, 4, 5, 678)
aware_date = timezone.make_aware(naive_date, datetime.timezone.utc)
self.storage._bucket = mock.MagicMock()
blob = mock.MagicMock()
blob.time_created = aware_date
self.storage._bucket.get_blob.return_value = blob
with self.settings(TIME_ZONE="America/Montreal", USE_TZ=False):
mt = self.storage.get_created_time(self.filename)
self.assertTrue(timezone.is_naive(mt))
naive_date_montreal = timezone.make_naive(aware_date)
self.assertEqual(mt, naive_date_montreal)
self.storage._bucket.get_blob.assert_called_with(self.filename)
with self.settings(TIME_ZONE="America/Montreal", USE_TZ=True):
mt = self.storage.get_created_time(self.filename)
self.assertTrue(timezone.is_aware(mt))
self.assertEqual(mt, aware_date)
self.storage._bucket.get_blob.assert_called_with(self.filename)
def test_url_public_object(self):
url = "https://example.com/mah-bukkit/{}".format(self.filename)
self.storage.default_acl = "publicRead"
self.storage._bucket = mock.MagicMock()
blob = mock.MagicMock()
blob.public_url = url
blob.generate_signed_url = "not called"
self.storage._bucket.blob.return_value = blob
self.assertEqual(self.storage.url(self.filename), url)
self.storage._bucket.blob.assert_called_with(self.filename)
def test_url_not_public_file(self):
secret_filename = "secret_file.txt"
self.storage._bucket = mock.MagicMock()
blob = mock.MagicMock()
generate_signed_url = mock.MagicMock(return_value="http://signed_url")
blob.public_url = "http://this_is_public_url"
blob.generate_signed_url = generate_signed_url
self.storage._bucket.blob.return_value = blob
url = self.storage.url(secret_filename)
self.storage._bucket.blob.assert_called_with(secret_filename)
self.assertEqual(url, "http://signed_url")
blob.generate_signed_url.assert_called_with(
expiration=timedelta(seconds=86400), version="v4"
)
def test_url_not_public_file_with_custom_expires(self):
secret_filename = "secret_file.txt"
self.storage._bucket = mock.MagicMock()
blob = mock.MagicMock()
generate_signed_url = mock.MagicMock(return_value="http://signed_url")
blob.generate_signed_url = generate_signed_url
self.storage._bucket.blob.return_value = blob
self.storage.expiration = timedelta(seconds=3600)
url = self.storage.url(secret_filename)
self.storage._bucket.blob.assert_called_with(secret_filename)
self.assertEqual(url, "http://signed_url")
blob.generate_signed_url.assert_called_with(
expiration=timedelta(seconds=3600), version="v4"
)
def test_custom_endpoint_with_parameters(self):
self.storage.custom_endpoint = "https://example.com"
self.storage.default_acl = "publicRead"
url = "{}/{}".format(self.storage.custom_endpoint, self.filename)
self.assertEqual(self.storage.url(self.filename), url)
bucket_name = "hyacinth"
self.storage.default_acl = "projectPrivate"
self.storage._bucket = mock.MagicMock()
blob = mock.MagicMock()
generate_signed_url = mock.MagicMock()
blob.bucket = mock.MagicMock()
type(blob.bucket).name = mock.PropertyMock(return_value=bucket_name)
blob.generate_signed_url = generate_signed_url
self.storage._bucket.blob.return_value = blob
parameters = {"version": "v2", "method": "POST"}
self.storage.url(self.filename, parameters=parameters)
blob.generate_signed_url.assert_called_with(
bucket_bound_hostname=self.storage.custom_endpoint,
expiration=timedelta(seconds=86400),
method="POST",
version="v2",
)
def test_get_available_name(self):
self.storage.file_overwrite = True
self.assertEqual(self.storage.get_available_name(self.filename), self.filename)
self.storage._bucket = mock.MagicMock()
self.storage._bucket.get_blob.return_value = None
self.storage.file_overwrite = False
self.assertEqual(self.storage.get_available_name(self.filename), self.filename)
self.storage._bucket.get_blob.assert_called_with(self.filename)
def test_get_available_name_unicode(self):
filename = "ủⓝï℅ⅆℇ.txt"
self.assertEqual(self.storage.get_available_name(filename), filename)
def test_cache_control(self):
data = "This is some test content."
filename = "cache_control_file.txt"
content = ContentFile(data)
with override_settings(
GS_OBJECT_PARAMETERS={"cache_control": "public, max-age=604800"}
):
self.storage = gcloud.GoogleCloudStorage(bucket_name=self.bucket_name)
self.storage.save(filename, content)
bucket = self.storage.client.bucket(self.bucket_name)
blob = bucket.get_blob(filename)
self.assertEqual(blob.cache_control, "public, max-age=604800")
def test_storage_save_gzip_twice(self):
"""Test saving the same file content twice with gzip enabled."""
# Given
self.storage.gzip = True
name = "test_storage_save.css"
content = ContentFile("I should be gzip'd")
# When
self.storage.save(name, content)
self.storage.save("test_storage_save_2.css", content)
# Then
self.storage._client.bucket.assert_called_with(self.bucket_name)
obj = self.storage._bucket.get_blob()
self.assertEqual(obj.content_encoding, "gzip")
obj.upload_from_file.assert_called_with(
mock.ANY,
rewind=True,
retry=DEFAULT_RETRY,
size=None,
predefined_acl=None,
content_type="text/css",
)
args, kwargs = obj.upload_from_file.call_args
content = args[0]
zfile = gzip.GzipFile(mode="rb", fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_compress_content_len(self):
"""Test that file returned by _compress_content() is readable."""
self.storage.gzip = True
content = ContentFile("I should be gzip'd")
content = self.storage._compress_content(content)
self.assertTrue(len(content.read()) > 0)
def test_location_leading_slash(self):
msg = (
"GoogleCloudStorage.location cannot begin with a leading slash. "
"Found '/'. Use '' instead."
)
with self.assertRaises(ImproperlyConfigured, msg=msg):
gcloud.GoogleCloudStorage(location="/")
def test_override_settings(self):
with override_settings(GS_LOCATION="foo1"):
storage = gcloud.GoogleCloudStorage()
self.assertEqual(storage.location, "foo1")
with override_settings(GS_LOCATION="foo2"):
storage = gcloud.GoogleCloudStorage()
self.assertEqual(storage.location, "foo2")
def test_override_class_variable(self):
class MyStorage1(gcloud.GoogleCloudStorage):
location = "foo1"
storage = MyStorage1()
self.assertEqual(storage.location, "foo1")
class MyStorage2(gcloud.GoogleCloudStorage):
location = "foo2"
storage = MyStorage2()
self.assertEqual(storage.location, "foo2")
def test_override_init_argument(self):
storage = gcloud.GoogleCloudStorage(location="foo1")
self.assertEqual(storage.location, "foo1")
storage = gcloud.GoogleCloudStorage(location="foo2")
self.assertEqual(storage.location, "foo2")
def test_dupe_file_chunk_size(self):
"""
Tests that recreating a file that already exists in the bucket
respects the `GS_BLOB_CHUNK_SIZE` setting
"""
chunk_size = 1024 * 256
with override_settings(GS_BLOB_CHUNK_SIZE=chunk_size):
# Creating a new storage here since chunk-size is set as an
# attribute on init
storage = gcloud.GoogleCloudStorage()
storage._bucket = mock.MagicMock()
# Confirms that `get_blob` always returns a truthy value
storage._bucket.get_blob.return_value = True
storage.open(self.filename, "wb")
storage._bucket.get_blob.assert_called_with(
self.filename, chunk_size=chunk_size
)
def test_iam_sign_blob_setting(self):
self.assertEqual(self.storage.iam_sign_blob, False)
with override_settings(GS_IAM_SIGN_BLOB=True):
storage = gcloud.GoogleCloudStorage()
self.assertEqual(storage.iam_sign_blob, True)
def test_sa_email_setting(self):
self.assertEqual(self.storage.sa_email, None)
with override_settings(GS_SA_EMAIL="service_account_email@gmail.com"):
storage = gcloud.GoogleCloudStorage()
self.assertEqual(storage.sa_email, "service_account_email@gmail.com")
def test_iam_sign_blob_no_service_account_email_raises_attribute_error(self):
with override_settings(GS_IAM_SIGN_BLOB=True):
storage = gcloud.GoogleCloudStorage()
storage._bucket = mock.MagicMock()
storage.credentials = mock.MagicMock()
# deleting mocked attribute to simulate no service_account_email
del storage.credentials.service_account_email
# simulating access token
storage.credentials.token = "1234"
# no sa_email or adc service_account_email found
with self.assertRaises(
AttributeError,
msg=(
"Sign Blob API requires service_account_email to be available "
"through ADC or setting `sa_email`"
),
):
storage.url(self.filename)
def test_iam_sign_blob_with_adc_service_account_email(self):
with override_settings(GS_IAM_SIGN_BLOB=True):
storage = gcloud.GoogleCloudStorage()
storage._bucket = mock.MagicMock()
storage.credentials = mock.MagicMock()
# simulating adc service account email
storage.credentials.service_account_email = "service@gmail.com"
# simulating access token
storage.credentials.token = "1234"
blob = mock.MagicMock()
storage._bucket.blob.return_value = blob
storage.url(self.filename)
# called with adc service account email and access token
blob.generate_signed_url.assert_called_with(
expiration=timedelta(seconds=86400),
version="v4",
service_account_email=storage.credentials.service_account_email,
access_token=storage.credentials.token,
)
def test_iam_sign_blob_with_sa_email_setting(self):
with override_settings(
GS_IAM_SIGN_BLOB=True, GS_SA_EMAIL="service_account_email@gmail.com"
):
storage = gcloud.GoogleCloudStorage()
storage._bucket = mock.MagicMock()
storage.credentials = mock.MagicMock()
# simulating adc service account email
storage.credentials.service_account_email = "service@gmail.com"
# simulating access token
storage.credentials.token = "1234"
blob = mock.MagicMock()
storage._bucket.blob.return_value = blob
storage.url(self.filename)
# called with sa_email as it has final say
blob.generate_signed_url.assert_called_with(
expiration=timedelta(seconds=86400),
version="v4",
service_account_email=storage.sa_email,
access_token=storage.credentials.token,
)
| GCloudStorageTests |
python | doocs__leetcode | solution/3500-3599/3549.Multiply Two Polynomials/Solution.py | {
"start": 0,
"end": 1494
} | class ____:
def multiply(self, poly1: List[int], poly2: List[int]) -> List[int]:
if not poly1 or not poly2:
return []
m = len(poly1) + len(poly2) - 1
n = 1
while n < m:
n <<= 1
fa = list(map(complex, poly1)) + [0j] * (n - len(poly1))
fb = list(map(complex, poly2)) + [0j] * (n - len(poly2))
self._fft(fa, invert=False)
self._fft(fb, invert=False)
for i in range(n):
fa[i] *= fb[i]
self._fft(fa, invert=True)
return [int(round(fa[i].real)) for i in range(m)]
def _fft(self, a: List[complex], invert: bool) -> None:
n = len(a)
j = 0
for i in range(1, n):
bit = n >> 1
while j & bit:
j ^= bit
bit >>= 1
j ^= bit
if i < j:
a[i], a[j] = a[j], a[i]
len_ = 2
while len_ <= n:
ang = 2 * math.pi / len_ * (-1 if invert else 1)
wlen = complex(math.cos(ang), math.sin(ang))
for i in range(0, n, len_):
w = 1 + 0j
half = i + len_ // 2
for j in range(i, half):
u = a[j]
v = a[j + len_ // 2] * w
a[j] = u + v
a[j + len_ // 2] = u - v
w *= wlen
len_ <<= 1
if invert:
for i in range(n):
a[i] /= n
| Solution |
python | scikit-image__scikit-image | tests/skimage/io/test_imageio.py | {
"start": 1342,
"end": 2724
} | class ____:
@pytest.mark.parametrize(
"shape,dtype",
[
# float32, float64 can't be saved as PNG and raise
# uint32 is not roundtripping properly
((10, 10), np.uint8),
((10, 10), np.uint16),
((10, 10, 2), np.uint8),
((10, 10, 3), np.uint8),
((10, 10, 4), np.uint8),
],
)
def test_imsave_roundtrip(self, shape, dtype, tmp_path):
if np.issubdtype(dtype, np.floating):
min_ = 0
max_ = 1
else:
min_ = 0
max_ = np.iinfo(dtype).max
expected = np.linspace(
min_, max_, endpoint=True, num=np.prod(shape), dtype=dtype
)
expected = expected.reshape(shape)
file_path = tmp_path / "roundtrip.png"
imsave(file_path, expected)
actual = imread(file_path)
np.testing.assert_array_almost_equal(actual, expected)
def test_bool_array_save(self):
with NamedTemporaryFile(suffix='.png') as f:
fname = f.name
with pytest.warns(UserWarning, match=r'.* is a boolean image') as record:
a = np.zeros((5, 5), bool)
a[2, 2] = True
imsave(fname, a)
assert_stacklevel(record)
def test_return_class():
testing.assert_equal(type(imread(fetch('data/color.png'))), np.ndarray)
| TestSave |
python | marshmallow-code__marshmallow | src/marshmallow/validate.py | {
"start": 21997,
"end": 23138
} | class ____(OneOf):
"""Validator which succeeds if ``value`` is a sequence and each element
in the sequence is also in the sequence passed as ``choices``. Empty input
is considered valid.
:param choices: Same as :class:`OneOf`.
:param labels: Same as :class:`OneOf`.
:param error: Same as :class:`OneOf`.
.. versionchanged:: 3.0.0b2
Duplicate values are considered valid.
.. versionchanged:: 3.0.0b2
Empty input is considered valid. Use `validate.Length(min=1) <marshmallow.validate.Length>`
to validate against empty inputs.
"""
default_message = "One or more of the choices you made was not in: {choices}."
def _format_error(self, value) -> str:
value_text = ", ".join(str(val) for val in value)
return super()._format_error(value_text)
def __call__(self, value: typing.Sequence[_T]) -> typing.Sequence[_T]:
# We can't use set.issubset because does not handle unhashable types
for val in value:
if val not in self.choices:
raise ValidationError(self._format_error(value))
return value
| ContainsOnly |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_condition_evaluations.py | {
"start": 3159,
"end": 4476
} | class ____(graphene.ObjectType):
uniqueId = graphene.NonNull(graphene.String)
description = graphene.NonNull(graphene.String)
entityKey = graphene.NonNull(GrapheneEntityKey)
startTimestamp = graphene.Field(graphene.Float)
endTimestamp = graphene.Field(graphene.Float)
numTrue = graphene.NonNull(graphene.Int)
numCandidates = graphene.Field(graphene.Int)
childUniqueIds = non_null_list(graphene.String)
class Meta:
name = "PartitionedAssetConditionEvaluationNode"
def __init__(self, evaluation: AutomationConditionEvaluation):
super().__init__(
uniqueId=evaluation.condition_snapshot.unique_id,
description=evaluation.condition_snapshot.description,
entityKey=GrapheneEntityKey.from_entity_key(evaluation.key),
startTimestamp=evaluation.start_timestamp,
endTimestamp=evaluation.end_timestamp,
numTrue=evaluation.true_subset.size,
numCandidates=evaluation.candidate_subset.size
if isinstance(evaluation.candidate_subset, SerializableEntitySubset)
else None,
childUniqueIds=[
child.condition_snapshot.unique_id for child in evaluation.child_evaluations
],
)
| GraphenePartitionedAssetConditionEvaluationNode |
python | huggingface__transformers | tests/models/sam2_video/test_modeling_sam2_video.py | {
"start": 2032,
"end": 23858
} | class ____(unittest.TestCase):
def setUp(self):
super().setUp()
self.video_model = Sam2VideoModel.from_pretrained("facebook/sam2.1-hiera-tiny").to(torch.float32)
self.processor = Sam2VideoProcessor.from_pretrained("facebook/sam2.1-hiera-tiny")
self.video_model.to(torch_device)
self.video_model.eval()
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
backend_empty_cache(torch_device)
def test_inference_mask_generation_video_one_point(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350]]]],
input_labels=[[[1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
video_res_masks = self.processor.post_process_masks([low_res_masks], [raw_video.shape[-3:-1]], binarize=False)[
0
]
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-21.4113, -21.4113, -22.9687], [-23.3090, -23.3090, -24.2606], [-27.5705, -27.5705, -27.1616]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-21.4113, -21.4113], [-23.3090, -23.3090]]]],
[[[[-20.1003, -20.1003], [-21.2294, -21.2294]]]],
[[[[-19.9619, -19.9619], [-21.3060, -21.3060]]]],
],
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_mask_generation_video_one_point_propagate_in_video_directly(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350]]]],
input_labels=[[[1]]],
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-21.4113, -21.4113], [-23.3090, -23.3090]]]],
[[[[-20.1003, -20.1003], [-21.2294, -21.2294]]]],
[[[[-19.9619, -19.9619], [-21.3060, -21.3060]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_mask_generation_video_multi_points(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-11.1487, -11.1487, -11.4202], [-11.6522, -11.6522, -11.8057], [-12.7829, -12.7829, -12.6715]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-11.1487, -11.1487], [-11.6522, -11.6522]]]],
[[[[-15.3821, -15.3821], [-16.0333, -16.0333]]]],
[[[[-15.4855, -15.4855], [-16.4230, -16.4230]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_one_bb(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_boxes=[[[300, 0, 500, 400]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-13.1427, -13.1427, -13.6418], [-13.7753, -13.7753, -14.1144], [-15.1957, -15.1957, -15.1757]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-13.1427, -13.1427], [-13.7753, -13.7753]]]],
[[[[-14.9998, -14.9998], [-15.7086, -15.7086]]]],
[[[[-15.4558, -15.4558], [-16.1649, -16.1649]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_one_point_one_bb(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_boxes=[[[300, 0, 500, 400]]],
input_points=[[[[460, 60]]]],
input_labels=[[[1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-12.3525, -12.3525, -12.8907], [-13.0608, -13.0608, -13.4079], [-14.6511, -14.6511, -14.5694]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-12.3525, -12.3525], [-13.0608, -13.0608]]]],
[[[[-15.8181, -15.8181], [-16.4163, -16.4163]]]],
[[[[-15.8900, -15.8900], [-16.5953, -16.5953]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_multi_objects_multi_points(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_ids = [2, 3] # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_ids,
input_points=[[[[200, 300], [230, 250], [275, 175]], [[400, 150]]]],
input_labels=[[[1, 1, 0], [1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (2, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (2, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[:, 0, :2, :2], # first object
torch.tensor(
[[[-12.6294, -12.6294], [-13.3659, -13.3659]], [[-20.3319, -20.3319], [-22.0491, -22.0491]]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 2, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-12.6294, -12.6294], [-13.3659, -13.3659]]], [[[-20.3319, -20.3319], [-22.0491, -22.0491]]]],
[[[[-18.5249, -18.5249], [-19.5830, -19.5830]]], [[[-17.5537, -17.5537], [-19.2259, -19.2259]]]],
[[[[-14.2722, -14.2722], [-15.4622, -15.4622]]], [[[-18.3185, -18.3185], [-20.0314, -20.0314]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_mask_generation_video_batched_bb(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_ids = [2, 3] # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_ids,
input_boxes=[[[300, 0, 500, 400], [400, 0, 600, 400]]],
)
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
print(video_res_masks.shape)
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 2, 1, raw_video.shape[-3], raw_video.shape[-2]))
print(frames.shape)
print(frames[:3, :, :, :2, :2])
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-13.1427, -13.1427], [-13.7753, -13.7753]]], [[[-8.4576, -8.4576], [-8.7329, -8.7329]]]],
[[[[-14.9998, -14.9998], [-15.7086, -15.7086]]], [[[-9.2998, -9.2998], [-9.8947, -9.8947]]]],
[[[[-15.4558, -15.4558], [-16.1649, -16.1649]]], [[[-10.4880, -10.4880], [-11.2098, -11.2098]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_propagate_video_from_mask_input(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
# get input_mask
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
# set mask as input
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_masks=self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = sam2_video_output.pred_masks
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-10.0000, -10.0000], [-10.0000, -10.0000]]]],
[[[[-18.4807, -18.4807], [-19.1966, -19.1966]]]],
[[[[-20.0512, -20.0512], [-20.9110, -20.9110]]]],
],
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_propagate_on_streamed_video(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(inference_device=torch_device)
video_res_masks = []
max_frame_num_to_track = 3
for frame_idx, frame in enumerate(raw_video):
if frame_idx >= max_frame_num_to_track:
break
inputs = self.processor(images=frame, device=torch_device, return_tensors="pt")
if frame_idx == 0:
self.processor.add_inputs_to_inference_session(
inference_session,
frame_idx=0,
obj_ids=1,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
original_size=inputs.original_sizes[0],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame=inputs.pixel_values[0])
video_res_masks.append(
self.processor.post_process_masks(
[sam2_video_output.pred_masks], inputs.original_sizes, binarize=False
)[0]
)
video_res_masks = torch.stack(video_res_masks, dim=0)
self.assertEqual(
video_res_masks.shape, (max_frame_num_to_track, 1, 1, raw_video.shape[-3], raw_video.shape[-2])
)
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
video_res_masks[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-11.1487, -11.1487], [-11.6522, -11.6522]]]],
[[[[-15.3821, -15.3821], [-16.0333, -16.0333]]]],
[[[[-15.4855, -15.4855], [-16.4230, -16.4230]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
| Sam2VideoModelIntegrationTest |
python | pytorch__pytorch | torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py | {
"start": 1758,
"end": 2418
} | class ____(NamedTuple):
"""A feature set has 2 types of features"""
dense_features: torch.Tensor
sparse_features: torch.LongTensor
values: torch.Tensor
def _call_method(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def _remote_method(method, rref, *args, **kwargs):
args_tup = tuple([method, rref] + list(args))
return rpc.rpc_sync(rref.owner(), _call_method, args=args_tup, kwargs=kwargs)
def _remote_method_async(method, rref, *args, **kwargs):
args_tup = tuple([method, rref] + list(args))
return rpc.rpc_async(rref.owner(), _call_method, args=args_tup, kwargs=kwargs)
| FeatureSet |
python | doocs__leetcode | solution/2300-2399/2343.Query Kth Smallest Trimmed Number/Solution.py | {
"start": 0,
"end": 295
} | class ____:
def smallestTrimmedNumbers(
self, nums: List[str], queries: List[List[int]]
) -> List[int]:
ans = []
for k, trim in queries:
t = sorted((v[-trim:], i) for i, v in enumerate(nums))
ans.append(t[k - 1][1])
return ans
| Solution |
python | sympy__sympy | sympy/solvers/ode/single.py | {
"start": 67416,
"end": 70400
} | class ____(SingleODESolver):
r"""
Solves ODEs that only involve derivatives of the dependent variable using
a substitution of the form `f^n(x) = g(x)`.
For example any second order ODE of the form `f''(x) = h(f'(x), x)` can be
transformed into a pair of 1st order ODEs `g'(x) = h(g(x), x)` and
`f'(x) = g(x)`. Usually the 1st order ODE for `g` is easier to solve. If
that gives an explicit solution for `g` then `f` is found simply by
integration.
Examples
========
>>> from sympy import Function, dsolve, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> eq = Eq(x*f(x).diff(x)**2 + f(x).diff(x, 2), 0)
>>> dsolve(eq, f(x), hint='nth_order_reducible')
... # doctest: +NORMALIZE_WHITESPACE
Eq(f(x), C1 - sqrt(-1/C2)*log(-C2*sqrt(-1/C2) + x) + sqrt(-1/C2)*log(C2*sqrt(-1/C2) + x))
"""
hint = "nth_order_reducible"
has_integral = False
def _matches(self):
# Any ODE that can be solved with a substitution and
# repeated integration e.g.:
# `d^2/dx^2(y) + x*d/dx(y) = constant
#f'(x) must be finite for this to work
eq = self.ode_problem.eq_preprocessed
func = self.ode_problem.func
x = self.ode_problem.sym
r"""
Matches any differential equation that can be rewritten with a smaller
order. Only derivatives of ``func`` alone, wrt a single variable,
are considered, and only in them should ``func`` appear.
"""
# ODE only handles functions of 1 variable so this affirms that state
if len(func.args) != 1:
raise ValueError("Function must have exactly one argument")
vc = [d.variable_count[0] for d in eq.atoms(Derivative)
if d.expr == func and len(d.variable_count) == 1]
ords = [c for v, c in vc if v == x]
if len(ords) < 2:
return False
self.smallest = min(ords)
# make sure func does not appear outside of derivatives
D = Dummy()
if eq.subs(func.diff(x, self.smallest), D).has(func):
return False
return True
def _get_general_solution(self, *, simplify_flag: bool = True):
eq = self.ode_problem.eq
f = self.ode_problem.func.func
x = self.ode_problem.sym
n = self.smallest
# get a unique function name for g
names = [a.name for a in eq.atoms(AppliedUndef)]
while True:
name = Dummy().name
if name not in names:
g = Function(name)
break
w = f(x).diff(x, n)
geq = eq.subs(w, g(x))
gsol = dsolve(geq, g(x))
if not isinstance(gsol, list):
gsol = [gsol]
# Might be multiple solutions to the reduced ODE:
fsol = []
for gsoli in gsol:
fsoli = dsolve(gsoli.subs(g(x), w), f(x)) # or do integration n times
fsol.append(fsoli)
return fsol
| NthOrderReducible |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/assets.py | {
"start": 1830,
"end": 1983
} | class ____(BaseModel):
"""Asset watcher serializer for responses."""
name: str
trigger_id: int
created_date: datetime
| AssetWatcherResponse |
python | doocs__leetcode | solution/3600-3699/3674.Minimum Operations to Equalize Array/Solution.py | {
"start": 0,
"end": 121
} | class ____:
def minOperations(self, nums: List[int]) -> int:
return int(any(x != nums[0] for x in nums))
| Solution |
python | huggingface__transformers | tests/models/bert_generation/test_tokenization_bert_generation.py | {
"start": 987,
"end": 9473
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "google/bert_for_seq_generation_L-24_bbc_encoder"
tokenizer_class = BertGenerationTokenizer
test_rust_tokenizer = False
test_sentencepiece = True
@classmethod
def setUpClass(cls):
super().setUpClass()
tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(cls.tmpdirname)
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "<s>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id_with_added_voc(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<unk>")
self.assertEqual(vocab_keys[1], "<s>")
self.assertEqual(vocab_keys[-1], "<pad>")
self.assertEqual(len(vocab_keys), 1_002)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
def test_full_tokenizer(self):
tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[285, 46, 10, 170, 382],
)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids,
[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
)
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
],
)
@cached_property
def big_tokenizer(self):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
@slow
def test_tokenization_base_easy_symbols(self):
symbols = "Hello World!"
original_tokenizer_encodings = [18536, 2260, 101]
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
# @slow
def test_tokenization_base_hard_symbols(self):
symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
" add words that should not exist and be tokenized to <unk>, such as saoneuhaoesuth"
)
original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
2253,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
encoded_sequence = self.big_tokenizer(sequence, return_tensors="pt", return_token_type_ids=False)
batch_encoded_sequence = self.big_tokenizer(
[sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
)
config = BertGenerationConfig()
model = BertGenerationEncoder(config)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**encoded_sequence)
model(**batch_encoded_sequence)
@slow
def test_tokenizer_integration(self):
expected_encoding = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding,
model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
)
| BertGenerationTokenizationTest |
python | networkx__networkx | networkx/classes/reportviews.py | {
"start": 44809,
"end": 46132
} | class ____(OutMultiEdgeView):
"""A EdgeView class for inward edges of a MultiDiGraph"""
__slots__ = ()
def __setstate__(self, state):
self._graph = state["_graph"]
self._adjdict = state["_adjdict"]
self._nodes_nbrs = self._adjdict.items
dataview = InMultiEdgeDataView
def __init__(self, G):
self._graph = G
self._adjdict = G._pred if hasattr(G, "pred") else G._adj
self._nodes_nbrs = self._adjdict.items
def __iter__(self):
for n, nbrs in self._nodes_nbrs():
for nbr, kdict in nbrs.items():
for key in kdict:
yield (nbr, n, key)
def __contains__(self, e):
N = len(e)
if N == 3:
u, v, k = e
elif N == 2:
u, v = e
k = 0
else:
raise ValueError("MultiEdge must have length 2 or 3")
try:
return k in self._adjdict[v][u]
except KeyError:
return False
def __getitem__(self, e):
if isinstance(e, slice):
raise nx.NetworkXError(
f"{type(self).__name__} does not support slicing, "
f"try list(G.in_edges)[{e.start}:{e.stop}:{e.step}]"
)
u, v, k = e
return self._adjdict[v][u][k]
| InMultiEdgeView |
python | pypa__warehouse | tests/common/db/organizations.py | {
"start": 3407,
"end": 3653
} | class ____(WarehouseFactory):
class Meta:
model = OrganizationRole
role_name = OrganizationRoleType.Owner
user = factory.SubFactory(UserFactory)
organization = factory.SubFactory(OrganizationFactory)
| OrganizationRoleFactory |
python | pydantic__pydantic | pydantic-core/tests/validators/test_union.py | {
"start": 31457,
"end": 51449
} | class ____:
class ModelA:
a: int = 0
class ModelB:
b: int = 0
model_a_schema = core_schema.model_schema(
ModelA,
core_schema.model_fields_schema(
fields={'a': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0))}
),
)
model_b_schema = core_schema.model_schema(
ModelB,
core_schema.model_fields_schema(
fields={'b': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0))}
),
)
@pytest.mark.parametrize('choices', permute_choices([model_a_schema, model_b_schema]))
def test_fields_set_ensures_best_match(self, choices) -> None:
validator = SchemaValidator(core_schema.union_schema(choices))
assert isinstance(validator.validate_python({'a': 1}), self.ModelA)
assert isinstance(validator.validate_python({'b': 1}), self.ModelB)
# defaults to leftmost choice if there's a tie
assert isinstance(validator.validate_python({}), choices[0]['cls'])
@pytest.mark.parametrize('choices', permute_choices([model_a_schema, model_b_schema]))
def test_optional_union_with_members_having_defaults(self, choices) -> None:
class WrapModel:
val: Optional[Union[self.ModelA, self.ModelB]] = None
val = SchemaValidator(
schema=core_schema.model_schema(
WrapModel,
core_schema.model_fields_schema(
fields={
'val': core_schema.model_field(
core_schema.with_default_schema(
core_schema.union_schema(choices),
default=None,
)
)
}
),
)
)
assert isinstance(val.validate_python({'val': {'a': 1}}).val, self.ModelA)
assert isinstance(val.validate_python({'val': {'b': 1}}).val, self.ModelB)
assert val.validate_python({}).val is None
def test_dc_smart_union_by_fields_set() -> None:
@dataclass
class ModelA:
x: int
@dataclass
class ModelB(ModelA):
y: int
dc_a_schema = core_schema.dataclass_schema(
ModelA,
core_schema.dataclass_args_schema('ModelA', [core_schema.dataclass_field('x', core_schema.int_schema())]),
['x'],
)
dc_b_schema = core_schema.dataclass_schema(
ModelB,
core_schema.dataclass_args_schema(
'ModelB',
[
core_schema.dataclass_field('x', core_schema.int_schema()),
core_schema.dataclass_field('y', core_schema.int_schema()),
],
),
['x', 'y'],
)
for choices in permute_choices([dc_a_schema, dc_b_schema]):
validator = SchemaValidator(core_schema.union_schema(choices=choices))
assert isinstance(validator.validate_python({'x': 1}), ModelA)
assert isinstance(validator.validate_python({'x': '1'}), ModelA)
assert isinstance(validator.validate_python({'x': 1, 'y': 2}), ModelB)
assert isinstance(validator.validate_python({'x': 1, 'y': '2'}), ModelB)
assert isinstance(validator.validate_python({'x': '1', 'y': 2}), ModelB)
assert isinstance(validator.validate_python({'x': '1', 'y': '2'}), ModelB)
def test_dc_smart_union_with_defaults() -> None:
@dataclass
class ModelA:
a: int = 0
@dataclass
class ModelB:
b: int = 0
dc_a_schema = core_schema.dataclass_schema(
ModelA,
core_schema.dataclass_args_schema(
'ModelA',
[
core_schema.dataclass_field(
'a', core_schema.with_default_schema(schema=core_schema.int_schema(), default=0)
)
],
),
['a'],
)
dc_b_schema = core_schema.dataclass_schema(
ModelB,
core_schema.dataclass_args_schema(
'ModelB',
[
core_schema.dataclass_field(
'b', core_schema.with_default_schema(schema=core_schema.int_schema(), default=0)
)
],
),
['b'],
)
for choices in permute_choices([dc_a_schema, dc_b_schema]):
validator = SchemaValidator(core_schema.union_schema(choices=choices))
assert isinstance(validator.validate_python({'a': 1}), ModelA)
assert isinstance(validator.validate_python({'b': 1}), ModelB)
def test_td_smart_union_by_fields_set() -> None:
td_a_schema = core_schema.typed_dict_schema(
fields={'x': core_schema.typed_dict_field(core_schema.int_schema())},
)
td_b_schema = core_schema.typed_dict_schema(
fields={
'x': core_schema.typed_dict_field(core_schema.int_schema()),
'y': core_schema.typed_dict_field(core_schema.int_schema()),
},
)
for choices in permute_choices([td_a_schema, td_b_schema]):
validator = SchemaValidator(core_schema.union_schema(choices=choices))
assert set(validator.validate_python({'x': 1}).keys()) == {'x'}
assert set(validator.validate_python({'x': '1'}).keys()) == {'x'}
assert set(validator.validate_python({'x': 1, 'y': 2}).keys()) == {'x', 'y'}
assert set(validator.validate_python({'x': 1, 'y': '2'}).keys()) == {'x', 'y'}
assert set(validator.validate_python({'x': '1', 'y': 2}).keys()) == {'x', 'y'}
assert set(validator.validate_python({'x': '1', 'y': '2'}).keys()) == {'x', 'y'}
def test_smart_union_does_nested_model_field_counting() -> None:
class SubModelA:
x: int = 1
class SubModelB:
y: int = 2
class ModelA:
sub: SubModelA
class ModelB:
sub: SubModelB
model_a_schema = core_schema.model_schema(
ModelA,
core_schema.model_fields_schema(
fields={
'sub': core_schema.model_field(
core_schema.model_schema(
SubModelA,
core_schema.model_fields_schema(
fields={
'x': core_schema.model_field(
core_schema.with_default_schema(core_schema.int_schema(), default=1)
)
}
),
)
)
}
),
)
model_b_schema = core_schema.model_schema(
ModelB,
core_schema.model_fields_schema(
fields={
'sub': core_schema.model_field(
core_schema.model_schema(
SubModelB,
core_schema.model_fields_schema(
fields={
'y': core_schema.model_field(
core_schema.with_default_schema(core_schema.int_schema(), default=2)
)
}
),
)
)
}
),
)
for choices in permute_choices([model_a_schema, model_b_schema]):
validator = SchemaValidator(core_schema.union_schema(choices=choices))
assert isinstance(validator.validate_python({'sub': {'x': 1}}), ModelA)
assert isinstance(validator.validate_python({'sub': {'y': 3}}), ModelB)
# defaults to leftmost choice if there's a tie
assert isinstance(validator.validate_python({'sub': {}}), choices[0]['cls'])
def test_smart_union_does_nested_dataclass_field_counting() -> None:
@dataclass
class SubModelA:
x: int = 1
@dataclass
class SubModelB:
y: int = 2
@dataclass
class ModelA:
sub: SubModelA
@dataclass
class ModelB:
sub: SubModelB
dc_a_schema = core_schema.dataclass_schema(
ModelA,
core_schema.dataclass_args_schema(
'ModelA',
[
core_schema.dataclass_field(
'sub',
core_schema.with_default_schema(
core_schema.dataclass_schema(
SubModelA,
core_schema.dataclass_args_schema(
'SubModelA',
[
core_schema.dataclass_field(
'x', core_schema.with_default_schema(core_schema.int_schema(), default=1)
)
],
),
['x'],
),
default=SubModelA(),
),
)
],
),
['sub'],
)
dc_b_schema = core_schema.dataclass_schema(
ModelB,
core_schema.dataclass_args_schema(
'ModelB',
[
core_schema.dataclass_field(
'sub',
core_schema.with_default_schema(
core_schema.dataclass_schema(
SubModelB,
core_schema.dataclass_args_schema(
'SubModelB',
[
core_schema.dataclass_field(
'y', core_schema.with_default_schema(core_schema.int_schema(), default=2)
)
],
),
['y'],
),
default=SubModelB(),
),
)
],
),
['sub'],
)
for choices in permute_choices([dc_a_schema, dc_b_schema]):
validator = SchemaValidator(core_schema.union_schema(choices=choices))
assert isinstance(validator.validate_python({'sub': {'x': 1}}), ModelA)
assert isinstance(validator.validate_python({'sub': {'y': 3}}), ModelB)
# defaults to leftmost choice if there's a tie
assert isinstance(validator.validate_python({'sub': {}}), choices[0]['cls'])
def test_smart_union_does_nested_typed_dict_field_counting() -> None:
td_a_schema = core_schema.typed_dict_schema(
fields={
'sub': core_schema.typed_dict_field(
core_schema.typed_dict_schema(fields={'x': core_schema.typed_dict_field(core_schema.int_schema())})
)
}
)
td_b_schema = core_schema.typed_dict_schema(
fields={
'sub': core_schema.typed_dict_field(
core_schema.typed_dict_schema(fields={'y': core_schema.typed_dict_field(core_schema.int_schema())})
)
}
)
for choices in permute_choices([td_a_schema, td_b_schema]):
validator = SchemaValidator(core_schema.union_schema(choices=choices))
assert set(validator.validate_python({'sub': {'x': 1}})['sub'].keys()) == {'x'}
assert set(validator.validate_python({'sub': {'y': 2}})['sub'].keys()) == {'y'}
def test_nested_unions_bubble_up_field_count() -> None:
class SubModelX:
x1: int = 0
x2: int = 0
x3: int = 0
class SubModelY:
x1: int = 0
x2: int = 0
x3: int = 0
class SubModelZ:
z1: int = 0
z2: int = 0
z3: int = 0
class SubModelW:
w1: int = 0
w2: int = 0
w3: int = 0
class ModelA:
a: Union[SubModelX, SubModelY]
class ModelB:
b: Union[SubModelZ, SubModelW]
model_x_schema = core_schema.model_schema(
SubModelX,
core_schema.model_fields_schema(
fields={
'x1': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
'x2': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
'x3': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
}
),
)
model_y_schema = core_schema.model_schema(
SubModelY,
core_schema.model_fields_schema(
fields={
'x1': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
'x2': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
'x3': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
}
),
)
model_z_schema = core_schema.model_schema(
SubModelZ,
core_schema.model_fields_schema(
fields={
'z1': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
'z2': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
'z3': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
}
),
)
model_w_schema = core_schema.model_schema(
SubModelW,
core_schema.model_fields_schema(
fields={
'w1': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
'w2': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
'w3': core_schema.model_field(core_schema.with_default_schema(core_schema.int_schema(), default=0)),
}
),
)
model_a_schema_options = [
core_schema.union_schema([model_x_schema, model_y_schema]),
core_schema.union_schema([model_y_schema, model_x_schema]),
]
model_b_schema_options = [
core_schema.union_schema([model_z_schema, model_w_schema]),
core_schema.union_schema([model_w_schema, model_z_schema]),
]
for model_a_schema in model_a_schema_options:
for model_b_schema in model_b_schema_options:
validator = SchemaValidator(
schema=core_schema.union_schema(
[
core_schema.model_schema(
ModelA,
core_schema.model_fields_schema(fields={'a': core_schema.model_field(model_a_schema)}),
),
core_schema.model_schema(
ModelB,
core_schema.model_fields_schema(fields={'b': core_schema.model_field(model_b_schema)}),
),
]
)
)
result = validator.validate_python(
{'a': {'x1': 1, 'x2': 2, 'y1': 1, 'y2': 2}, 'b': {'w1': 1, 'w2': 2, 'w3': 3}}
)
assert isinstance(result, ModelB)
assert isinstance(result.b, SubModelW)
@pytest.mark.parametrize('extra_behavior', ['forbid', 'ignore', 'allow'])
def test_smart_union_extra_behavior(extra_behavior) -> None:
class Foo:
foo: str = 'foo'
class Bar:
bar: str = 'bar'
class Model:
x: Union[Foo, Bar]
validator = SchemaValidator(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
fields={
'x': core_schema.model_field(
core_schema.union_schema(
[
core_schema.model_schema(
Foo,
core_schema.model_fields_schema(
fields={
'foo': core_schema.model_field(
core_schema.with_default_schema(core_schema.str_schema(), default='foo')
)
}
),
extra_behavior=extra_behavior,
),
core_schema.model_schema(
Bar,
core_schema.model_fields_schema(
fields={
'bar': core_schema.model_field(
core_schema.with_default_schema(core_schema.str_schema(), default='bar')
)
}
),
extra_behavior=extra_behavior,
),
]
)
)
}
),
)
)
assert isinstance(validator.validate_python({'x': {'foo': 'foo'}}).x, Foo)
assert isinstance(validator.validate_python({'x': {'bar': 'bar'}}).x, Bar)
def test_smart_union_wrap_validator_should_not_change_nested_model_field_counts() -> None:
"""Adding a wrap validator on a union member should not affect smart union behavior"""
class SubModel:
x: str = 'x'
class ModelA:
type: str = 'A'
sub: SubModel
class ModelB:
type: str = 'B'
sub: SubModel
submodel_schema = core_schema.model_schema(
SubModel,
core_schema.model_fields_schema(fields={'x': core_schema.model_field(core_schema.str_schema())}),
)
wrapped_submodel_schema = core_schema.no_info_wrap_validator_function(
lambda v, handler: handler(v), submodel_schema
)
model_a_schema = core_schema.model_schema(
ModelA,
core_schema.model_fields_schema(
fields={
'type': core_schema.model_field(
core_schema.with_default_schema(core_schema.literal_schema(['A']), default='A'),
),
'sub': core_schema.model_field(wrapped_submodel_schema),
},
),
)
model_b_schema = core_schema.model_schema(
ModelB,
core_schema.model_fields_schema(
fields={
'type': core_schema.model_field(
core_schema.with_default_schema(core_schema.literal_schema(['B']), default='B'),
),
'sub': core_schema.model_field(submodel_schema),
},
),
)
for choices in permute_choices([model_a_schema, model_b_schema]):
schema = core_schema.union_schema(choices)
validator = SchemaValidator(schema)
assert isinstance(validator.validate_python({'type': 'A', 'sub': {'x': 'x'}}), ModelA)
assert isinstance(validator.validate_python({'type': 'B', 'sub': {'x': 'x'}}), ModelB)
# defaults to leftmost choice if there's a tie
assert isinstance(validator.validate_python({'sub': {'x': 'x'}}), choices[0]['cls'])
# test validate_assignment
class RootModel:
ab: Union[ModelA, ModelB]
root_model = core_schema.model_schema(
RootModel,
core_schema.model_fields_schema(
fields={'ab': core_schema.model_field(core_schema.union_schema([model_a_schema, model_b_schema]))}
),
)
validator = SchemaValidator(root_model)
m = validator.validate_python({'ab': {'type': 'B', 'sub': {'x': 'x'}}})
assert isinstance(m, RootModel)
assert isinstance(m.ab, ModelB)
assert m.ab.sub.x == 'x'
m = validator.validate_assignment(m, 'ab', {'sub': {'x': 'y'}})
assert isinstance(m, RootModel)
assert isinstance(m.ab, ModelA)
assert m.ab.sub.x == 'y'
| TestSmartUnionWithDefaults |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/ext/association_proxy/association_proxy_one.py | {
"start": 556,
"end": 864
} | class ____(Base):
__tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String, nullable=False)
addresses: Mapped[Set["Address"]] = relationship()
email_addresses: AssociationProxy[Set[str]] = association_proxy(
"addresses", "email"
)
| User |
python | doocs__leetcode | solution/2300-2399/2366.Minimum Replacements to Sort the Array/Solution.py | {
"start": 0,
"end": 372
} | class ____:
def minimumReplacement(self, nums: List[int]) -> int:
ans = 0
n = len(nums)
mx = nums[-1]
for i in range(n - 2, -1, -1):
if nums[i] <= mx:
mx = nums[i]
continue
k = (nums[i] + mx - 1) // mx
ans += k - 1
mx = nums[i] // k
return ans
| Solution |
python | fastapi__sqlmodel | docs_src/tutorial/create_db_and_table/tutorial003.py | {
"start": 99,
"end": 615
} | class ____(SQLModel, table=True): # (3)!
id: Optional[int] = Field(default=None, primary_key=True) # (4)!
name: str # (5)!
secret_name: str # (6)!
age: Optional[int] = None # (7)!
sqlite_file_name = "database.db" # (8)!
sqlite_url = f"sqlite:///{sqlite_file_name}" # (9)!
engine = create_engine(sqlite_url, echo=True) # (10)!
def create_db_and_tables(): # (11)!
SQLModel.metadata.create_all(engine) # (12)!
if __name__ == "__main__": # (13)!
create_db_and_tables() # (14)!
| Hero |
python | catalyst-team__catalyst | examples/reinforcement_learning/ddpg.py | {
"start": 2413,
"end": 5409
} | class ____(gym.ActionWrapper):
def action(self, action: float) -> float:
low_bound = self.action_space.low
upper_bound = self.action_space.high
action = low_bound + (action + 1.0) * 0.5 * (upper_bound - low_bound)
action = np.clip(action, low_bound, upper_bound)
return action
def _reverse_action(self, action: float) -> float:
low_bound = self.action_space.low
upper_bound = self.action_space.high
action = 2 * (action - low_bound) / (upper_bound - low_bound) - 1
action = np.clip(action, low_bound, upper_bound)
return action
def get_action(
env, network: nn.Module, state: np.array, sigma: Optional[float] = None
) -> np.array:
state = torch.tensor(state, dtype=torch.float32).unsqueeze(0)
action = network(state).detach().cpu().numpy()[0]
if sigma is not None:
action = np.random.normal(action, sigma)
return action
def generate_session(
env,
network: nn.Module,
sigma: Optional[float] = None,
replay_buffer: Optional[ReplayBuffer] = None,
) -> Tuple[float, int]:
total_reward = 0
state = env.reset()
for t in range(env.spec.max_episode_steps):
action = get_action(env, network, state=state, sigma=sigma)
next_state, reward, done, _ = env.step(action)
if replay_buffer is not None:
transition = Transition(state, action, reward, done, next_state)
replay_buffer.append(transition)
total_reward += reward
state = next_state
if done:
break
return total_reward, t
def generate_sessions(
env,
network: nn.Module,
sigma: Optional[float] = None,
replay_buffer: Optional[ReplayBuffer] = None,
num_sessions: int = 100,
) -> Tuple[float, int]:
sessions_reward, sessions_steps = 0, 0
for i_episone in range(num_sessions):
r, t = generate_session(
env=env, network=network, sigma=sigma, replay_buffer=replay_buffer
)
sessions_reward += r
sessions_steps += t
return sessions_reward, sessions_steps
def get_network_actor(env):
inner_fn = get_optimal_inner_init(nn.ReLU)
outer_fn = outer_init
network = torch.nn.Sequential(
nn.Linear(env.observation_space.shape[0], 400),
nn.ReLU(),
nn.Linear(400, 300),
nn.ReLU(),
)
head = torch.nn.Sequential(nn.Linear(300, 1), nn.Tanh())
network.apply(inner_fn)
head.apply(outer_fn)
return torch.nn.Sequential(network, head)
def get_network_critic(env):
inner_fn = get_optimal_inner_init(nn.LeakyReLU)
outer_fn = outer_init
network = torch.nn.Sequential(
nn.Linear(env.observation_space.shape[0] + 1, 400),
nn.LeakyReLU(0.01),
nn.Linear(400, 300),
nn.LeakyReLU(0.01),
)
head = nn.Linear(300, 1)
network.apply(inner_fn)
head.apply(outer_fn)
return torch.nn.Sequential(network, head)
# Catalyst
| NormalizedActions |
python | walkccc__LeetCode | solutions/604. Design Compressed String Iterator/604.py | {
"start": 0,
"end": 607
} | class ____:
def __init__(self, compressedString: str):
self.s = compressedString
self.i = 0 # s' index
self.num = 0 # currentChar's count
self.currentChar = ' '
def next(self) -> str:
if not self.hasNext():
return ' '
if self.num == 0:
self.currentChar = self.s[self.i]
self.i += 1
while self.i < len(self.s) and self.s[self.i].isdigit():
self.num = self.num * 10 + int(self.s[self.i])
self.i += 1
self.num -= 1
return self.currentChar
def hasNext(self) -> bool:
return self.i < len(self.s) or self.num > 0
| StringIterator |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 95280,
"end": 95486
} | class ____(AddMMConfigMixin, MTIAMMTemplateConfigHeuristic):
"""Addmm specific mixin for MTIA"""
@register_template_heuristic(mm_template.uid, "mtia", op_name="scaled_mm")
| MTIAAddMMTemplateConfigHeuristic |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 124777,
"end": 125613
} | class ____(QueryTest):
def test_entity(self):
User = self.classes.User
s = fixture_session()
q = s.query(User)
assert q._compile_state()._has_mapper_entities
def test_cols(self):
User = self.classes.User
s = fixture_session()
q = s.query(User.id)
assert not q._compile_state()._has_mapper_entities
def test_cols_set_entities(self):
User = self.classes.User
s = fixture_session()
q = s.query(User.id)
q._set_entities(User)
assert q._compile_state()._has_mapper_entities
def test_entity_set_entities(self):
User = self.classes.User
s = fixture_session()
q = s.query(User)
q._set_entities(User.id)
assert not q._compile_state()._has_mapper_entities
| HasMapperEntitiesTest |
python | spack__spack | lib/spack/spack/vendor/pyrsistent/_plist.py | {
"start": 6189,
"end": 7268
} | class ____(_PListBase):
"""
Classical Lisp style singly linked list. Adding elements to the head using cons is O(1).
Element access is O(k) where k is the position of the element in the list. Taking the
length of the list is O(n).
Fully supports the Sequence and Hashable protocols including indexing and slicing but
if you need fast random access go for the PVector instead.
Do not instantiate directly, instead use the factory functions :py:func:`l` or :py:func:`plist` to
create an instance.
Some examples:
>>> x = plist([1, 2])
>>> y = x.cons(3)
>>> x
plist([1, 2])
>>> y
plist([3, 1, 2])
>>> y.first
3
>>> y.rest == x
True
>>> y[:2]
plist([3, 1])
"""
__slots__ = ('first', 'rest')
def __new__(cls, first, rest):
instance = super(PList, cls).__new__(cls)
instance.first = first
instance.rest = rest
return instance
def __bool__(self):
return True
__nonzero__ = __bool__
Sequence.register(PList)
Hashable.register(PList)
| PList |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py | {
"start": 1369,
"end": 1727
} | class ____(RkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany"""
primary_key = None
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return "germany/"
# class that contains main source states | full-refresh
| Germany |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_vision.py | {
"start": 9412,
"end": 13326
} | class ____(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None) -> None:
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.has_relative_position_bias = bool(window_size)
if self.has_relative_position_bias:
self.relative_position_bias = Data2VecVisionRelativePositionBias(config, window_size=window_size)
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
relative_position_bias: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
resolution: Optional[tuple[int]] = None,
) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Add relative position bias if present.
if self.has_relative_position_bias:
height, width = resolution
window_size = (height // self.config.patch_size, width // self.config.patch_size)
attention_scores = attention_scores + self.relative_position_bias(
window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1]
)
# Add shared relative position bias if provided.
if relative_position_bias is not None:
attention_scores = attention_scores + relative_position_bias
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
# Copied from transformers.models.beit.modeling_beit.BeitSdpaSelfAttention with Beit->Data2VecVision
| Data2VecVisionSelfAttention |
python | huggingface__transformers | src/transformers/models/diffllama/modular_diffllama.py | {
"start": 18552,
"end": 18830
} | class ____(LlamaDecoderLayer):
def __init__(self, config: DiffLlamaConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.self_attn = DIFFLLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
| DiffLlamaDecoderLayer |
python | PrefectHQ__prefect | tests/test_artifacts.py | {
"start": 808,
"end": 26776
} | class ____:
@pytest.fixture
def artifact(self):
yield ArtifactCreate(
key="voltaic",
data=1,
description="# This is a markdown description title",
)
async def test_create_and_read_link_artifact_with_linktext_succeeds(
self, artifact: ArtifactCreate, client: httpx.AsyncClient
):
my_link = "prefect.io"
link_text = "Prefect"
@flow
async def my_flow():
return await acreate_link_artifact(
key=artifact.key,
link=my_link,
link_text=link_text,
description=artifact.description,
)
artifact_id = await my_flow()
response = await client.get(f"/artifacts/{artifact_id}")
assert response.status_code == 200
result = schemas.core.Artifact.model_validate(response.json())
assert result.data == f"[{link_text}]({my_link})"
async def test_create_link_artifact_in_task_succeeds(
self, client: httpx.AsyncClient
):
@task
def my_special_task():
run_context = get_run_context()
assert isinstance(run_context, TaskRunContext)
task_run_id = run_context.task_run.id
artifact_id = create_link_artifact(
key="task-link-artifact-3",
link="google.com",
description="my-artifact-description",
)
return artifact_id, task_run_id
@flow
def my_flow():
run_context = get_run_context()
assert isinstance(run_context, FlowRunContext)
assert run_context.flow_run is not None
flow_run_id = run_context.flow_run.id
artifact_id, task_run_id = my_special_task()
return artifact_id, flow_run_id, task_run_id
my_artifact_id, flow_run_id, task_run_id = my_flow()
response = await client.get(f"/artifacts/{my_artifact_id}")
assert response.status_code == 200
my_link_artifact = schemas.core.Artifact.model_validate(response.json())
assert my_link_artifact.flow_run_id == flow_run_id
assert my_link_artifact.task_run_id == task_run_id
async def test_create_link_artifact_in_flow_succeeds(
self, client: httpx.AsyncClient
):
@flow
def my_flow():
run_context = get_run_context()
assert isinstance(run_context, FlowRunContext)
assert run_context.flow_run is not None
flow_run_id = run_context.flow_run.id
artifact_id = create_link_artifact(
key="task-link-artifact-4",
link="google.com",
description="my-artifact-description",
)
return artifact_id, flow_run_id
my_artifact_id, flow_run_id = my_flow()
response = await client.get(f"/artifacts/{my_artifact_id}")
assert response.status_code == 200
my_link_artifact = schemas.core.Artifact.model_validate(response.json())
assert my_link_artifact.flow_run_id == flow_run_id
assert my_link_artifact.task_run_id is None
async def test_create_link_artifact_in_subflow_succeeds(
self, client: httpx.AsyncClient
):
@flow
def my_subflow():
run_context = get_run_context()
assert isinstance(run_context, FlowRunContext)
assert run_context.flow_run is not None
flow_run_id = run_context.flow_run.id
artifact_id = create_link_artifact(
key="task-link-artifact-5",
link="google.com",
description="my-artifact-description",
)
return artifact_id, flow_run_id
@flow
def my_flow():
artifact_id, flow_run_id = my_subflow()
return artifact_id, flow_run_id
my_artifact_id, flow_run_id = my_flow()
response = await client.get(f"/artifacts/{my_artifact_id}")
assert response.status_code == 200
my_link_artifact = schemas.core.Artifact.model_validate(response.json())
assert my_link_artifact.flow_run_id == flow_run_id
assert my_link_artifact.task_run_id is None
async def test_create_link_artifact_using_map_succeeds(self):
"""
Test that we can create a markdown artifact using map.
"""
# An ode to prefect issue #5309.
@task
def add_ten(x: int) -> int:
create_link_artifact(
# TODO: uncomment this out once unique constraint is dropped on artifact key
# key="new-markdown-artifact",
link="s3://my-bucket/my-file",
description="my-artifact-description",
)
return x + 10
@flow
def simple_map(nums: list[int]):
big_nums = add_ten.map(nums)
return [big_num.result() for big_num in big_nums]
my_big_nums = simple_map([1, 2, 3])
assert my_big_nums == [11, 12, 13]
async def test_create_markdown_artifact_in_task_succeeds(
self, client: httpx.AsyncClient
):
@task
def my_special_task():
run_context = get_run_context()
assert isinstance(run_context, TaskRunContext)
task_run_id = run_context.task_run.id
artifact_id = create_markdown_artifact(
key="task-link-artifact-3",
markdown="my markdown",
description="my-artifact-description",
)
return artifact_id, task_run_id
@flow
def my_flow():
run_context = get_run_context()
assert isinstance(run_context, FlowRunContext)
assert run_context.flow_run is not None
flow_run_id = run_context.flow_run.id
artifact_id, task_run_id = my_special_task()
return artifact_id, flow_run_id, task_run_id
my_artifact_id, flow_run_id, task_run_id = my_flow()
response = await client.get(f"/artifacts/{my_artifact_id}")
assert response.status_code == 200
my_markdown_artifact = schemas.core.Artifact.model_validate(response.json())
assert my_markdown_artifact.flow_run_id == flow_run_id
assert my_markdown_artifact.task_run_id == task_run_id
async def test_create_markdown_artifact_in_flow_succeeds(
self, client: httpx.AsyncClient
):
@flow
def my_flow():
run_context = get_run_context()
assert isinstance(run_context, FlowRunContext)
assert run_context.flow_run is not None
flow_run_id = run_context.flow_run.id
artifact_id = create_markdown_artifact(
key="task-link-artifact-4",
markdown="my markdown",
description="my-artifact-description",
)
return artifact_id, flow_run_id
my_artifact_id, flow_run_id = my_flow()
response = await client.get(f"/artifacts/{my_artifact_id}")
assert response.status_code == 200
my_markdown_artifact = schemas.core.Artifact.model_validate(response.json())
assert my_markdown_artifact.flow_run_id == flow_run_id
assert my_markdown_artifact.task_run_id is None
async def test_create_markdown_artifact_in_subflow_succeeds(
self, client: httpx.AsyncClient
):
@flow
def my_subflow():
run_context = get_run_context()
assert isinstance(run_context, FlowRunContext)
assert run_context.flow_run is not None
flow_run_id = run_context.flow_run.id
artifact_id = create_markdown_artifact(
key="task-link-artifact-3",
markdown="my markdown",
description="my-artifact-description",
)
return artifact_id, flow_run_id
@flow
def my_flow():
artifact_id, flow_run_id = my_subflow()
return artifact_id, flow_run_id
my_artifact_id, flow_run_id = my_flow()
response = await client.get(f"/artifacts/{my_artifact_id}")
assert response.status_code == 200
my_markdown_artifact = schemas.core.Artifact.model_validate(response.json())
assert my_markdown_artifact.flow_run_id == flow_run_id
assert my_markdown_artifact.task_run_id is None
async def test_create_markdown_artifact_using_map_succeeds(self):
"""
Test that we can create a markdown artifact using map.
"""
@task
def add_ten(x: int) -> int:
create_markdown_artifact(
key="new-markdown-artifact",
markdown="my markdown",
description="my-artifact-description",
)
return x + 10
@flow
def simple_map(nums: list[int]) -> list[int]:
big_nums = add_ten.map(nums)
return [big_num.result() for big_num in big_nums]
my_big_nums = simple_map([1, 2, 3])
assert my_big_nums == [11, 12, 13]
async def test_create_and_read_dict_of_list_table_artifact_succeeds(
self, artifact: ArtifactCreate, client: httpx.AsyncClient
):
my_table = {"a": [1, 3], "b": [2, 4]}
@flow
async def my_flow():
return await acreate_table_artifact(
key=artifact.key,
table=my_table,
description=artifact.description,
)
artifact_id = await my_flow()
response = await client.get(f"/artifacts/{artifact_id}")
assert response.status_code == 200
result = schemas.core.Artifact.model_validate(response.json())
assert isinstance(result.data, str)
result_data = json.loads(result.data)
assert result_data == my_table
async def test_create_and_read_list_of_dict_table_artifact_succeeds(
self, artifact: ArtifactCreate, client: httpx.AsyncClient
):
my_table = [{"a": 1, "b": 2}, {"a": 3, "b": 4}]
@flow
async def my_flow():
return await acreate_table_artifact(
key=artifact.key,
table=my_table,
description=artifact.description,
)
artifact_id = await my_flow()
response = await client.get(f"/artifacts/{artifact_id}")
assert response.status_code == 200
result = schemas.core.Artifact.model_validate(response.json())
assert isinstance(result.data, str)
result_data = json.loads(result.data)
assert result_data == my_table
async def test_create_and_read_list_of_list_table_artifact_succeeds(
self, artifact: ArtifactCreate, client: httpx.AsyncClient
):
my_table = [[1, 2], [None, 4]]
@flow
async def my_flow():
return await acreate_table_artifact(
key=artifact.key,
table=my_table,
description=artifact.description,
)
artifact_id = await my_flow()
response = await client.get(f"/artifacts/{artifact_id}")
assert response.status_code == 200
result = schemas.core.Artifact.model_validate(response.json())
assert isinstance(result.data, str)
result_data = json.loads(result.data)
assert result_data == my_table
async def test_create_table_artifact_in_task_succeeds(
self, client: httpx.AsyncClient
):
@task
def my_special_task():
my_table = {"a": [1, 3], "b": [2, 4]}
run_context = get_run_context()
assert isinstance(run_context, TaskRunContext)
task_run_id = run_context.task_run.id
artifact_id = create_table_artifact(
key="task-link-artifact-3",
table=my_table,
description="my-artifact-description",
)
return artifact_id, task_run_id
@flow
def my_flow():
run_context = get_run_context()
assert isinstance(run_context, FlowRunContext)
assert run_context.flow_run is not None
flow_run_id = run_context.flow_run.id
artifact_id, task_run_id = my_special_task()
return artifact_id, flow_run_id, task_run_id
my_artifact_id, flow_run_id, task_run_id = my_flow()
response = await client.get(f"/artifacts/{my_artifact_id}")
my_table_artifact = schemas.core.Artifact.model_validate(response.json())
assert my_table_artifact.flow_run_id == flow_run_id
assert my_table_artifact.task_run_id == task_run_id
assert isinstance(my_table_artifact.data, str)
result_data = json.loads(my_table_artifact.data)
assert result_data == {"a": [1, 3], "b": [2, 4]}
async def test_create_table_artifact_in_flow_succeeds(
self, client: httpx.AsyncClient
):
@flow
def my_flow():
my_table = {"a": [1, 3], "b": [2, 4]}
run_context = get_run_context()
assert isinstance(run_context, FlowRunContext)
assert run_context.flow_run is not None
flow_run_id = run_context.flow_run.id
artifact_id = create_table_artifact(
key="task-link-artifact-4",
table=my_table,
description="my-artifact-description",
)
return artifact_id, flow_run_id
my_artifact_id, flow_run_id = my_flow()
response = await client.get(f"/artifacts/{my_artifact_id}")
assert response.status_code == 200
my_table_artifact = schemas.core.Artifact.model_validate(response.json())
assert my_table_artifact.flow_run_id == flow_run_id
assert my_table_artifact.task_run_id is None
assert isinstance(my_table_artifact.data, str)
result_data = json.loads(my_table_artifact.data)
assert result_data == {"a": [1, 3], "b": [2, 4]}
async def test_create_table_artifact_in_subflow_succeeds(
self, client: httpx.AsyncClient
):
@flow
def my_subflow():
my_table = {"a": [1, 3], "b": [2, 4]}
run_context = get_run_context()
assert isinstance(run_context, FlowRunContext)
assert run_context.flow_run is not None
flow_run_id = run_context.flow_run.id
artifact_id = create_table_artifact(
key="task-link-artifact-3",
table=my_table,
description="my-artifact-description",
)
return artifact_id, flow_run_id
@flow
def my_flow():
artifact_id, flow_run_id = my_subflow()
return artifact_id, flow_run_id
my_artifact_id, flow_run_id = my_flow()
response = await client.get(f"/artifacts/{my_artifact_id}")
assert response.status_code == 200
my_table_artifact = schemas.core.Artifact.model_validate(response.json())
assert my_table_artifact.flow_run_id == flow_run_id
assert isinstance(my_table_artifact.data, str)
result_data = json.loads(my_table_artifact.data)
assert result_data == {"a": [1, 3], "b": [2, 4]}
assert my_table_artifact.task_run_id is None
async def test_create_table_artifact_using_map_succeeds(self):
"""
Test that we can create a table artifact using map.
An ode to prefect issue
"""
@task
def add_ten(x: int) -> int:
my_table = {"a": [1, 3], "b": [2, 4]}
create_table_artifact(
# TODO: uncomment this out once unique constraint is dropped on artifact key
# key="task-link-artifact-3",
table=my_table,
description="my-artifact-description",
)
return x + 10
@flow
def simple_map(nums: list[int]):
big_nums = add_ten.map(nums)
return [big_num.result() for big_num in big_nums]
my_big_nums = simple_map([1, 2, 3])
assert my_big_nums == [11, 12, 13]
async def test_create_dict_table_artifact_with_none_succeeds(self):
    """A dict-of-columns table containing a None value can be created (smoke test)."""
    table_with_none = {"a": [1, 3], "b": [2, None]}

    @flow
    def my_flow():
        return create_table_artifact(
            key="swiss-table",
            table=table_with_none,
            description="my-artifact-description",
        )

    # Passing if artifact creation does not raise.
    my_flow()
async def test_create_dict_table_artifact_with_nan_succeeds(
    self, client: httpx.AsyncClient
):
    """NaN values in a dict table round-trip through the API as JSON null."""
    table_with_nan = {"a": [1, 3], "b": [2, float("nan")]}

    @flow
    async def my_flow():
        return await acreate_table_artifact(
            key="swiss-table",
            table=table_with_nan,
            description="my-artifact-description",
        )

    artifact_id = await my_flow()

    response = await client.get(f"/artifacts/{artifact_id}")
    assert response.status_code == 200
    fetched = schemas.core.Artifact.model_validate(response.json())
    assert isinstance(fetched.data, str)
    # NaN is not representable in strict JSON, so it comes back as None.
    assert json.loads(fetched.data) == {"a": [1, 3], "b": [2, None]}
async def test_create_list_table_artifact_with_none_succeeds(self):
    """A list-of-rows table containing a None value can be created (smoke test)."""
    rows_with_none = [
        {"a": 1, "b": 2},
        {"a": 3, "b": None},
    ]

    @flow
    async def my_flow():
        await acreate_table_artifact(
            key="swiss-table",
            table=rows_with_none,
            description="my-artifact-description",
        )

    # Passing if artifact creation does not raise.
    await my_flow()
async def test_create_list_table_artifact_with_nan_succeeds(
    self, client: httpx.AsyncClient
):
    """NaN values in a list-of-rows table round-trip through the API as JSON null."""
    rows_with_nan = [
        {"a": 1, "b": 2},
        {"a": 3, "b": float("nan")},
    ]

    @flow
    async def my_flow():
        return await acreate_table_artifact(
            key="swiss-table",
            table=rows_with_nan,
            description="my-artifact-description",
        )

    artifact_id = await my_flow()

    response = await client.get(f"/artifacts/{artifact_id}")
    assert response.status_code == 200
    fetched = schemas.core.Artifact.model_validate(response.json())
    assert isinstance(fetched.data, str)
    # NaN is not representable in strict JSON, so it comes back as None.
    assert json.loads(fetched.data) == [
        {"a": 1, "b": 2},
        {"a": 3, "b": None},
    ]
async def test_create_progress_artifact_without_key(
    self, client: httpx.AsyncClient
):
    """A progress artifact can be created without supplying a key."""
    initial_progress = 0.0

    @flow
    async def my_flow():
        return await acreate_progress_artifact(
            initial_progress, description="my-description"
        )

    artifact_id = await my_flow()

    response = await client.get(f"/artifacts/{artifact_id}")
    assert response.status_code == 200
    fetched = schemas.core.Artifact.model_validate(response.json())
    assert fetched.data == initial_progress
    assert fetched.type == "progress"
    assert fetched.description == "my-description"
async def test_create_progress_artifact_with_key(self, client: httpx.AsyncClient):
    """A progress artifact created with a key stores that key on the record."""
    initial_progress = 0.0

    @flow
    def my_flow():
        return create_progress_artifact(
            initial_progress, key="progress-artifact", description="my-description"
        )

    artifact_id = my_flow()

    response = await client.get(f"/artifacts/{artifact_id}")
    assert response.status_code == 200
    fetched = schemas.core.Artifact.model_validate(response.json())
    assert fetched.data == initial_progress
    assert fetched.type == "progress"
    assert fetched.key == "progress-artifact"
    assert fetched.description == "my-description"
async def test_create_progress_artifact_in_task_succeeds(
    self, client: httpx.AsyncClient
):
    """A progress artifact created in a task is linked to both the task run and flow run."""

    @task
    def my_task():
        # Record the task run id so the artifact linkage can be verified.
        ctx = get_run_context()
        assert isinstance(ctx, TaskRunContext)
        assert ctx.task_run is not None
        created_id = create_progress_artifact(
            key="task-link-artifact-3",
            progress=0.0,
            description="my-artifact-description",
        )
        return created_id, ctx.task_run.id

    @flow
    def my_flow():
        ctx = get_run_context()
        assert isinstance(ctx, FlowRunContext)
        assert ctx.flow_run is not None
        created_id, task_run_id = my_task()
        return created_id, ctx.flow_run.id, task_run_id

    my_artifact_id, flow_run_id, task_run_id = my_flow()

    response = await client.get(f"/artifacts/{my_artifact_id}")
    assert response.status_code == 200
    fetched = schemas.core.Artifact.model_validate(response.json())
    assert fetched.flow_run_id == flow_run_id
    assert fetched.task_run_id == task_run_id
    assert fetched.data == 0.0
    assert fetched.type == "progress"
    assert fetched.description == "my-artifact-description"
async def test_create_progress_artifact_in_flow_succeeds(
    self, client: httpx.AsyncClient
):
    """A progress artifact created directly in a flow is linked to the flow run only.

    Note: test name typo fixed ("progess" -> "progress"); pytest discovers
    tests by the ``test_`` prefix, so no other code references the old name.
    """

    @flow
    def my_flow():
        # Capture the flow run id so the artifact linkage can be verified.
        run_context = get_run_context()
        assert isinstance(run_context, FlowRunContext)
        assert run_context.flow_run is not None
        flow_run_id = run_context.flow_run.id
        artifact_id = create_progress_artifact(
            key="task-link-artifact-4",
            progress=0.0,
            description="my-artifact-description",
        )
        return artifact_id, flow_run_id

    my_artifact_id, flow_run_id = my_flow()

    response = await client.get(f"/artifacts/{my_artifact_id}")
    assert response.status_code == 200
    my_progress_artifact = schemas.core.Artifact.model_validate(response.json())
    assert my_progress_artifact.flow_run_id == flow_run_id
    # Artifact was created at flow scope, so there is no task run linkage.
    assert my_progress_artifact.task_run_id is None
    assert my_progress_artifact.data == 0.0
    assert my_progress_artifact.type == "progress"
    assert my_progress_artifact.description == "my-artifact-description"
async def test_create_image_artifact_in_task_succeeds(
    self, client: httpx.AsyncClient
):
    """An image artifact created in a task is linked to both the task run and flow run."""

    @task
    def my_task():
        # Record the task run id so the artifact linkage can be verified.
        ctx = get_run_context()
        assert isinstance(ctx, TaskRunContext)
        assert ctx.task_run is not None
        created_id = create_image_artifact(
            image_url="https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png",
            key="task-link-artifact-3",
            description="my-artifact-description",
        )
        return created_id, ctx.task_run.id

    @flow
    def my_flow():
        ctx = get_run_context()
        assert isinstance(ctx, FlowRunContext)
        assert ctx.flow_run is not None
        created_id, task_run_id = my_task()
        return created_id, ctx.flow_run.id, task_run_id

    my_artifact_id, flow_run_id, task_run_id = my_flow()

    response = await client.get(f"/artifacts/{my_artifact_id}")
    assert response.status_code == 200
    fetched = schemas.core.Artifact.model_validate(response.json())
    assert fetched.flow_run_id == flow_run_id
    assert fetched.task_run_id == task_run_id
    # The image artifact's data payload is the URL itself.
    assert (
        fetched.data
        == "https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png"
    )
    assert fetched.type == "image"
    assert fetched.description == "my-artifact-description"
async def test_create_image_artifact_in_flow_succeeds(
    self, client: httpx.AsyncClient
):
    """An image artifact created directly in a flow is linked to the flow run only."""

    @flow
    def my_flow():
        # Capture the flow run id so the artifact linkage can be verified.
        ctx = get_run_context()
        assert isinstance(ctx, FlowRunContext)
        assert ctx.flow_run is not None
        created_id = create_image_artifact(
            image_url="https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png",
            key="task-link-artifact-4",
            description="my-artifact-description",
        )
        return created_id, ctx.flow_run.id

    my_artifact_id, flow_run_id = my_flow()

    response = await client.get(f"/artifacts/{my_artifact_id}")
    assert response.status_code == 200
    fetched = schemas.core.Artifact.model_validate(response.json())
    assert fetched.flow_run_id == flow_run_id
    # Artifact was created at flow scope, so there is no task run linkage.
    assert fetched.task_run_id is None
    # The image artifact's data payload is the URL itself.
    assert (
        fetched.data
        == "https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png"
    )
    assert fetched.type == "image"
    assert fetched.description == "my-artifact-description"
async def test_creating_artifact_outside_of_flow_run_context_warns(self):
    """Creating an artifact with no active flow/task run context emits a
    FutureWarning, for both the sync and async creation APIs."""
    with pytest.warns(FutureWarning):
        create_link_artifact("https://www.google.com", "Google", _sync=True) # pyright: ignore[reportCallIssue]
    with pytest.warns(FutureWarning):
        await acreate_link_artifact("https://www.google.com", "Google")
| TestCreateArtifacts |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.