language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sympy__sympy | sympy/core/logic.py | {
"start": 5327,
"end": 7836
} | class ____:
"""Logical expression"""
# {} 'op' -> LogicClass
op_2class: dict[str, type[Logic]] = {}
def __new__(cls, *args):
obj = object.__new__(cls)
obj.args = args
return obj
def __getnewargs__(self):
return self.args
def __hash__(self):
return hash((type(self).__name__,) + tuple(self.args))
def __eq__(a, b):
if not isinstance(b, type(a)):
return False
else:
return a.args == b.args
def __ne__(a, b):
if not isinstance(b, type(a)):
return True
else:
return a.args != b.args
def __str__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join(str(a) for a in self.args))
__repr__ = __str__
@staticmethod
def fromstring(text):
"""Logic from string with space around & and | but none after !.
e.g.
!a & b | c
"""
lexpr = None # current logical expression
schedop = None # scheduled operation
for term in text.split():
# operation symbol
if term in '&|':
if schedop is not None:
raise ValueError(
'double op forbidden: "%s %s"' % (term, schedop))
if lexpr is None:
raise ValueError(
'%s cannot be in the beginning of expression' % term)
schedop = term
continue
if '&' in term or '|' in term:
raise ValueError('& and | must have space around them')
if term[0] == '!':
if len(term) == 1:
raise ValueError('do not include space after "!"')
term = Not(term[1:])
# already scheduled operation, e.g. '&'
if schedop:
lexpr = Logic.op_2class[schedop](lexpr, term)
schedop = None
continue
# this should be atom
if lexpr is not None:
raise ValueError(
'missing op between "%s" and "%s"' % (lexpr, term))
lexpr = term
# let's check that we ended up in correct state
if schedop is not None:
raise ValueError('premature end-of-expression in "%s"' % text)
if lexpr is None:
raise ValueError('"%s" is empty' % text)
# everything looks good now
return lexpr
| Logic |
python | pyinstaller__pyinstaller | bootloader/waflib/Logs.py | {
"start": 3933,
"end": 7207
} | class ____(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self, LOG_FORMAT, HOUR_FORMAT)
def format(self, rec):
try:
msg = rec.msg.decode('utf-8')
except Exception:
msg = rec.msg
use = colors_lst['USE']
if (use == 1 and rec.stream.isatty()) or use == 2:
c1 = getattr(rec, 'c1', None)
if c1 is None:
c1 = ''
if rec.levelno >= logging.ERROR:
c1 = colors.RED
elif rec.levelno >= logging.WARNING:
c1 = colors.YELLOW
elif rec.levelno >= logging.INFO:
c1 = colors.GREEN
c2 = getattr(rec, 'c2', colors.NORMAL)
msg = '%s%s%s' % (c1, msg, c2)
else:
msg = re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))', '', msg)
if rec.levelno >= logging.INFO:
if rec.args:
try:
return msg % rec.args
except UnicodeDecodeError:
return msg.encode('utf-8') % rec.args
return msg
rec.msg = msg
rec.c1 = colors.PINK
rec.c2 = colors.NORMAL
return logging.Formatter.format(self, rec)
log = None
def debug(*k, **kw):
if verbose:
k = list(k)
k[0] = k[0].replace('\n', ' ')
log.debug(*k, **kw)
def error(*k, **kw):
log.error(*k, **kw)
if verbose > 2:
st = traceback.extract_stack()
if st:
st = st[:-1]
buf = []
for filename, lineno, name, line in st:
buf.append(' File %r, line %d, in %s' % (filename, lineno, name))
if line:
buf.append(' %s' % line.strip())
if buf:
log.error('\n'.join(buf))
def warn(*k, **kw):
log.warning(*k, **kw)
def info(*k, **kw):
log.info(*k, **kw)
def init_log():
global log
log = logging.getLogger('waflib')
log.handlers = []
log.filters = []
hdlr = log_handler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
def make_logger(path, name):
logger = logging.getLogger(name)
if sys.hexversion > 0x3000000:
encoding = sys.stdout.encoding
else:
encoding = None
hdlr = logging.FileHandler(path, 'w', encoding=encoding)
formatter = logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def make_mem_logger(name, to_log, size=8192):
from logging.handlers import MemoryHandler
logger = logging.getLogger(name)
hdlr = MemoryHandler(size, target=to_log)
formatter = logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.memhandler = hdlr
logger.setLevel(logging.DEBUG)
return logger
def free_logger(logger):
try:
for x in logger.handlers:
x.close()
logger.removeHandler(x)
except Exception:
pass
def pprint(col, msg, label='', sep='\n'):
info('%s%s%s %s', colors(col), msg, colors.NORMAL, label, extra={'terminator': sep})
| formatter |
python | pypa__hatch | tests/project/test_frontend.py | {
"start": 779,
"end": 4233
} | class ____:
@pytest.mark.parametrize(
("backend_pkg", "backend_api"),
[pytest.param(backend_pkg, backend_api, id=backend_pkg) for backend_pkg, backend_api in BACKENDS],
)
def test_wheel(self, temp_dir, temp_dir_data, platform, global_application, backend_pkg, backend_api):
project_dir = temp_dir / "project"
project_dir.mkdir()
(project_dir / "pyproject.toml").write_text(
f"""\
[build-system]
requires = ["{backend_pkg}"]
build-backend = "{backend_api}"
[project]
name = "foo"
version = "9000.42"
description = "text"
"""
)
package_dir = project_dir / "foo"
package_dir.mkdir()
(package_dir / "__init__.py").touch()
project = Project(project_dir)
project.build_env = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
temp_dir_data,
temp_dir_data,
platform,
0,
global_application,
)
output_dir = temp_dir / "output"
output_dir.mkdir()
script = project.build_frontend.scripts.prepare_metadata(
output_dir=str(output_dir), project_root=str(project_dir)
)
platform.check_command([sys.executable, "-c", script])
work_dir = output_dir / "work"
output = json.loads((output_dir / "output.json").read_text())
metadata_file = work_dir / output["return_val"] / "METADATA"
assert project_metadata_from_core_metadata(metadata_file.read_text()) == {
"name": "foo",
"version": "9000.42",
"description": "text",
}
@pytest.mark.parametrize(
("backend_pkg", "backend_api"),
[pytest.param(backend_pkg, backend_api, id=backend_pkg) for backend_pkg, backend_api in BACKENDS],
)
def test_editable(self, temp_dir, temp_dir_data, platform, global_application, backend_pkg, backend_api):
project_dir = temp_dir / "project"
project_dir.mkdir()
(project_dir / "pyproject.toml").write_text(
f"""\
[build-system]
requires = ["{backend_pkg}"]
build-backend = "{backend_api}"
[project]
name = "foo"
version = "9000.42"
description = "text"
"""
)
package_dir = project_dir / "foo"
package_dir.mkdir()
(package_dir / "__init__.py").touch()
project = Project(project_dir)
project.build_env = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
temp_dir_data,
temp_dir_data,
platform,
0,
global_application,
)
output_dir = temp_dir / "output"
output_dir.mkdir()
script = project.build_frontend.scripts.prepare_metadata(
output_dir=str(output_dir), project_root=str(project_dir), editable=True
)
platform.check_command([sys.executable, "-c", script])
work_dir = output_dir / "work"
output = json.loads((output_dir / "output.json").read_text())
metadata_file = work_dir / output["return_val"] / "METADATA"
assert project_metadata_from_core_metadata(metadata_file.read_text()) == {
"name": "foo",
"version": "9000.42",
"description": "text",
}
| TestPrepareMetadata |
python | pytorch__pytorch | test/torch_np/numpy_tests/linalg/test_linalg.py | {
"start": 78144,
"end": 80006
} | class ____(TestCase):
@xpassIfTorchDynamo_np # (reason="TODO")
def test_unsupported_commontype(self):
# linalg gracefully handles unsupported type
arr = np.array([[1, -2], [2, 5]], dtype="float16")
# with assert_raises_regex(TypeError, "unsupported in linalg"):
with assert_raises(TypeError):
linalg.cholesky(arr)
# @slow
# @pytest.mark.xfail(not HAS_LAPACK64, run=False,
# reason="Numpy not compiled with 64-bit BLAS/LAPACK")
# @requires_memory(free_bytes=16e9)
@skip(reason="Bad memory reports lead to OOM in ci testing")
def test_blas64_dot(self):
n = 2**32
a = np.zeros([1, n], dtype=np.float32)
b = np.ones([1, 1], dtype=np.float32)
a[0, -1] = 1
c = np.dot(b, a)
assert_equal(c[0, -1], 1)
@skip(reason="lapack-lite specific")
@xfail # (
# not HAS_LAPACK64, reason="Numpy not compiled with 64-bit BLAS/LAPACK"
# )
def test_blas64_geqrf_lwork_smoketest(self):
# Smoke test LAPACK geqrf lwork call with 64-bit integers
dtype = np.float64
lapack_routine = np.linalg.lapack_lite.dgeqrf
m = 2**32 + 1
n = 2**32 + 1
lda = m
# Dummy arrays, not referenced by the lapack routine, so don't
# need to be of the right size
a = np.zeros([1, 1], dtype=dtype)
work = np.zeros([1], dtype=dtype)
tau = np.zeros([1], dtype=dtype)
# Size query
results = lapack_routine(m, n, a, lda, tau, work, -1, 0)
assert_equal(results["info"], 0)
assert_equal(results["m"], m)
assert_equal(results["n"], m)
# Should result to an integer of a reasonable size
lwork = int(work.item())
assert_(2**32 < lwork < 2**42)
if __name__ == "__main__":
run_tests()
| TestMisc2 |
python | kamyu104__LeetCode-Solutions | Python/finding-3-digit-even-numbers.py | {
"start": 47,
"end": 901
} | class ____(object):
def findEvenNumbers(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
k = 3
def backtracking(curr, cnt, result):
if len(curr) == k:
result.append(reduce(lambda x, y: x*10+y, curr))
return
for i, c in enumerate(cnt):
if c == 0 or (not curr and i == 0) or (len(curr) == k-1 and i%2 != 0):
continue
cnt[i] -= 1
curr.append(i)
backtracking(curr, cnt, result)
curr.pop()
cnt[i] += 1
cnt = [0]*10
for d in digits:
cnt[d] += 1
result = []
backtracking([], cnt, result)
return result
# Time: O(n), n is 10^3
# Space: O(1)
import collections
| Solution |
python | django-debug-toolbar__django-debug-toolbar | tests/test_store.py | {
"start": 4257,
"end": 4302
} | class ____(store.BaseStore):
pass
| StubStore |
python | redis__redis-py | tests/test_multidb/test_healthcheck.py | {
"start": 2117,
"end": 5860
} | class ____:
@pytest.mark.parametrize(
"probes,hc1_side_effect,hc2_side_effect,hc1_call_count,hc2_call_count,expected_result",
[
(3, [True, False, False], [True, True, True], 3, 0, False),
(3, [True, True, True], [True, False, False], 3, 3, False),
(3, [True, False, True], [True, True, True], 3, 3, True),
(3, [True, True, True], [True, False, True], 3, 3, True),
(3, [True, True, False], [True, False, True], 3, 3, True),
(4, [True, True, False, False], [True, True, True, True], 4, 0, False),
(4, [True, True, True, True], [True, True, False, False], 4, 4, False),
(4, [False, True, True, True], [True, True, True, True], 4, 4, True),
(4, [True, True, True, True], [True, False, True, True], 4, 4, True),
(4, [False, True, True, True], [True, True, False, True], 4, 4, True),
],
ids=[
"HC1 - no majority - odd",
"HC2 - no majority - odd",
"HC1 - majority- odd",
"HC2 - majority - odd",
"HC1 + HC2 - majority - odd",
"HC1 - no majority - even",
"HC2 - no majority - even",
"HC1 - majority - even",
"HC2 - majority - even",
"HC1 + HC2 - majority - even",
],
)
def test_policy_returns_true_for_majority_successful_probes(
self,
probes,
hc1_side_effect,
hc2_side_effect,
hc1_call_count,
hc2_call_count,
expected_result,
):
mock_hc1 = Mock(spec=HealthCheck)
mock_hc2 = Mock(spec=HealthCheck)
mock_hc1.check_health.side_effect = hc1_side_effect
mock_hc2.check_health.side_effect = hc2_side_effect
mock_db = Mock(spec=Database)
policy = HealthyMajorityPolicy(probes, 0.01)
assert policy.execute([mock_hc1, mock_hc2], mock_db) == expected_result
assert mock_hc1.check_health.call_count == hc1_call_count
assert mock_hc2.check_health.call_count == hc2_call_count
@pytest.mark.parametrize(
"probes,hc1_side_effect,hc2_side_effect,hc1_call_count,hc2_call_count",
[
(3, [True, ConnectionError, ConnectionError], [True, True, True], 3, 0),
(3, [True, True, True], [True, ConnectionError, ConnectionError], 3, 3),
(
4,
[True, ConnectionError, ConnectionError, True],
[True, True, True, True],
3,
0,
),
(
4,
[True, True, True, True],
[True, ConnectionError, ConnectionError, False],
4,
3,
),
],
ids=[
"HC1 - majority- odd",
"HC2 - majority - odd",
"HC1 - majority - even",
"HC2 - majority - even",
],
)
def test_policy_raise_unhealthy_database_exception_on_majority_probes_exceptions(
self, probes, hc1_side_effect, hc2_side_effect, hc1_call_count, hc2_call_count
):
mock_hc1 = Mock(spec=HealthCheck)
mock_hc2 = Mock(spec=HealthCheck)
mock_hc1.check_health.side_effect = hc1_side_effect
mock_hc2.check_health.side_effect = hc2_side_effect
mock_db = Mock(spec=Database)
policy = HealthyAllPolicy(3, 0.01)
with pytest.raises(UnhealthyDatabaseException, match="Unhealthy database"):
policy.execute([mock_hc1, mock_hc2], mock_db)
assert mock_hc1.check_health.call_count == hc1_call_count
assert mock_hc2.check_health.call_count == hc2_call_count
@pytest.mark.onlynoncluster
| TestHealthyMajorityPolicy |
python | jazzband__django-polymorphic | example/orders/admin.py | {
"start": 298,
"end": 381
} | class ____(StackedPolymorphicInline.Child):
model = BankPayment
| BankPaymentInline |
python | Textualize__textual | tests/test_binding_inheritance.py | {
"start": 3073,
"end": 4100
} | class ____(App[None]):
"""An app with a simple low-priority alpha key binding."""
BINDINGS = [Binding("a", "a", "a", priority=False)]
async def test_just_app_low_priority_alpha_binding() -> None:
"""An app with a single low-priority binding should have just the one binding."""
async with LowAlphaBinding().run_test() as pilot:
assert sorted(pilot.app._bindings.key_to_bindings.keys()) == sorted(
["ctrl+c", "ctrl+p", "ctrl+q", "a"]
)
assert pilot.app._bindings.get_bindings_for_key("ctrl+q")[0].priority is True
assert pilot.app._bindings.get_bindings_for_key("a")[0].priority is False
##############################################################################
# A non-default screen with a single alpha key binding.
#
# There's little point in testing a screen with no bindings added as that's
# pretty much the same as an app with a default screen (for the purposes of
# these tests). So, let's test a screen with a single alpha-key binding.
| LowAlphaBinding |
python | neetcode-gh__leetcode | python/0078-subsets.py | {
"start": 0,
"end": 459
} | class ____:
def subsets(self, nums: List[int]) -> List[List[int]]:
res = []
subset = []
def dfs(i):
if i >= len(nums):
res.append(subset.copy())
return
# decision to include nums[i]
subset.append(nums[i])
dfs(i + 1)
# decision NOT to include nums[i]
subset.pop()
dfs(i + 1)
dfs(0)
return res
| Solution |
python | facebook__pyre-check | client/commands/tests/server_setup.py | {
"start": 10133,
"end": 10259
} | class ____(background_tasks.Task):
async def run(self) -> None:
await asyncio.Event().wait()
| WaitForeverBackgroundTask |
python | readthedocs__readthedocs.org | readthedocs/projects/querysets.py | {
"start": 522,
"end": 6600
} | class ____(NoReprQuerySet, models.QuerySet):
"""Projects take into account their own privacy_level setting."""
use_for_related_fields = True
def _add_user_projects(self, queryset, user, admin=False, member=False):
"""Add projects from where `user` is an `admin` or a `member`."""
projects = AdminPermission.projects(
user=user,
admin=admin,
member=member,
)
return queryset | projects
def for_user_and_viewer(self, user, viewer):
"""
Show projects that a user owns, that another user can see.
This includes:
- Projects where both are member
- Public projects from `user`
"""
viewer_projects = self._add_user_projects(self.none(), viewer, admin=True, member=True)
owner_projects = self._add_user_projects(self.none(), user, admin=True, member=True)
owner_public_projects = owner_projects.filter(privacy_level=constants.PUBLIC)
queryset = (viewer_projects & owner_projects) | owner_public_projects
return queryset.distinct()
def for_admin_user(self, user):
queryset = self._add_user_projects(self.none(), user, admin=True, member=False)
return queryset.distinct()
def public(self, user=None):
queryset = self.filter(privacy_level=constants.PUBLIC)
if user:
if user.is_superuser:
queryset = self.all()
else:
queryset = self._add_user_projects(
queryset=queryset,
user=user,
admin=True,
member=True,
)
return queryset.distinct()
def for_user(self, user):
"""Return all projects that an user belongs to."""
queryset = self._add_user_projects(self.none(), user, admin=True, member=True)
return queryset.distinct()
def is_active(self, project):
"""
Check if the project is active.
The check consists on:
* the Project shouldn't be marked as skipped.
* any of the project's owners shouldn't be banned.
* the project shouldn't have a high spam score.
* the organization associated to the project should not be disabled.
:param project: project to be checked
:type project: readthedocs.projects.models.Project
:returns: whether or not the project is active
:rtype: bool
"""
spam_project = False
any_owner_banned = any(u.profile.banned for u in project.users.all())
organization = project.organizations.first()
if "readthedocsext.spamfighting" in settings.INSTALLED_APPS:
from readthedocsext.spamfighting.utils import spam_score # noqa
if spam_score(project) > settings.RTD_SPAM_THRESHOLD_DONT_SERVE_DOCS:
spam_project = True
if (
project.skip
or any_owner_banned
or (organization and organization.disabled)
or spam_project
):
return False
return True
def max_concurrent_builds(self, project):
"""
Return the max concurrent builds allowed for the project.
Max concurrency build priority:
- project
- organization
- plan
- default setting
:param project: project to be checked
:type project: readthedocs.projects.models.Project
:returns: number of max concurrent builds for the project
:rtype: int
"""
from readthedocs.subscriptions.constants import TYPE_CONCURRENT_BUILDS
max_concurrent_organization = None
organization = project.organizations.first()
if organization:
max_concurrent_organization = organization.max_concurrent_builds
feature = get_feature(project, feature_type=TYPE_CONCURRENT_BUILDS)
feature_value = feature.value if feature else 1
return project.max_concurrent_builds or max_concurrent_organization or feature_value
def prefetch_latest_build(self):
"""
Prefetch and annotate to avoid N+1 queries.
.. note::
This should come after any filtering.
"""
from readthedocs.builds.models import Build
# NOTE: prefetching the latest build will perform worse than just
# accessing the latest build for each project.
# While prefetching reduces the number of queries,
# the query used to fetch the latest build can be quite expensive,
# specially in projects with lots of builds.
# Not prefetching here is fine, as this query is paginated by 15
# items per page, so it will generate at most 15 queries.
# This annotation performs fine in all cases.
# Annotate whether the project has a successful build or not,
# to avoid N+1 queries when showing the build status.
return self.annotate(
_has_good_build=Exists(Build.internal.filter(project=OuterRef("pk"), success=True))
)
# Aliases
def dashboard(self, user):
"""Get the projects for this user including the latest build."""
# Prefetching seems to cause some inconsistent performance issues,
# disabling for now. For more background, see:
# https://github.com/readthedocs/readthedocs.org/pull/11621
return self.for_user(user)
def api(self, user=None):
return self.public(user)
def api_v2(self, *args, **kwargs):
# API v2 is the same as API v3 for .org, but it's
# different for .com, this method is overridden there.
return self.api(*args, **kwargs)
def single_owner(self, user):
"""
Returns projects where `user` is the only owner.
Projects that belong to organizations aren't included.
"""
return self.annotate(count_users=Count("users")).filter(
users=user,
count_users=1,
organizations__isnull=True,
)
| ProjectQuerySetBase |
python | doocs__leetcode | solution/0200-0299/0252.Meeting Rooms/Solution.py | {
"start": 0,
"end": 175
} | class ____:
def canAttendMeetings(self, intervals: List[List[int]]) -> bool:
intervals.sort()
return all(a[1] <= b[0] for a, b in pairwise(intervals))
| Solution |
python | pytorch__pytorch | test/export/test_swap.py | {
"start": 882,
"end": 14569
} | class ____(TestCase):
def test_unflatten_preserve_signature(self):
class NestedChild(torch.nn.Module):
def forward(self, zx, y):
return {"x": y["key"] + zx[1], "w": y["key"] * zx[1]}
class Child1(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.nested = NestedChild()
def forward(self, x, y):
z = torch.ones_like(x)
xw = self.nested((z, x), y={"key": y})
return xw["w"] + z - xw["x"]
class Child2(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
return x - 1
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = Child1()
self.bar = Child2()
def forward(self, x, y):
x = self.foo(x, y)
x = self.bar(x)
return x
orig_eager = MyModule()
inps = torch.rand(2, 3), torch.rand(2, 3)
ep = export(
orig_eager,
inps,
{},
preserve_module_call_signature=("foo.nested", "bar"),
strict=self.strict,
)
swapped_gm = _swap_modules(
ep,
{"foo.nested": NestedChild(), "bar": Child2()},
)
self.assertTrue(torch.allclose(ep.module()(*inps), swapped_gm(*inps)))
def test_unflatten_preserve_with_unused_input(self):
class M1(torch.nn.Module):
def forward(self, x, a, b):
return x + a, b
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.m1 = M1()
def forward(self, x, y):
a, b = torch.topk(y, 2)
return self.m1(x, a, b)[0]
ep = torch.export.export(
M(),
(torch.randn(2), torch.randn(5)),
preserve_module_call_signature=("m1",),
strict=self.strict,
)
swapped_gm = _swap_modules(
ep,
{"m1": M1()},
)
inps = (torch.randn(2), torch.randn(5))
self.assertTrue(torch.allclose(ep.module()(*inps), swapped_gm(*inps)))
def test_nested_leaf(self):
class Leaf(torch.nn.Module):
def forward(self, x):
return x + 1
class Nested(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.leaf = Leaf()
def forward(self, x):
return self.leaf(x) + 2
class TopLevel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.nested = Nested()
def forward(self, x):
return self.nested(x) + 3
ep = torch.export.export(
TopLevel(),
(torch.randn(3),),
strict=self.strict,
preserve_module_call_signature=("nested",),
)
swapped_gm = _swap_modules(
ep,
{"nested": Nested()},
)
inps = (torch.randn(3),)
self.assertTrue(torch.allclose(ep.module()(*inps), swapped_gm(*inps)))
def test_dedup_sym_size(self):
# Here, sym_size & floor div are used in 3 subgraphs (top-level, m1, m2),
# but only one copy of sym_size is created in the initial export graph.
# For m1, sym_size & floordiv should be copied as recompute since we preserve the call signature,
# but for m2 floordiv should be passed in as a placeholder.
# Test that this is preserved, and the unflattened module runs correctly.
class M1(torch.nn.Module):
def forward(self, x, y):
d = x.size(0) // 2
return y[:d]
class M2(torch.nn.Module):
def forward(self, x, y):
d = x.size(0) // 2
return y[:d]
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.m1 = M1()
self.m2 = M2()
def forward(self, x, y):
d = x.size(0) // 2
m1_res = self.m1(x, y)
m2_res = self.m2(x, y)
return y[d:] + m1_res + m2_res
inputs = (torch.ones(10), torch.ones(10))
d_ = torch.export.Dim("foo", max=2048)
d = 2 * d_
ep = torch.export.export(
M(),
inputs,
dynamic_shapes=((d,), (d,)),
strict=self.strict,
preserve_module_call_signature=("m1",),
)
swapped_gm = _swap_modules(
ep,
{"m1": M1()},
)
inps = (torch.randn(10), torch.randn(10))
self.assertTrue(torch.allclose(ep.module()(*inps), swapped_gm(*inps)))
inps = (torch.randn(20), torch.randn(20))
self.assertTrue(torch.allclose(ep.module()(*inps), swapped_gm(*inps)))
def test_remove_duplicate_pytree_simple(self):
class Child1(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y):
z = torch.ones_like(x)
w = y + z[1]
x = y * z[1]
return {"res1": x + y, "res2": x * y}
class Child2(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
return x["res2"] + x["res1"] - 1
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = Child1()
self.bar = Child2()
def forward(self, x, y):
x = self.foo(x, y)
x = self.bar(x)
return x
orig_eager = MyModule()
inps = torch.rand(2, 3), torch.rand(2, 3)
ep = export(
orig_eager,
inps,
{},
preserve_module_call_signature=("foo", "bar"),
strict=self.strict,
)
swapped_gm = _swap_modules(
ep,
{"foo": Child1(), "bar": Child2()},
)
self.assertTrue(torch.allclose(ep.module()(*inps), swapped_gm(*inps)))
self.assertExpectedInline(
swapped_gm.code.strip(),
"""\
def forward(self, x, y):
x_1 = x
y_1 = y
_spec_0 = self._spec_0
_spec_1 = self._spec_1
_spec_4 = self._spec_4
tree_flatten = torch.utils._pytree.tree_flatten((x_1, y_1)); x_1 = y_1 = None
getitem = tree_flatten[0]; tree_flatten = None
x = getitem[0]
y = getitem[1]; getitem = None
tree_unflatten_1 = torch.utils._pytree.tree_unflatten([x, y], _spec_1); x = y = _spec_1 = None
getitem_1 = tree_unflatten_1[0]; tree_unflatten_1 = None
getitem_2 = getitem_1[0]
getitem_3 = getitem_1[1]; getitem_1 = None
foo = self.foo(getitem_2, getitem_3); getitem_2 = getitem_3 = None
bar = self.bar(foo); foo = None
tree_flatten_spec_1 = torch.fx._pytree.tree_flatten_spec(bar, _spec_4); bar = _spec_4 = None
getitem_10 = tree_flatten_spec_1[0]; tree_flatten_spec_1 = None
tree_unflatten = torch.utils._pytree.tree_unflatten((getitem_10,), _spec_0); getitem_10 = _spec_0 = None
return tree_unflatten""",
)
@unittest.expectedFailure
def test_remove_duplicate_pytree_different_order(self):
"""
This is not supported yet because module `foo`s outputs are not all
directly used in as inputs to `bar` in the same order as outputted from
`foo`. To support this, we would have to do some sort of ordering.
"""
class Child1(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y):
return {"res1": x + y}, {"res2": x * y, "res3": x * x}
class Child2(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, y, x):
y = y["res2"] * y["res3"]
x = x["res1"] + x["res1"]
return y - x
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = Child1()
self.bar = Child2()
def forward(self, x, y):
x, y = self.foo(x, y)
x = self.bar(y, x)
return x
orig_eager = MyModule()
inps = torch.rand(2, 3), torch.rand(2, 3)
ep = export(
orig_eager,
inps,
{},
preserve_module_call_signature=("foo", "bar"),
strict=self.strict,
)
swapped_gm = _swap_modules(
ep,
{"foo": Child1(), "bar": Child2()},
)
self.assertTrue(torch.allclose(ep.module()(*inps), swapped_gm(*inps)))
self.assertExpectedInline(
swapped_gm.code.strip(),
"""\
def forward(self, x, y):
x, y, = fx_pytree.tree_flatten_spec(([x, y], {}), self._in_spec)
_spec_0 = self._spec_0
_spec_3 = self._spec_3
tree_unflatten = torch.utils._pytree.tree_unflatten([x, y], _spec_0); x = y = _spec_0 = None
getitem = tree_unflatten[0]; tree_unflatten = None
getitem_1 = getitem[0]
getitem_2 = getitem[1]; getitem = None
foo = self.foo(getitem_1, getitem_2); getitem_1 = getitem_2 = None
getitem_3 = foo[0]
getitem_4 = foo[1];
bar = self.bar(getitem_4, getitem_3); foo = None
tree_flatten_spec_1 = torch.fx._pytree.tree_flatten_spec(bar, _spec_3); bar = _spec_3 = None
getitem_9 = tree_flatten_spec_1[0]; tree_flatten_spec_1 = None
return pytree.tree_unflatten((getitem_9,), self._out_spec)""",
)
def test_custom_input_args(self):
@dataclass
class CustomInput:
a: Tensor
b: Tensor
register_dataclass_as_pytree_node(
CustomInput,
serialized_type_name="test_swap.test_custom_input.CustomInput",
)
class Foo(torch.nn.Module):
def forward(self, inputs):
return torch.matmul(inputs.a, inputs.b)
ep = export(
Foo(),
(CustomInput(torch.randn(2, 3), torch.randn(3, 2)),),
strict=self.strict,
)
swapped = _swap_modules(ep, {})
inp = (CustomInput(torch.randn(2, 3), torch.randn(3, 2)),)
res1 = torch.fx.Interpreter(swapped).run(*inp)
res2 = swapped(*inp)
self.assertTrue(torch.allclose(res1, res2))
def test_custom_input_kwargs(self):
@dataclass
class CustomInput:
a: Tensor
b: Tensor
register_dataclass(
CustomInput,
serialized_type_name="test_swap.test_custom_input.CustomInput",
)
class Foo(torch.nn.Module):
def forward(self, x, *, inputs):
return x + torch.matmul(inputs.a, inputs.b)
for use_new_tracer in [True, False]:
with config.patch(use_new_tracer_experimental=use_new_tracer):
ep = export(
Foo(),
(torch.randn(2, 2),),
{"inputs": CustomInput(torch.randn(2, 3), torch.randn(3, 2))},
strict=self.strict,
)
swapped = _swap_modules(ep, {})
inp_args = (torch.randn(2, 2),)
inp_kwargs = {"inputs": CustomInput(torch.randn(2, 3), torch.randn(3, 2))}
res1 = torch.fx.Interpreter(swapped).run(*(*inp_args, *inp_kwargs.values()))
res2 = swapped(*inp_args, **inp_kwargs)
self.assertTrue(torch.allclose(res1, res2))
def test_custom_input_kwargs_use_private(self):
@dataclass
class CustomInput:
a: Tensor
b: Tensor
register_dataclass_as_pytree_node(
CustomInput,
serialized_type_name="test_swap.test_custom_input.CustomInput",
)
class Foo(torch.nn.Module):
def forward(self, x, *, inputs):
return x + torch.matmul(inputs.a, inputs.b)
# shouldn't error
with config.patch(use_new_tracer_experimental=True):
_ = export(
Foo(),
(torch.randn(2, 2),),
{"inputs": CustomInput(torch.randn(2, 3), torch.randn(3, 2))},
strict=self.strict,
)
def test_custom_output(self):
@dataclass
class CustomOutput:
a: Tensor
b: Tensor
register_dataclass_as_pytree_node(
CustomOutput,
serialized_type_name="test_swap.test_custom_input.CustomInput",
)
class Foo(torch.nn.Module):
def forward(self, a, b):
return (CustomOutput(a * a, b * b), CustomOutput(a * b.T, a + b.T))
ep = export(Foo(), (torch.randn(2, 3), torch.randn(3, 2)), strict=True)
swapped = _swap_modules(ep, {})
inp = (torch.randn(2, 3), torch.randn(3, 2))
res1 = torch.fx.Interpreter(swapped).run(*inp)
res2 = swapped(*inp)
self.assertTrue(torch.allclose(res1[0].a, res2[0].a))
self.assertTrue(torch.allclose(res1[0].b, res2[0].b))
self.assertTrue(torch.allclose(res1[1].a, res2[1].a))
self.assertTrue(torch.allclose(res1[1].b, res2[1].b))
if __name__ == "__main__":
run_tests()
| TestSwap |
python | pypa__hatch | tests/backend/builders/test_sdist.py | {
"start": 21685,
"end": 55756
} | class ____:
def test_default(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"sdist": {"versions": ["standard"]}}},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_default", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
stat = os.stat(str(extraction_directory / builder.project_id / "PKG-INFO"))
assert stat.st_mtime == get_reproducible_timestamp()
def test_default_no_reproducible(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"sdist": {"versions": ["standard"], "reproducible": False}}},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_default", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
stat = os.stat(str(extraction_directory / builder.project_id / "PKG-INFO"))
assert stat.st_mtime != get_reproducible_timestamp()
def test_default_support_legacy(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"sdist": {"versions": ["standard"], "support-legacy": True}}},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_default_support_legacy", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_build_script_artifacts(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
vcs_ignore_file = project_path / ".gitignore"
vcs_ignore_file.write_text("*.pyc\n*.so\n*.h\n")
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
pathlib.Path('my_app', 'lib.so').touch()
pathlib.Path('my_app', 'lib.h').touch()
"""
)
)
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {
"sdist": {"versions": ["standard"], "exclude": [DEFAULT_BUILD_SCRIPT, ".gitignore"]}
},
"artifacts": ["my_app/lib.so"],
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_default_build_script_artifacts", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_build_script_extra_dependencies(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
vcs_ignore_file = project_path / ".gitignore"
vcs_ignore_file.write_text("*.pyc\n*.so\n*.h\n")
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
pathlib.Path('my_app', 'lib.so').touch()
pathlib.Path('my_app', 'lib.h').touch()
build_data['dependencies'].append('binary')
"""
)
)
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {
"sdist": {"versions": ["standard"], "exclude": [DEFAULT_BUILD_SCRIPT, ".gitignore"]}
},
"artifacts": ["my_app/lib.so"],
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_default_build_script_extra_dependencies", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
def test_include_project_file(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"], "readme": "README.md"},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"sdist": {"versions": ["standard"], "include": ["my_app/", "pyproject.toml"]}}
},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_include", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
stat = os.stat(str(extraction_directory / builder.project_id / "PKG-INFO"))
assert stat.st_mtime == get_reproducible_timestamp()
def test_project_file_always_included(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"], "readme": "README.md"},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {
"sdist": {
"versions": ["standard"],
"only-include": ["my_app"],
"exclude": ["pyproject.toml"],
},
},
},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
# Ensure that only the root project file is forcibly included
(project_path / "my_app" / "pyproject.toml").touch()
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_include", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
stat = os.stat(str(extraction_directory / builder.project_id / "PKG-INFO"))
assert stat.st_mtime == get_reproducible_timestamp()
def test_config_file_always_included(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"], "readme": "README.md"},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {
"sdist": {
"versions": ["standard"],
"only-include": ["my_app"],
"exclude": [DEFAULT_CONFIG_FILE],
},
},
},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
(project_path / DEFAULT_CONFIG_FILE).touch()
# Ensure that only the root config file is forcibly included
(project_path / "my_app" / DEFAULT_CONFIG_FILE).touch()
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_include_config_file", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
stat = os.stat(str(extraction_directory / builder.project_id / "PKG-INFO"))
assert stat.st_mtime == get_reproducible_timestamp()
def test_include_readme(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"], "readme": "README.md"},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"sdist": {"versions": ["standard"], "include": ["my_app/", "README.md"]}}},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_include", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
stat = os.stat(str(extraction_directory / builder.project_id / "PKG-INFO"))
assert stat.st_mtime == get_reproducible_timestamp()
def test_readme_always_included(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"], "readme": "README.md"},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {
"sdist": {"versions": ["standard"], "only-include": ["my_app"], "exclude": ["README.md"]},
},
},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
# Ensure that only the desired readme is forcibly included
(project_path / "my_app" / "README.md").touch()
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_include", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
stat = os.stat(str(extraction_directory / builder.project_id / "PKG-INFO"))
assert stat.st_mtime == get_reproducible_timestamp()
def test_include_license_files(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"], "readme": "README.md"},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"sdist": {"versions": ["standard"], "include": ["my_app/", "LICENSE.txt"]}}},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_include", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
stat = os.stat(str(extraction_directory / builder.project_id / "PKG-INFO"))
assert stat.st_mtime == get_reproducible_timestamp()
def test_license_files_always_included(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"], "readme": "README.md"},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {
"sdist": {"versions": ["standard"], "only-include": ["my_app"], "exclude": ["LICENSE.txt"]},
},
},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
# Ensure that only the desired readme is forcibly included
(project_path / "my_app" / "LICENSE.txt").touch()
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_include", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
stat = os.stat(str(extraction_directory / builder.project_id / "PKG-INFO"))
assert stat.st_mtime == get_reproducible_timestamp()
def test_default_vcs_git_exclusion_files(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
vcs_ignore_file = temp_dir / ".gitignore"
vcs_ignore_file.write_text("*.pyc\n*.so\n*.h\n")
(project_path / "my_app" / "lib.so").touch()
(project_path / "my_app" / "lib.h").touch()
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"sdist": {"versions": ["standard"], "exclude": [".gitignore"]}},
"artifacts": ["my_app/lib.so"],
},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_default_vcs_git_exclusion_files", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_vcs_mercurial_exclusion_files(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
vcs_ignore_file = temp_dir / ".hgignore"
vcs_ignore_file.write_text(
helpers.dedent(
"""
syntax: glob
*.pyc
syntax: foo
README.md
syntax: glob
*.so
*.h
"""
)
)
(project_path / "my_app" / "lib.so").touch()
(project_path / "my_app" / "lib.h").touch()
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"sdist": {"versions": ["standard"], "exclude": [".hgignore"]}},
"artifacts": ["my_app/lib.so"],
},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_default_vcs_mercurial_exclusion_files", project_name, relative_root=builder.project_id
)
helpers.assert_files(extraction_directory, expected_files)
def test_no_strict_naming(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"sdist": {"versions": ["standard"], "strict-naming": False}}},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.artifact_project_id}.tar.gz")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with tarfile.open(str(expected_artifact), "r:gz") as tar_archive:
tar_archive.extractall(str(extraction_directory), **helpers.tarfile_extraction_compat_options())
expected_files = helpers.get_template_files(
"sdist.standard_default", project_name, relative_root=builder.artifact_project_id
)
helpers.assert_files(extraction_directory, expected_files)
stat = os.stat(str(extraction_directory / builder.artifact_project_id / "PKG-INFO"))
assert stat.st_mtime == get_reproducible_timestamp()
def test_file_permissions_normalized(self, hatch, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"sdist": {"versions": ["standard"]}}},
},
},
}
builder = SdistBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.artifact_project_id}.tar.gz")
file_stat = os.stat(expected_artifact)
# we assert that at minimum 644 is set, based on the platform (e.g.)
# windows it may be higher
assert file_stat.st_mode & 0o644
| TestBuildStandard |
python | google__jax | jax/_src/literals.py | {
"start": 937,
"end": 1259
} | class ____(int):
dtype: np.dtype
def __new__(cls, value: int, dtype: np.dtype):
v = super(TypedInt, cls).__new__(cls, value)
v.dtype = dtype
return v
def __repr__(self):
return f'TypedInt({int(self)}, dtype={self.dtype.name})'
def __getnewargs__(self):
return (int(self), self.dtype)
| TypedInt |
python | astropy__astropy | astropy/io/fits/diff.py | {
"start": 36662,
"end": 42833
} | class ____(_BaseDiff):
"""
Diff two image data arrays (really any array from a PRIMARY HDU or an IMAGE
extension HDU, though the data unit is assumed to be "pixels").
`ImageDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: If the two arrays contain either a different number
of dimensions or different sizes in any dimension, this contains a
2-tuple of the shapes of each array. Currently no further comparison is
performed on images that don't have the exact same dimensions.
- ``diff_pixels``: If the two images contain any different pixels, this
contains a list of 2-tuples of the array index where the difference was
found, and another 2-tuple containing the different values. For example,
if the pixel at (0, 0) contains different values this would look like::
[(0, 0), (1.1, 2.2)]
where 1.1 and 2.2 are the values of that pixel in each array. This
array only contains up to ``self.numdiffs`` differences, for storage
efficiency.
- ``diff_total``: The total number of different pixels found between the
arrays. Although ``diff_pixels`` does not necessarily contain all the
different pixel values, this can be used to get a count of the total
number of differences found.
- ``diff_ratio``: Contains the ratio of ``diff_total`` to the total number
of pixels in the arrays.
"""
def __init__(self, a, b, numdiffs=10, rtol=0.0, atol=0.0):
"""
Parameters
----------
a : BaseHDU
An HDU object.
b : BaseHDU
An HDU object to compare to the first HDU object.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionadded:: 2.0
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
self.diff_dimensions = ()
self.diff_pixels = []
self.diff_ratio = 0
self.max_absolute = 0
self.max_relative = 0
# self.diff_pixels only holds up to numdiffs differing pixels, but this
# self.diff_total stores the total count of differences between
# the images, but not the different values
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
if self.a.shape != self.b.shape:
self.diff_dimensions = (self.a.shape, self.b.shape)
# Don't do any further comparison if the dimensions differ
# TODO: Perhaps we could, however, diff just the intersection
# between the two images
return
# Find the indices where the values are not equal
# If neither a nor b are floating point (or complex), ignore rtol and
# atol
if not (
np.issubdtype(self.a.dtype, np.inexact)
or np.issubdtype(self.b.dtype, np.inexact)
):
rtol = 0
atol = 0
else:
rtol = self.rtol
atol = self.atol
diffs, self.max_absolute, self.max_relative = where_not_allclose(
self.a, self.b, atol=atol, rtol=rtol, return_maxdiff=True
)
self.diff_total = len(diffs[0])
if self.diff_total == 0:
# Then we're done
return
if self.numdiffs < 0:
numdiffs = self.diff_total
else:
numdiffs = self.numdiffs
self.diff_pixels = [
(idx, (self.a[idx], self.b[idx]))
for idx in islice(zip(*diffs), 0, numdiffs)
]
self.diff_ratio = float(self.diff_total) / float(len(self.a.flat))
def _report(self):
if self.diff_dimensions:
dimsa = " x ".join(str(d) for d in reversed(self.diff_dimensions[0]))
dimsb = " x ".join(str(d) for d in reversed(self.diff_dimensions[1]))
self._writeln(" Data dimensions differ:")
self._writeln(f" a: {dimsa}")
self._writeln(f" b: {dimsb}")
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(" No further data comparison performed.")
return
if not self.diff_pixels:
return
for index, values in self.diff_pixels:
# Convert to int to avoid np.int64 in list repr.
index = [int(x + 1) for x in reversed(index)]
self._writeln(f" Data differs at {index}:")
report_diff_values(
values[0],
values[1],
fileobj=self._fileobj,
indent_width=self._indent + 1,
rtol=self.rtol,
atol=self.atol,
)
if self.diff_total > self.numdiffs:
self._writeln(" ...")
self._writeln(
f" {self.diff_total} different pixels found "
f"({self.diff_ratio:.2%} different)."
)
self._writeln(f" Maximum relative difference: {self.max_relative}")
self._writeln(f" Maximum absolute difference: {self.max_absolute}")
| ImageDataDiff |
python | coleifer__peewee | tests/cysqlite.py | {
"start": 16895,
"end": 17456
} | class ____(TableFunction):
columns = ('key', 'value')
params = ()
name = 'data_types'
def initialize(self):
self.values = (
None,
1,
2.,
u'unicode str',
b'byte str',
False,
True)
self.idx = 0
self.n = len(self.values)
def iterate(self, idx):
if idx < self.n:
return ('k%s' % idx, self.values[idx])
raise StopIteration
@skip_unless(sqlite3.sqlite_version_info >= (3, 9), 'requires sqlite >= 3.9')
| DataTypes |
python | huggingface__transformers | src/transformers/models/falcon_mamba/modular_falcon_mamba.py | {
"start": 26023,
"end": 26732
} | class ____(MambaModel, FalconMambaPreTrainedModel):
def __init__(self, config):
FalconMambaPreTrainedModel.__init__(self, config)
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.layers = nn.ModuleList(
[FalconMambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
self.norm_f = FalconMambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
# Initialize weights and apply final processing
self.post_init()
def load_hook(self, state_dict, prefix, *args):
raise AttributeError("Not needed for FalconMamba")
| FalconMambaModel |
python | kamyu104__LeetCode-Solutions | Python/evaluate-division.py | {
"start": 257,
"end": 1635
} | class ____(object):
def __init__(self):
self.set = {}
self.rank = collections.Counter()
def find_set(self, x):
xp, xr = self.set.setdefault(x, (x, 1.0))
if x != xp:
pp, pr = self.find_set(xp) # path compression.
self.set[x] = (pp, xr*pr) # x/pp = xr*pr
return self.set[x]
def union_set(self, x, y, r):
(xp, xr), (yp, yr) = map(self.find_set, (x, y))
if xp == yp:
return False
if self.rank[xp] < self.rank[yp]: # union by rank
# to make x/yp = r*yr and merge xp into yp
# => since x/xp = xr, we can merge with xp/yp = r*yr/xr
self.set[xp] = (yp, r*yr/xr)
elif self.rank[xp] > self.rank[yp]:
# to make y/xp = 1/r*xr and merge xp into yp
# => since y/yp = yr, we can merge with yp/xp = 1/r*xr/yr
self.set[yp] = (xp, 1.0/r*xr/yr)
else:
# to make y/xp = 1/r*xr and merge xp into yp
# => since y/yp = yr, we can merge with yp/xp = 1/r*xr/yr
self.set[yp] = (xp, 1.0/r*xr/yr)
self.rank[xp] += 1
return True
def query_set(self, x, y):
if x not in self.set or y not in self.set:
return -1.0
(xp, xr), (yp, yr) = map(self.find_set, (x, y))
return xr/yr if xp == yp else -1.0
| UnionFind |
python | python-openxml__python-docx | tests/test_section.py | {
"start": 18375,
"end": 23908
} | class ____:
"""Unit-test suite for `docx.section._BaseHeaderFooter`."""
@pytest.mark.parametrize(("has_definition", "expected_value"), [(False, True), (True, False)])
def it_knows_when_its_linked_to_the_previous_header_or_footer(
self,
has_definition: bool,
expected_value: bool,
header: _BaseHeaderFooter,
_has_definition_prop_: Mock,
):
_has_definition_prop_.return_value = has_definition
assert header.is_linked_to_previous is expected_value
@pytest.mark.parametrize(
("has_definition", "value", "drop_calls", "add_calls"),
[
(False, True, 0, 0),
(True, False, 0, 0),
(True, True, 1, 0),
(False, False, 0, 1),
],
)
def it_can_change_whether_it_is_linked_to_previous_header_or_footer(
self,
has_definition: bool,
value: bool,
drop_calls: int,
add_calls: int,
header: _BaseHeaderFooter,
_has_definition_prop_: Mock,
_drop_definition_: Mock,
_add_definition_: Mock,
):
_has_definition_prop_.return_value = has_definition
header.is_linked_to_previous = value
assert _drop_definition_.call_args_list == [call(header)] * drop_calls
assert _add_definition_.call_args_list == [call(header)] * add_calls
def it_provides_access_to_the_header_or_footer_part_for_BlockItemContainer(
self, header: _BaseHeaderFooter, _get_or_add_definition_: Mock, header_part_: Mock
):
# ---this override fulfills part of the BlockItemContainer subclass interface---
_get_or_add_definition_.return_value = header_part_
header_part = header.part
_get_or_add_definition_.assert_called_once_with(header)
assert header_part is header_part_
def it_provides_access_to_the_hdr_or_ftr_element_to_help(
self, header: _BaseHeaderFooter, _get_or_add_definition_: Mock, header_part_: Mock
):
hdr = element("w:hdr")
_get_or_add_definition_.return_value = header_part_
header_part_.element = hdr
hdr_elm = header._element
_get_or_add_definition_.assert_called_once_with(header)
assert hdr_elm is hdr
def it_gets_the_definition_when_it_has_one(
self,
header: _BaseHeaderFooter,
_has_definition_prop_: Mock,
_definition_prop_: Mock,
header_part_: Mock,
):
_has_definition_prop_.return_value = True
_definition_prop_.return_value = header_part_
header_part = header._get_or_add_definition()
assert header_part is header_part_
def but_it_gets_the_prior_definition_when_it_is_linked(
self,
header: _BaseHeaderFooter,
_has_definition_prop_: Mock,
_prior_headerfooter_prop_: Mock,
prior_headerfooter_: Mock,
header_part_: Mock,
):
_has_definition_prop_.return_value = False
_prior_headerfooter_prop_.return_value = prior_headerfooter_
prior_headerfooter_._get_or_add_definition.return_value = header_part_
header_part = header._get_or_add_definition()
prior_headerfooter_._get_or_add_definition.assert_called_once_with()
assert header_part is header_part_
def and_it_adds_a_definition_when_it_is_linked_and_the_first_section(
self,
header: _BaseHeaderFooter,
_has_definition_prop_: Mock,
_prior_headerfooter_prop_: Mock,
_add_definition_: Mock,
header_part_: Mock,
):
_has_definition_prop_.return_value = False
_prior_headerfooter_prop_.return_value = None
_add_definition_.return_value = header_part_
header_part = header._get_or_add_definition()
_add_definition_.assert_called_once_with(header)
assert header_part is header_part_
# -- fixture -----------------------------------------------------
@pytest.fixture
def _add_definition_(self, request: FixtureRequest):
return method_mock(request, _BaseHeaderFooter, "_add_definition")
@pytest.fixture
def _definition_prop_(self, request: FixtureRequest):
return property_mock(request, _BaseHeaderFooter, "_definition")
@pytest.fixture
def document_part_(self, request: FixtureRequest):
return instance_mock(request, DocumentPart)
@pytest.fixture
def _drop_definition_(self, request: FixtureRequest):
return method_mock(request, _BaseHeaderFooter, "_drop_definition")
@pytest.fixture
def _get_or_add_definition_(self, request: FixtureRequest):
return method_mock(request, _BaseHeaderFooter, "_get_or_add_definition")
@pytest.fixture
def _has_definition_prop_(self, request: FixtureRequest):
return property_mock(request, _BaseHeaderFooter, "_has_definition")
@pytest.fixture
def header(self, document_part_: Mock) -> _BaseHeaderFooter:
sectPr = cast(CT_SectPr, element("w:sectPr"))
return _BaseHeaderFooter(sectPr, document_part_, WD_HEADER_FOOTER.PRIMARY)
@pytest.fixture
def header_part_(self, request: FixtureRequest):
return instance_mock(request, HeaderPart)
@pytest.fixture
def prior_headerfooter_(self, request: FixtureRequest):
return instance_mock(request, _BaseHeaderFooter)
@pytest.fixture
def _prior_headerfooter_prop_(self, request: FixtureRequest):
return property_mock(request, _BaseHeaderFooter, "_prior_headerfooter")
| Describe_BaseHeaderFooter |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_exceptions/invalid_exceptions_caught.py | {
"start": 374,
"end": 449
} | class ____(MyGoodException):
"""Custom exception."""
| MySecondGoodException |
python | django-import-export__django-import-export | import_export/forms.py | {
"start": 3257,
"end": 3736
} | class ____(FieldNamePrefixMixin, forms.Form):
import_file_name = forms.CharField(widget=forms.HiddenInput())
original_file_name = forms.CharField(widget=forms.HiddenInput())
format = forms.CharField(widget=forms.HiddenInput())
resource = forms.CharField(widget=forms.HiddenInput(), required=False)
def clean_import_file_name(self):
data = self.cleaned_data["import_file_name"]
data = os.path.basename(data)
return data
| ConfirmImportForm |
python | walkccc__LeetCode | solutions/2825. Make String a Subsequence Using Cyclic Increments/2825.py | {
"start": 0,
"end": 303
} | class ____:
def canMakeSubsequence(self, str1: str, str2: str) -> bool:
i = 0 # str2's index
for c in str1:
if c == str2[i] or chr(
ord('a') + (ord(c) - ord('a') + 1) % 26) == str2[i]:
i += 1
if i == len(str2):
return True
return False
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1_network_policy_egress_rule.py | {
"start": 383,
"end": 5713
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'ports': 'list[V1NetworkPolicyPort]',
'to': 'list[V1NetworkPolicyPeer]'
}
attribute_map = {
'ports': 'ports',
'to': 'to'
}
def __init__(self, ports=None, to=None, local_vars_configuration=None): # noqa: E501
"""V1NetworkPolicyEgressRule - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._ports = None
self._to = None
self.discriminator = None
if ports is not None:
self.ports = ports
if to is not None:
self.to = to
@property
def ports(self):
"""Gets the ports of this V1NetworkPolicyEgressRule. # noqa: E501
ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list. # noqa: E501
:return: The ports of this V1NetworkPolicyEgressRule. # noqa: E501
:rtype: list[V1NetworkPolicyPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1NetworkPolicyEgressRule.
ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list. # noqa: E501
:param ports: The ports of this V1NetworkPolicyEgressRule. # noqa: E501
:type: list[V1NetworkPolicyPort]
"""
self._ports = ports
@property
def to(self):
"""Gets the to of this V1NetworkPolicyEgressRule. # noqa: E501
to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list. # noqa: E501
:return: The to of this V1NetworkPolicyEgressRule. # noqa: E501
:rtype: list[V1NetworkPolicyPeer]
"""
return self._to
@to.setter
def to(self, to):
"""Sets the to of this V1NetworkPolicyEgressRule.
to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list. # noqa: E501
:param to: The to of this V1NetworkPolicyEgressRule. # noqa: E501
:type: list[V1NetworkPolicyPeer]
"""
self._to = to
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NetworkPolicyEgressRule):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NetworkPolicyEgressRule):
return True
return self.to_dict() != other.to_dict()
| V1NetworkPolicyEgressRule |
python | cython__cython | Cython/Compiler/Dataclass.py | {
"start": 3069,
"end": 5585
} | class ____:
"""
Adds the ability to keep track of placeholder argument names to PyxCodeWriter.
Also adds extra_stats which are nodes bundled at the end when this
is converted to a tree.
"""
_placeholder_count = 0
def __init__(self, writer=None, placeholders=None, extra_stats=None):
self.writer = PyxCodeWriter() if writer is None else writer
self.placeholders = {} if placeholders is None else placeholders
self.extra_stats = [] if extra_stats is None else extra_stats
def add_code_line(self, code_line):
self.writer.putln(code_line)
def add_code_chunk(self, code_chunk):
self.writer.put_chunk(code_chunk)
def reset(self):
# don't attempt to reset placeholders - it really doesn't matter if
# we have unused placeholders
self.writer.reset()
def empty(self):
return self.writer.empty()
def indent(self):
self.writer.indent()
def dedent(self):
self.writer.dedent()
def indenter(self, block_opener_line):
return self.writer.indenter(block_opener_line)
def new_placeholder(self, field_names, value):
name = self._new_placeholder_name(field_names)
self.placeholders[name] = value
return name
def add_extra_statements(self, statements):
if self.extra_stats is None:
assert False, "Can only use add_extra_statements on top-level writer"
self.extra_stats.extend(statements)
def _new_placeholder_name(self, field_names):
while True:
name = f"DATACLASS_PLACEHOLDER_{self._placeholder_count:d}"
if (name not in self.placeholders
and name not in field_names):
# make sure name isn't already used and doesn't
# conflict with a variable name (which is unlikely but possible)
break
self._placeholder_count += 1
return name
def generate_tree(self, level='c_class'):
stat_list_node = TreeFragment(
self.writer.getvalue(),
level=level,
pipeline=[NormalizeTree(None)],
).substitute(self.placeholders)
stat_list_node.stats += self.extra_stats
return stat_list_node
def insertion_point(self):
new_writer = self.writer.insertion_point()
return TemplateCode(
writer=new_writer,
placeholders=self.placeholders,
extra_stats=self.extra_stats
)
| TemplateCode |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_lambda_function.py | {
"start": 1402,
"end": 4999
} | class ____:
def test_get_conn_returns_a_boto3_connection(self, hook):
assert hook.conn is not None
@mock.patch(
"airflow.providers.amazon.aws.hooks.lambda_function.LambdaHook.conn", new_callable=mock.PropertyMock
)
@pytest.mark.parametrize(
("payload", "invoke_payload"),
[(PAYLOAD, BYTES_PAYLOAD), (BYTES_PAYLOAD, BYTES_PAYLOAD)],
)
def test_invoke_lambda(self, mock_conn, payload, invoke_payload):
hook = LambdaHook()
hook.invoke_lambda(function_name=FUNCTION_NAME, payload=payload)
mock_conn().invoke.assert_called_once_with(
FunctionName=FUNCTION_NAME,
Payload=invoke_payload,
)
@pytest.mark.parametrize(
("hook_params", "boto3_params"),
[
pytest.param(
{
"function_name": FUNCTION_NAME,
"runtime": RUNTIME,
"role": ROLE,
"handler": HANDLER,
"code": CODE,
"package_type": "Zip",
},
{
"FunctionName": FUNCTION_NAME,
"Runtime": RUNTIME,
"Role": ROLE,
"Handler": HANDLER,
"Code": CODE,
"PackageType": "Zip",
},
id="'Zip' as 'package_type'",
),
pytest.param(
{
"function_name": FUNCTION_NAME,
"role": ROLE,
"code": CODE,
"package_type": "Image",
},
{
"FunctionName": FUNCTION_NAME,
"Role": ROLE,
"Code": CODE,
"PackageType": "Image",
},
id="'Image' as 'package_type'",
),
],
)
def test_create_lambda(self, hook_params, boto3_params, hook):
hook.conn.create_function.reset_mock()
hook.conn.create_function.return_value = {}
hook.create_lambda(**hook_params)
hook.conn.create_function.assert_called_once_with(**boto3_params)
@pytest.mark.parametrize(
"params",
[
pytest.param(
{
"handler": HANDLER,
},
id="'runtime' not provided",
),
pytest.param(
{
"runtime": RUNTIME,
},
id="'handler' not provided",
),
],
)
def test_create_lambda_with_zip_package_type_and_missing_args(self, params, hook):
hook.conn.create_function.return_value = {}
with pytest.raises(TypeError):
hook.create_lambda(
function_name=FUNCTION_NAME,
role=ROLE,
code=CODE,
package_type="Zip",
**params,
)
def test_encode_log_result(self):
assert LambdaHook.encode_log_result(LOG_RESPONSE) == ["FOO", "", "BAR", ""]
assert LambdaHook.encode_log_result(LOG_RESPONSE, keep_empty_lines=False) == ["FOO", "BAR"]
assert LambdaHook.encode_log_result("") == []
@pytest.mark.parametrize(
"log_result",
[
pytest.param(BAD_LOG_RESPONSE, id="corrupted"),
pytest.param(None, id="none"),
],
)
def test_encode_corrupted_log_result(self, log_result):
assert LambdaHook.encode_log_result(log_result) is None
| TestLambdaHook |
python | astropy__astropy | astropy/uncertainty/tests/test_distribution.py | {
"start": 20077,
"end": 21237
} | class ____:
@classmethod
def setup_class(cls):
cls.distribution = np.arange(60.0).reshape(3, 4, 5)
cls.d = Distribution(cls.distribution)
def test_setup(self):
ai1, ai2 = ADVANCED_INDICES[:2]
# Check that the first two indices produce the same output.
assert_array_equal(self.distribution[ai1], self.distribution[ai2])
@pytest.mark.parametrize("item", ADVANCED_INDICES)
def test_getitem(self, item):
v = self.d[item]
assert v.shape == item[0].shape
assert_array_equal(v.distribution, self.distribution[item])
@pytest.mark.parametrize("item", [([0, 4],), ([0], [0], [0])])
def test_getitem_bad(self, item):
with pytest.raises(IndexError):
self.d[item]
@pytest.mark.parametrize("item", ADVANCED_INDICES)
def test_setitem(self, item):
d = self.d.copy()
d[item] = 0.0
distribution = self.distribution.copy()
distribution[item] = 0.0
assert_array_equal(d.distribution, distribution)
d[item] = self.d[item]
assert_array_equal(d.distribution, self.distribution)
| TestGetSetItemAdvancedIndex |
python | simonw__datasette | datasette/views/table.py | {
"start": 21846,
"end": 63070
} | class ____(BaseView):
name = "table-drop"
def __init__(self, datasette):
self.ds = datasette
async def post(self, request):
try:
resolved = await self.ds.resolve_table(request)
except NotFound as e:
return _error([e.args[0]], 404)
db = resolved.db
database_name = db.name
table_name = resolved.table
# Table must exist
db = self.ds.get_database(database_name)
if not await db.table_exists(table_name):
return _error(["Table not found: {}".format(table_name)], 404)
if not await self.ds.allowed(
action="drop-table",
resource=TableResource(database=database_name, table=table_name),
actor=request.actor,
):
return _error(["Permission denied"], 403)
if not db.is_mutable:
return _error(["Database is immutable"], 403)
confirm = False
try:
data = json.loads(await request.post_body())
confirm = data.get("confirm")
except json.JSONDecodeError:
pass
if not confirm:
return Response.json(
{
"ok": True,
"database": database_name,
"table": table_name,
"row_count": (
await db.execute("select count(*) from [{}]".format(table_name))
).single_value(),
"message": 'Pass "confirm": true to confirm',
},
status=200,
)
# Drop table
def drop_table(conn):
sqlite_utils.Database(conn)[table_name].drop()
await db.execute_write_fn(drop_table)
await self.ds.track_event(
DropTableEvent(
actor=request.actor, database=database_name, table=table_name
)
)
return Response.json({"ok": True}, status=200)
def _get_extras(request):
extra_bits = request.args.getlist("_extra")
extras = set()
for bit in extra_bits:
extras.update(bit.split(","))
return extras
async def _columns_to_select(table_columns, pks, request):
columns = list(table_columns)
if "_col" in request.args:
columns = list(pks)
_cols = request.args.getlist("_col")
bad_columns = [column for column in _cols if column not in table_columns]
if bad_columns:
raise DatasetteError(
"_col={} - invalid columns".format(", ".join(bad_columns)),
status=400,
)
# De-duplicate maintaining order:
columns.extend(dict.fromkeys(_cols))
if "_nocol" in request.args:
# Return all columns EXCEPT these
bad_columns = [
column
for column in request.args.getlist("_nocol")
if (column not in table_columns) or (column in pks)
]
if bad_columns:
raise DatasetteError(
"_nocol={} - invalid columns".format(", ".join(bad_columns)),
status=400,
)
tmp_columns = [
column for column in columns if column not in request.args.getlist("_nocol")
]
columns = tmp_columns
return columns
async def _sortable_columns_for_table(datasette, database_name, table_name, use_rowid):
db = datasette.databases[database_name]
table_metadata = await datasette.table_config(database_name, table_name)
if "sortable_columns" in table_metadata:
sortable_columns = set(table_metadata["sortable_columns"])
else:
sortable_columns = set(await db.table_columns(table_name))
if use_rowid:
sortable_columns.add("rowid")
return sortable_columns
async def _sort_order(table_metadata, sortable_columns, request, order_by):
sort = request.args.get("_sort")
sort_desc = request.args.get("_sort_desc")
if not sort and not sort_desc:
sort = table_metadata.get("sort")
sort_desc = table_metadata.get("sort_desc")
if sort and sort_desc:
raise DatasetteError(
"Cannot use _sort and _sort_desc at the same time", status=400
)
if sort:
if sort not in sortable_columns:
raise DatasetteError(f"Cannot sort table by {sort}", status=400)
order_by = escape_sqlite(sort)
if sort_desc:
if sort_desc not in sortable_columns:
raise DatasetteError(f"Cannot sort table by {sort_desc}", status=400)
order_by = f"{escape_sqlite(sort_desc)} desc"
return sort, sort_desc, order_by
async def table_view(datasette, request):
await datasette.refresh_schemas()
with tracer.trace_child_tasks():
response = await table_view_traced(datasette, request)
# CORS
if datasette.cors:
add_cors_headers(response.headers)
# Cache TTL header
ttl = request.args.get("_ttl", None)
if ttl is None or not ttl.isdigit():
ttl = datasette.setting("default_cache_ttl")
if datasette.cache_headers and response.status == 200:
ttl = int(ttl)
if ttl == 0:
ttl_header = "no-cache"
else:
ttl_header = f"max-age={ttl}"
response.headers["Cache-Control"] = ttl_header
# Referrer policy
response.headers["Referrer-Policy"] = "no-referrer"
return response
async def table_view_traced(datasette, request):
from datasette.app import TableNotFound
try:
resolved = await datasette.resolve_table(request)
except TableNotFound as not_found:
# Was this actually a canned query?
canned_query = await datasette.get_canned_query(
not_found.database_name, not_found.table, request.actor
)
# If this is a canned query, not a table, then dispatch to QueryView instead
if canned_query:
return await QueryView()(request, datasette)
else:
raise
if request.method == "POST":
return Response.text("Method not allowed", status=405)
format_ = request.url_vars.get("format") or "html"
extra_extras = None
context_for_html_hack = False
default_labels = False
if format_ == "html":
extra_extras = {"_html"}
context_for_html_hack = True
default_labels = True
view_data = await table_view_data(
datasette,
request,
resolved,
extra_extras=extra_extras,
context_for_html_hack=context_for_html_hack,
default_labels=default_labels,
)
if isinstance(view_data, Response):
return view_data
data, rows, columns, expanded_columns, sql, next_url = view_data
# Handle formats from plugins
if format_ == "csv":
async def fetch_data(request, _next=None):
(
data,
rows,
columns,
expanded_columns,
sql,
next_url,
) = await table_view_data(
datasette,
request,
resolved,
extra_extras=extra_extras,
context_for_html_hack=context_for_html_hack,
default_labels=default_labels,
_next=_next,
)
data["rows"] = rows
data["table"] = resolved.table
data["columns"] = columns
data["expanded_columns"] = expanded_columns
return data, None, None
return await stream_csv(datasette, fetch_data, request, resolved.db.name)
elif format_ in datasette.renderers.keys():
# Dispatch request to the correct output format renderer
# (CSV is not handled here due to streaming)
result = call_with_supported_arguments(
datasette.renderers[format_][0],
datasette=datasette,
columns=columns,
rows=rows,
sql=sql,
query_name=None,
database=resolved.db.name,
table=resolved.table,
request=request,
view_name="table",
truncated=False,
error=None,
# These will be deprecated in Datasette 1.0:
args=request.args,
data=data,
)
if asyncio.iscoroutine(result):
result = await result
if result is None:
raise NotFound("No data")
if isinstance(result, dict):
r = Response(
body=result.get("body"),
status=result.get("status_code") or 200,
content_type=result.get("content_type", "text/plain"),
headers=result.get("headers"),
)
elif isinstance(result, Response):
r = result
# if status_code is not None:
# # Over-ride the status code
# r.status = status_code
else:
assert False, f"{result} should be dict or Response"
elif format_ == "html":
headers = {}
templates = [
f"table-{to_css_class(resolved.db.name)}-{to_css_class(resolved.table)}.html",
"table.html",
]
environment = datasette.get_jinja_environment(request)
template = environment.select_template(templates)
alternate_url_json = datasette.absolute_url(
request,
datasette.urls.path(path_with_format(request=request, format="json")),
)
headers.update(
{
"Link": '<{}>; rel="alternate"; type="application/json+datasette"'.format(
alternate_url_json
)
}
)
r = Response.html(
await datasette.render_template(
template,
dict(
data,
append_querystring=append_querystring,
path_with_replaced_args=path_with_replaced_args,
fix_path=datasette.urls.path,
settings=datasette.settings_dict(),
# TODO: review up all of these hacks:
alternate_url_json=alternate_url_json,
datasette_allow_facet=(
"true" if datasette.setting("allow_facet") else "false"
),
is_sortable=any(c["sortable"] for c in data["display_columns"]),
allow_execute_sql=await datasette.allowed(
action="execute-sql",
resource=DatabaseResource(database=resolved.db.name),
actor=request.actor,
),
query_ms=1.2,
select_templates=[
f"{'*' if template_name == template.name else ''}{template_name}"
for template_name in templates
],
top_table=make_slot_function(
"top_table",
datasette,
request,
database=resolved.db.name,
table=resolved.table,
),
count_limit=resolved.db.count_limit,
),
request=request,
view_name="table",
),
headers=headers,
)
else:
assert False, "Invalid format: {}".format(format_)
if next_url:
r.headers["link"] = f'<{next_url}>; rel="next"'
return r
async def table_view_data(
datasette,
request,
resolved,
extra_extras=None,
context_for_html_hack=False,
default_labels=False,
_next=None,
):
extra_extras = extra_extras or set()
# We have a table or view
db = resolved.db
database_name = resolved.db.name
table_name = resolved.table
is_view = resolved.is_view
# Can this user view it?
visible, private = await datasette.check_visibility(
request.actor,
action="view-table",
resource=TableResource(database=database_name, table=table_name),
)
if not visible:
raise Forbidden("You do not have permission to view this table")
# Redirect based on request.args, if necessary
redirect_response = await _redirect_if_needed(datasette, request, resolved)
if redirect_response:
return redirect_response
# Introspect columns and primary keys for table
pks = await db.primary_keys(table_name)
table_columns = await db.table_columns(table_name)
# Take ?_col= and ?_nocol= into account
specified_columns = await _columns_to_select(table_columns, pks, request)
select_specified_columns = ", ".join(escape_sqlite(t) for t in specified_columns)
select_all_columns = ", ".join(escape_sqlite(t) for t in table_columns)
# rowid tables (no specified primary key) need a different SELECT
use_rowid = not pks and not is_view
order_by = ""
if use_rowid:
select_specified_columns = f"rowid, {select_specified_columns}"
select_all_columns = f"rowid, {select_all_columns}"
order_by = "rowid"
order_by_pks = "rowid"
else:
order_by_pks = ", ".join([escape_sqlite(pk) for pk in pks])
order_by = order_by_pks
if is_view:
order_by = ""
# TODO: This logic should turn into logic about which ?_extras get
# executed instead:
nocount = request.args.get("_nocount")
nofacet = request.args.get("_nofacet")
nosuggest = request.args.get("_nosuggest")
if request.args.get("_shape") in ("array", "object"):
nocount = True
nofacet = True
table_metadata = await datasette.table_config(database_name, table_name)
# Arguments that start with _ and don't contain a __ are
# special - things like ?_search= - and should not be
# treated as filters.
filter_args = []
for key in request.args:
if not (key.startswith("_") and "__" not in key):
for v in request.args.getlist(key):
filter_args.append((key, v))
# Build where clauses from query string arguments
filters = Filters(sorted(filter_args))
where_clauses, params = filters.build_where_clauses(table_name)
# Execute filters_from_request plugin hooks - including the default
# ones that live in datasette/filters.py
extra_context_from_filters = {}
extra_human_descriptions = []
for hook in pm.hook.filters_from_request(
request=request,
table=table_name,
database=database_name,
datasette=datasette,
):
filter_arguments = await await_me_maybe(hook)
if filter_arguments:
where_clauses.extend(filter_arguments.where_clauses)
params.update(filter_arguments.params)
extra_human_descriptions.extend(filter_arguments.human_descriptions)
extra_context_from_filters.update(filter_arguments.extra_context)
# Deal with custom sort orders
sortable_columns = await _sortable_columns_for_table(
datasette, database_name, table_name, use_rowid
)
sort, sort_desc, order_by = await _sort_order(
table_metadata, sortable_columns, request, order_by
)
from_sql = "from {table_name} {where}".format(
table_name=escape_sqlite(table_name),
where=(
("where {} ".format(" and ".join(where_clauses))) if where_clauses else ""
),
)
# Copy of params so we can mutate them later:
from_sql_params = dict(**params)
count_sql = f"select count(*) {from_sql}"
# Handle pagination driven by ?_next=
_next = _next or request.args.get("_next")
offset = ""
if _next:
sort_value = None
if is_view:
# _next is an offset
offset = f" offset {int(_next)}"
else:
components = urlsafe_components(_next)
# If a sort order is applied and there are multiple components,
# the first of these is the sort value
if (sort or sort_desc) and (len(components) > 1):
sort_value = components[0]
# Special case for if non-urlencoded first token was $null
if _next.split(",")[0] == "$null":
sort_value = None
components = components[1:]
# Figure out the SQL for next-based-on-primary-key first
next_by_pk_clauses = []
if use_rowid:
next_by_pk_clauses.append(f"rowid > :p{len(params)}")
params[f"p{len(params)}"] = components[0]
else:
# Apply the tie-breaker based on primary keys
if len(components) == len(pks):
param_len = len(params)
next_by_pk_clauses.append(compound_keys_after_sql(pks, param_len))
for i, pk_value in enumerate(components):
params[f"p{param_len + i}"] = pk_value
# Now add the sort SQL, which may incorporate next_by_pk_clauses
if sort or sort_desc:
if sort_value is None:
if sort_desc:
# Just items where column is null ordered by pk
where_clauses.append(
"({column} is null and {next_clauses})".format(
column=escape_sqlite(sort_desc),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
else:
where_clauses.append(
"({column} is not null or ({column} is null and {next_clauses}))".format(
column=escape_sqlite(sort),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
else:
where_clauses.append(
"({column} {op} :p{p}{extra_desc_only} or ({column} = :p{p} and {next_clauses}))".format(
column=escape_sqlite(sort or sort_desc),
op=">" if sort else "<",
p=len(params),
extra_desc_only=(
""
if sort
else " or {column2} is null".format(
column2=escape_sqlite(sort or sort_desc)
)
),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
params[f"p{len(params)}"] = sort_value
order_by = f"{order_by}, {order_by_pks}"
else:
where_clauses.extend(next_by_pk_clauses)
where_clause = ""
if where_clauses:
where_clause = f"where {' and '.join(where_clauses)} "
if order_by:
order_by = f"order by {order_by}"
extra_args = {}
# Handle ?_size=500
# TODO: This was:
# page_size = _size or request.args.get("_size") or table_metadata.get("size")
page_size = request.args.get("_size") or table_metadata.get("size")
if page_size:
if page_size == "max":
page_size = datasette.max_returned_rows
try:
page_size = int(page_size)
if page_size < 0:
raise ValueError
except ValueError:
raise BadRequest("_size must be a positive integer")
if page_size > datasette.max_returned_rows:
raise BadRequest(f"_size must be <= {datasette.max_returned_rows}")
extra_args["page_size"] = page_size
else:
page_size = datasette.page_size
# Facets are calculated against SQL without order by or limit
sql_no_order_no_limit = (
"select {select_all_columns} from {table_name} {where}".format(
select_all_columns=select_all_columns,
table_name=escape_sqlite(table_name),
where=where_clause,
)
)
# This is the SQL that populates the main table on the page
sql = "select {select_specified_columns} from {table_name} {where}{order_by} limit {page_size}{offset}".format(
select_specified_columns=select_specified_columns,
table_name=escape_sqlite(table_name),
where=where_clause,
order_by=order_by,
page_size=page_size + 1,
offset=offset,
)
if request.args.get("_timelimit"):
extra_args["custom_time_limit"] = int(request.args.get("_timelimit"))
# Execute the main query!
try:
results = await db.execute(sql, params, truncate=True, **extra_args)
except (sqlite3.OperationalError, InvalidSql) as e:
raise DatasetteError(str(e), title="Invalid SQL", status=400)
except sqlite3.OperationalError as e:
raise DatasetteError(str(e))
columns = [r[0] for r in results.description]
rows = list(results.rows)
# Expand labeled columns if requested
expanded_columns = []
# List of (fk_dict, label_column-or-None) pairs for that table
expandable_columns = []
for fk in await db.foreign_keys_for_table(table_name):
label_column = await db.label_column_for_table(fk["other_table"])
expandable_columns.append((fk, label_column))
columns_to_expand = None
try:
all_labels = value_as_boolean(request.args.get("_labels", ""))
except ValueError:
all_labels = default_labels
# Check for explicit _label=
if "_label" in request.args:
columns_to_expand = request.args.getlist("_label")
if columns_to_expand is None and all_labels:
# expand all columns with foreign keys
columns_to_expand = [fk["column"] for fk, _ in expandable_columns]
if columns_to_expand:
expanded_labels = {}
for fk, _ in expandable_columns:
column = fk["column"]
if column not in columns_to_expand:
continue
if column not in columns:
continue
expanded_columns.append(column)
# Gather the values
column_index = columns.index(column)
values = [row[column_index] for row in rows]
# Expand them
expanded_labels.update(
await datasette.expand_foreign_keys(
request.actor, database_name, table_name, column, values
)
)
if expanded_labels:
# Rewrite the rows
new_rows = []
for row in rows:
new_row = CustomRow(columns)
for column in row.keys():
value = row[column]
if (column, value) in expanded_labels and value is not None:
new_row[column] = {
"value": value,
"label": expanded_labels[(column, value)],
}
else:
new_row[column] = value
new_rows.append(new_row)
rows = new_rows
_next = request.args.get("_next")
# Pagination next link
next_value, next_url = await _next_value_and_url(
datasette,
db,
request,
table_name,
_next,
rows,
pks,
use_rowid,
sort,
sort_desc,
page_size,
is_view,
)
rows = rows[:page_size]
# Resolve extras
extras = _get_extras(request)
if any(k for k in request.args.keys() if k == "_facet" or k.startswith("_facet_")):
extras.add("facet_results")
if request.args.get("_shape") == "object":
extras.add("primary_keys")
if extra_extras:
extras.update(extra_extras)
async def extra_count_sql():
return count_sql
async def extra_count():
"Total count of rows matching these filters"
# Calculate the total count for this query
count = None
if (
not db.is_mutable
and datasette.inspect_data
and count_sql == f"select count(*) from {table_name} "
):
# We can use a previously cached table row count
try:
count = datasette.inspect_data[database_name]["tables"][table_name][
"count"
]
except KeyError:
pass
# Otherwise run a select count(*) ...
if count_sql and count is None and not nocount:
count_sql_limited = (
f"select count(*) from (select * {from_sql} limit 10001)"
)
try:
count_rows = list(await db.execute(count_sql_limited, from_sql_params))
count = count_rows[0][0]
except QueryInterrupted:
pass
return count
async def facet_instances(extra_count):
facet_instances = []
facet_classes = list(
itertools.chain.from_iterable(pm.hook.register_facet_classes())
)
for facet_class in facet_classes:
facet_instances.append(
facet_class(
datasette,
request,
database_name,
sql=sql_no_order_no_limit,
params=params,
table=table_name,
table_config=table_metadata,
row_count=extra_count,
)
)
return facet_instances
async def extra_facet_results(facet_instances):
"Results of facets calculated against this data"
facet_results = {}
facets_timed_out = []
if not nofacet:
# Run them in parallel
facet_awaitables = [facet.facet_results() for facet in facet_instances]
facet_awaitable_results = await run_sequential(*facet_awaitables)
for (
instance_facet_results,
instance_facets_timed_out,
) in facet_awaitable_results:
for facet_info in instance_facet_results:
base_key = facet_info["name"]
key = base_key
i = 1
while key in facet_results:
i += 1
key = f"{base_key}_{i}"
facet_results[key] = facet_info
facets_timed_out.extend(instance_facets_timed_out)
return {
"results": facet_results,
"timed_out": facets_timed_out,
}
async def extra_suggested_facets(facet_instances):
"Suggestions for facets that might return interesting results"
suggested_facets = []
# Calculate suggested facets
if (
datasette.setting("suggest_facets")
and datasette.setting("allow_facet")
and not _next
and not nofacet
and not nosuggest
):
# Run them in parallel
facet_suggest_awaitables = [facet.suggest() for facet in facet_instances]
for suggest_result in await run_sequential(*facet_suggest_awaitables):
suggested_facets.extend(suggest_result)
return suggested_facets
# Faceting
if not datasette.setting("allow_facet") and any(
arg.startswith("_facet") for arg in request.args
):
raise BadRequest("_facet= is not allowed")
# human_description_en combines filters AND search, if provided
async def extra_human_description_en():
"Human-readable description of the filters"
human_description_en = filters.human_description_en(
extra=extra_human_descriptions
)
if sort or sort_desc:
human_description_en = " ".join(
[b for b in [human_description_en, sorted_by] if b]
)
return human_description_en
if sort or sort_desc:
sorted_by = "sorted by {}{}".format(
(sort or sort_desc), " descending" if sort_desc else ""
)
async def extra_next_url():
"Full URL for the next page of results"
return next_url
async def extra_columns():
"Column names returned by this query"
return columns
async def extra_primary_keys():
"Primary keys for this table"
return pks
async def extra_actions():
async def actions():
links = []
kwargs = {
"datasette": datasette,
"database": database_name,
"actor": request.actor,
"request": request,
}
if is_view:
kwargs["view"] = table_name
method = pm.hook.view_actions
else:
kwargs["table"] = table_name
method = pm.hook.table_actions
for hook in method(**kwargs):
extra_links = await await_me_maybe(hook)
if extra_links:
links.extend(extra_links)
return links
return actions
async def extra_is_view():
return is_view
async def extra_debug():
"Extra debug information"
return {
"resolved": repr(resolved),
"url_vars": request.url_vars,
"nofacet": nofacet,
"nosuggest": nosuggest,
}
async def extra_request():
"Full information about the request"
return {
"url": request.url,
"path": request.path,
"full_path": request.full_path,
"host": request.host,
"args": request.args._data,
}
async def run_display_columns_and_rows():
display_columns, display_rows = await display_columns_and_rows(
datasette,
database_name,
table_name,
results.description,
rows,
link_column=not is_view,
truncate_cells=datasette.setting("truncate_cells_html"),
sortable_columns=sortable_columns,
request=request,
)
return {
"columns": display_columns,
"rows": display_rows,
}
async def extra_display_columns(run_display_columns_and_rows):
return run_display_columns_and_rows["columns"]
async def extra_display_rows(run_display_columns_and_rows):
return run_display_columns_and_rows["rows"]
async def extra_query():
"Details of the underlying SQL query"
return {
"sql": sql,
"params": params,
}
async def extra_metadata():
"Metadata about the table and database"
tablemetadata = await datasette.get_resource_metadata(database_name, table_name)
rows = await datasette.get_internal_database().execute(
"""
SELECT
column_name,
value
FROM metadata_columns
WHERE database_name = ?
AND resource_name = ?
AND key = 'description'
""",
[database_name, table_name],
)
tablemetadata["columns"] = dict(rows)
return tablemetadata
async def extra_database():
return database_name
async def extra_table():
return table_name
async def extra_database_color():
return db.color
async def extra_form_hidden_args():
form_hidden_args = []
for key in request.args:
if (
key.startswith("_")
and key not in ("_sort", "_sort_desc", "_search", "_next")
and "__" not in key
):
for value in request.args.getlist(key):
form_hidden_args.append((key, value))
return form_hidden_args
async def extra_filters():
return filters
async def extra_custom_table_templates():
return [
f"_table-{to_css_class(database_name)}-{to_css_class(table_name)}.html",
f"_table-table-{to_css_class(database_name)}-{to_css_class(table_name)}.html",
"_table.html",
]
async def extra_sorted_facet_results(extra_facet_results):
return sorted(
extra_facet_results["results"].values(),
key=lambda f: (len(f["results"]), f["name"]),
reverse=True,
)
async def extra_table_definition():
return await db.get_table_definition(table_name)
async def extra_view_definition():
return await db.get_view_definition(table_name)
async def extra_renderers(extra_expandable_columns, extra_query):
renderers = {}
url_labels_extra = {}
if extra_expandable_columns:
url_labels_extra = {"_labels": "on"}
for key, (_, can_render) in datasette.renderers.items():
it_can_render = call_with_supported_arguments(
can_render,
datasette=datasette,
columns=columns or [],
rows=rows or [],
sql=extra_query.get("sql", None),
query_name=None,
database=database_name,
table=table_name,
request=request,
view_name="table",
)
it_can_render = await await_me_maybe(it_can_render)
if it_can_render:
renderers[key] = datasette.urls.path(
path_with_format(
request=request, format=key, extra_qs={**url_labels_extra}
)
)
return renderers
async def extra_private():
return private
async def extra_expandable_columns():
expandables = []
db = datasette.databases[database_name]
for fk in await db.foreign_keys_for_table(table_name):
label_column = await db.label_column_for_table(fk["other_table"])
expandables.append((fk, label_column))
return expandables
async def extra_extras():
"Available ?_extra= blocks"
all_extras = [
(key[len("extra_") :], fn.__doc__)
for key, fn in registry._registry.items()
if key.startswith("extra_")
]
return [
{
"name": name,
"description": doc,
"toggle_url": datasette.absolute_url(
request,
datasette.urls.path(
path_with_added_args(request, {"_extra": name})
if name not in extras
else path_with_removed_args(request, {"_extra": name})
),
),
"selected": name in extras,
}
for name, doc in all_extras
]
async def extra_facets_timed_out(extra_facet_results):
return extra_facet_results["timed_out"]
bundles = {
"html": [
"suggested_facets",
"facet_results",
"facets_timed_out",
"count",
"count_sql",
"human_description_en",
"next_url",
"metadata",
"query",
"columns",
"display_columns",
"display_rows",
"database",
"table",
"database_color",
"actions",
"filters",
"renderers",
"custom_table_templates",
"sorted_facet_results",
"table_definition",
"view_definition",
"is_view",
"private",
"primary_keys",
"expandable_columns",
"form_hidden_args",
]
}
for key, values in bundles.items():
if f"_{key}" in extras:
extras.update(values)
extras.discard(f"_{key}")
registry = Registry(
extra_count,
extra_count_sql,
extra_facet_results,
extra_facets_timed_out,
extra_suggested_facets,
facet_instances,
extra_human_description_en,
extra_next_url,
extra_columns,
extra_primary_keys,
run_display_columns_and_rows,
extra_display_columns,
extra_display_rows,
extra_debug,
extra_request,
extra_query,
extra_metadata,
extra_extras,
extra_database,
extra_table,
extra_database_color,
extra_actions,
extra_filters,
extra_renderers,
extra_custom_table_templates,
extra_sorted_facet_results,
extra_table_definition,
extra_view_definition,
extra_is_view,
extra_private,
extra_expandable_columns,
extra_form_hidden_args,
)
results = await registry.resolve_multi(
["extra_{}".format(extra) for extra in extras]
)
data = {
"ok": True,
"next": next_value and str(next_value) or None,
}
data.update(
{
key.replace("extra_", ""): value
for key, value in results.items()
if key.startswith("extra_") and key.replace("extra_", "") in extras
}
)
raw_sqlite_rows = rows[:page_size]
data["rows"] = [dict(r) for r in raw_sqlite_rows]
if context_for_html_hack:
data.update(extra_context_from_filters)
# filter_columns combine the columns we know are available
# in the table with any additional columns (such as rowid)
# which are available in the query
data["filter_columns"] = list(columns) + [
table_column
for table_column in table_columns
if table_column not in columns
]
url_labels_extra = {}
if data.get("expandable_columns"):
url_labels_extra = {"_labels": "on"}
url_csv_args = {"_size": "max", **url_labels_extra}
url_csv = datasette.urls.path(
path_with_format(request=request, format="csv", extra_qs=url_csv_args)
)
url_csv_path = url_csv.split("?")[0]
data.update(
{
"url_csv": url_csv,
"url_csv_path": url_csv_path,
"url_csv_hidden_args": [
(key, value)
for key, value in urllib.parse.parse_qsl(request.query_string)
if key not in ("_labels", "_facet", "_size")
]
+ [("_size", "max")],
}
)
# if no sort specified AND table has a single primary key,
# set sort to that so arrow is displayed
if not sort and not sort_desc:
if 1 == len(pks):
sort = pks[0]
elif use_rowid:
sort = "rowid"
data["sort"] = sort
data["sort_desc"] = sort_desc
return data, rows[:page_size], columns, expanded_columns, sql, next_url
async def _next_value_and_url(
datasette,
db,
request,
table_name,
_next,
rows,
pks,
use_rowid,
sort,
sort_desc,
page_size,
is_view,
):
next_value = None
next_url = None
if 0 < page_size < len(rows):
if is_view:
next_value = int(_next or 0) + page_size
else:
next_value = path_from_row_pks(rows[-2], pks, use_rowid)
# If there's a sort or sort_desc, add that value as a prefix
if (sort or sort_desc) and not is_view:
try:
prefix = rows[-2][sort or sort_desc]
except IndexError:
# sort/sort_desc column missing from SELECT - look up value by PK instead
prefix_where_clause = " and ".join(
"[{}] = :pk{}".format(pk, i) for i, pk in enumerate(pks)
)
prefix_lookup_sql = "select [{}] from [{}] where {}".format(
sort or sort_desc, table_name, prefix_where_clause
)
prefix = (
await db.execute(
prefix_lookup_sql,
{
**{
"pk{}".format(i): rows[-2][pk]
for i, pk in enumerate(pks)
}
},
)
).single_value()
if isinstance(prefix, dict) and "value" in prefix:
prefix = prefix["value"]
if prefix is None:
prefix = "$null"
else:
prefix = tilde_encode(str(prefix))
next_value = f"{prefix},{next_value}"
added_args = {"_next": next_value}
if sort:
added_args["_sort"] = sort
else:
added_args["_sort_desc"] = sort_desc
else:
added_args = {"_next": next_value}
next_url = datasette.absolute_url(
request, datasette.urls.path(path_with_replaced_args(request, added_args))
)
return next_value, next_url
| TableDropView |
python | lepture__authlib | authlib/oauth1/rfc5849/resource_protector.py | {
"start": 200,
"end": 1279
} | class ____(BaseServer):
def validate_request(self, method, uri, body, headers):
request = OAuth1Request(method, uri, body, headers)
if not request.client_id:
raise MissingRequiredParameterError("oauth_consumer_key")
client = self.get_client_by_id(request.client_id)
if not client:
raise InvalidClientError()
request.client = client
if not request.token:
raise MissingRequiredParameterError("oauth_token")
token = self.get_token_credential(request)
if not token:
raise InvalidTokenError()
request.credential = token
self.validate_timestamp_and_nonce(request)
self.validate_oauth_signature(request)
return request
def get_token_credential(self, request):
"""Fetch the token credential from data store like a database,
framework should implement this function.
:param request: OAuth1Request instance
:return: Token model instance
"""
raise NotImplementedError()
| ResourceProtector |
python | facebook__pyre-check | client/commands/start.py | {
"start": 2056,
"end": 2265
} | class ____(enum.Enum):
BASE_NAME = "base_name"
EXTENSION = "extension"
FULL_PATH = "full_path"
def __str__(self) -> str:
return self.value
@dataclasses.dataclass(frozen=True)
| MatchPolicy |
python | django__django | django/db/models/fields/tuple_lookups.py | {
"start": 5574,
"end": 6375
} | class ____(TupleLookupMixin, IsNull):
def get_prep_lookup(self):
rhs = self.rhs
if isinstance(rhs, (tuple, list)) and len(rhs) == 1:
rhs = rhs[0]
if isinstance(rhs, bool):
return rhs
raise ValueError(
"The QuerySet value for an isnull lookup must be True or False."
)
def as_sql(self, compiler, connection):
# e.g.: (a, b, c) is None as SQL:
# WHERE a IS NULL OR b IS NULL OR c IS NULL
# e.g.: (a, b, c) is not None as SQL:
# WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL
rhs = self.rhs
lookups = [IsNull(col, rhs) for col in self.lhs]
root = WhereNode(lookups, connector=OR if rhs else AND)
return root.as_sql(compiler, connection)
| TupleIsNull |
python | networkx__networkx | networkx/generators/tests/test_atlas.py | {
"start": 698,
"end": 2530
} | class ____:
"""Unit tests for the :func:`~networkx.graph_atlas_g` function."""
@classmethod
def setup_class(cls):
cls.GAG = graph_atlas_g()
def test_sizes(self):
G = self.GAG[0]
assert G.number_of_nodes() == 0
assert G.number_of_edges() == 0
G = self.GAG[7]
assert G.number_of_nodes() == 3
assert G.number_of_edges() == 3
def test_names(self):
for i, G in enumerate(self.GAG):
assert int(G.name[1:]) == i
def test_nondecreasing_nodes(self):
# check for nondecreasing number of nodes
for n1, n2 in pairwise(map(len, self.GAG)):
assert n2 <= n1 + 1
def test_nondecreasing_edges(self):
# check for nondecreasing number of edges (for fixed number of
# nodes)
for n, group in groupby(self.GAG, key=nx.number_of_nodes):
for m1, m2 in pairwise(map(nx.number_of_edges, group)):
assert m2 <= m1 + 1
def test_nondecreasing_degree_sequence(self):
# Check for lexicographically nondecreasing degree sequences
# (for fixed number of nodes and edges).
#
# There are three exceptions to this rule in the order given in
# the "Atlas of Graphs" book, so we need to manually exclude
# those.
exceptions = [("G55", "G56"), ("G1007", "G1008"), ("G1012", "G1013")]
for n, group in groupby(self.GAG, key=nx.number_of_nodes):
for m, group in groupby(group, key=nx.number_of_edges):
for G1, G2 in pairwise(group):
if (G1.name, G2.name) in exceptions:
continue
d1 = sorted(d for v, d in G1.degree())
d2 = sorted(d for v, d in G2.degree())
assert d1 <= d2
| TestAtlasGraphG |
python | getsentry__sentry | tests/sentry/ratelimits/utils/test_get_ratelimit_key.py | {
"start": 9098,
"end": 9821
} | class ____(TestCase):
def setUp(self) -> None:
self.view = DummyEndpoint.as_view()
self.request = RequestFactory().get("/")
self.rate_limit_config = get_rate_limit_config(self.view.view_class)
self.rate_limit_group = (
self.rate_limit_config.group if self.rate_limit_config else RateLimitConfig().group
)
def test_group_key(self) -> None:
user = User(id=1)
self.request.session = SessionBase()
self.request.user = user
assert (
get_rate_limit_key(
self.view, self.request, self.rate_limit_group, self.rate_limit_config
)
== f"user:default:GET:{user.id}"
)
| TestDefaultToGroup |
python | pytorch__pytorch | test/test_datapipe.py | {
"start": 17730,
"end": 18411
} | class ____(TestCase):
def get_new_df(self):
return df_wrapper.create_dataframe([[1, 2]], columns=["a", "b"])
def compare_capture_and_eager(self, operations):
cdf = CaptureDataFrame()
cdf = operations(cdf)
df = self.get_new_df()
cdf = cdf.apply_ops(df)
df = self.get_new_df()
df = operations(df)
self.assertTrue(df.equals(cdf))
def test_basic_capture(self):
def operations(df):
df["c"] = df.b + df["a"] * 7
# somehow swallows pandas UserWarning when `df.c = df.b + df['a'] * 7`
return df
self.compare_capture_and_eager(operations)
| TestCaptureDataFrame |
python | protocolbuffers__protobuf | objectivec/DevTools/pddm_tests.py | {
"start": 8852,
"end": 12076
} | class ____(unittest.TestCase):
def testBasicParse(self):
test_list = [
# 1. no directives
('a\nb\nc',
(3,) ),
# 2. One define
('a\n//%PDDM-DEFINE foo()\n//%body\nc',
(1, 2, 1) ),
# 3. Two defines
('a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE bar()\n//%body2\nc',
(1, 4, 1) ),
# 4. Two defines with ends
('a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE-END\n'
'//%PDDM-DEFINE bar()\n//%body2\n//%PDDM-DEFINE-END\nc',
(1, 6, 1) ),
# 5. One expand, one define (that runs to end of file)
('a\n//%PDDM-EXPAND foo()\nbody\n//%PDDM-EXPAND-END\n'
'//%PDDM-DEFINE bar()\n//%body2\n',
(1, 1, 2) ),
# 6. One define ended with an expand.
('a\nb\n//%PDDM-DEFINE bar()\n//%body2\n'
'//%PDDM-EXPAND bar()\nbody2\n//%PDDM-EXPAND-END\n',
(2, 2, 1) ),
# 7. Two expands (one end), one define.
('a\n//%PDDM-EXPAND foo(1)\nbody\n//%PDDM-EXPAND foo(2)\nbody2\n//%PDDM-EXPAND-END\n'
'//%PDDM-DEFINE foo()\n//%body2\n',
(1, 2, 2) ),
]
for idx, (input_str, line_counts) in enumerate(test_list, 1):
f = io.StringIO(input_str)
sf = pddm.SourceFile(f)
sf._ParseFile()
self.assertEqual(len(sf._sections), len(line_counts),
'Entry %d -- %d != %d' %
(idx, len(sf._sections), len(line_counts)))
for idx2, (sec, expected) in enumerate(zip(sf._sections, line_counts), 1):
self.assertEqual(sec.num_lines_captured, expected,
'Entry %d, section %d -- %d != %d' %
(idx, idx2, sec.num_lines_captured, expected))
def testErrors(self):
test_list = [
# 1. Directive within expansion
('//%PDDM-EXPAND a()\n//%PDDM-BOGUS',
'Ran into directive ("//%PDDM-BOGUS", line 2) while in "//%PDDM-EXPAND a()".'),
('//%PDDM-EXPAND a()\n//%PDDM-DEFINE a()\n//%body\n',
'Ran into directive ("//%PDDM-DEFINE", line 2) while in "//%PDDM-EXPAND a()".'),
# 3. Expansion ran off end of file
('//%PDDM-EXPAND a()\na\nb\n',
'Hit the end of the file while in "//%PDDM-EXPAND a()".'),
# 4. Directive within define
('//%PDDM-DEFINE a()\n//%body\n//%PDDM-BOGUS',
'Ran into directive ("//%PDDM-BOGUS", line 3) while in "//%PDDM-DEFINE a()".'),
('//%PDDM-DEFINE a()\n//%body\n//%PDDM-EXPAND-END a()',
'Ran into directive ("//%PDDM-EXPAND-END", line 3) while in "//%PDDM-DEFINE a()".'),
# 6. Directives that shouldn't start sections
('a\n//%PDDM-DEFINE-END a()\n//a\n',
'Unexpected line 2: "//%PDDM-DEFINE-END a()".'),
('a\n//%PDDM-EXPAND-END a()\n//a\n',
'Unexpected line 2: "//%PDDM-EXPAND-END a()".'),
('//%PDDM-BOGUS\n//a\n',
'Unexpected line 1: "//%PDDM-BOGUS".'),
]
for idx, (input_str, expected_err) in enumerate(test_list, 1):
f = io.StringIO(input_str)
try:
pddm.SourceFile(f)._ParseFile()
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertEqual(e.message, expected_err,
'Entry %d failed: %r' % (idx, e))
| TestParsingSource |
python | pytorch__pytorch | torch/_inductor/fx_passes/split_cat.py | {
"start": 48761,
"end": 121453
} | class ____(CallFunction):
def __init__(self, arg, index, _users=1) -> None:
super().__init__(operator.getitem, arg, index, _users=_users)
def find_anchor_nodes(self, ctx: MatchContext, searched: OrderedSet[torch.fx.Node]):
# We generally match GetItem with arg being an Arg(). So, we never return the anchor
# nodes as the stored node in ctx.pattern_to_node is returned. Here we override find_anchor_nodes
# to not use ctx.pattern_to_node
for pattern in self.flat_args_kwargs[0]:
if isinstance(pattern, PatternExpr):
for other_node in pattern.find_anchor_nodes(ctx, searched):
if not isinstance(other_node, torch.fx.Node):
continue
for node in other_node.users:
if node not in searched:
if self._match_fns(node):
yield node
searched.add(node)
@register_graph_pattern(
RepeatedExpr(
CallFunction(
torch.squeeze,
GetItem(
TorchSplit(
KeywordArg("split_input"),
KeywordArg("split_sizes"),
),
Ignored(),
),
KeywordArg("dim"),
_users=MULTIPLE,
),
),
pass_dict=construct_pattern_matcher_pass("split_cat_pass"),
)
@register_graph_pattern(
RepeatedExpr(
CallFunction(
torch.squeeze,
GetItem(
TorchSplit(
KeywordArg("split_input"),
KeywordArg("split_sizes"),
),
Ignored(),
),
dim=KeywordArg("dim"),
_users=MULTIPLE,
)
),
pass_dict=construct_pattern_matcher_pass("split_cat_pass"),
)
def merge_split_squeeze(
match: Match, split_input: torch.fx.Node, split_sizes: list[int], dim: int
):
graph = match.graph
split = next(node for node in match.nodes if node.target is torch.split)
if not all(s == 1 for s in split_sizes):
return
if isinstance(dim, Sequence):
return
next_users = find_next_users(split)
if not all(node.target is torch.squeeze for node in next_users):
return
with graph.inserting_before(match.output_node()):
unbind = graph.call_function(
torch.unbind, args=(split_input,), kwargs={"dim": dim}
)
if is_node_meta_valid(split_input):
unbind.meta["example_value"] = torch.unbind(
split_input.meta["example_value"], dim=dim
)
for item_index, getitem_node in sorted(
[(getitem_node.args[1], getitem_node) for getitem_node in split.users]
):
squeeze = next(iter(getitem_node.users.keys()))
new_get_item = graph.call_function(
operator.getitem, args=(unbind, item_index)
)
squeeze.replace_all_uses_with(new_get_item)
new_get_item.meta.update(squeeze.meta)
graph.erase_node(squeeze)
graph.erase_node(getitem_node)
graph.erase_node(split)
counters[backend]["split_cat_pass"] += 1
getitem_unbind = ListOf(
GetItem(
CallFunction(
torch.unbind,
KeywordArg("unbind_input"),
dim=KeywordArg("dim"),
_users=MULTIPLE,
),
Ignored(),
_users=MULTIPLE,
),
partial=True,
)
@register_graph_pattern(
CallFunction([torch.stack, torch.cat], getitem_unbind, Ignored(), _users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("unbind_stack_pass"),
)
@register_graph_pattern(
CallFunction(
[torch.stack, torch.cat], getitem_unbind, dim=Ignored(), _users=MULTIPLE
),
pass_dict=construct_pattern_matcher_pass("unbind_stack_pass"),
)
@register_graph_pattern(
CallFunction(
[torch.stack, torch.cat], tensors=getitem_unbind, dim=Ignored(), _users=MULTIPLE
),
pass_dict=construct_pattern_matcher_pass("unbind_stack_pass"),
)
def merge_unbind_stack(match: Match, unbind_input: torch.fx.Node, dim: int):
unbind_node = next(node for node in match.nodes if node.target is torch.unbind)
UnbindCatRemover().remove_unbind(match.graph, unbind_node)
getitem_split = ListOf(
CallFunction(
operator.getitem,
TorchSplit(
Ignored(),
KeywordArg("split_sections"),
),
Ignored(),
_users=MULTIPLE,
),
partial=True,
)
reshape_getitem_split = ListOf(
CallFunction(
torch.reshape,
CallFunction(
operator.getitem,
TorchSplit(
Ignored(),
KeywordArg("split_sections"),
),
Ignored(),
_users=MULTIPLE,
),
Arg(),
_users=MULTIPLE,
),
partial=True,
)
@register_graph_pattern(
CallFunction(
[torch.stack, torch.cat],
tensors=getitem_split,
dim=Ignored(),
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("split_cat_pass"),
)
@register_graph_pattern(
CallFunction(
[torch.stack, torch.cat],
getitem_split,
dim=Ignored(),
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("split_cat_pass"),
)
@register_graph_pattern(
CallFunction(
[torch.stack, torch.cat],
getitem_split,
Ignored(),
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("split_cat_pass"),
)
def simplify_split_cat(match: Match, split_sections: list[int], dim: int):
if not isinstance(split_sections, (list, tuple)): # Unnormalized split
return
split_node = next(node for node in match.nodes if node.target is torch.split)
# pyrefly: ignore [bad-argument-type]
SplitCatSimplifier().simplify(match.graph, split_node, split_sections)
# noqa: W605
# ############pattern to be optimized is#########
# split_node(dim=1)
# / \ ... / \
# getitem getitem getitem getitem -> user=1
# \ / \ /
# cat (user=mul, dim=1) cat(user=mul, dim=1)
# | \ | \
# ################after transformation#############
# split_node(dim=1)
# / ... \
# getitem getitem
# | \ | \
def has_same_parent_node(node: torch.fx.Node):
# the input nodes of the node should come from the same parent
prev_node = None
for getitem in node.args[0]: # type: ignore[union-attr]
if getitem.target != operator.getitem: # type: ignore[union-attr]
return False
if prev_node is None:
prev_node = getitem.args[0] # type: ignore[union-attr]
else:
if getitem.args[0] != prev_node: # type: ignore[union-attr]
return False
return True
def remove_zeros(split_sections: list[int]):
"""
Remove zeros from the list and get the index mapping dict from getitem
in split node to getitem in new split node
"""
new_split_sections, index_mapping = [], {}
idx = 0
for i in range(len(split_sections)):
if split_sections[i] > 0:
new_split_sections.append(split_sections[i])
index_mapping[i] = idx
idx += 1
return new_split_sections, index_mapping
def is_sorted_and_consecutive(arr: list[int]) -> bool:
# check if the array is sorted
if arr == sorted(arr):
# check if the differences between adjacent elements are all 1
return all(x[1] - x[0] == 1 for x in itertools.pairwise(arr))
else:
return False
def calculate_fused_tensor_size(split_node: torch.fx.Node, indices: list[int]) -> int:
"""
Calculate the fused tensor size in the indices
"""
fused_tensor_size = 0
for i in range(len(split_node.args[1])): # type: ignore[arg-type]
if i in indices:
fused_tensor_size += split_node.args[1][i] # type: ignore[operator, assignment, index]
# pyrefly: ignore [bad-return]
return fused_tensor_size
@register_graph_pattern(
CallFunction(
torch.cat,
getitem_split,
dim=Ignored(),
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("merge_getitem_cat_pass"),
)
def merge_getitem_cat(match: Match, split_sections: list[int], dim: int):
if not isinstance(split_sections, (list, tuple)): # Unnormalized split
return
graph = match.graph
split_node = next(node for node in match.nodes if node.target is torch.split)
split_input, _split_size, split_dim = _get_split_args_default(split_node)
# if the cat and split have different dims, return
# Find the next users (i.e. users after the getitem)
next_users = find_next_users(split_node)
# 'immutable_list' object does not support mutation. Create a new copy of it
split_sections = list(split_sections)
for cat_user in next_users:
if cat_user.target is torch.cat:
cat_dim = get_arg_value(cat_user, 1, "dim")
# check the all getitems in the cat_user from the same node
# check the input of the cat has all getitem from the split
# check all getitem only has one single user
if (
split_dim != cat_dim
or not has_same_parent_node(cat_user)
or not all(len(arg.users) == 1 for arg in cat_user.args[0]) # type: ignore[union-attr]
):
continue
# find the index of getitems to be cated/stacked
# type: ignore[union-attr]
indices = [arg.args[1] for arg in cat_user.args[0]] # type: ignore[union-attr]
# the getitems to be merged must be consecutive, otherwise
# returned sliced tensor could be wrong
if not is_sorted_and_consecutive(indices): # type: ignore[arg-type]
continue
# update the arg of cat user, only keep the first getitem
cat_user.update_arg(0, cat_user.args[0][0]) # type: ignore[index]
# calculate the fused tensor sizes in the indices
fused_tensor_size = 0
for i in range(len(split_node.args[1])): # type: ignore[arg-type]
if i in indices:
fused_tensor_size += split_node.args[1][i] # type: ignore[operator, assignment, index]
# update the split sections
split_sections[indices[0]] = calculate_fused_tensor_size( # type: ignore[index]
split_node,
indices, # type: ignore[arg-type]
)
# padding others with zeros to keep the same dict size
for i in indices[1:]:
split_sections[i] = 0 # type: ignore[index]
# remove all unused indexes in the split_node
new_split_sections, index_mapping = remove_zeros(split_sections)
with graph.inserting_after(split_node):
new_split_node = graph.call_function(
torch.split,
args=(split_input, split_sections),
kwargs={"dim": split_dim},
)
split_node.replace_all_uses_with(new_split_node)
new_split_node.meta.update(split_node.meta)
# remove all unused getitem nodes
to_remove = [cat_user]
# dictionary keys changed during iteration
new_split_getitem_nodes = list(new_split_node.users.keys())
for getitem_node in new_split_getitem_nodes:
if getitem_node.args[1] in indices[1:]:
to_remove.append(getitem_node)
# update meta data of getitem
elif getitem_node.args[1] == indices[0]:
cat_user.replace_all_uses_with(getitem_node)
getitem_node.meta.update(cat_user.meta)
else:
# update getitem index for new split node
getitem_node.update_arg(1, index_mapping[getitem_node.args[1]])
graph.erase_node(split_node)
for getitem_node in to_remove:
graph.erase_node(getitem_node)
# update the split sections of new split node
new_split_node.update_arg(1, new_split_sections)
split_node = new_split_node
split_sections = new_split_sections
counters[backend]["merge_getitem_cat_pass"] += 1
# ############pattern to be optimized is#########
# split_node(dim=1) -> user=multiple
# / \ ... / \
# getitem getitem getitem getitem -> user=multiple
# \ \ / \
# other_op /cat(user=mul, dim=1) other_op
# |
# ################after transformation#############
# split_node(dim=1) -> -> user=multiple
# / \ ... / \
# getitem getitem getitem getitem -> user=multiple
# \ \ / \
# other_op
@register_graph_pattern(
CallFunction(
torch.cat,
getitem_split,
dim=Ignored(),
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("mutate_cat_pass"),
)
def mutate_cat_node(match: Match, split_sections: list[int], dim: int):
if not isinstance(split_sections, (list, tuple)): # Unnormalized split
return
graph = match.graph
split_node = next(node for node in match.nodes if node.target is torch.split)
_split_input, _split_size, split_dim = _get_split_args_default(split_node)
# if the cat and split have different dims, return
# Find the next users (i.e. users after the getitem)
next_users = find_next_users(split_node)
for cat_user in next_users:
if cat_user.target is torch.cat:
cat_dim = get_arg_value(cat_user, 1, "dim") or 0
# check that all getitems in the cat_user from the same node
# check the input of the cat has all getitem from the split
if split_dim != cat_dim or not has_same_parent_node(cat_user):
continue
# find the index of getitems to be cat
indices, idx_to_getitem = [], {}
for getitem in cat_user.args[0]: # type: ignore[union-attr]
indices.append(getitem.args[1]) # type: ignore[union-attr]
idx_to_getitem[getitem.args[1]] = getitem # type: ignore[union-attr]
# the getitems to be merged must be consecutive, otherwise
# returned sliced tensor could be wrong
if not is_sorted_and_consecutive(indices): # type: ignore[arg-type]
continue
# case 1: the cat uses all getitems from the split
if len(split_sections) == len(cat_user.args[0]): # type: ignore[arg-type]
# replace the users of the cat node to be the input of the split node
cat_user.replace_all_uses_with(split_node.args[0]) # type: ignore[arg-type]
# remove the cat node
graph.erase_node(cat_user)
counters[backend]["mutate_cat_pass"] += 1
# case 2: the cat uses some getitems from the split
elif is_node_meta_valid(split_node.args[0]): # type: ignore[arg-type]
# check the split dim, and construct the slice tuple
start_fused_size = calculate_fused_tensor_size(
split_node,
list(range(indices[0])), # type: ignore[arg-type]
)
end_fused_size = start_fused_size + calculate_fused_tensor_size(
split_node,
indices, # type: ignore[arg-type]
)
slice_list = []
for i in range(len(split_node.args[0].meta["example_value"].shape)): # type: ignore[union-attr]
if i != split_dim:
slice_list.append(slice(None, None, None))
else:
slice_list.append(slice(start_fused_size, end_fused_size, None))
with graph.inserting_after(split_node):
slice_node = graph.call_function(
operator.getitem,
args=(split_node.args[0], tuple(slice_list)),
)
cat_user.replace_all_uses_with(slice_node)
slice_node.meta.update(cat_user.meta)
# remove the cat node
graph.erase_node(cat_user)
counters[backend]["mutate_cat_pass"] += 1
getitem_split_aten = ListOf(
CallFunction(
operator.getitem,
CallFunctionVarArgs([torch.ops.aten.split_with_sizes.default], users=MULTIPLE),
Ignored(),
_users=MULTIPLE,
),
partial=True,
)
@register_graph_pattern(
    CallFunctionVarArgs(torch.ops.aten.split.Tensor, users=MULTIPLE),
    pass_dict=construct_pattern_matcher_pass("normalization_aten_pass"),
)
def normalize_split_default_aten(match: Match, *args, **kwargs):
    """Rewrite ``aten.split.Tensor`` into canonical ``aten.split_with_sizes``.

    The replacement carries positional ``(input, section_list)`` args and an
    explicit, non-negative ``dim`` kwarg so downstream split/cat passes only
    have to match a single normalized form.
    """
    node = match.nodes[0]
    graph = match.graph
    inp, size, dim = _get_split_args_default(node)
    if inp is None or dim is None or size is None:
        log.debug("couldn't find split args")
        return
    if not is_node_meta_valid(node):
        log.debug("val absent for node: %s", node)
        return
    assert isinstance(node.meta["val"], (list, tuple))
    sections = [t.size()[dim] for t in node.meta["val"]]
    if any(isinstance(section, torch.SymInt) for section in sections):
        # TODO dynamic_shapes with assume_static_by_default=False fails while AOT Autograd tracing.
        return
    if dim < 0:  # Normalize a negative split dim against the input rank
        dim += inp.meta["val"].dim()
    # aten.split silently truncates an oversized split size, e.g.
    #   primals = torch.randn(4096, 300)
    #   torch.ops.aten.split.Tensor(primals, 320, 1)  -> truncated to 300
    # while split_with_sizes with [320] would raise at runtime, so clamp the
    # size to the input extent before building the section list.
    input_extent = inp.meta["val"].shape[dim]
    size = min(size, input_extent)
    section_list = [size] * len(node.meta["val"])
    new_args = (inp, section_list)
    new_kwargs = {"dim": dim}
    already_normalized = (
        node.op == "call_function"
        and node.args == new_args
        and node.kwargs == new_kwargs
    )
    if already_normalized:
        return
    with graph.inserting_after(node):
        replacement = graph.call_function(
            torch.ops.aten.split_with_sizes.default,
            args=new_args,
            kwargs=new_kwargs,  # type: ignore[arg-type]
        )
    node.replace_all_uses_with(replacement)
    replacement.meta.update(node.meta)
    graph.erase_node(node)
    counters[backend]["normalization_aten_pass"] += 1
@register_graph_pattern(
    CallFunctionVarArgs(torch.ops.aten.split_with_sizes.default, users=MULTIPLE),
    pass_dict=construct_pattern_matcher_pass("normalization_aten_pass"),
)
def normalize_split_with_size_default_aten(match: Match, *args, **kwargs):
    """Canonicalize ``aten.split_with_sizes``: positional ``(input, sections)``
    plus an explicit, non-negative ``dim`` kwarg."""
    node = match.nodes[0]
    graph = match.graph
    inp, sections, dim = _get_split_args_default(node)
    if inp is None or dim is None or sections is None:
        log.debug("couldn't find split args")
        return
    if not is_node_meta_valid(node):
        log.debug("val absent for node: %s", node)
        return
    if any(isinstance(section, torch.SymInt) for section in sections):
        # TODO dynamic_shapes with assume_static_by_default=False fails while AOT Autograd tracing.
        return
    if dim < 0:  # Normalize a negative split dim against the input rank
        dim += inp.meta["val"].dim()
    new_args = (inp, sections)
    new_kwargs = {"dim": dim}
    already_normalized = (
        node.op == "call_function"
        and node.args == new_args
        and node.kwargs == new_kwargs
    )
    if already_normalized:
        # Already in canonical form; nothing to rewrite.
        return
    with graph.inserting_after(node):
        replacement = graph.call_function(
            torch.ops.aten.split_with_sizes.default,
            args=new_args,
            kwargs=new_kwargs,  # type: ignore[arg-type]
        )
    node.replace_all_uses_with(replacement)
    replacement.meta.update(node.meta)
    graph.erase_node(node)
    counters[backend]["normalization_aten_pass"] += 1
@register_graph_pattern(
    CallFunction(
        torch.ops.aten.cat.default,
        getitem_split_aten,
        dim=Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=construct_pattern_matcher_pass("split_cat_aten_pass"),
)
def merge_split_cat_aten(match: Match, *args, **kwargs):
    """Fuse runs of consecutive split_with_sizes getitems that feed a cat.

    When a cat consumes a run of consecutive outputs of the same
    aten.split_with_sizes along the same dim, the run is replaced by a single
    aten.slice of the split input (or by the split input itself when the run
    covers every section), shrinking the cat's argument list.  Runs shorter
    than ``threshold_to_cat`` are left untouched.
    """
    graph = match.graph
    split_node = match.nodes[0]
    # Tunable knob: minimum run length worth fusing (post-grad config).
    threshold_to_cat = torch._inductor.config.post_grad_fusion_options[
        "split_cat_aten_pass"
    ].get("threshold_to_cat", 10)
    # get the getitem nodes from the split node
    getitem_nodes = list(split_node.users.keys())
    for cat_node in list(getitem_nodes[0].users.keys()):
        cat_dim = get_arg_value(cat_node, 1, "dim")
        cat_inputs = get_arg_value(cat_node, 0, "tensors")
        # the user may not carry a sized tensor list; skip it then
        try:
            cat_input_len = len(cat_inputs)
        except TypeError:
            continue
        if cat_input_len < threshold_to_cat:
            continue
        # Group cat inputs by their split parent into maximal runs of
        # consecutive getitem indices: parent -> [[i, i+1, ...], ...]
        parent_to_indices = defaultdict(list)  # type: ignore[var-annotated]
        parent_to_getitems = defaultdict(list)  # type: ignore[var-annotated]
        for cat_input in cat_inputs:
            # skip all non-getitem cat input
            if cat_input.target != operator.getitem:
                continue
            current_getitem_parent = cat_input.args[0]
            split_dim = get_arg_value(current_getitem_parent, 2, "dim")
            # only fuse when split and cat operate on the same dim
            if split_dim != cat_dim:
                break
            getitem_idx = cat_input.args[1]
            # start a new run on a new parent or a non-consecutive index
            if (
                current_getitem_parent not in parent_to_indices
            ) or getitem_idx != parent_to_indices[current_getitem_parent][-1][-1] + 1:
                parent_to_indices[current_getitem_parent].append([getitem_idx])
                parent_to_getitems[current_getitem_parent].append([cat_input])
            else:
                parent_to_getitems[current_getitem_parent][-1].append(cat_input)
                parent_to_indices[current_getitem_parent][-1].append(getitem_idx)
        cat_inputs_list = list(cat_inputs)
        # (replacement_node, first_pos_in_cat, last_pos_in_cat) per fused run
        update_cat_arg = []
        # iterate through the indices to construct the slice nodes
        for parent, indices in parent_to_indices.items():
            for idx, indice in enumerate(indices):
                start, end = indice[0], indice[-1]
                split_sections = list(parent.args[1])
                input_of_current_getitem_parent = parent.args[0]
                if len(indice) >= threshold_to_cat or len(indice) == len(
                    split_sections
                ):
                    if len(indice) != len(split_sections):
                        # get the start and end slicing indices
                        # NOTE(review): node is created at the graph's current
                        # insertion point rather than via inserting_after the
                        # parent — presumably reordered/validated later; worth
                        # confirming node ordering stays topological.
                        slice_node = graph.call_function(
                            torch.ops.aten.slice.Tensor,
                            args=(
                                input_of_current_getitem_parent,
                                split_dim,  # type: ignore[possibly-undefined]
                                sum(split_sections[:start]),
                                sum(split_sections[: end + 1]),
                            ),
                        )
                    else:
                        # the run covers the whole split: reuse its input as is
                        slice_node = input_of_current_getitem_parent
                    # find the index in the cat_inputs_list given the getitem node
                    update_cat_arg.append(
                        (
                            slice_node,
                            cat_inputs_list.index(parent_to_getitems[parent][idx][0]),
                            cat_inputs_list.index(parent_to_getitems[parent][idx][-1]),
                        )
                    )
        # Rebuild the cat input list in order, substituting each fused run
        # with its replacement node and keeping all other inputs untouched.
        result = []
        i = 0
        for slice_tensor, start, end in update_cat_arg:
            while i < start:
                result.append(cat_inputs_list[i])
                i += 1
            result.append(slice_tensor)
            i = end + 1
        while i < len(cat_inputs_list):
            result.append(cat_inputs_list[i])
            i += 1
        cat_node.update_arg(0, result)
        # drop getitems (and the split itself) once they became dead
        for getitem_node in getitem_nodes:
            if len(getitem_node.users) == 0:
                graph.erase_node(getitem_node)
        if len(split_node.users) == 0:
            graph.erase_node(split_node)
        counters[backend]["split_cat_aten_pass"] += 1
@register_graph_pattern(
    CallFunction(
        torch.ops.aten.cat.default,
        ListOf(
            CallFunctionVarArgs(torch.ops.aten.select.int, users=MULTIPLE),
            partial=True,
        ),
        dim=Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=construct_pattern_matcher_pass("select_cat_aten_pass"),
)
def merge_select_cat_aten(match: Match, *args, **kwargs):
    """Replace cat(select(x, d, 0), ..., select(x, d, n-1)) with a view of x.

    When a cat gathers every index of a tensor along dim ``d`` via aten.select
    nodes (same dim as the cat, indices sorted and consecutive, covering the
    whole extent), the cat is equivalent to reshaping the selects' common
    input, so it is replaced by a single aten.view.
    """
    graph = match.graph
    node = match.nodes[0]
    node_input = get_arg_value(node, 0, "tensors")
    # get the select nodes from the node
    select_nodes = list(node_input.users.keys())
    for cat_node in list(node.users.keys()):
        if cat_node.target is torch.ops.aten.cat.default:
            cat_dim = get_arg_value(cat_node, 1, "dim")
            cat_inputs = get_arg_value(cat_node, 0, "tensors")
            # check all select nodes have the same select dim
            if not all(
                select_node.args[1] == select_nodes[0].args[1]
                for select_node in select_nodes
            ):
                continue
            # We only consider the case where the select dim and the cat dim match
            if select_nodes[0].args[1] != cat_dim:
                continue
            if not is_node_meta_valid(cat_node):
                continue
            # check the cat node has consecutive indices
            indices = [select.args[2] for select in cat_node.args[0]]  # type: ignore[union-attr]
            if (
                not is_sorted_and_consecutive(indices)  # type: ignore[arg-type]
                or len(select_nodes) != len(cat_inputs)
            ):
                continue
            # check all the select nodes can be merged to the cat node input
            # (i.e. the selects cover the full extent along cat_dim)
            if len(indices) != select_nodes[0].args[0].meta["val"].shape[cat_dim]:  # type: ignore[union-attr]
                continue
            # reshape the node input to be the same shape as the cat node
            with graph.inserting_before(node):
                view_node = graph.call_function(
                    torch.ops.aten.view.default,
                    args=(node_input, cat_node.meta["val"].shape),
                )
            # replace the node input with the new node
            cat_node.replace_all_uses_with(view_node)
            view_node.meta.update(cat_node.meta)
            # remove the cat node
            graph.erase_node(cat_node)
            # drop select nodes that became dead after the rewrite
            for select_node in select_nodes:
                if len(select_node.users) == 0:
                    graph.erase_node(select_node)
            counters[backend]["select_cat_aten_pass"] += 1
@register_graph_pattern(
    CallFunctionVarArgs(torch.ops.aten.cat.default, users=MULTIPLE),
    pass_dict=construct_pattern_matcher_pass("normalization_aten_pass"),
)
def normalize_cat_default_aten(match: Match, *args, **kwargs):
    """Canonicalize ``aten.cat``: the tensor list as a single positional arg
    and an explicit, non-negative ``dim`` kwarg (a legacy ``axis`` kwarg is
    accepted as an alias)."""
    node = match.nodes[0]
    graph = match.graph
    tensors = get_arg_value(node, 0, "tensors")
    dim = get_arg_value(node, 1, "dim")
    if dim is None:
        # fall back to the numpy-style "axis" spelling, else default to 0
        axis = node.kwargs.get("axis")
        dim = axis if axis is not None else 0
    if tensors is None or dim is None:
        log.debug("couldn't find cat args")
        return
    assert isinstance(tensors, (list, tuple))
    for t in itertools.chain([node], tensors):
        if "val" not in t.meta:
            log.debug("val absent for node: %s", t)
            return
    ndim = node.meta["val"].dim()

    def _is_empty(x: torch.fx.Node) -> bool:
        # special case where torch.ops.aten.cat.default supports cat'ing with an empty tensor
        shape = x.meta["val"].shape
        return len(shape) == 1 and shape[0] == 0

    assert all(x.meta["val"].dim() == ndim or _is_empty(x) for x in tensors)
    # pyrefly: ignore [unsupported-operation]
    if dim < 0:  # Normalize cat dim
        dim += ndim
    with graph.inserting_after(node):
        replacement = graph.call_function(
            torch.ops.aten.cat.default,
            args=(tensors,),
            kwargs={"dim": dim},
        )
    node.replace_all_uses_with(replacement)
    replacement.meta.update(node.meta)
    graph.erase_node(node)
    counters[backend]["normalization_aten_pass"] += 1
@register_graph_pattern(
    CallFunction(
        torch.ops.aten.cat,
        ListOf(CallFunctionVarArgs(torch.ops.aten.unsqueeze)),
        _users=MULTIPLE,
    ),
    pass_dict=construct_pattern_matcher_pass("unbind_stack_aten_pass"),
)
def merge_unbind_stack_aten(match: Match, *args, **kwargs):
    """Cancel cat(unsqueeze(select(x, d, 0)), ..., unsqueeze(select(x, d, n-1))).

    If the cat stitches back, in order, every slice the selects took out of
    the same parent tensor (same dim as the cat, indices 0..n-1, and the
    parent feeds nothing else), the whole pattern is the identity, so the cat
    is replaced by the parent and the dead nodes are erased.
    """
    node = match.nodes[-1]
    graph = match.graph
    # pyre-fixme[6]
    unsqueeze_nodes = list(node.args[0])  # type: ignore[arg-type]
    cat_dim = get_arg_value(node, 1, "dim")
    # check the unsqueeze nodes come from the select nodes
    if not all(
        get_arg_value(unsqueeze_node, 0, "input").target is torch.ops.aten.select
        for unsqueeze_node in unsqueeze_nodes
    ):
        return
    select_nodes = [
        get_arg_value(unsqueeze_node, 0, "input") for unsqueeze_node in unsqueeze_nodes
    ]
    parent_of_select_node = get_arg_value(select_nodes[0], 0, "input")
    # check the target of select_nodes are the same
    if not all(
        select_node.target is torch.ops.aten.select for select_node in select_nodes
    ):
        return
    # check the select nodes come from the same parent node
    if not all(
        get_arg_value(select_node, 0, "input") == parent_of_select_node
        for select_node in select_nodes
    ):
        return
    if len(unsqueeze_nodes) != len(select_nodes):
        return
    # check the select nodes have the same dim
    if not all(
        get_arg_value(select_node, 1, "dim") == cat_dim for select_node in select_nodes
    ):
        return
    # check the select nodes have consecutive indices starting from 0
    if get_arg_value(select_nodes[0], 2, "index") != 0 or not is_sorted_and_consecutive(
        [get_arg_value(select_node, 2, "index") for select_node in select_nodes]
    ):
        return
    # check the users of parent of select node only from unsqueeze nodes that go to the cat node
    # we simply check the number of users of the parent of select node
    if len(parent_of_select_node.users.keys()) != len(node.args[0]):  # type: ignore[arg-type]
        return
    node.replace_all_uses_with(parent_of_select_node)
    graph.erase_node(node)
    for unsqueeze_node in unsqueeze_nodes:
        graph.erase_node(unsqueeze_node)
    # selects may still be shared; only erase the ones that became dead
    for select_node in select_nodes:
        if len(select_node.users) == 0:
            graph.erase_node(select_node)
    counters[backend]["unbind_stack_aten_pass"] += 1
def divide_into_consecutive_sublists(indices: list[int]) -> list[list[int]]:
    """Partition ``indices`` into maximal runs of consecutive integers.

    E.g. [1, 2, 3, 5, 6, 8] -> [[1, 2, 3], [5, 6], [8]].  A list with zero or
    one element is returned as a single (possibly empty) run, mirroring the
    original behavior.
    """
    if len(indices) <= 1:
        return [indices]
    runs: list[list[int]] = [[indices[0]]]
    # Extend the current run while values stay consecutive; otherwise open a
    # fresh run at the break.
    for prev, cur in zip(indices, indices[1:]):
        if cur == prev + 1:
            runs[-1].append(cur)
        else:
            runs.append([cur])
    return runs
def update_args_from_split_getitem(
    graph: torch.fx.Graph,
    node: torch.fx.Node,
    getitem_indices: list[int],
    parents_seen: list[torch.fx.Node],
    new_cat_args: list[torch.fx.Node],
    new_cat_args_meta: list[torch.fx.Node],
    idx_to_getitems: dict[int, torch.fx.Node],
    threshold_to_cat: int = 2,
):
    """Flush one split's worth of collected getitems into ``new_cat_args``.

    Called by ``construct_cat_args`` when a run of getitems belonging to
    ``parents_seen[-1]`` (a split) ends.  Appends, in order, either the split
    input itself (all sections consumed consecutively), a slice of it (a
    consecutive sub-run of at least ``threshold_to_cat`` sections), or the
    original getitem nodes (run too short to be worth fusing).  Mutates
    ``new_cat_args``/``new_cat_args_meta`` in place; returns None.
    """
    split_input, split_size, split_dim = _get_split_args_default(parents_seen[-1])
    # case 1: the number of getitems is the same as the split size, eliminate the split
    if len(split_size) == len(getitem_indices) and is_sorted_and_consecutive(
        getitem_indices
    ):
        # we can merge the getitems from the previous parent
        new_cat_args.append(split_input)
        new_cat_args_meta.append(split_input.meta["example_value"])
    else:
        if len(getitem_indices) > 0:
            # case 2: the number of getitems is smaller than the split size but larger than the threshold, and
            # the indices of getitems are not all consecutive, we need to divide the indices into multiple groups
            geitem_indices_sublist = divide_into_consecutive_sublists(getitem_indices)
            for sublist in geitem_indices_sublist:
                if len(sublist) >= threshold_to_cat:
                    # case 2: the number of getitems is smaller than the split size but larger than the threshold
                    # we need to slice the input of parent
                    start_fused_size = sum(split_size[: sublist[0]])
                    end_fused_size = sum(split_size[: sublist[-1] + 1])
                    # indexing tuple that slices only split_dim, full range elsewhere
                    slice_list = []
                    for i in range(len(split_input.meta["example_value"].shape)):  # type: ignore[union-attr]
                        if i != split_dim:
                            slice_list.append(slice(None, None, None))
                        else:
                            slice_list.append(
                                slice(start_fused_size, end_fused_size, None)
                            )
                    with graph.inserting_after(node):
                        slice_node = graph.call_function(
                            operator.getitem,
                            args=(split_input, tuple(slice_list)),
                        )
                        slice_node.meta["example_value"] = split_input.meta[
                            "example_value"
                        ][tuple(slice_list)]
                        new_cat_args.append(slice_node)
                        new_cat_args_meta.append(slice_node.meta["example_value"])
                else:
                    # case 3: the number of getitems is smaller than the threshold, no merge is done
                    # get the getitems based on the indexes
                    for i in sublist:
                        new_cat_args.append(idx_to_getitems[i])
                        new_cat_args_meta.append(
                            idx_to_getitems[i].meta["example_value"]
                        )
def reshape_cat_node(
    graph: torch.fx.Graph,
    cat_node: torch.fx.Node,
    unbind_input: torch.fx.Node,
    cat_dim: int,
    unbind_dim: int,
    cat_shape: torch.Size,
) -> torch.fx.Node:
    """Rebuild ``cat(unbind(x, unbind_dim), dim=cat_dim)`` as permute+reshape.

    ``unbind_input`` has rank ``len(cat_shape) + 1``; when the two dims differ
    it is permuted so that a plain reshape to ``cat_shape`` reproduces the
    layout the cat of the unbind pieces would have.  The returned node carries
    ``example_value`` metadata computed from the input's metadata.
    """
    if cat_dim == unbind_dim:
        # Dims agree: the unbind input can be reshaped directly.
        permuted = unbind_input
    else:
        # Swap cat_dim and unbind_dim first so the subsequent reshape flattens
        # the right pair of axes together.
        with graph.inserting_after(cat_node):
            order = list(range(len(cat_shape) + 1))
            order[unbind_dim], order[cat_dim] = order[cat_dim], order[unbind_dim]
            permuted = graph.call_function(
                torch.permute,
                args=(unbind_input, order),
            )
            permuted.meta["example_value"] = torch.permute(
                unbind_input.meta["example_value"], order
            )  # type: ignore[arg-type]
    with graph.inserting_after(permuted):
        reshaped = graph.call_function(
            torch.reshape, args=(permuted, tuple(cat_shape))
        )
        reshaped.meta["example_value"] = torch.reshape(
            permuted.meta["example_value"], tuple(cat_shape)
        )  # type: ignore[arg-type]
    return reshaped
def update_args_from_unbind_getitem(
    graph: torch.fx.Graph,
    node: torch.fx.Node,  # cat or stack node
    getitem_indices: list[int],
    parents_seen: list[torch.fx.Node],
    new_cat_args: list[torch.fx.Node],
    new_cat_args_meta: list[torch.fx.Node],
    idx_to_getitems: dict[int, torch.fx.Node],
    threshold_to_cat: int = 2,
):
    """Flush one unbind's worth of collected getitems into ``new_cat_args``.

    Counterpart of ``update_args_from_split_getitem`` for unbind parents:
    because unbind removes the unbound dim, fused replacements go through
    ``reshape_cat_node`` (permute + reshape) rather than a plain slice.
    Mutates ``new_cat_args``/``new_cat_args_meta`` in place; returns None.
    """
    unbind_input = get_arg_value(parents_seen[-1], 0, "input")  # split or unbind input
    unbind_dim = get_arg_value(parents_seen[-1], 1, "dim")  # split or unbind dim
    cat_dim = get_arg_value(node, 1, "dim")  # cat or stack dim
    # case 1: the number of getitems is the same as the split size, eliminate the split
    size = list(unbind_input.meta["example_value"].shape)[unbind_dim]
    if size == len(getitem_indices):
        cat_shape = torch.cat(
            [idx_to_getitems[i].meta["example_value"] for i in getitem_indices],
            dim=cat_dim,
        ).shape
        # we can merge the getitems from the previous parent
        reshape_node = reshape_cat_node(
            graph, node, unbind_input, cat_dim, unbind_dim, cat_shape
        )
        new_cat_args.append(reshape_node)
        new_cat_args_meta.append(reshape_node.meta["example_value"])
    elif len(getitem_indices) >= threshold_to_cat and is_sorted_and_consecutive(
        getitem_indices
    ):
        # case 2: the number of getitems is smaller than the split size but larger than the threshold
        # we need to slice the input of parent
        cat_shape = torch.cat(
            [idx_to_getitems[i].meta["example_value"] for i in getitem_indices],
            dim=cat_dim,
        ).shape
        # indexing tuple: full range everywhere except unbind_dim, which takes
        # the consecutive range covered by the getitems
        slice_list = []
        for i in range(len(cat_shape) + 1):
            if i != unbind_dim:
                slice_list.append(slice(None, None, None))  # start, end, step
            else:
                slice_list.append(
                    slice(getitem_indices[0], getitem_indices[-1] + 1, None)
                )
        with graph.inserting_after(node):
            slice_node = graph.call_function(
                operator.getitem,
                args=(unbind_input, tuple(slice_list)),
            )
            slice_node.meta["example_value"] = torch.narrow(
                unbind_input.meta["example_value"],
                unbind_dim,
                getitem_indices[0],
                getitem_indices[-1] - getitem_indices[0] + 1,
            )
        reshape_node = reshape_cat_node(
            graph, node, slice_node, cat_dim, unbind_dim, cat_shape
        )
        new_cat_args.append(reshape_node)
        new_cat_args_meta.append(reshape_node.meta["example_value"])
    else:
        # case 3: the number of getitems is smaller than the threshold, no merge is done
        # get the getitems based on the indexes
        for i in getitem_indices:
            new_cat_args.append(idx_to_getitems[i])
            new_cat_args_meta.append(idx_to_getitems[i].meta["example_value"])
def construct_cat_args(
    graph: torch.fx.Graph,
    cat_or_stack_node: torch.fx.Node,
    inputs: list[torch.fx.Node],
    split_or_unbind_node: torch.fx.Node,
    threshold_to_cat: int = 2,
    run_update_func: Callable = update_args_from_split_getitem,  # type: ignore[type-arg]
) -> tuple[list[torch.fx.Node], list[torch.Tensor]]:
    """Build a (possibly shorter) replacement input list for a cat/stack.

    Walks ``inputs`` in order, accumulating runs of getitem nodes that share
    the same split/unbind parent; each time a run ends (a non-getitem input, a
    different parent, or the final input), ``run_update_func`` flushes it into
    the output, fusing runs of length >= ``threshold_to_cat`` where possible.
    Returns ``(new_args, new_args_meta)`` with example_value metadata aligned
    one-to-one with the new args.
    """
    new_cat_args, parents_seen, getitem_indices, idx_to_getitems = [], [], [], {}  # type: ignore[var-annotated]
    new_cat_args_meta = []  # type: ignore[var-annotated]
    for input in inputs:
        if input.target != operator.getitem:
            # update the last arg based on getitem_indices and parents_seens
            if len(parents_seen) > 0:
                run_update_func(  # type: ignore[arg-type, union-attr]
                    graph,
                    cat_or_stack_node,
                    getitem_indices,
                    parents_seen,
                    new_cat_args,
                    new_cat_args_meta,
                    idx_to_getitems,  # type: ignore[arg-type, union-attr]
                    threshold_to_cat,
                )
            new_cat_args.append(input)
            new_cat_args_meta.append(input.meta["example_value"])
            # reset the indices array
            getitem_indices, idx_to_getitems = [], {}
        else:
            # get the parent node of the getitem input
            parent, idx = input.args[0], input.args[1]  # type: ignore[union-attr]
            if parent.target != split_or_unbind_node.target:  # type: ignore[union-attr]
                # getitem of an unrelated op: pass it through untouched
                new_cat_args.append(input)
                new_cat_args_meta.append(input.meta["example_value"])
                continue
            # cannot use parents_seen to check since the first item could be non getitem node
            if len(parents_seen) == 0:
                parents_seen.append(parent)
                idx_to_getitems[idx] = input
                getitem_indices.append(idx)
                # case: we only have one getitem input, and it is in the last position
                if input == inputs[-1]:
                    new_cat_args.append(input)
                    new_cat_args_meta.append(input.meta["example_value"])
                continue
            # if it is the last input in the tensors, we also check if it can be optimized
            if parent != parents_seen[-1] or input == inputs[-1]:
                if input == inputs[-1]:
                    getitem_indices.append(idx)
                    idx_to_getitems[idx] = input
                run_update_func(  # type: ignore[arg-type, union-attr]
                    graph,
                    cat_or_stack_node,
                    getitem_indices,
                    parents_seen,
                    new_cat_args,
                    new_cat_args_meta,
                    idx_to_getitems,  # type: ignore[arg-type, union-attr]
                    threshold_to_cat,
                )
                # reset the indices array for the next parent
                # remember to add the last element since it is the first
                # item in this round of parent
                # add the parent to the list of seen parents
                parents_seen.append(parent)
                getitem_indices, idx_to_getitems = [idx], {idx: input}
            else:
                getitem_indices.append(idx)
                idx_to_getitems[idx] = input
    return new_cat_args, new_cat_args_meta
def remove_split_unbind_children(graph: torch.fx.Graph, inputs: list[torch.fx.Node]):
    """Erase dead getitem nodes in ``inputs`` and, afterwards, any of their
    split/unbind parents that were left without users."""
    parents = OrderedSet[Any]()
    for child in inputs:
        if child.target is operator.getitem:
            # remember the parent before (possibly) erasing the child
            parents.add(child.args[0])  # type: ignore[union-attr]
            if len(child.users.keys()) == 0:
                graph.erase_node(child)
    # check the split node to remove if it has no users
    for parent in parents:
        if len(parent.users.keys()) == 0:  # type: ignore[union-attr]
            graph.erase_node(parent)  # type: ignore[arg-type]
# ############pattern to be optimized is#########
# split_node(dim=1) -> user=multiple
# / \ ... / \
# other inputs getitem getitem getitem -> user=multiple
# \ / \
# cat(user=mul, dim=1) other_op
# |
# ################after transformation#############
# split_node(dim=1) other inputs -> -> user=multiple
# / \
# cat (user=mul, dim=1, split_node)
@register_graph_pattern(
    CallFunction(
        torch.cat,
        getitem_split,
        dim=Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=construct_pattern_matcher_pass("split_cat_to_slices_pass"),
)
def split_cat_to_slices(match: Match, split_sections: list[int], dim: int):
    """Fuse runs of torch.split getitems that feed a torch.cat into slices.

    For each cat user of the split, ``construct_cat_args`` replaces runs of
    consecutive getitems with slices of the split input (or the input itself);
    if that shrinks the argument list, the cat is rebuilt on the smaller list,
    and it is removed entirely when a single node remains.
    """
    if not isinstance(split_sections, (list, tuple)):  # Unnormalized split
        return
    split_nodes = [node for node in match.nodes if node.target is torch.split]
    if split_nodes:
        split_node = next(node for node in split_nodes)
    else:
        # Handle the case where there are no nodes with a target of torch.split
        return
    split_dim = get_arg_value(split_node, 2, "dim") or 0
    graph = match.graph
    # minimum run length worth fusing, from pre-grad config
    threshold_to_cat = torch._inductor.config.pre_grad_fusion_options[
        "split_cat_to_slices_pass"
    ].get("threshold_to_cat", 10)
    # get the cat_node and check its inputs and meta data
    next_users = find_next_users(split_node)
    for cat_node in next_users:
        if cat_node.target != torch.cat or not is_node_meta_valid(cat_node):
            continue
        cat_inputs = get_arg_value(cat_node, 0, "tensors")  # type: ignore[union-attr]
        new_cat_args, _ = construct_cat_args(
            graph,
            cat_node,
            cat_inputs,
            split_node,
            threshold_to_cat,
            update_args_from_split_getitem,
        )
        # At least one node would be in the returned new_cat_args
        # case 1: if new cat args has length 1, we can remove the cat node
        if len(new_cat_args) == 1:
            cat_node.replace_all_uses_with(new_cat_args[0])
            # remove inputs of cat_node if they have no users
            cat_inputs = cat_node.args[0]  # type: ignore[union-attr]
            graph.erase_node(cat_node)
            remove_split_unbind_children(graph, cat_inputs)  # type: ignore[arg-type]
            counters[backend]["split_cat_to_slices_pass"] += 1
            continue
        # case 2: fewer (but more than one) args than the original cat had
        if len(new_cat_args) > 1 and len(new_cat_args) < len(cat_inputs):
            new_args = (new_cat_args,)
            with graph.inserting_after(cat_node):
                new_cat_node = graph.call_function(
                    torch.cat,
                    args=new_args,
                    # split and cat have the same dim
                    kwargs={"dim": split_dim},
                )
            cat_node.replace_all_uses_with(new_cat_node)
            new_cat_node.meta.update(cat_node.meta)
            # remove the cat node
            graph.erase_node(cat_node)
            remove_split_unbind_children(graph, cat_inputs)
            counters[backend]["split_cat_to_slices_pass"] += 1
# ############pattern to be optimized is#########
# unbind(dim=0) -> user=multiple
# / \ ... / \
# getitem getitem getitem getitem -> user=multiple
# \ / \
# cat(user=mul, dim=1) other_op
# |
# ################after transformation#############
# input_of_unbind
# | \
# slice
# |
# view
# |
@register_graph_pattern(
    CallFunction(
        torch.cat,
        getitem_unbind,
        dim=Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=construct_pattern_matcher_pass("unbind_cat_to_view_pass"),
)
def unbind_cat_to_view(match: Match, unbind_input: torch.fx.Node, dim: int):
    """Fuse runs of torch.unbind getitems that feed a torch.cat.

    Runs of consecutive getitems are replaced (via
    ``update_args_from_unbind_getitem``/``reshape_cat_node``) by slice +
    permute/reshape nodes over the unbind input; the cat is rebuilt on the
    shorter list or removed outright when one node remains.
    """
    unbind_node = next(node for node in match.nodes if node.target is torch.unbind)
    graph = match.graph
    # get the cat_node and check its inputs and meta data
    next_users = find_next_users(unbind_node)
    # minimum run length worth fusing, from pre-grad config
    threshold_to_cat = torch._inductor.config.pre_grad_fusion_options[
        "unbind_cat_to_view_pass"
    ].get("threshold_to_cat", 10)
    # get the cat_node and check its inputs and meta data
    for cat_node in next_users:
        if cat_node.target != torch.cat or not is_node_meta_valid(cat_node):
            continue
        inputs = get_arg_value(cat_node, 0, "tensors")  # type: ignore[union-attr]
        new_cat_args, new_cat_args_meta = construct_cat_args(
            graph,
            cat_node,
            inputs,
            unbind_node,
            threshold_to_cat,
            update_args_from_unbind_getitem,
        )
        # get the view shape
        # At least one node would be in the returned new_cat_args
        # case 1: only one node in the new cat args, don't need to cat
        if len(new_cat_args) == 1:
            cat_node.replace_all_uses_with(new_cat_args[0])
            # remove inputs of cat_node if they have no users
            cat_inputs = cat_node.args[0]  # type: ignore[union-attr]
            graph.erase_node(cat_node)
            remove_split_unbind_children(graph, cat_inputs)  # type: ignore[arg-type]
            counters[backend]["unbind_cat_to_view_pass"] += 1
            continue
        # case 2: fewer (but more than one) args than the original cat had
        if len(new_cat_args) > 1 and len(new_cat_args) < len(inputs):
            # get the view shape
            cat_dim = get_arg_value(cat_node, 1, "dim")
            with graph.inserting_after(cat_node):
                new_cat_node = graph.call_function(
                    torch.cat,
                    args=(new_cat_args,),
                    kwargs={"dim": cat_dim},
                )
                new_cat_node.meta["example_value"] = torch.cat(
                    new_cat_args_meta, dim=cat_dim
                )  # type: ignore[arg-type]
            cat_node.replace_all_uses_with(new_cat_node)
            new_cat_node.meta.update(cat_node.meta)
            # remove inputs of cat_node if they have no users
            cat_inputs = cat_node.args[0]  # type: ignore[union-attr]
            graph.erase_node(cat_node)
            remove_split_unbind_children(graph, cat_inputs)  # type: ignore[arg-type]
            counters[backend]["unbind_cat_to_view_pass"] += 1
def reshape_cat_node_to_stack(
    graph: torch.fx.Graph,
    cat_node: torch.fx.Node,
    stack_node: torch.fx.Node,
    split_or_unbind_dim: int,
) -> None:
    """Replace ``stack_node`` with ``cat_node`` reshaped to the stack's shape.

    If the stack dim differs from the split/unbind dim, the cat result is
    first reshaped with the two dims swapped and then permuted back;
    otherwise ``cat_node`` is viewed directly.  ``stack_node`` is erased,
    along with any of its inputs (and their split/unbind parents) that become
    dead.  Mutates the graph in place; returns None.
    """
    # reshape the cat node to the stack node shape
    stack_shape = stack_node.meta["example_value"].shape
    stack_dim = _get_dim(stack_node)
    if stack_dim != split_or_unbind_dim:
        # case 1: the stack dim is not the same as the split dim
        # we need to reshape the split input before we do the reshape
        reshape_list = list(stack_shape)
        reshape_list[stack_dim], reshape_list[split_or_unbind_dim] = (
            reshape_list[split_or_unbind_dim],
            reshape_list[stack_dim],
        )
        reshape_node = graph.call_function(
            torch.reshape,
            args=(cat_node, tuple(reshape_list)),
        )
        reshape_node.meta["example_value"] = torch.reshape(
            cat_node.meta["example_value"],
            tuple(reshape_list),  # pyrefly: ignore [bad-argument-type]
        )
        permute_list = list(range(len(stack_shape)))
        permute_list[stack_dim], permute_list[split_or_unbind_dim] = (
            permute_list[split_or_unbind_dim],
            permute_list[stack_dim],
        )
        permute_node = graph.call_function(
            torch.permute,
            args=(reshape_node, permute_list),
        )
        permute_node.meta["example_value"] = torch.permute(
            reshape_node.meta["example_value"], permute_list
        )
    else:
        # case 2: the stack dim is the same as the split dim
        # we can directly reshape the split input
        permute_node = cat_node
    # final view to the exact stack output shape; its .meta is copied from
    # the stack node below rather than recomputed
    reshape_node = graph.call_function(
        torch.Tensor.view,
        args=(permute_node, *stack_shape),  # type: ignore[arg-type]
    )
    stack_node.replace_all_uses_with(reshape_node)
    reshape_node.meta.update(stack_node.meta)
    stack_inputs = stack_node.args[0]  # type: ignore[union-attr]
    # remove stack node
    graph.erase_node(stack_node)
    # check the input of stack node, and remove nodes that have no users
    remove_split_unbind_children(graph, stack_inputs)  # type: ignore[arg-type]
def convert_reshape_cat_arg_to_stack(
    graph: torch.fx.Graph,
    cat_node: torch.fx.Node,
    stack_node: torch.fx.Node,
    stack_node_shape: torch.Size,
    stack_dim: int,
    split_dim: int,
) -> torch.fx.Node:
    """Return ``cat_node`` rearranged to ``stack_node_shape``.

    When ``stack_dim`` differs from ``split_dim`` the two axes are swapped via
    a permute first; the (possibly permuted) result is then viewed to the
    stack's output shape.  The returned node carries ``example_value``
    metadata computed from ``cat_node``'s metadata.
    """
    cat_shape = cat_node.meta["example_value"].shape
    if stack_dim == split_dim:
        # axes already agree: view the cat result directly
        permute_node = cat_node
    else:
        order = list(range(len(cat_shape)))
        order[stack_dim], order[split_dim] = order[split_dim], order[stack_dim]
        permute_node = graph.call_function(
            torch.permute,
            args=(cat_node, order),
        )
        permute_node.meta["example_value"] = torch.permute(
            cat_node.meta["example_value"], order
        )
    view_node = graph.call_function(
        torch.Tensor.view,
        args=(permute_node, tuple(stack_node_shape)),  # type: ignore[arg-type]
    )
    view_node.meta["example_value"] = torch.Tensor.view(
        permute_node.meta["example_value"],
        tuple(stack_node_shape),  # type: ignore[arg-type]
    )
    return view_node
# ############pattern to be optimized is#########
# | |
# split split (dim=1)
# / \ / \
# getitem ... getitem other ops
# \ | / /
# stack(user=mul, dim=1 or 2) -> can be different dim
# |
# ################after transformation#############
# / \ ... / \
# getitem getitem getitem getitem -> user=multiple
# \ /
# cat(user=mul, dim=1) cat_other_opts
# \ /
# cat
# |
# view
# |
@register_graph_pattern(
    CallFunction(
        torch.stack,
        getitem_split,
        dim=Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=construct_pattern_matcher_pass("split_stack_to_cats_pass"),
)
def split_stack_to_cats(match: Match, split_sections: list[int], dim: int):
    """Fuse runs of torch.split getitems that feed a torch.stack.

    Like ``split_cat_to_slices`` but for stack consumers: fused runs are
    concatenated along the split dim and the result is reshaped/permuted to
    the stack's output shape via ``reshape_cat_node_to_stack``.
    """
    if not isinstance(split_sections, (list, tuple)):  # Unnormalized split
        return
    split_node = next(node for node in match.nodes if node.target is torch.split)
    split_dim = get_arg_value(split_node, 2, "dim") or 0
    graph = match.graph
    # minimum run length worth fusing, from pre-grad config
    threshold_to_cat = torch._inductor.config.pre_grad_fusion_options[
        "split_stack_to_cats_pass"
    ].get("threshold_to_cat", 10)
    # get the stack_node and check its inputs and meta data
    next_users = find_next_users(split_node)
    for stack_node in next_users:
        if stack_node.target != torch.stack or not is_node_meta_valid(stack_node):
            continue
        inputs = get_arg_value(stack_node, 0, "tensors")  # type: ignore[union-attr]
        new_cat_args, new_cat_args_meta = construct_cat_args(
            graph,
            stack_node,
            inputs,
            split_node,
            threshold_to_cat,
            update_args_from_split_getitem,
        )
        # At least one node would be in the returned new_cat_args
        # case 1: only one node in the new cat args, don't need to cat
        if len(new_cat_args) == 1:
            reshape_cat_node_to_stack(graph, new_cat_args[0], stack_node, split_dim)
            counters[backend]["split_stack_to_cats_pass"] += 1
            continue
        # case 2: fewer (but more than one) args than the original stack had
        if len(new_cat_args) > 1 and len(new_cat_args) < len(inputs):
            with graph.inserting_after(stack_node):
                cat_node = graph.call_function(
                    torch.cat,
                    args=(new_cat_args,),
                    kwargs={"dim": split_dim},
                )
                cat_node.meta["example_value"] = torch.cat(  # type: ignore[arg-type]
                    new_cat_args_meta, dim=split_dim
                )
            reshape_cat_node_to_stack(graph, cat_node, stack_node, split_dim)
            counters[backend]["split_stack_to_cats_pass"] += 1
# ############pattern to be optimized is#########
# unbind(dim=1) -> user=multiple
# \ ... / \
# others getitem getitem getitem -> user=multiple
# \ \ / \
# stack(user=mul, dim=1) other_op
# |
# ################after transformation#############
# input_of_unbind
# | \
# slice
# |
# view others
# | /
# stack
# |
@register_graph_pattern(
    CallFunction(
        torch.stack,
        getitem_unbind,
        dim=Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=construct_pattern_matcher_pass("unbind_stack_to_slices_pass"),
)
def unbind_stack_to_slices(match: Match, unbind_input: torch.fx.Node, dim: int):
    """Fuse runs of torch.unbind getitems that feed a torch.stack.

    Counterpart of ``unbind_cat_to_view`` for stack consumers: fused runs
    become slice/permute/reshape nodes (via
    ``update_args_from_unbind_getitem``), and the stack is replaced through
    ``reshape_cat_node_to_stack``.
    """
    unbind_node = next(node for node in match.nodes if node.target is torch.unbind)
    graph = match.graph
    # get the cat_node and check its inputs and meta data
    next_users = find_next_users(unbind_node)
    # minimum run length worth fusing, from pre-grad config
    threshold_to_cat = torch._inductor.config.pre_grad_fusion_options[
        "unbind_stack_to_slices_pass"
    ].get("threshold_to_cat", 10)
    # get the cat_node and check its inputs and meta data
    for stack_node in next_users:
        if stack_node.target != torch.stack or not is_node_meta_valid(stack_node):
            continue
        inputs = get_arg_value(stack_node, 0, "tensors")  # type: ignore[union-attr]
        new_cat_args, new_cat_args_meta = construct_cat_args(
            graph,
            stack_node,
            inputs,
            unbind_node,
            threshold_to_cat,
            update_args_from_unbind_getitem,
        )
        unbind_dim = get_arg_value(unbind_node, 1, "dim") or 0
        # At least one node would be in the returned new_cat_args
        # case 1: only one node in the new cat args, don't need to cat
        if len(new_cat_args) == 1:
            reshape_cat_node_to_stack(graph, new_cat_args[0], stack_node, unbind_dim)
            counters[backend]["unbind_stack_to_slices_pass"] += 1
            continue
        # case 2: fewer (but more than one) args than the original stack had
        if len(new_cat_args) > 1 and len(new_cat_args) < len(inputs):
            # get the view shape
            cat_dim = get_arg_value(stack_node, 1, "dim")
            with graph.inserting_after(stack_node):
                new_cat_node = graph.call_function(
                    torch.cat,
                    args=(new_cat_args,),
                    kwargs={"dim": cat_dim},
                )
                new_cat_node.meta["example_value"] = torch.cat(
                    new_cat_args_meta, dim=cat_dim
                )
            reshape_cat_node_to_stack(graph, new_cat_node, stack_node, unbind_dim)
            counters[backend]["unbind_stack_to_slices_pass"] += 1
# ############pattern to be optimized is#########
# input
# |
# split(dim=1) -> user=multiple
# \ \
# others getitem getitem
# \ \ /
# reshape reshape reshape other_op
# \ \ / /
# stack(user=mul, dim=0)
# |
# ################after transformation#############
# input
# |
# permute
# |
# reshape others
# | /
# cat (dim=0)
# |
def get_view_shape_list(cat_arg: torch.fx.Node, stack_dim: int) -> list[int]:
# cat_arg must be the split input
view_shape_list = []
for user in cat_arg.users:
if user.target is torch.split:
for getitem in user.users:
if getitem.target is operator.getitem:
reshape_user = [
user for user in getitem.users if user.target is torch.reshape
]
if len(reshape_user) > 0:
view_shape_list = list(
reshape_user[0]
.meta["example_value"]
.unsqueeze(stack_dim)
.shape
)
view_shape_list[stack_dim] = -1
return view_shape_list
return view_shape_list
@register_graph_pattern(
CallFunction(
torch.stack,
reshape_getitem_split,
dim=Ignored(),
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("move_reshape_out_of_split_stack_pass"),
)
def move_reshape_out_of_split_stack(match: Match, *args, **kwargs):
split_node = next(node for node in match.nodes if node.target is torch.split)
split_dim = _get_dim(split_node)
split_users = list(split_node.users.keys())
stack_nodes = [node for node in match.nodes if node.target is torch.stack]
graph = match.graph
threshold_to_cat = torch._inductor.config.pre_grad_fusion_options[
"move_reshape_out_of_split_stack_pass"
].get("threshold_to_cat", 10)
for stack_node in stack_nodes:
if not is_node_meta_valid(stack_node):
log.debug("example value absent for node: %s", stack_node)
continue
stack_dim = _get_dim(stack_node)
stack_inputs = get_arg_value(stack_node, 0, "tensors") # type: ignore[union-attr]
inputs = []
for stack_input in stack_inputs:
if stack_input.target != torch.reshape:
inputs.append(stack_input)
else:
inputs.append(stack_input.args[0]) # type: ignore[union-attr]
new_cat_args, _new_cat_args_meta = construct_cat_args(
graph,
stack_node,
inputs,
split_node,
threshold_to_cat,
update_args_from_split_getitem,
)
# At least one node would be in the returned new_cat_args
# case 1: only one node in the new cat args, don't need to cat
if len(new_cat_args) == 1:
reshape_node = convert_reshape_cat_arg_to_stack(
graph,
new_cat_args[0],
stack_node,
stack_node.meta["example_value"].shape,
stack_dim,
split_dim,
)
stack_node.replace_all_uses_with(reshape_node)
# remove stack node
graph.erase_node(stack_node)
# check the input of stack node, and remove nodes that have no users
remove_split_unbind_children(graph, stack_inputs) # type: ignore[arg-type]
remove_split_unbind_children(graph, split_users) # type: ignore[arg-type]
counters[backend]["move_reshape_out_of_split_stack_pass"] += 1
continue
if len(new_cat_args) > 1 and len(new_cat_args) < len(inputs):
# decompose the cat args into multiple stack nodes, i.e., we stack
# all the nodes exist in the stack inputs and reshape the rest followed by a cat
stack_node_input, stack_node_input_meta, cat_inputs = [], [], [] # type: ignore[var-annotated]
for cat_arg in new_cat_args:
if cat_arg not in stack_inputs:
if len(stack_node_input) > 0:
with graph.inserting_after(stack_node):
decomposed_stack_node = graph.call_function(
torch.stack,
args=(stack_node_input,),
kwargs={"dim": stack_dim},
)
decomposed_stack_node.meta["example_value"] = torch.stack(
stack_node_input_meta, dim=stack_dim
)
cat_inputs.append(decomposed_stack_node)
# cat_arg must be the split input
view_shape_list = get_view_shape_list(cat_arg, stack_dim)
stack_node_shape = torch.reshape(
cat_arg.meta["example_value"], tuple(view_shape_list)
).shape # type: ignore[union-attr]
cat_inputs.append(
convert_reshape_cat_arg_to_stack(
graph,
cat_arg,
stack_node,
stack_node_shape,
stack_dim,
split_dim,
)
)
stack_node_input, stack_node_input_meta = [], []
else:
stack_node_input.append(cat_arg)
stack_node_input_meta.append(cat_arg.meta["example_value"])
if len(stack_node_input) > 0:
with graph.inserting_after(stack_node):
decomposed_stack_node = graph.call_function(
torch.stack,
args=(stack_node_input,),
kwargs={"dim": stack_dim},
)
decomposed_stack_node.meta["example_value"] = torch.stack(
stack_node_input_meta, dim=stack_dim
)
cat_inputs.append(decomposed_stack_node)
with graph.inserting_after(stack_node):
cat_node = graph.call_function(
torch.cat,
args=(cat_inputs,),
kwargs={"dim": stack_dim},
)
stack_node.replace_all_uses_with(cat_node)
cat_node.meta.update(stack_node.meta)
graph.erase_node(stack_node)
remove_split_unbind_children(graph, stack_inputs) # type: ignore[arg-type]
remove_split_unbind_children(graph, split_users) # type: ignore[arg-type]
counters[backend]["move_reshape_out_of_split_stack_pass"] += 1
view_getitem_split_aten = ListOf(
CallFunction(
[torch.ops.aten.reshape.default],
CallFunction(
operator.getitem,
CallFunctionVarArgs(
torch.ops.aten.split_with_sizes.default, users=MULTIPLE
),
Ignored(),
_users=MULTIPLE,
),
Arg(),
_users=MULTIPLE,
),
partial=True,
)
@register_graph_pattern(
CallFunction(
torch.ops.aten.cat.default,
view_getitem_split_aten,
dim=Ignored(),
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("move_view_after_cat_aten_pass"),
)
def move_view_after_cat(match: Match, *args, **kwargs):
split_node = next(
node
for node in match.nodes
if node.target is torch.ops.aten.split_with_sizes.default
)
split_input, split_section, split_dim = _get_split_args_default(split_node)
split_users = list(split_node.users.keys())
getitem_indices = [
getitem.args[1] for getitem in split_users if getitem.target is operator.getitem
]
if not is_sorted_and_consecutive(getitem_indices): # type: ignore[arg-type]
return
cat_nodes = [
node for node in match.nodes if node.target is torch.ops.aten.cat.default
]
graph = match.graph
for cat_node in cat_nodes:
if not is_node_meta_valid(cat_node):
log.debug("example value absent for node: %s", cat_node)
continue
cat_dim = _get_dim(cat_node)
cat_inputs = get_arg_value(cat_node, 0, "tensors") # type: ignore[union-attr]
# we only consider the following special case
if len(cat_inputs) != len(split_section):
continue
# check if the cat inputs are all the view nodes
if not all(
view_node.target is torch.ops.aten.reshape.default
for view_node in cat_inputs
):
continue
# check if the view nodes are all from getitem nodes
if not all(
view_node.args[0].target is operator.getitem for view_node in cat_inputs
):
continue
view_indices = [view.args[0].args[1] for view in cat_inputs]
if not is_sorted_and_consecutive(view_indices): # type: ignore[arg-type]
continue
if cat_dim != split_dim:
# construct permute node
permute_list = list(range(len(cat_node.meta["val"].shape) + 1))
permute_list[split_dim], permute_list[cat_dim] = (
permute_list[cat_dim],
permute_list[split_dim],
)
permute_node = graph.call_function(
torch.ops.aten.permute.default,
args=(split_input, permute_list),
)
else:
permute_node = split_input
with graph.inserting_before(cat_node):
view_node = graph.call_function(
torch.ops.aten.reshape.default,
args=(permute_node, list(cat_node.meta["val"].shape)),
)
cat_node.replace_all_uses_with(view_node)
view_node.meta.update(cat_node.meta)
graph.erase_node(cat_node)
counters[backend]["move_view_after_cat_aten_pass"] += 1
def match_einsum_strings(s: str) -> bool:
"""
This function takes a string s as input, where s is in the format "3 letter string,
4 letter string -> 3 letter string".
It checks if the strings match the rule and returns True if they do, False otherwise.
The rule is:
- The three strings have the same first two characters.
- The first two strings have the same third character.
- The second and third strings have the same last character.
"""
# Split the input string into parts
parts = s.replace("->", ",").split(",")
# Strip leading/trailing whitespaces from each part
parts = [part.strip() for part in parts]
# Check if we have exactly three parts
if len(parts) != 3:
return False
# Extract the strings
s1, s2, s3 = parts
# Check if the strings have the correct lengths
if len(s1) != 3 or len(s2) != 4 or len(s3) != 3:
return False
# Check the rule
return s1[:2] == s2[:2] == s3[:2] and s1[2] == s2[2] and s2[3] == s3[2]
@register_graph_pattern(
CallFunctionVarArgs(torch.functional.einsum, users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("einsum_to_pointwise_pass"),
)
def replace_einsum_to_pointwise(match: Match, *args, **kwargs):
def repl(input, weights):
return (input.unsqueeze(-1) * weights).sum(-2)
def should_replace_einsum(einsum_node) -> bool:
equation = get_arg_value(einsum_node, 0)
users = einsum_node.users.keys()
# for now, we only consider the case of two operands
return (
len(einsum_node.args) == 3
and is_node_meta_valid(input)
and is_node_meta_valid(weights)
and any(
user.target == "add" or user.target is operator.add for user in users
)
and match_einsum_strings(equation)
)
einsum_node = match.nodes[0]
input, weights = get_arg_value(einsum_node, 1), get_arg_value(einsum_node, 2)
if should_replace_einsum(einsum_node):
# pyrefly: ignore [bad-argument-type]
match.replace_by_example(repl, [input, weights])
counters[backend]["einsum_to_pointwise_pass"] += 1
| GetItem |
python | openai__openai-python | src/openai/resources/uploads/parts.py | {
"start": 747,
"end": 3990
} | class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> PartsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return PartsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> PartsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return PartsWithStreamingResponse(self)
def create(
self,
upload_id: str,
*,
data: FileTypes,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> UploadPart:
"""
Adds a
[Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an
[Upload](https://platform.openai.com/docs/api-reference/uploads/object) object.
A Part represents a chunk of bytes from the file you are trying to upload.
Each Part can be at most 64 MB, and you can add Parts until you hit the Upload
maximum of 8 GB.
It is possible to add multiple Parts in parallel. You can decide the intended
order of the Parts when you
[complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete).
Args:
data: The chunk of bytes for this Part.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not upload_id:
raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
body = deepcopy_minimal({"data": data})
files = extract_files(cast(Mapping[str, object], body), paths=[["data"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
f"/uploads/{upload_id}/parts",
body=maybe_transform(body, part_create_params.PartCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=UploadPart,
)
| Parts |
python | django__django | tests/model_formsets/models.py | {
"start": 6797,
"end": 6967
} | class ____(models.Model):
uuid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=50)
| ParentWithUUIDAlternateKey |
python | openai__openai-python | src/openai/types/vector_stores/file_list_params.py | {
"start": 204,
"end": 1385
} | class ____(TypedDict, total=False):
after: str
"""A cursor for use in pagination.
`after` is an object ID that defines your place in the list. For instance, if
you make a list request and receive 100 objects, ending with obj_foo, your
subsequent call can include after=obj_foo in order to fetch the next page of the
list.
"""
before: str
"""A cursor for use in pagination.
`before` is an object ID that defines your place in the list. For instance, if
you make a list request and receive 100 objects, starting with obj_foo, your
subsequent call can include before=obj_foo in order to fetch the previous page
of the list.
"""
filter: Literal["in_progress", "completed", "failed", "cancelled"]
"""Filter by file status.
One of `in_progress`, `completed`, `failed`, `cancelled`.
"""
limit: int
"""A limit on the number of objects to be returned.
Limit can range between 1 and 100, and the default is 20.
"""
order: Literal["asc", "desc"]
"""Sort order by the `created_at` timestamp of the objects.
`asc` for ascending order and `desc` for descending order.
"""
| FileListParams |
python | rq__rq | rq/job.py | {
"start": 69497,
"end": 71776
} | class ____:
def __init__(self, max: int, interval: Union[int, Iterable[int]] = 0):
"""The main object to defined Retry logics for jobs.
Args:
max (int): The max number of times a job should be retried
interval (Union[int, List[int]], optional): The interval between retries.
Can be a positive number (int) or a list of ints. Defaults to 0 (meaning no interval between retries).
Raises:
ValueError: If the `max` argument is lower than 1
ValueError: If the interval param is negative or the list contains negative numbers
"""
super().__init__()
if max < 1:
raise ValueError('max: please enter a value greater than 0')
if isinstance(interval, int):
if interval < 0:
raise ValueError('interval: negative numbers are not allowed')
intervals = [interval]
elif isinstance(interval, Iterable):
for i in interval:
if i < 0:
raise ValueError('interval: negative numbers are not allowed')
intervals = list(interval)
self.max = max
self.intervals = intervals
@classmethod
def get_interval(cls, count: int, intervals: Union[int, list[int], None]) -> int:
"""Returns the appropriate retry interval based on retry count and intervals.
If intervals is an integer, returns that value directly.
If intervals is a list and retry count is bigger than length of intervals,
the first value in the list will be used.
Args:
count (int): The current retry count
intervals (Union[int, List[int]]): Either a single interval value or list of intervals to use
Returns:
retry_interval (int): The appropriate retry interval
"""
# If intervals is an integer, return it directly
if isinstance(intervals, int):
return intervals
# If intervals is an empty list or None, return 0
if not intervals:
return 0
# Calculate appropriate interval from list
number_of_intervals = len(intervals)
index = min(number_of_intervals - 1, count)
return intervals[index]
| Retry |
python | PrefectHQ__prefect | tests/test_task_engine.py | {
"start": 61620,
"end": 63173
} | class ____:
async def test_task_can_return_result_record(self):
@task
async def async_task():
store = ResultStore()
record = store.create_result_record(42)
store.persist_result_record(record)
return record
assert await async_task() == 42
state = await async_task(return_state=True)
assert await state.result() == 42
async def test_task_loads_result_if_exists_using_result_storage_key(self):
store = ResultStore()
store.write(obj=-92, key="foo-bar")
@task(result_storage_key="foo-bar", persist_result=True)
async def async_task():
return 42
state = await run_task_async(async_task, return_type="state")
assert state.is_completed()
assert await state.result() == -92
assert isinstance(state.data, ResultRecord)
key_path = Path(state.data.metadata.storage_key)
assert key_path.name == "foo-bar"
async def test_task_result_persistence_references_absolute_path(self):
@task(result_storage_key="test-absolute-path", persist_result=True)
async def async_task():
return 42
state = await run_task_async(async_task, return_type="state")
assert state.is_completed()
assert await state.result() == 42
assert isinstance(state.data, ResultRecord)
key_path = Path(state.data.metadata.storage_key)
assert key_path.is_absolute()
assert key_path.name == "test-absolute-path"
| TestPersistence |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hyperlink40.py | {
"start": 315,
"end": 937
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink40.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
"E9",
self.image_dir + "red.png",
{"url": "https://github.com/jmcnamara#foo"},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | tests/admin_views/admin.py | {
"start": 30879,
"end": 30950
} | class ____(admin.ModelAdmin):
readonly_fields = ("area",)
| SquareAdmin |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 37202,
"end": 37302
} | class ____(Interface):
def __call__(request):
"""Return a locale name"""
| ILocaleNegotiator |
python | python-openxml__python-docx | src/docx/oxml/section.py | {
"start": 2787,
"end": 3301
} | class ____(BaseOxmlElement):
"""``<w:pgSz>`` element, defining page dimensions and orientation."""
w: Length | None = OptionalAttribute( # pyright: ignore[reportAssignmentType]
"w:w", ST_TwipsMeasure
)
h: Length | None = OptionalAttribute( # pyright: ignore[reportAssignmentType]
"w:h", ST_TwipsMeasure
)
orient: WD_ORIENTATION = OptionalAttribute( # pyright: ignore[reportAssignmentType]
"w:orient", WD_ORIENTATION, default=WD_ORIENTATION.PORTRAIT
)
| CT_PageSz |
python | pydata__xarray | xarray/core/coordinates.py | {
"start": 29960,
"end": 33407
} | class ____(Coordinates):
"""Dictionary like container for Dataset coordinates (variables + indexes).
This collection can be passed directly to the :py:class:`~xarray.Dataset`
and :py:class:`~xarray.DataArray` constructors via their `coords` argument.
This will add both the coordinates variables and their index.
"""
_data: Dataset
__slots__ = ("_data",)
def __init__(self, dataset: Dataset):
self._data = dataset
@property
def _names(self) -> set[Hashable]:
return self._data._coord_names
@property
def dims(self) -> Frozen[Hashable, int]:
# deliberately display all dims, not just those on coordinate variables - see https://github.com/pydata/xarray/issues/9466
return self._data.dims
@property
def dtypes(self) -> Frozen[Hashable, np.dtype]:
"""Mapping from coordinate names to dtypes.
Cannot be modified directly, but is updated when adding new variables.
See Also
--------
Dataset.dtypes
"""
return Frozen(
{
n: v.dtype
for n, v in self._data._variables.items()
if n in self._data._coord_names
}
)
@property
def variables(self) -> Mapping[Hashable, Variable]:
return Frozen(
{k: v for k, v in self._data.variables.items() if k in self._names}
)
def __getitem__(self, key: Hashable) -> DataArray:
if key in self._data.data_vars:
raise KeyError(key)
return self._data[key]
def to_dataset(self) -> Dataset:
"""Convert these coordinates into a new Dataset"""
names = [name for name in self._data._variables if name in self._names]
return self._data._copy_listed(names)
def _update_coords(
self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index]
) -> None:
variables = self._data._variables.copy()
variables.update(coords)
# check for inconsistent state *before* modifying anything in-place
dims = calculate_dimensions(variables)
new_coord_names = set(coords)
for dim in dims:
if dim in variables:
new_coord_names.add(dim)
self._data._variables = variables
self._data._coord_names.update(new_coord_names)
self._data._dims = dims
# TODO(shoyer): once ._indexes is always populated by a dict, modify
# it to update inplace instead.
original_indexes = dict(self._data.xindexes)
original_indexes.update(indexes)
self._data._indexes = original_indexes
def _drop_coords(self, coord_names):
# should drop indexed coordinates only
for name in coord_names:
del self._data._variables[name]
del self._data._indexes[name]
self._data._coord_names.difference_update(coord_names)
def __delitem__(self, key: Hashable) -> None:
if key in self:
del self._data[key]
else:
raise KeyError(
f"{key!r} is not in coordinate variables {tuple(self.keys())}"
)
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython."""
return [
key
for key in self._data._ipython_key_completions_()
if key not in self._data.data_vars
]
| DatasetCoordinates |
python | plotly__plotly.py | plotly/graph_objs/indicator/legendgrouptitle/_font.py | {
"start": 233,
"end": 9937
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "indicator.legendgrouptitle"
_path_str = "indicator.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.indicator.lege
ndgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.indicator.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 22356,
"end": 22546
} | class ____(PrefectBaseModel):
"""Filter by BlockSchema.id"""
any_: Optional[List[UUID]] = Field(
default=None, description="A list of IDs to include"
)
| BlockSchemaFilterId |
python | docker__docker-py | docker/transport/npipesocket.py | {
"start": 522,
"end": 6098
} | class ____:
""" Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
implemented.
"""
def __init__(self, handle=None):
self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
self._handle = handle
self._closed = False
def accept(self):
raise NotImplementedError()
def bind(self, address):
raise NotImplementedError()
def close(self):
self._handle.Close()
self._closed = True
@check_closed
def connect(self, address, retry_count=0):
try:
handle = win32file.CreateFile(
address,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.OPEN_EXISTING,
(cSECURITY_ANONYMOUS
| cSECURITY_SQOS_PRESENT
| win32file.FILE_FLAG_OVERLAPPED),
0
)
except win32pipe.error as e:
# See Remarks:
# https://msdn.microsoft.com/en-us/library/aa365800.aspx
if e.winerror == cERROR_PIPE_BUSY:
# Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to
# connect again.
retry_count = retry_count + 1
if (retry_count < MAXIMUM_RETRY_COUNT):
time.sleep(1)
return self.connect(address, retry_count)
raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
self._handle = handle
self._address = address
@check_closed
def connect_ex(self, address):
return self.connect(address)
@check_closed
def detach(self):
self._closed = True
return self._handle
@check_closed
def dup(self):
return NpipeSocket(self._handle)
def getpeername(self):
return self._address
def getsockname(self):
return self._address
def getsockopt(self, level, optname, buflen=None):
raise NotImplementedError()
def ioctl(self, control, option):
raise NotImplementedError()
def listen(self, backlog):
raise NotImplementedError()
def makefile(self, mode=None, bufsize=None):
if mode.strip('b') != 'r':
raise NotImplementedError()
rawio = NpipeFileIOBase(self)
if bufsize is None or bufsize <= 0:
bufsize = io.DEFAULT_BUFFER_SIZE
return io.BufferedReader(rawio, buffer_size=bufsize)
@check_closed
def recv(self, bufsize, flags=0):
err, data = win32file.ReadFile(self._handle, bufsize)
return data
@check_closed
def recvfrom(self, bufsize, flags=0):
data = self.recv(bufsize, flags)
return (data, self._address)
@check_closed
def recvfrom_into(self, buf, nbytes=0, flags=0):
return self.recv_into(buf, nbytes, flags), self._address
@check_closed
def recv_into(self, buf, nbytes=0):
readbuf = buf
if not isinstance(buf, memoryview):
readbuf = memoryview(buf)
event = win32event.CreateEvent(None, True, True, None)
try:
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = event
err, data = win32file.ReadFile(
self._handle,
readbuf[:nbytes] if nbytes else readbuf,
overlapped
)
wait_result = win32event.WaitForSingleObject(event, self._timeout)
if wait_result == win32event.WAIT_TIMEOUT:
win32file.CancelIo(self._handle)
raise TimeoutError
return win32file.GetOverlappedResult(self._handle, overlapped, 0)
finally:
win32api.CloseHandle(event)
@check_closed
def send(self, string, flags=0):
event = win32event.CreateEvent(None, True, True, None)
try:
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = event
win32file.WriteFile(self._handle, string, overlapped)
wait_result = win32event.WaitForSingleObject(event, self._timeout)
if wait_result == win32event.WAIT_TIMEOUT:
win32file.CancelIo(self._handle)
raise TimeoutError
return win32file.GetOverlappedResult(self._handle, overlapped, 0)
finally:
win32api.CloseHandle(event)
@check_closed
def sendall(self, string, flags=0):
return self.send(string, flags)
@check_closed
def sendto(self, string, address):
self.connect(address)
return self.send(string)
def setblocking(self, flag):
if flag:
return self.settimeout(None)
return self.settimeout(0)
def settimeout(self, value):
if value is None:
# Blocking mode
self._timeout = win32event.INFINITE
elif not isinstance(value, (float, int)) or value < 0:
raise ValueError('Timeout value out of range')
else:
# Timeout mode - Value converted to milliseconds
self._timeout = int(value * 1000)
def gettimeout(self):
return self._timeout
def setsockopt(self, level, optname, value):
raise NotImplementedError()
@check_closed
def shutdown(self, how):
return self.close()
| NpipeSocket |
python | doocs__leetcode | lcci/16.26.Calculator/Solution.py | {
"start": 0,
"end": 677
} | class ____:
def calculate(self, s: str) -> int:
n = len(s)
x = 0
sign = "+"
stk = []
for i, c in enumerate(s):
if c.isdigit():
x = x * 10 + ord(c) - ord("0")
if i == n - 1 or c in "+-*/":
match sign:
case "+":
stk.append(x)
case "-":
stk.append(-x)
case "*":
stk.append(stk.pop() * x)
case "/":
stk.append(int(stk.pop() / x))
x = 0
sign = c
return sum(stk)
| Solution |
python | ray-project__ray | python/ray/train/_internal/utils.py | {
"start": 6408,
"end": 6851
} | class ____:
"""Wraps an actor to provide same API as using the base class directly."""
def __init__(self, actor: ActorHandle):
self.actor = actor
def __getattr__(self, item):
# The below will fail if trying to access an attribute (not a method) from the
# actor.
actor_method = getattr(self.actor, item)
return lambda *args, **kwargs: ray.get(actor_method.remote(*args, **kwargs))
| ActorWrapper |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_responses/users_response_builder.py | {
"start": 357,
"end": 983
} | class ____(HttpResponseBuilder):
@classmethod
def response(cls, url: Optional[HttpRequest] = None, cursor: Optional[str] = None) -> "UsersResponseBuilder":
return cls(find_template("users", __file__), FieldPath("users"), EndOfStreamPaginationStrategy(http_request_to_str(url), cursor))
@classmethod
def identities_response(cls, url: Optional[HttpRequest] = None, cursor: Optional[str] = None) -> "UsersResponseBuilder":
return cls(
find_template("users", __file__), FieldPath("identities"), EndOfStreamPaginationStrategy(http_request_to_str(url), cursor)
)
| UsersResponseBuilder |
python | pytorch__pytorch | benchmarks/dynamo/runner.py | {
"start": 47590,
"end": 54636
} | class ____:
"""
Aggregates the information and makes a comment to Performance Dashboard.
https://github.com/pytorch/torchdynamo/issues/681
"""
def __init__(self, args):
self.args = args
self.output_dir = args.output_dir
self.lookup_file = os.path.join(self.args.dashboard_archive_path, "lookup.csv")
assert os.path.exists(self.lookup_file)
try:
if not self.args.update_dashboard_test and not self.args.no_update_archive:
self.update_lookup_file()
except subprocess.CalledProcessError:
sys.stderr.write("failed to update lookup file\n")
def update_lookup_file(self):
dtype = self.args.dtypes[0]
day, _ = archive_data(self.args.archive_name)
target_dir = get_archive_name(self.args, dtype)
# Update lookup csv the folder to archived logs
subprocess.check_call(
f'echo "{day},performance,{dtype},{target_dir}" >> {self.lookup_file}',
shell=True,
)
def archive(self):
dtype = self.args.dtypes[0]
# Copy the folder to archived location
archive(
self.output_dir,
self.args.dashboard_archive_path,
self.args.archive_name,
dtype,
)
def upload_graphs(self):
title = "## Performance graphs ##\n"
str_io = io.StringIO()
if not self.args.update_dashboard_test and not self.args.no_graphs:
for name in glob.glob(self.output_dir + "/*png"):
if "over_time" not in name:
output = (
subprocess.check_output(
[self.args.dashboard_image_uploader, name]
)
.decode("ascii")
.rstrip()
)
str_io.write(f"\n{name} : \n")
comment = generate_dropdown_comment(title, str_io.getvalue())
with open(f"{self.output_dir}/gh_graphs.txt", "w") as gh_fh:
gh_fh.write(comment)
def gen_comment(self):
files = [
"gh_title.txt",
"gh_executive_summary.txt",
"gh_summary_diff.txt",
"gh_warnings.txt",
"gh_regression.txt",
"gh_metric_regression.txt",
"gh_training.txt" if self.args.training else "gh_inference.txt",
"gh_graphs.txt",
"gh_build_summary.txt",
]
all_lines = []
for f in files:
try:
with open(os.path.join(self.output_dir, f)) as fh:
all_lines.extend(fh.readlines())
except FileNotFoundError:
pass
return "\n".join([x.rstrip() for x in all_lines])
def comment_on_gh(self, comment):
"""
Send a comment to dashboard
"""
with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
f.write(comment)
filename = f.name
issue_number = "93794"
if self.args.dtypes[0] == "float32":
issue_number = "93518"
subprocess.check_call(
[
self.args.dashboard_gh_cli_path,
"issue",
"comment",
"--repo=https://github.com/pytorch/pytorch.git",
issue_number,
"-F",
filename,
]
)
os.remove(filename)
def update(self):
self.upload_graphs()
if not self.args.no_detect_regressions:
SummaryStatDiffer(self.args).generate_comment()
RegressionDetector(self.args).generate_comment()
try:
RegressionTracker(self.args).diff()
except Exception:
log.exception("")
with open(f"{self.args.output_dir}/gh_regression.txt", "w") as gh_fh:
gh_fh.write("")
comment = self.gen_comment()
print(comment)
if not self.args.update_dashboard_test:
if not self.args.no_gh_comment:
self.comment_on_gh(comment)
if not self.args.no_update_archive:
self.archive()
if __name__ == "__main__":
args = parse_args()
def extract(key):
return DEFAULTS[key] if getattr(args, key, None) is None else getattr(args, key)
dtypes = extract("dtypes")
suites = extract("suites")
devices = extract("devices")
if args.inference:
compilers = DEFAULTS["inference"] if args.compilers is None else args.compilers
flag_compilers = (
DEFAULTS["flag_compilers"]["inference"]
if args.flag_compilers is None
else args.flag_compilers
)
else:
assert args.training
compilers = DEFAULTS["training"] if args.compilers is None else args.compilers
flag_compilers = (
DEFAULTS["flag_compilers"]["training"]
if args.flag_compilers is None
else args.flag_compilers
)
output_dir = args.output_dir
args.compilers = compilers
args.devices = devices
args.dtypes = dtypes
flag_compilers = list(set(flag_compilers) & set(compilers))
args.flag_compilers = flag_compilers
args.suites = suites
if args.print_run_commands:
generated_file = generate_commands(
args, dtypes, suites, devices, compilers, output_dir
)
print(
f"Running commands are generated in file {generated_file}. Please run (bash {generated_file})."
)
elif args.visualize_logs:
parse_logs(args, dtypes, suites, devices, compilers, flag_compilers, output_dir)
elif args.run:
generated_file = generate_commands(
args, dtypes, suites, devices, compilers, output_dir
)
# generate memoized archive name now so that the date is reflective
# of when the run started
get_archive_name(args, dtypes[0])
# TODO - Do we need to worry about segfaults
try:
os.system(f"bash {generated_file}")
except Exception as e:
print(
f"Running commands failed. Please run manually (bash {generated_file}) and inspect the errors."
)
raise e
if not args.log_operator_inputs:
if not args.no_update_archive:
archive(
output_dir,
args.dashboard_archive_path,
args.archive_name,
dtypes[0],
)
parse_logs(
args, dtypes, suites, devices, compilers, flag_compilers, output_dir
)
if not args.no_update_archive:
archive(
output_dir,
args.dashboard_archive_path,
args.archive_name,
dtypes[0],
)
if args.update_dashboard:
DashboardUpdater(args).update()
| DashboardUpdater |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_heapq.py | {
"start": 12849,
"end": 13156
} | class ____:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
| S |
python | tornadoweb__tornado | tornado/test/auth_test.py | {
"start": 8318,
"end": 8473
} | class ____(RequestHandler):
def get(self):
self.write("oauth_token=hjkl&oauth_token_secret=vbnm&screen_name=foo")
| TwitterServerAccessTokenHandler |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/types.py | {
"start": 1747,
"end": 2130
} | class ____(TypedDict, total=False):
"""Possible overrides for `ModelRequest.override()` method."""
model: BaseChatModel
system_message: SystemMessage | None
messages: list[AnyMessage]
tool_choice: Any | None
tools: list[BaseTool | dict]
response_format: ResponseFormat | None
model_settings: dict[str, Any]
@dataclass(init=False)
| _ModelRequestOverrides |
python | tensorflow__tensorflow | tensorflow/lite/python/convert_test.py | {
"start": 2421,
"end": 9210
} | class ____(test_util.TensorFlowTestCase):
def testBasic(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32
)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Try running on valid graph
tflite_model = convert.convert_graphdef(
sess.graph_def, input_tensors=[in_tensor], output_tensors=[out_tensor]
)
self.assertTrue(tflite_model)
@mock.patch.object(
convert.wrap_converter, "wrapped_convert", new=_mock_wrapped_convert
)
@mock.patch.object(
metrics_wrapper, "retrieve_collected_errors", new=_mock_retrieve_errors
)
# This test wants to check that in the case of the converter throwing an
# `ERROR_STATEFUL_PARTITIONED_CALL_IN_FINAL_IR` error, it will
# retry conversion with the `guarantee_all_funcs_one_use` flag.
# We can wrap the convert call in order to assert it is called appropriately.
@mock.patch.object(convert, "convert", wraps=convert.convert)
def testConversionStatefulPartitionRetry(self, mock_convert):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32
)
out_tensor = in_tensor + in_tensor
sess = session.Session()
model = convert.convert_graphdef(
sess.graph_def,
input_tensors=[in_tensor],
output_tensors=[out_tensor],
guarantee_all_funcs_one_use=False,
)
self.assertTrue(str(model, encoding="utf-8"), "A model")
self.assertEqual(mock_convert.call_count, 2)
def testQuantization(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32
)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0.0, max=1.0
)
sess = session.Session()
tflite_model = convert.convert_graphdef(
sess.graph_def,
input_tensors=[in_tensor],
output_tensors=[out_tensor],
inference_type=dtypes.uint8,
quantized_input_stats=[(0.0, 1.0)],
)
self.assertTrue(tflite_model)
def testGraphDefBasic(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="input"
)
_ = in_tensor + in_tensor
sess = session.Session()
tflite_model = convert.convert_graphdef_with_arrays(
sess.graph_def,
input_arrays_with_shape=[("input", [1, 16, 16, 3])],
output_arrays=["add"],
control_output_arrays=None,
inference_type=dtypes.float32,
)
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual("input", input_details[0]["name"])
self.assertEqual(np.float32, input_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all()) # type: ignore
self.assertEqual((0.0, 0.0), input_details[0]["quantization"])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual("add", output_details[0]["name"])
self.assertEqual(np.float32, output_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all()) # type: ignore
self.assertEqual((0.0, 0.0), output_details[0]["quantization"])
def testGraphDefQuantization(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA"
)
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB"
)
_ = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0.0, max=1.0, name="output"
)
sess = session.Session()
tflite_model = convert.convert_graphdef_with_arrays(
sess.graph_def,
input_arrays_with_shape=[
("inputA", [1, 16, 16, 3]),
("inputB", [1, 16, 16, 3]),
],
output_arrays=["output"],
control_output_arrays=None,
inference_type=dtypes.uint8,
quantized_input_stats=[(0.0, 1.0), (0.0, 1.0)],
)
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual("inputA", input_details[0]["name"])
self.assertEqual(np.uint8, input_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[0]["shape"]).all()) # type: ignore
self.assertEqual(
(1.0, 0.0), input_details[0]["quantization"]
) # scale, zero_point
self.assertEqual("inputB", input_details[1]["name"])
self.assertEqual(np.uint8, input_details[1]["dtype"])
self.assertTrue(([1, 16, 16, 3] == input_details[1]["shape"]).all()) # type: ignore
self.assertEqual(
(1.0, 0.0), input_details[1]["quantization"]
) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual("output", output_details[0]["name"])
self.assertEqual(np.uint8, output_details[0]["dtype"])
self.assertTrue(([1, 16, 16, 3] == output_details[0]["shape"]).all()) # type: ignore
self.assertGreater(output_details[0]["quantization"][0], 0) # scale
def testGraphDefQuantizationInvalid(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputA"
)
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name="inputB"
)
_ = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0.0, max=1.0, name="output"
)
sess = session.Session()
with self.assertRaises(ValueError) as error:
convert.convert_graphdef_with_arrays(
sess.graph_def,
input_arrays_with_shape=[
("inputA", [1, 16, 16, 3]),
("inputB", [1, 16, 16, 3]),
],
output_arrays=["output"],
control_output_arrays=None,
inference_type=dtypes.uint8,
)
self.assertEqual(
"The `quantized_input_stats` flag must be defined when either "
"`inference_type` flag or `inference_input_type` flag is set to "
"tf.int8 or tf.uint8.",
str(error.exception),
)
| ConvertTest |
python | facebookresearch__faiss | tests/test_index_composite.py | {
"start": 7214,
"end": 7876
} | class ____(unittest.TestCase):
def test_range_search_id_map(self):
sub_index = faiss.IndexFlat(5, 1) # L2 search instead of inner product
xb = np.zeros((10, 5), dtype='float32')
xb[:, 0] = np.arange(10) + 1000
index = faiss.IndexIDMap2(sub_index)
index.add_with_ids(xb, np.arange(10, dtype=np.int64) + 100)
dist = float(np.linalg.norm(xb[3] - xb[0])) * 0.99
res_subindex = sub_index.range_search(xb[[0], :], dist)
res_index = index.range_search(xb[[0], :], dist)
assert len(res_subindex[2]) == 2
np.testing.assert_array_equal(res_subindex[2] + 100, res_index[2])
| TestRangeSearch |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/init_ops_test.py | {
"start": 39470,
"end": 42129
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, (3, 3, 3, 10, 10)))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_3d(seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, (3, 3, 3, 10, 10)))
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.convolutional_orthogonal_3d()
self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 3, 10, 10)))
def testInvalidDataType(self):
self.assertRaises(
ValueError, init_ops.convolutional_orthogonal_3d, dtype=dtypes.string)
def testInvalidShape(self):
init1 = init_ops.convolutional_orthogonal_3d()
with self.session(graph=ops.Graph(), use_gpu=True):
self.assertRaises(ValueError, init1, shape=[3, 3, 3, 6, 5])
@test_util.run_deprecated_v1
def testGain(self):
shape = (3, 3, 3, 10, 10)
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.convolutional_orthogonal_3d(seed=1, dtype=dtype)
init2 = init_ops.convolutional_orthogonal_3d(
gain=3.14, seed=1, dtype=dtype)
with self.session(graph=ops.Graph(), use_gpu=True):
t1 = init1(shape).eval()
t2 = init2(shape).eval()
self.assertAllClose(t1, t2 / 3.14)
@test_util.run_deprecated_v1
def testNonuniformity(self):
value = 0
abs_value = 0
shape = [3, 3, 3, 5, 5]
count = 20
tol = 1e-5
with self.session():
for i in range(count):
x = variable_scope.get_variable(
"{}".format(i),
shape=shape,
initializer=init_ops.convolutional_orthogonal_3d)
self.evaluate(x.initializer)
y = np.sum(self.evaluate(x), axis=(0, 1, 2))
determinant = np.linalg.det(y)
value += determinant
abs_value += np.abs(determinant)
# Check there is some variation in the signs of the determinants
self.assertLess(value, count - tol)
self.assertLess(-count + tol, value)
# Check all determinants have absolute value 1
# Compute the sum of the absolute values of 'count' determinants
self.assertAllClose(abs_value, count, rtol=tol, atol=tol)
| ConvolutionOrthogonal3dInitializerTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/atlassian/provider.py | {
"start": 353,
"end": 1123
} | class ____(OAuth2Provider):
id = "atlassian"
name = "Atlassian"
account_class = AtlassianAccount
oauth2_adapter_class = AtlassianOAuth2Adapter
def extract_uid(self, data):
return data["account_id"]
def extract_common_fields(self, data):
return {
"email": data.get("email"),
"name": data.get("name"),
"username": data.get("nickname"),
"email_verified": data.get("email_verified"),
}
def get_default_scope(self):
return ["read:me"]
def get_auth_params(self):
params = super().get_auth_params()
params.update({"audience": "api.atlassian.com", "prompt": "consent"})
return params
provider_classes = [AtlassianProvider]
| AtlassianProvider |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/pickleable.py | {
"start": 2163,
"end": 2330
} | class ____:
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return "Bar(%d, %d)" % (self.x, self.y)
| BarWithoutCompare |
python | plotly__plotly.py | plotly/graph_objs/layout/title/subtitle/_font.py | {
"start": 235,
"end": 9901
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.title.subtitle"
_path_str = "layout.title.subtitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets the subtitle font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.title.subtitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.title.subtitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.title.subtitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | scrapy__scrapy | tests/test_scheduler.py | {
"start": 6762,
"end": 8506
} | class ____:
def test_migration(self, tmpdir):
class PrevSchedulerHandler(SchedulerHandler):
jobdir = tmpdir
@property
def priority_queue_cls(self) -> str:
return "scrapy.pqueues.ScrapyPriorityQueue"
class NextSchedulerHandler(SchedulerHandler):
jobdir = tmpdir
@property
def priority_queue_cls(self) -> str:
return "scrapy.pqueues.DownloaderAwarePriorityQueue"
prev_scheduler_handler = PrevSchedulerHandler()
prev_scheduler_handler.create_scheduler()
for url in _URLS:
prev_scheduler_handler.scheduler.enqueue_request(Request(url))
prev_scheduler_handler.close_scheduler()
next_scheduler_handler = NextSchedulerHandler()
with pytest.raises(
ValueError,
match="DownloaderAwarePriorityQueue accepts ``slot_startprios`` as a dict",
):
next_scheduler_handler.create_scheduler()
def _is_scheduling_fair(enqueued_slots, dequeued_slots):
"""
We enqueued same number of requests for every slot.
Assert correct order, e.g.
>>> enqueued = ['a', 'b', 'c'] * 2
>>> correct = ['a', 'c', 'b', 'b', 'a', 'c']
>>> incorrect = ['a', 'a', 'b', 'c', 'c', 'b']
>>> _is_scheduling_fair(enqueued, correct)
True
>>> _is_scheduling_fair(enqueued, incorrect)
False
"""
if len(dequeued_slots) != len(enqueued_slots):
return False
slots_number = len(set(enqueued_slots))
for i in range(0, len(dequeued_slots), slots_number):
part = dequeued_slots[i : i + slots_number]
if len(part) != len(set(part)):
return False
return True
| TestMigration |
python | bokeh__bokeh | src/bokeh/command/subcommand.py | {
"start": 1742,
"end": 2391
} | class ____:
action: NotRequired[Literal["store", "store_const", "store_true", "append", "append_const", "count", "help", "version", "extend"]] = Unspecified
nargs: NotRequired[int | Literal["?", "*", "+", "..."]] = Unspecified
const: NotRequired[Any] = Unspecified
default: NotRequired[Any] = Unspecified
type: NotRequired[type[Any]] = Unspecified
choices: NotRequired[Sequence[Any]] = Unspecified
required: NotRequired[bool] = Unspecified
help: NotRequired[str] = Unspecified
metavar: NotRequired[str] = Unspecified
Arg: TypeAlias = tuple[str | tuple[str, ...], Argument]
Args: TypeAlias = tuple[Arg, ...]
| Argument |
python | walkccc__LeetCode | solutions/3028. Ant on the Boundary/3028.py | {
"start": 0,
"end": 144
} | class ____:
def returnToBoundaryCount(self, nums: list[int]) -> int:
return sum(prefix == 0 for prefix in itertools.accumulate(nums))
| Solution |
python | ray-project__ray | rllib/core/rl_module/tests/test_multi_rl_module.py | {
"start": 688,
"end": 9521
} | class ____(unittest.TestCase):
def test_from_config(self):
"""Tests whether a MultiRLModule can be constructed from a config."""
env_class = make_multi_agent("CartPole-v0")
env = env_class({"num_agents": 2})
module1 = RLModuleSpec(
module_class=VPGTorchRLModule,
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 32},
)
module2 = RLModuleSpec(
module_class=VPGTorchRLModule,
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 32},
)
multi_rl_module = MultiRLModule(
rl_module_specs={"module1": module1, "module2": module2},
)
self.assertEqual(set(multi_rl_module.keys()), {"module1", "module2"})
self.assertIsInstance(multi_rl_module["module1"], VPGTorchRLModule)
self.assertIsInstance(multi_rl_module["module2"], VPGTorchRLModule)
def test_as_multi_rl_module(self):
env_class = make_multi_agent("CartPole-v0")
env = env_class({"num_agents": 2})
multi_rl_module = VPGTorchRLModule(
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 32},
).as_multi_rl_module()
self.assertNotIsInstance(multi_rl_module, VPGTorchRLModule)
self.assertIsInstance(multi_rl_module, MultiRLModule)
self.assertEqual({DEFAULT_MODULE_ID}, set(multi_rl_module.keys()))
# Check as_multi_rl_module() for the second time
multi_rl_module2 = multi_rl_module.as_multi_rl_module()
self.assertEqual(id(multi_rl_module), id(multi_rl_module2))
def test_get_state_and_set_state(self):
env_class = make_multi_agent("CartPole-v0")
env = env_class({"num_agents": 2})
module = VPGTorchRLModule(
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 32},
).as_multi_rl_module()
state = module.get_state()
self.assertIsInstance(state, dict)
self.assertEqual(
set(state.keys()),
set(module.keys()),
)
self.assertEqual(
set(state[DEFAULT_MODULE_ID].keys()),
set(module[DEFAULT_MODULE_ID].get_state().keys()),
)
module2 = VPGTorchRLModule(
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 32},
).as_multi_rl_module()
state2 = module2.get_state()
check(state[DEFAULT_MODULE_ID], state2[DEFAULT_MODULE_ID], false=True)
module2.set_state(state)
state2_after = module2.get_state()
check(state, state2_after)
def test_add_remove_modules(self):
# TODO (Avnish): Modify this test to make sure that the distributed
# functionality won't break the add / remove.
env_class = make_multi_agent("CartPole-v0")
env = env_class({"num_agents": 2})
module = VPGTorchRLModule(
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 32},
).as_multi_rl_module()
module.add_module(
"test",
VPGTorchRLModule(
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 32},
),
)
self.assertEqual(set(module.keys()), {DEFAULT_MODULE_ID, "test"})
module.remove_module("test")
self.assertEqual(set(module.keys()), {DEFAULT_MODULE_ID})
# test if add works with a conflicting name
self.assertRaises(
ValueError,
lambda: module.add_module(
DEFAULT_MODULE_ID,
VPGTorchRLModule(
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 32},
),
),
)
module.add_module(
DEFAULT_MODULE_ID,
VPGTorchRLModule(
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 32},
),
override=True,
)
def test_save_to_path_and_from_checkpoint(self):
"""Test saving and loading from checkpoint after adding / removing modules."""
env_class = make_multi_agent("CartPole-v0")
env = env_class({"num_agents": 2})
module = VPGTorchRLModule(
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 32},
).as_multi_rl_module()
module.add_module(
"test",
VPGTorchRLModule(
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 32},
),
)
module.add_module(
"test2",
VPGTorchRLModule(
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 128},
),
)
with tempfile.TemporaryDirectory() as tmpdir:
module.save_to_path(tmpdir)
module2 = MultiRLModule.from_checkpoint(tmpdir)
check(module.get_state(), module2.get_state())
self.assertEqual(module.keys(), module2.keys())
self.assertEqual(module.keys(), {"test", "test2", DEFAULT_MODULE_ID})
self.assertNotEqual(id(module), id(module2))
module.remove_module("test")
# Check that - after removing a module - the checkpoint is correct.
with tempfile.TemporaryDirectory() as tmpdir:
module.save_to_path(tmpdir)
module2 = MultiRLModule.from_checkpoint(tmpdir)
check(module.get_state(), module2.get_state())
self.assertEqual(module.keys(), module2.keys())
self.assertEqual(module.keys(), {"test2", DEFAULT_MODULE_ID})
self.assertNotEqual(id(module), id(module2))
# Check that - after adding a new module - the checkpoint is correct.
module.add_module(
"test3",
VPGTorchRLModule(
observation_space=env.get_observation_space(0),
action_space=env.get_action_space(0),
model_config={"hidden_dim": 120},
),
)
# Check that - after adding a module - the checkpoint is correct.
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = "/tmp/test_multi_rl_module"
module.save_to_path(tmpdir)
module2 = MultiRLModule.from_checkpoint(tmpdir)
check(module.get_state(), module2.get_state())
self.assertEqual(module.keys(), module2.keys())
self.assertEqual(module.keys(), {"test2", "test3", DEFAULT_MODULE_ID})
self.assertNotEqual(id(module), id(module2))
def test_model_config_propagation(self):
"""Test that model_config is correctly added to a MultiRLModule"""
class CustomMultiRLModule(MultiRLModule):
def setup(self):
super().setup()
assert self.model_config is not None
spec = MultiRLModuleSpec(
multi_rl_module_class=CustomMultiRLModule,
rl_module_specs={
"agent_1": RLModuleSpec(
TorchRLModule,
observation_space=gym.spaces.Box(0, 1),
action_space=gym.spaces.Box(0, 1),
)
},
model_config={"some_config": 1},
)
# Verify that model_config propagates when instantiated using MultiRLModuleSpec.build()
spec.build()
# Verify that model_config propagates when instantiated using an AlgorithmConfig
algo_config = (
DQNConfig()
.environment(MultiAgentCartPole)
.rl_module(rl_module_spec=spec)
.multi_agent(
policies={"agent_1"},
policy_mapping_fn=lambda agent_id, episode, worker, **kwargs: agent_id,
)
)
MultiAgentEnvRunner(algo_config)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestMultiRLModule |
python | tensorflow__tensorflow | tensorflow/python/autograph/converters/call_trees.py | {
"start": 2600,
"end": 7328
} | class ____(converter.Base):
"""Transforms the call tree by renaming transformed symbols."""
def visit_Lambda(self, node):
if not anno.hasanno(node, 'function_context_name'):
# Lambda functions created during the conversion process have no
# context manager.
return self.generic_visit(node)
with self.state[_Function] as fn_scope:
fn_scope.context_name = anno.getanno(node, 'function_context_name')
return self.generic_visit(node)
def visit_FunctionDef(self, node):
# Decorators and arg defaults are part of the outer scope.
node.decorator_list = self.visit_block(node.decorator_list)
node.args.defaults = self.visit_block(node.args.defaults)
for i, d in enumerate(node.args.kw_defaults):
if d is not None:
node.args.kw_defaults[i] = self.visit(d)
with self.state[_Function] as fn_scope:
# Note: if the conversion process ever creates helper functions, this
# assumption will no longer hold.
assert anno.hasanno(node, 'function_context_name'), (
'The function_scopes converter always creates a scope for functions.')
fn_scope.context_name = anno.getanno(node, 'function_context_name')
node.body = self.visit_block(node.body)
if node.returns:
node.returns = self.visit(node.returns)
return node
def visit_With(self, node):
# Context manager calls (in node.items) are not converted.
node.body = self.visit_block(node.body)
return node
def _args_to_tuple(self, node):
"""Ties together all positional and *arg arguments in a single tuple."""
# TODO(mdan): We could rewrite this to just a call to tuple(). Maybe better?
# For example for
# f(a, b, *args)
# instead of writing:
# (a, b) + args
# just write this?
# tuple(a, b, *args)
builder = _ArgTemplateBuilder()
for a in node.args:
if isinstance(a, gast.Starred):
builder.add_stararg(a.value)
else:
builder.add_arg(a)
builder.finalize()
return builder.to_ast()
def _kwargs_to_dict(self, node):
"""Ties together all keyword and **kwarg arguments in a single dict."""
if node.keywords:
return gast.Call(
gast.Name(
'dict', ctx=gast.Load(), annotation=None, type_comment=None),
args=(),
keywords=node.keywords)
else:
return parser.parse_expression('None')
def visit_Call(self, node):
full_name = str(anno.getanno(node.func, anno.Basic.QN, default=''))
function_context_name = self.state[_Function].context_name
node = self.generic_visit(node)
# TODO(mdan): Refactor converted_call as a 'Call' operator.
# Calls to the internal 'ag__' module are never converted (though their
# arguments might be).
if full_name.startswith('ag__.'):
return node
# Calls to the function context manager (inserted by function_scopes) are
# also safe.
if full_name.startswith(function_context_name + '.'):
return node
# Calls to pdb.set_trace or ipdb.set_trace are never converted. We don't use
# the normal mechanisms to bypass these literals because they are sensitive
# to the frame they are being called from.
# TODO(mdan): Generalize this to a "static allowlist" config.
if full_name in ('pdb.set_trace', 'ipdb.set_trace', 'breakpoint'):
global set_trace_warned
if not set_trace_warned:
# TODO(mdan): Update and shorten once available on tensorflow.org.
ag_logging.warning(
'Detected `pdb.set_trace()` in user code. The code'
' generated by AutoGraph is not optimized for step-by-step'
' debugging. See https://github.com/tensorflow/tensorflow/'
'blob/master/tensorflow/python/autograph/g3doc/reference/'
'debugging.md.')
set_trace_warned = True
return node
if (full_name == 'print' and
not self.ctx.user.options.uses(converter.Feature.BUILTIN_FUNCTIONS)):
return node
template = """
ag__.converted_call(func, args, kwargs, function_ctx)
"""
new_call = templates.replace_as_expression(
template,
func=node.func,
args=self._args_to_tuple(node),
kwargs=self._kwargs_to_dict(node),
function_ctx=function_context_name)
return new_call
def transform(node, ctx):
"""Transform function call to the compiled counterparts.
Args:
node: AST
ctx: EntityContext
Returns:
A tuple (node, new_names):
node: The transformed AST
new_names: set(string), containing any newly-generated names
"""
node = qual_names.resolve(node)
node = CallTreeTransformer(ctx).visit(node)
return node
| CallTreeTransformer |
python | django__django | tests/queries/models.py | {
"start": 8909,
"end": 9131
} | class ____(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True)
def __str__(self):
return str(self.num)
# Bug #12252
| Node |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ROI.py | {
"start": 93389,
"end": 94302
} | class ____(LineSegmentROI):
# Used internally by PolyLineROI
def __init__(self, *args, **kwds):
self._parentHovering = False
LineSegmentROI.__init__(self, *args, **kwds)
def setParentHover(self, hover):
# set independently of own hover state
if self._parentHovering != hover:
self._parentHovering = hover
self._updateHoverColor()
def _makePen(self):
if self.mouseHovering or self._parentHovering:
return self.hoverPen
else:
return self.pen
def hoverEvent(self, ev):
# accept drags even though we discard them to prevent competition with parent ROI
# (unless parent ROI is not movable)
if self.parentItem().translatable:
ev.acceptDrags(QtCore.Qt.MouseButton.LeftButton)
return LineSegmentROI.hoverEvent(self, ev)
| _PolyLineSegment |
python | eventlet__eventlet | tests/zmq_test.py | {
"start": 367,
"end": 15400
} | class ____(tests.LimitedTestCase):
TEST_TIMEOUT = 2
@tests.skip_unless(zmq_supported)
def setUp(self):
super().setUp()
self.context = zmq.Context()
self.sockets = []
@tests.skip_unless(zmq_supported)
def tearDown(self):
self.clear_up_sockets()
super().tearDown()
def create_bound_pair(self, type1, type2, interface='tcp://127.0.0.1'):
"""Create a bound socket pair using a random port."""
s1 = self.context.socket(type1)
port = s1.bind_to_random_port(interface)
s2 = self.context.socket(type2)
s2.connect('%s:%s' % (interface, port))
self.sockets.append(s1)
self.sockets.append(s2)
return s1, s2, port
def clear_up_sockets(self):
for sock in self.sockets:
sock.close()
self.sockets = None
self.context.destroy(0)
def assertRaisesErrno(self, errnos, func, *args):
try:
func(*args)
except zmq.ZMQError as e:
if not hasattr(errnos, '__iter__'):
errnos = (errnos,)
if e.errno not in errnos:
raise AssertionError(
"wrong error raised, expected one of ['%s'], got '%s'" % (
", ".join("%s" % zmq.ZMQError(errno) for errno in errnos),
zmq.ZMQError(e.errno)
),
)
else:
self.fail("Function did not raise any error")
@tests.skip_unless(zmq_supported)
def test_close_linger(self):
"""Socket.close() must support linger argument.
https://github.com/eventlet/eventlet/issues/9
"""
sock1, sock2, _ = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
sock1.close(1)
sock2.close(linger=0)
@tests.skip_unless(zmq_supported)
def test_recv_spawned_before_send_is_non_blocking(self):
req, rep, port = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
# req.connect(ipc)
# rep.bind(ipc)
eventlet.sleep()
msg = dict(res=None)
done = eventlet.Event()
def rx():
msg['res'] = rep.recv()
done.send('done')
eventlet.spawn(rx)
req.send(b'test')
done.wait()
self.assertEqual(msg['res'], b'test')
@tests.skip_unless(zmq_supported)
def test_close_socket_raises_enotsup(self):
req, rep, port = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
rep.close()
req.close()
self.assertRaisesErrno(RECV_ON_CLOSED_SOCKET_ERRNOS, rep.recv)
self.assertRaisesErrno(RECV_ON_CLOSED_SOCKET_ERRNOS, req.send, b'test')
@tests.skip_unless(zmq_supported)
def test_close_xsocket_raises_enotsup(self):
req, rep, port = self.create_bound_pair(zmq.XREQ, zmq.XREP)
rep.close()
req.close()
self.assertRaisesErrno(RECV_ON_CLOSED_SOCKET_ERRNOS, rep.recv)
self.assertRaisesErrno(RECV_ON_CLOSED_SOCKET_ERRNOS, req.send, b'test')
@tests.skip_unless(zmq_supported)
def test_send_1k_req_rep(self):
req, rep, port = self.create_bound_pair(zmq.REQ, zmq.REP)
eventlet.sleep()
done = eventlet.Event()
def tx():
tx_i = 0
req.send(str(tx_i).encode())
while req.recv() != b'done':
tx_i += 1
req.send(str(tx_i).encode())
done.send(0)
def rx():
while True:
rx_i = rep.recv()
if rx_i == b"1000":
rep.send(b'done')
break
rep.send(b'i')
eventlet.spawn(tx)
eventlet.spawn(rx)
final_i = done.wait()
self.assertEqual(final_i, 0)
@tests.skip_unless(zmq_supported)
def test_send_1k_push_pull(self):
down, up, port = self.create_bound_pair(zmq.PUSH, zmq.PULL)
eventlet.sleep()
done = eventlet.Event()
def tx():
tx_i = 0
while tx_i <= 1000:
tx_i += 1
down.send(str(tx_i).encode())
def rx():
while True:
rx_i = up.recv()
if rx_i == b"1000":
done.send(0)
break
eventlet.spawn(tx)
eventlet.spawn(rx)
final_i = done.wait()
self.assertEqual(final_i, 0)
@tests.skip_unless(zmq_supported)
def test_send_1k_pub_sub(self):
pub, sub_all, port = self.create_bound_pair(zmq.PUB, zmq.SUB)
sub1 = self.context.socket(zmq.SUB)
sub2 = self.context.socket(zmq.SUB)
self.sockets.extend([sub1, sub2])
addr = 'tcp://127.0.0.1:%s' % port
sub1.connect(addr)
sub2.connect(addr)
sub_all.setsockopt(zmq.SUBSCRIBE, b'')
sub1.setsockopt(zmq.SUBSCRIBE, b'sub1')
sub2.setsockopt(zmq.SUBSCRIBE, b'sub2')
sub_all_done = eventlet.Event()
sub1_done = eventlet.Event()
sub2_done = eventlet.Event()
eventlet.sleep(0.2)
def rx(sock, done_evt, msg_count=10000):
count = 0
while count < msg_count:
msg = sock.recv()
eventlet.sleep()
if b'LAST' in msg:
break
count += 1
done_evt.send(count)
def tx(sock):
for i in range(1, 1001):
msg = ("sub%s %s" % ([2, 1][i % 2], i)).encode()
sock.send(msg)
eventlet.sleep()
sock.send(b'sub1 LAST')
sock.send(b'sub2 LAST')
eventlet.spawn(rx, sub_all, sub_all_done)
eventlet.spawn(rx, sub1, sub1_done)
eventlet.spawn(rx, sub2, sub2_done)
eventlet.spawn(tx, pub)
sub1_count = sub1_done.wait()
sub2_count = sub2_done.wait()
sub_all_count = sub_all_done.wait()
self.assertEqual(sub1_count, 500)
self.assertEqual(sub2_count, 500)
self.assertEqual(sub_all_count, 1000)
@tests.skip_unless(zmq_supported)
def test_change_subscription(self):
# FIXME: Extensive testing showed this particular test is the root cause
# of sporadic failures on Travis.
pub, sub, port = self.create_bound_pair(zmq.PUB, zmq.SUB)
sub.setsockopt(zmq.SUBSCRIBE, b'test')
eventlet.sleep(0)
sub_ready = eventlet.Event()
sub_last = eventlet.Event()
sub_done = eventlet.Event()
def rx():
while sub.recv() != b'test BEGIN':
eventlet.sleep(0)
sub_ready.send()
count = 0
while True:
msg = sub.recv()
if msg == b'test BEGIN':
# BEGIN may come many times
continue
if msg == b'test LAST':
sub.setsockopt(zmq.SUBSCRIBE, b'done')
sub.setsockopt(zmq.UNSUBSCRIBE, b'test')
eventlet.sleep(0)
# In real application you should either sync
# or tolerate loss of messages.
sub_last.send()
if msg == b'done DONE':
break
count += 1
sub_done.send(count)
def tx():
# Sync receiver ready to avoid loss of first packets
while not sub_ready.ready():
pub.send(b'test BEGIN')
eventlet.sleep(0.005)
for i in range(1, 101):
msg = 'test {}'.format(i).encode()
if i != 50:
pub.send(msg)
else:
pub.send(b'test LAST')
sub_last.wait()
# XXX: putting a real delay of 1ms here fixes sporadic failures on Travis
# just yield eventlet.sleep(0) doesn't cut it
eventlet.sleep(0.001)
pub.send(b'done DONE')
eventlet.spawn(rx)
eventlet.spawn(tx)
rx_count = sub_done.wait()
self.assertEqual(rx_count, 50)
@tests.skip_unless(zmq_supported)
def test_recv_multipart_bug68(self):
req, rep, port = self.create_bound_pair(zmq.REQ, zmq.REP)
msg = [b'']
req.send_multipart(msg)
recieved_msg = rep.recv_multipart()
self.assertEqual(recieved_msg, msg)
# Send a message back the other way
msg2 = [b""]
rep.send_multipart(msg2, copy=False)
# When receiving a copy it's a zmq.core.message.Message you get back
recieved_msg = req.recv_multipart(copy=False)
# So it needs to be converted to a string
# I'm calling str(m) consciously here; Message has a .data attribute
# but it's private __str__ appears to be the way to go
self.assertEqual([m.bytes for m in recieved_msg], msg2)
@tests.skip_unless(zmq_supported)
def test_recv_noblock_bug76(self):
req, rep, port = self.create_bound_pair(zmq.REQ, zmq.REP)
self.assertRaisesErrno(zmq.EAGAIN, rep.recv, zmq.NOBLOCK)
self.assertRaisesErrno(zmq.EAGAIN, rep.recv, zmq.NOBLOCK, True)
@tests.skip_unless(zmq_supported)
def test_send_during_recv(self):
sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
eventlet.sleep()
num_recvs = 30
done_evts = [eventlet.Event() for _ in range(num_recvs)]
def slow_rx(done, msg):
self.assertEqual(sender.recv(), msg)
done.send(0)
def tx():
tx_i = 0
while tx_i <= 1000:
sender.send(str(tx_i).encode())
tx_i += 1
def rx():
while True:
rx_i = receiver.recv()
if rx_i == b"1000":
for i in range(num_recvs):
receiver.send(('done%d' % i).encode())
eventlet.sleep()
return
for i in range(num_recvs):
eventlet.spawn(slow_rx, done_evts[i], ("done%d" % i).encode())
eventlet.spawn(tx)
eventlet.spawn(rx)
for evt in done_evts:
self.assertEqual(evt.wait(), 0)
@tests.skip_unless(zmq_supported)
def test_send_during_recv_multipart(self):
sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
eventlet.sleep()
num_recvs = 30
done_evts = [eventlet.Event() for _ in range(num_recvs)]
def slow_rx(done, msg):
self.assertEqual(sender.recv_multipart(), msg)
done.send(0)
def tx():
tx_i = 0
while tx_i <= 1000:
sender.send_multipart([str(tx_i).encode(), b'1', b'2', b'3'])
tx_i += 1
def rx():
while True:
rx_i = receiver.recv_multipart()
if rx_i == [b"1000", b'1', b'2', b'3']:
for i in range(num_recvs):
receiver.send_multipart([
('done%d' % i).encode(), b'a', b'b', b'c'])
eventlet.sleep()
return
for i in range(num_recvs):
eventlet.spawn(slow_rx, done_evts[i], [
("done%d" % i).encode(), b'a', b'b', b'c'])
eventlet.spawn(tx)
eventlet.spawn(rx)
for i in range(num_recvs):
final_i = done_evts[i].wait()
self.assertEqual(final_i, 0)
# Need someway to ensure a thread is blocked on send... This isn't working
@tests.skip_unless(zmq_supported)
def test_recv_during_send(self):
sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
eventlet.sleep()
done = eventlet.Event()
try:
SNDHWM = zmq.SNDHWM
except AttributeError:
# ZeroMQ <3.0
SNDHWM = zmq.HWM
sender.setsockopt(SNDHWM, 10)
sender.setsockopt(zmq.SNDBUF, 10)
receiver.setsockopt(zmq.RCVBUF, 10)
def tx():
tx_i = 0
while tx_i <= 1000:
sender.send(str(tx_i).encode())
tx_i += 1
done.send(0)
eventlet.spawn(tx)
final_i = done.wait()
self.assertEqual(final_i, 0)
@tests.skip_unless(zmq_supported)
def test_close_during_recv(self):
sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
eventlet.sleep()
done1 = eventlet.Event()
done2 = eventlet.Event()
def rx(e):
self.assertRaisesErrno(RECV_ON_CLOSED_SOCKET_ERRNOS, receiver.recv)
e.send()
eventlet.spawn(rx, done1)
eventlet.spawn(rx, done2)
eventlet.sleep()
receiver.close()
done1.wait()
done2.wait()
@tests.skip_unless(zmq_supported)
def test_getsockopt_events(self):
sock1, sock2, _port = self.create_bound_pair(zmq.DEALER, zmq.DEALER)
eventlet.sleep()
poll_out = zmq.Poller()
poll_out.register(sock1, zmq.POLLOUT)
sock_map = poll_out.poll(100)
self.assertEqual(len(sock_map), 1)
events = sock1.getsockopt(zmq.EVENTS)
self.assertEqual(events & zmq.POLLOUT, zmq.POLLOUT)
sock1.send(b'')
poll_in = zmq.Poller()
poll_in.register(sock2, zmq.POLLIN)
sock_map = poll_in.poll(100)
self.assertEqual(len(sock_map), 1)
events = sock2.getsockopt(zmq.EVENTS)
self.assertEqual(events & zmq.POLLIN, zmq.POLLIN)
@tests.skip_unless(zmq_supported)
def test_cpu_usage_after_bind(self):
"""zmq eats CPU after PUB socket .bind()
https://bitbucket.org/eventlet/eventlet/issue/128
According to the ZeroMQ documentation, the socket file descriptor
can be readable without any pending messages. So we need to ensure
that Eventlet wraps around ZeroMQ sockets do not create busy loops.
A naive way to test it is to measure resource usage. This will require
some tuning to set appropriate acceptable limits.
"""
sock = self.context.socket(zmq.PUB)
self.sockets.append(sock)
sock.bind_to_random_port("tcp://127.0.0.1")
eventlet.sleep()
tests.check_idle_cpu_usage(0.2, 0.1)
@tests.skip_unless(zmq_supported)
def test_cpu_usage_after_pub_send_or_dealer_recv(self):
"""zmq eats CPU after PUB send or DEALER recv.
Same https://bitbucket.org/eventlet/eventlet/issue/128
"""
pub, sub, _port = self.create_bound_pair(zmq.PUB, zmq.SUB)
sub.setsockopt(zmq.SUBSCRIBE, b"")
eventlet.sleep()
pub.send(b'test_send')
tests.check_idle_cpu_usage(0.2, 0.1)
sender, receiver, _port = self.create_bound_pair(zmq.DEALER, zmq.DEALER)
eventlet.sleep()
sender.send(b'test_recv')
msg = receiver.recv()
self.assertEqual(msg, b'test_recv')
tests.check_idle_cpu_usage(0.2, 0.1)
| TestUpstreamDownStream |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/visitors.py | {
"start": 18889,
"end": 19068
} | class ____(Protocol[_ET]):
def __call__(self, element: _ET, **kw: Any) -> Optional[_ET]: ...
_ExtT = TypeVar("_ExtT", bound="ExternalTraversal")
| _TraverseTransformCallableType |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF028.py | {
"start": 360,
"end": 743
} | class ____:
...
def fmt_off_in_else():
x = [1, 2, 3]
for val in x:
print(x)
# fmt: off
else:
print("done")
while False:
print("while")
# fmt: off
# fmt: off
else:
print("done")
if len(x) > 3:
print("huh?")
# fmt: on
# fmt: off
else:
print("expected")
| FmtOffBetweenClassDecorators |
python | pypa__pip | src/pip/_vendor/urllib3/exceptions.py | {
"start": 5447,
"end": 5582
} | class ____(ProtocolError, ValueError):
"""Response needs to be chunked in order to read it as chunks."""
pass
| ResponseNotChunked |
python | doocs__leetcode | solution/0000-0099/0070.Climbing Stairs/Solution.py | {
"start": 0,
"end": 150
} | class ____:
def climbStairs(self, n: int) -> int:
a, b = 0, 1
for _ in range(n):
a, b = b, a + b
return b
| Solution |
python | gevent__gevent | src/greentest/3.10/test_httplib.py | {
"start": 76940,
"end": 81035
} | class ____(TestCase):
def setUp(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
self.host = 'proxy.com'
self.conn = client.HTTPConnection(self.host)
self.conn._create_connection = self._create_connection(response_text)
def tearDown(self):
self.conn.close()
def _create_connection(self, response_text):
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
return create_connection
def test_set_tunnel_host_port_headers(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_disallow_set_tunnel_after_connect(self):
# Once connected, we shouldn't be able to tunnel anymore
self.conn.connect()
self.assertRaises(RuntimeError, self.conn.set_tunnel,
'destination.com')
def test_connect_with_tunnel(self):
self.conn.set_tunnel('destination.com')
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
# issue22095
self.assertNotIn(b'Host: destination.com:None', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
# This test should be removed when CONNECT gets the HTTP/1.1 blessing
self.assertNotIn(b'Host: proxy.com', self.conn.sock.data)
def test_tunnel_connect_single_send_connection_setup(self):
"""Regresstion test for https://bugs.python.org/issue43332."""
with mock.patch.object(self.conn, 'send') as mock_send:
self.conn.set_tunnel('destination.com')
self.conn.connect()
self.conn.request('GET', '/')
mock_send.assert_called()
# Likely 2, but this test only cares about the first.
self.assertGreater(
len(mock_send.mock_calls), 1,
msg=f'unexpected number of send calls: {mock_send.mock_calls}')
proxy_setup_data_sent = mock_send.mock_calls[0][1][0]
self.assertIn(b'CONNECT destination.com', proxy_setup_data_sent)
self.assertTrue(
proxy_setup_data_sent.endswith(b'\r\n\r\n'),
msg=f'unexpected proxy data sent {proxy_setup_data_sent!r}')
def test_connect_put_request(self):
self.conn.set_tunnel('destination.com')
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
def test_tunnel_debuglog(self):
expected_header = 'X-Dummy: 1'
response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)
self.conn.set_debuglevel(1)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
with support.captured_stdout() as output:
self.conn.request('PUT', '/', '')
lines = output.getvalue().splitlines()
self.assertIn('header: {}'.format(expected_header), lines)
if __name__ == '__main__':
unittest.main(verbosity=2)
| TunnelTests |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/event_log/base.py | {
"start": 2196,
"end": 2317
} | class ____(NamedTuple):
records: Sequence[EventLogRecord]
cursor: str
has_more: bool
@record
| EventLogConnection |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/command_parser_test.py | {
"start": 8141,
"end": 8767
} | class ____(test_util.TensorFlowTestCase):
def testParseTensorNameWithoutSlicing(self):
(tensor_name,
tensor_slicing) = command_parser.parse_tensor_name_with_slicing(
"hidden/weights/Variable:0")
self.assertEqual("hidden/weights/Variable:0", tensor_name)
self.assertEqual("", tensor_slicing)
def testParseTensorNameWithSlicing(self):
(tensor_name,
tensor_slicing) = command_parser.parse_tensor_name_with_slicing(
"hidden/weights/Variable:0[:, 1]")
self.assertEqual("hidden/weights/Variable:0", tensor_name)
self.assertEqual("[:, 1]", tensor_slicing)
| ParseTensorNameTest |
python | kamyu104__LeetCode-Solutions | Python/second-minimum-node-in-a-binary-tree.py | {
"start": 44,
"end": 846
} | class ____(object):
def findSecondMinimumValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def findSecondMinimumValueHelper(root, max_heap, lookup):
if not root:
return
if root.val not in lookup:
heapq.heappush(max_heap, -root.val)
lookup.add(root.val)
if len(max_heap) > 2:
lookup.remove(-heapq.heappop(max_heap))
findSecondMinimumValueHelper(root.left, max_heap, lookup)
findSecondMinimumValueHelper(root.right, max_heap, lookup)
max_heap, lookup = [], set()
findSecondMinimumValueHelper(root, max_heap, lookup)
if len(max_heap) < 2:
return -1
return -max_heap[0]
| Solution |
python | pyca__cryptography | src/cryptography/x509/general_name.py | {
"start": 4389,
"end": 4983
} | class ____(GeneralName):
def __init__(self, value: Name) -> None:
if not isinstance(value, Name):
raise TypeError("value must be a Name")
self._value = value
@property
def value(self) -> Name:
return self._value
def __repr__(self) -> str:
return f"<DirectoryName(value={self.value})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, DirectoryName):
return NotImplemented
return self.value == other.value
def __hash__(self) -> int:
return hash(self.value)
| DirectoryName |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/io_ops/record_input_test.py | {
"start": 1020,
"end": 6573
} | class ____(test.TestCase):
def generateTestData(self,
prefix,
n,
m,
compression_type=tf_record.TFRecordCompressionType.NONE):
options = tf_record.TFRecordOptions(compression_type)
for i in range(n):
f = os.path.join(self.get_temp_dir(), prefix + "." + str(i))
w = tf_record.TFRecordWriter(f, options=options)
for j in range(m):
w.write("{0:0{width}}".format(i * m + j, width=10).encode("utf-8"))
w.close()
def testRecordInputSimple(self):
with self.cached_session() as sess:
self.generateTestData("basic", 1, 1)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input").get_yield_op()
self.assertEqual(self.evaluate(yield_op), b"0000000000")
def testRecordInputSimpleGzip(self):
with self.cached_session() as sess:
self.generateTestData(
"basic",
1,
1,
compression_type=tf_record.TFRecordCompressionType.GZIP)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=tf_record.TFRecordCompressionType.GZIP).get_yield_op(
)
self.assertEqual(self.evaluate(yield_op), b"0000000000")
def testRecordInputSimpleZlib(self):
with self.cached_session() as sess:
self.generateTestData(
"basic",
1,
1,
compression_type=tf_record.TFRecordCompressionType.ZLIB)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=tf_record.TFRecordCompressionType.ZLIB).get_yield_op(
)
self.assertEqual(self.evaluate(yield_op), b"0000000000")
@test_util.run_deprecated_v1
def testRecordInputEpochs(self):
files = 100
records_per_file = 100
batches = 2
with self.cached_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
# cycle over 3 epochs and make sure we never duplicate
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = self.evaluate(yield_op)
self.assertTrue(len(op_list) is batches)
for r in op_list:
self.assertTrue(r[0] not in epoch_set)
epoch_set.add(r[0])
@test_util.run_deprecated_v1
def testDoesNotDeadlock(self):
# Iterate multiple times to cause deadlock if there is a chance it can occur
for _ in range(30):
with self.cached_session() as sess:
self.generateTestData("basic", 1, 1)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=100,
batch_size=1,
name="record_input")
yield_op = records.get_yield_op()
for _ in range(50):
self.evaluate(yield_op)
@test_util.run_deprecated_v1
def testEmptyGlob(self):
with self.cached_session() as sess:
record_input = data_flow_ops.RecordInput(file_pattern="foo")
yield_op = record_input.get_yield_op()
self.evaluate(variables.global_variables_initializer())
with self.assertRaises(errors_impl.NotFoundError):
self.evaluate(yield_op)
@test_util.run_deprecated_v1
def testBufferTooSmall(self):
files = 10
records_per_file = 10
batches = 2
with self.cached_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
# cycle over 3 epochs and make sure we never duplicate
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = self.evaluate(yield_op)
self.assertTrue(len(op_list) is batches)
for r in op_list:
self.assertTrue(r[0] not in epoch_set)
epoch_set.add(r[0])
def testInvalidParams(self):
with self.session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(
data_flow_ops.gen_data_flow_ops.record_input(
file_pattern="nan",
file_buffer_size=-90,
file_parallelism=-438,
file_shuffle_shift_ratio=-784,
batch_size=-933,
file_random_seed=-678,
compression_type="nan",
)
)
if __name__ == "__main__":
test.main()
| RecordInputOpTest |
python | huggingface__transformers | tests/pipelines/test_pipelines_question_answering.py | {
"start": 1364,
"end": 22551
} | class ____(unittest.TestCase):
model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING
if not hasattr(model_mapping, "is_dummy"):
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
if isinstance(model.config, LxmertConfig):
# This is an bimodal model, we need to find a more consistent way
# to switch on those models.
return None, None
question_answerer = QuestionAnsweringPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
)
examples = [
{"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."},
{"question": "In what field is HuggingFace ?", "context": "HuggingFace is an AI startup."},
]
return question_answerer, examples
def run_pipeline_test(self, question_answerer, _):
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
handle_impossible_answer=True,
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
outputs = question_answerer(
question=["In what field is HuggingFace working ?", "In what field is HuggingFace working ?"],
context="HuggingFace was founded in Paris.",
)
self.assertEqual(
outputs,
[
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
],
)
outputs = question_answerer(
question=["What field is HuggingFace working ?", "In what field is HuggingFace ?"],
context=[
"HuggingFace is a startup based in New-York",
"HuggingFace is a startup founded in Paris",
],
)
self.assertEqual(
outputs,
[
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
],
)
with self.assertRaises(ValueError):
question_answerer(question="", context="HuggingFace was founded in Paris.")
with self.assertRaises(ValueError):
question_answerer(question=None, context="HuggingFace was founded in Paris.")
with self.assertRaises(ValueError):
question_answerer(question="In what field is HuggingFace working ?", context="")
with self.assertRaises(ValueError):
question_answerer(question="In what field is HuggingFace working ?", context=None)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", top_k=20
)
self.assertEqual(
outputs,
[
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}
for i in range(len(outputs))
],
)
for single_output in outputs:
compare_pipeline_output_to_hub_spec(single_output, QuestionAnsweringOutputElement)
# Very long context require multiple features
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
# Using batch is OK
if question_answerer.tokenizer.pad_token_id is None:
question_answerer.tokenizer.pad_token_id = question_answerer.model.config.eos_token_id
new_outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20, batch_size=2
)
self.assertEqual(new_outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
self.assertEqual(nested_simplify(outputs), nested_simplify(new_outputs))
@require_torch
def test_small_model_pt(self):
question_answerer = pipeline(
"question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad"
)
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
)
self.assertEqual(nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"})
@require_torch
def test_small_model_pt_fp16(self):
question_answerer = pipeline(
"question-answering",
model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
dtype=torch.float16,
)
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
)
self.assertEqual(nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"})
@require_torch
def test_small_model_pt_bf16(self):
question_answerer = pipeline(
"question-answering",
model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
dtype=torch.bfloat16,
)
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
)
self.assertEqual(nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"})
@require_torch
def test_small_model_pt_iterator(self):
# https://github.com/huggingface/transformers/issues/18510
pipe = pipeline(model="sshleifer/tiny-distilbert-base-cased-distilled-squad", batch_size=16)
def data():
for i in range(10):
yield {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."}
for outputs in pipe(data()):
self.assertEqual(
nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"}
)
@require_torch
def test_small_model_pt_softmax_trick(self):
question_answerer = pipeline(
"question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad"
)
real_postprocess = question_answerer.postprocess
# Tweak start and stop to make sure we encounter the softmax logits
# bug.
def ensure_large_logits_postprocess(
model_outputs,
top_k=1,
handle_impossible_answer=False,
max_answer_len=15,
):
for output in model_outputs:
output["start"] = output["start"] * 1e6
output["end"] = output["end"] * 1e6
return real_postprocess(
model_outputs,
top_k=top_k,
handle_impossible_answer=handle_impossible_answer,
max_answer_len=max_answer_len,
)
question_answerer.postprocess = ensure_large_logits_postprocess
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
)
self.assertEqual(nested_simplify(outputs), {"score": 0.111, "start": 0, "end": 11, "answer": "HuggingFace"})
@slow
@require_torch
def test_small_model_japanese(self):
question_answerer = pipeline(
"question-answering",
model="KoichiYasuoka/deberta-base-japanese-aozora-ud-head",
)
output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている") # fmt: skip
# Wrong answer, the whole text is identified as one "word" since the tokenizer does not include
# a pretokenizer
self.assertEqual(nested_simplify(output),{"score": 1.0, "start": 0, "end": 30, "answer": "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"}) # fmt: skip
# Disable word alignment
output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている", align_to_words=False) # fmt: skip
self.assertEqual(
nested_simplify(output),
{"score": 1.0, "start": 15, "end": 18, "answer": "教科書"},
)
@slow
@require_torch
def test_small_model_long_context_cls_slow(self):
question_answerer = pipeline(
"question-answering",
model="deepset/roberta-base-squad2",
handle_impossible_answer=True,
max_seq_length=512,
)
outputs = question_answerer(
question="What country is Paris the capital of?",
context="""London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games. London is the capital and largest city of England and the United Kingdom. 
It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games.""",
)
self.assertEqual(nested_simplify(outputs), {"score": 0.988, "start": 0, "end": 0, "answer": ""})
@require_torch
def test_duplicate_handling(self):
question_answerer = pipeline("question-answering", model="deepset/tinyroberta-squad2")
outputs = question_answerer(
question="Who is the chancellor of Germany?",
context="Angela Merkel was the chancellor of Germany.",
top_k=10,
)
answers = [output["answer"] for output in outputs]
self.assertEqual(len(answers), len(set(answers)), "There are duplicate answers in the outputs.")
@slow
@require_torch
def test_large_model_pt(self):
question_answerer = pipeline(
"question-answering",
)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"})
@slow
@require_torch
def test_large_model_issue(self):
qa_pipeline = pipeline(
"question-answering",
model="mrm8488/bert-multi-cased-finetuned-xquadv1",
)
outputs = qa_pipeline(
{
"context": (
"Yes Bank founder Rana Kapoor has approached the Bombay High Court, challenging a special court's"
" order from August this year that had remanded him in police custody for a week in a multi-crore"
" loan fraud case. Kapoor, who is currently lodged in Taloja Jail, is an accused in the loan fraud"
" case and some related matters being probed by the CBI and Enforcement Directorate. A single"
" bench presided over by Justice S K Shinde on Tuesday posted the plea for further hearing on"
" October 14. In his plea filed through advocate Vijay Agarwal, Kapoor claimed that the special"
" court's order permitting the CBI's request for police custody on August 14 was illegal and in"
" breach of the due process of law. Therefore, his police custody and subsequent judicial custody"
" in the case were all illegal. Kapoor has urged the High Court to quash and set aside the special"
" court's order dated August 14. As per his plea, in August this year, the CBI had moved two"
" applications before the special court, one seeking permission to arrest Kapoor, who was already"
" in judicial custody at the time in another case, and the other, seeking his police custody."
" While the special court refused to grant permission to the CBI to arrest Kapoor, it granted the"
" central agency's plea for his custody. Kapoor, however, said in his plea that before filing an"
" application for his arrest, the CBI had not followed the process of issuing him a notice under"
" Section 41 of the CrPC for appearance before it. He further said that the CBI had not taken"
" prior sanction as mandated under section 17 A of the Prevention of Corruption Act for"
" prosecuting him. The special court, however, had said in its order at the time that as Kapoor"
" was already in judicial custody in another case and was not a free man the procedure mandated"
" under Section 41 of the CrPC need not have been adhered to as far as issuing a prior notice of"
" appearance was concerned. ADVERTISING It had also said that case records showed that the"
" investigating officer had taken an approval from a managing director of Yes Bank before"
" beginning the proceedings against Kapoor and such a permission was a valid sanction. However,"
" Kapoor in his plea said that the above order was bad in law and sought that it be quashed and"
" set aside. The law mandated that if initial action was not in consonance with legal procedures,"
" then all subsequent actions must be held as illegal, he said, urging the High Court to declare"
" the CBI remand and custody and all subsequent proceedings including the further custody as"
" illegal and void ab-initio. In a separate plea before the High Court, Kapoor's daughter Rakhee"
" Kapoor-Tandon has sought exemption from in-person appearance before a special PMLA court. Rakhee"
" has stated that she is a resident of the United Kingdom and is unable to travel to India owing"
" to restrictions imposed due to the COVID-19 pandemic. According to the CBI, in the present case,"
" Kapoor had obtained a gratification or pecuniary advantage of ₹ 307 crore, and thereby caused"
" Yes Bank a loss of ₹ 1,800 crore by extending credit facilities to Avantha Group, when it was"
" not eligible for the same"
),
"question": "Is this person involved in fraud?",
}
)
self.assertEqual(
nested_simplify(outputs),
{"answer": "an accused in the loan fraud case", "end": 294, "score": 0.001, "start": 261},
)
@slow
@require_torch
def test_large_model_course(self):
question_answerer = pipeline("question-answering")
long_context = """
🤗 Transformers: State of the Art NLP
🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction,
question answering, summarization, translation, text generation and more in over 100 languages.
Its aim is to make cutting-edge NLP easier to use for everyone.
🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and
then share them with the community on our model hub. At the same time, each python module defining an architecture is fully standalone and
can be modified to enable quick research experiments.
Why should I use transformers?
1. Easy-to-use state-of-the-art models:
- High performance on NLU and NLG tasks.
- Low barrier to entry for educators and practitioners.
- Few user-facing abstractions with just three classes to learn.
- A unified API for using all our pretrained models.
- Lower compute costs, smaller carbon footprint:
2. Researchers can share trained models instead of always retraining.
- Practitioners can reduce compute time and production costs.
- Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages.
3. Choose the right framework for every part of a model's lifetime:
- Train state-of-the-art models in 3 lines of code.
- Move a single model between TF2.0/PyTorch frameworks at will.
- Seamlessly pick the right framework for training, evaluation and production.
4. Easily customize a model or an example to your needs:
- We provide examples for each architecture to reproduce the results published by its original authors.
- Model internals are exposed as consistently as possible.
- Model files can be used independently of the library for quick experiments.
🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration
between them. It's straightforward to train your models with one before loading them for inference with the other.
"""
question = "Which deep learning libraries back 🤗 Transformers?"
outputs = question_answerer(question=question, context=long_context)
self.assertEqual(
nested_simplify(outputs),
{"answer": "Jax, PyTorch and TensorFlow", "end": 1919, "score": 0.972, "start": 1892},
)
@require_torch
| QAPipelineTests |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 333059,
"end": 334380
} | class ____(FallbackKernel):
def __init__(
self,
layout: OutputSpec,
kernel: _OpOverloads,
tensor_args: Sequence[IRNode],
nontensor_args: Sequence[Any],
unflatten_args: Callable[..., Any],
kwargs: Optional[dict[str, Any]] = None,
*,
unbacked_bindings: Optional[dict[sympy.Symbol, pytree.KeyPath]] = None,
) -> None:
super().__init__(
layout,
kernel,
tensor_args,
nontensor_args,
unflatten_args,
kwargs=None,
unbacked_bindings=unbacked_bindings,
)
from torch._higher_order_ops.effects import _get_effect
effect_type = _get_effect(kernel)
assert effect_type is not None
self.effect_type = effect_type
self.prev_effect_buffer = V.graph.effectful_ops.get(effect_type, None)
V.graph.effectful_ops[effect_type] = self
def get_read_writes(self) -> dependencies.ReadWrites:
read_writes = super().get_read_writes()
if self.prev_effect_buffer is not None:
read_writes.reads.add(
dependencies.StarDep(self.prev_effect_buffer.get_name())
)
return read_writes
def has_side_effects(self) -> bool:
return True
| EffectfulKernel |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 3465,
"end": 3673
} | class ____(Web3Exception):
"""
Raised when an ABI does not match with supplied parameters, or when an
attempt is made to access a function/event that does not exist in the ABI.
"""
| MismatchedABI |
python | django__django | tests/migrations/routers.py | {
"start": 140,
"end": 454
} | class ____:
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""
The Tribble model should be the only one to appear in the 'other' db.
"""
if model_name == "tribble":
return db == "other"
elif db != "default":
return False
| TestRouter |
python | lepture__authlib | authlib/integrations/django_oauth2/requests.py | {
"start": 367,
"end": 957
} | class ____(OAuth2Payload):
def __init__(self, request: HttpRequest):
self._request = request
@cached_property
def data(self):
data = {}
data.update(self._request.GET.dict())
data.update(self._request.POST.dict())
return data
@cached_property
def datalist(self):
values = defaultdict(list)
for k in self._request.GET:
values[k].extend(self._request.GET.getlist(k))
for k in self._request.POST:
values[k].extend(self._request.POST.getlist(k))
return values
| DjangoOAuth2Payload |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 24528,
"end": 25206
} | class ____(IterableDataset):
def __init__(self, sizes_for_all_workers):
self.sizes_for_all_workers = sizes_for_all_workers
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
assert worker_info is not None
return iter(range(self.sizes_for_all_workers[worker_info.id]))
def __len__(self):
return sum(self.sizes_for_all_workers)
# Inspired by https://stackoverflow.com/a/26703365
# If all workers will call `sync_once`, they will be blocked until all workers
# reach the call (i.e., acting like a barrier).
# This can be used to ensure that each worker at least processes one data.
| WorkerSpecificIterableDataset |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 19972,
"end": 20742
} | class ____:
"""
GrantInfo groups:
- GrantItem: <object:Collection>, <object_name:foo>, <role_name:x>,
<grantor_name:root>, <privilege:Load>
- GrantItem: <object:Global>, <object_name:*>, <role_name:x>,
<grantor_name:root>, <privilege:CreateCollection>
"""
def __init__(self, entities: List[milvus_types.RoleEntity]) -> None:
groups = []
for entity in entities:
if isinstance(entity, milvus_types.GrantEntity):
groups.append(GrantItem(entity))
self._groups = groups
def __repr__(self) -> str:
s = "GrantInfo groups:"
for g in self.groups:
s += f"\n- {g}"
return s
@property
def groups(self):
return self._groups
| GrantInfo |
python | great-expectations__great_expectations | tests/datasource/fluent/test_pandas_filesystem_datasource.py | {
"start": 2831,
"end": 22498
} | class ____:
@pytest.mark.parametrize(
"method_name",
[
param("read_clipboard", marks=pytest.mark.xfail(reason="not path based")),
param("read_csv"),
param("read_excel"),
param("read_feather"),
param("read_fwf"),
param("read_gbq", marks=pytest.mark.xfail(reason="not path based")),
param("read_hdf"),
param("read_html"),
param("read_json"),
param("read_orc"),
param("read_parquet"),
param("read_pickle"),
param("read_sas"),
param("read_spss"),
param(
"read_sql",
marks=pytest.mark.xfail(reason="name conflict & not path based"),
),
param(
"read_sql_query",
marks=pytest.mark.xfail(
reason="type name logic expects 'sqltable' & not path based"
),
),
param(
"read_sql_table",
marks=pytest.mark.xfail(
reason="type name logic expects 'sqltable' & not path based"
),
),
param("read_stata"),
param(
"read_table",
marks=pytest.mark.xfail(reason="name conflict & not path based"),
),
param(
"read_xml",
marks=pytest.mark.skipif(
PANDAS_VERSION < 1.3,
reason=f"read_xml does not exist on {PANDAS_VERSION} ",
),
),
],
)
def test_data_asset_defined_for_io_read_method(self, method_name: str):
_, type_name = method_name.split("read_")
assert type_name
asset_class_names: set[str] = {
t.__name__.lower().split("asset")[0] for t in PandasFilesystemDatasource.asset_types
}
print(asset_class_names)
assert type_name in asset_class_names
@pytest.mark.parametrize("asset_class", PandasFilesystemDatasource.asset_types)
def test_add_asset_method_exists_and_is_functional(self, asset_class: Type[PathDataAsset]):
type_name: str = _get_field_details(asset_class, "type").default_value
method_name: str = f"add_{type_name}_asset"
print(f"{method_name}() -> {asset_class.__name__}")
assert method_name in PandasFilesystemDatasource.__dict__
ds = PandasFilesystemDatasource(
name="ds_for_testing_add_asset_methods",
base_directory=pathlib.Path.cwd(),
)
method = getattr(ds, method_name)
with pytest.raises(pydantic.ValidationError) as exc_info:
method(
f"{asset_class.__name__}_add_asset_test",
batching_regex="great_expectations",
_invalid_key="foobar",
)
# importantly check that the method creates (or attempts to create) the intended asset
assert exc_info.value.model == asset_class
@pytest.mark.parametrize("asset_class", PandasFilesystemDatasource.asset_types)
def test_add_asset_method_signature(self, asset_class: Type[PathDataAsset]):
type_name: str = _get_field_details(asset_class, "type").default_value
method_name: str = f"add_{type_name}_asset"
ds = PandasFilesystemDatasource(
name="ds_for_testing_add_asset_methods",
base_directory=pathlib.Path.cwd(),
)
method = getattr(ds, method_name)
add_asset_method_sig: inspect.Signature = inspect.signature(method)
print(f"\t{method_name}()\n{add_asset_method_sig}\n")
asset_class_init_sig: inspect.Signature = inspect.signature(asset_class)
print(f"\t{asset_class.__name__}\n{asset_class_init_sig}\n")
for i, param_name in enumerate(asset_class_init_sig.parameters):
print(f"{i} {param_name} ", end="")
if param_name == "type":
assert param_name not in add_asset_method_sig.parameters, (
"type should not be part of the `add_<TYPE>_asset` method"
)
print("⏩")
continue
assert param_name in add_asset_method_sig.parameters
print("✅")
@pytest.mark.parametrize(
["asset_model", "extra_kwargs"],
[
(CSVAsset, {"sep": "|", "names": ["col1", "col2", "col3"]}),
(JSONAsset, {"orient": "records", "convert_dates": True}),
],
)
def test_data_asset_defaults(
self,
asset_model: Type[PathDataAsset],
extra_kwargs: dict,
):
"""
Test that an asset dictionary can be dumped with only the original passed keys
present.
"""
kwargs: dict[str, Any] = {
"name": "test",
}
kwargs.update(extra_kwargs)
print(f"extra_kwargs\n{pf(extra_kwargs)}")
asset_instance = asset_model(**kwargs)
assert asset_instance.dict(exclude={"type"}) == kwargs
@pytest.mark.parametrize(
"extra_kwargs",
[
{"sep": "|", "decimal": ","},
{"usecols": [0, 1, 2], "names": ["foo", "bar"]},
{"dtype": {"col_1": "Int64"}},
],
)
def test_data_asset_reader_options_passthrough(
self,
empty_data_context: AbstractDataContext,
csv_path: pathlib.Path,
capture_reader_fn_params: tuple[list[list], list[dict]],
extra_kwargs: dict,
):
batch_request = (
empty_data_context.data_sources.add_pandas_filesystem( # .build_batch_request
"my_pandas",
base_directory=csv_path,
)
.add_csv_asset(
"my_csv",
**extra_kwargs,
)
.build_batch_request(
{"year": "2018"},
partitioner=FileNamePartitionerMonthly(
regex=re.compile(
r"yellow_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv"
)
),
)
)
with pytest.raises(SpyInterrupt):
empty_data_context.get_validator(batch_request=batch_request)
captured_args, captured_kwargs = capture_reader_fn_params
print(f"positional args:\n{pf(captured_args[-1])}\n")
print(f"keyword args:\n{pf(captured_kwargs[-1])}")
assert captured_kwargs[-1] == extra_kwargs
@pytest.mark.unit
def test_construct_pandas_filesystem_datasource(
pandas_filesystem_datasource: PandasFilesystemDatasource,
):
assert pandas_filesystem_datasource.name == "pandas_filesystem_datasource"
@pytest.mark.unit
def test_add_csv_asset_to_datasource(
pandas_filesystem_datasource: PandasFilesystemDatasource,
):
asset = pandas_filesystem_datasource.add_csv_asset(
name="csv_asset",
)
assert asset.name == "csv_asset"
@pytest.mark.unit
def test_add_csv_asset_with_batching_regex_to_datasource(
pandas_filesystem_datasource: PandasFilesystemDatasource,
):
asset = pandas_filesystem_datasource.add_csv_asset(
name="csv_asset",
)
assert asset.name == "csv_asset"
@pytest.mark.unit
def test_invalid_connect_options(
pandas_filesystem_datasource: PandasFilesystemDatasource,
):
with pytest.raises(pydantic.ValidationError) as exc_info:
pandas_filesystem_datasource.add_csv_asset( # type: ignore[call-arg] # FIXME CoP
name="csv_asset",
glob_foobar="invalid",
)
error_dicts = exc_info.value.errors()
print(pf(error_dicts))
assert error_dicts == [
{
"loc": ("glob_foobar",),
"msg": "extra fields not permitted",
"type": "value_error.extra",
}
]
@pytest.mark.unit
@pytest.mark.parametrize(
["glob_directive", "expected_error"],
[
({"invalid", "type"}, pydantic.ValidationError),
("not_a_dir/*.csv", TestConnectionError),
],
)
def test_invalid_connect_options_value(
pandas_filesystem_datasource: PandasFilesystemDatasource,
glob_directive,
expected_error: Type[Exception],
):
with pytest.raises(expected_error) as exc_info:
pandas_filesystem_datasource.add_csv_asset(
name="csv_asset",
glob_directive=glob_directive,
)
print(f"Exception raised:\n\t{exc_info.value!r}")
if isinstance(exc_info.value, pydantic.ValidationError):
error_dicts = exc_info.value.errors()
print(pf(error_dicts))
assert error_dicts == [
{
"loc": ("glob_directive",),
"msg": "str type expected",
"type": "type_error.str",
}
]
@pytest.mark.unit
@pytest.mark.parametrize(
"connect_options",
[
param({"glob_directive": "**/*"}, id="glob **/*"),
param({"glob_directive": "**/*.csv"}, id="glob **/*.csv"),
param({}, id="default connect options"),
],
)
def test_asset_connect_options_in_repr(
pandas_filesystem_datasource: PandasFilesystemDatasource, connect_options: dict
):
asset = pandas_filesystem_datasource.add_csv_asset(
name="csv_asset",
**connect_options,
)
asset_repr = repr(asset)
print(asset_repr)
if connect_options:
assert "glob_directive" in asset_repr
assert connect_options["glob_directive"] in asset_repr
else:
# if no connect options are provided the defaults should be used and should not
# be part of any serialization. repr == asset.yaml()
assert "glob_directive" not in asset_repr
@pytest.mark.unit
def test_csv_asset_with_batching_regex_named_parameters(
pandas_filesystem_datasource: PandasFilesystemDatasource,
):
asset = pandas_filesystem_datasource.add_csv_asset(
name="csv_asset",
)
batching_regex = r"yellow_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv"
batch_def = asset.add_batch_definition_monthly(name="batch def", regex=batching_regex)
options = asset.get_batch_parameters_keys(partitioner=batch_def.partitioner)
assert options == ("path", "year", "month")
@pytest.mark.unit
def test_csv_asset_with_non_string_batching_regex_named_parameters(
pandas_filesystem_datasource: PandasFilesystemDatasource,
):
asset = pandas_filesystem_datasource.add_csv_asset(
name="csv_asset",
)
with pytest.raises(ge_exceptions.InvalidBatchRequestError):
# year is an int which will raise an error
asset.build_batch_request({"year": 2018, "month": "04"})
@pytest.mark.unit
def test_get_batch_list_from_fully_specified_batch_request(
pandas_filesystem_datasource: PandasFilesystemDatasource,
):
asset = pandas_filesystem_datasource.add_csv_asset(
name="csv_asset",
)
regex = r"yellow_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv"
batch_def = asset.add_batch_definition_monthly(name="batch def", regex=regex)
batch_parameters = {"year": "2018", "month": "04"}
batch = batch_def.get_batch(batch_parameters=batch_parameters)
assert batch.batch_request.datasource_name == pandas_filesystem_datasource.name
assert batch.batch_request.data_asset_name == asset.name
path = "yellow_tripdata_sample_2018-04.csv"
assert batch.batch_request.options == {"path": path, "year": "2018", "month": "04"}
assert batch.metadata == {"path": path, "year": "2018", "month": "04"}
assert batch.id == "pandas_filesystem_datasource-csv_asset-year_2018-month_04"
@pytest.mark.unit
@pytest.mark.parametrize(
"year,month,path,batch_count",
[
("2018", "04", "yellow_tripdata_sample_2018-04.csv", 1),
("2018", None, None, 12),
(None, "04", None, 3),
(None, "03", "yellow_tripdata_sample_2018-04.csv", 0),
],
)
def test_get_batch_identifiers_list_count(
year: Optional[str],
month: Optional[str],
path: Optional[str],
batch_count: int,
pandas_filesystem_datasource: PandasFilesystemDatasource,
):
asset = pandas_filesystem_datasource.add_csv_asset(
name="csv_asset",
)
request = asset.build_batch_request(
{"year": year, "month": month, "path": path},
partitioner=FileNamePartitionerMonthly(
regex=re.compile(r"yellow_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv")
),
)
batch_identifier_list = asset.get_batch_identifiers_list(request)
assert len(batch_identifier_list) == batch_count
@pytest.mark.unit
def test_get_batch_identifiers_list_from_partially_specified_batch_request(
pandas_filesystem_datasource: PandasFilesystemDatasource,
):
# Verify test directory has files that don't match what we will query for
file_name: PathStr
all_files: list[str] = [
file_name.stem
for file_name in list(pathlib.Path(pandas_filesystem_datasource.base_directory).iterdir())
]
# assert there are files that are not csv files
assert any(not file_name.endswith("csv") for file_name in all_files)
# assert there are 12 files from 2018
files_for_2018 = [file_name for file_name in all_files if file_name.find("2018") >= 0]
assert len(files_for_2018) == 12
asset = pandas_filesystem_datasource.add_csv_asset(
name="csv_asset",
)
request = asset.build_batch_request(
{"year": "2018"},
partitioner=FileNamePartitionerMonthly(
regex=re.compile(r"yellow_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv")
),
)
batches = asset.get_batch_identifiers_list(request)
assert (len(batches)) == 12
batch_filenames = [pathlib.Path(batch["path"]).stem for batch in batches]
assert set(files_for_2018) == set(batch_filenames)
@dataclass(frozen=True)
class YearMonth:
year: str
month: str
expected_year_month = {YearMonth(year="2018", month=format(m, "02d")) for m in range(1, 13)}
batch_year_month = {YearMonth(year=batch["year"], month=batch["month"]) for batch in batches}
assert expected_year_month == batch_year_month
@pytest.mark.unit
@pytest.mark.parametrize(
"batch_slice, expected_batch_count",
[
("[-3:]", 3),
("[5:9]", 4),
("[:10:2]", 5),
(slice(-3, None), 3),
(slice(5, 9), 4),
(slice(0, 10, 2), 5),
("-5", 1),
("-1", 1),
(11, 1),
(0, 1),
([3], 1),
(None, 12),
("", 12),
],
)
def test_pandas_slice_batch_count(
pandas_filesystem_datasource: PandasFilesystemDatasource,
batch_slice: BatchSlice,
expected_batch_count: int,
) -> None:
asset = pandas_filesystem_datasource.add_csv_asset(
name="csv_asset",
)
batch_request = asset.build_batch_request(
options={"year": "2019"},
batch_slice=batch_slice,
partitioner=FileNamePartitionerMonthly(
regex=re.compile(r"yellow_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv")
),
)
batch_identifiers_list = asset.get_batch_identifiers_list(batch_request=batch_request)
assert len(batch_identifiers_list) == expected_batch_count
def bad_batching_regex_config(
csv_path: pathlib.Path,
) -> tuple[re.Pattern, TestConnectionError]:
batching_regex = re.compile(r"green_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv")
test_connection_error = TestConnectionError(
"No file at base_directory path "
f'"{csv_path.resolve()}" matched regular expressions pattern '
f'"{batching_regex.pattern}" and/or glob_directive "**/*" for '
'DataAsset "csv_asset".'
)
return batching_regex, test_connection_error
@pytest.fixture(params=[bad_batching_regex_config])
def datasource_test_connection_error_messages(
csv_path: pathlib.Path,
pandas_filesystem_datasource: PandasFilesystemDatasource,
request,
) -> tuple[PandasFilesystemDatasource, TestConnectionError]:
_, test_connection_error = request.param(csv_path=csv_path)
csv_asset = CSVAsset( # type: ignore[call-arg] # FIXME CoP
name="csv_asset",
)
csv_asset._datasource = pandas_filesystem_datasource
pandas_filesystem_datasource.assets = [
csv_asset,
]
csv_asset._data_connector = FilesystemDataConnector(
datasource_name=pandas_filesystem_datasource.name,
data_asset_name=csv_asset.name,
base_directory=pandas_filesystem_datasource.base_directory,
data_context_root_directory=pandas_filesystem_datasource.data_context_root_directory,
)
csv_asset._test_connection_error_message = test_connection_error
return pandas_filesystem_datasource, test_connection_error
@pytest.mark.timeout(
5, # deepcopy operation can be slow. Try to eliminate it in the future.
)
@pytest.mark.unit
def test_csv_asset_batch_metadata(
pandas_filesystem_datasource: PandasFilesystemDatasource,
):
my_config_variables = {"pipeline_filename": __file__}
pandas_filesystem_datasource._data_context.config_variables.update( # type: ignore[union-attr] # `_data_context`
my_config_variables
)
asset_specified_metadata = {
"pipeline_name": "my_pipeline",
"no_curly_pipeline_filename": "$pipeline_filename",
"curly_pipeline_filename": "${pipeline_filename}",
}
asset = pandas_filesystem_datasource.add_csv_asset(
name="csv_asset",
batch_metadata=asset_specified_metadata,
)
assert asset.batch_metadata == asset_specified_metadata
batch_request = asset.build_batch_request(
partitioner=FileNamePartitionerMonthly(
regex=re.compile(r"yellow_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv")
)
)
batch = pandas_filesystem_datasource.get_batch(batch_request)
substituted_batch_metadata: BatchMetadata = copy.deepcopy(asset_specified_metadata)
substituted_batch_metadata.update(
{
"no_curly_pipeline_filename": __file__,
"curly_pipeline_filename": __file__,
}
)
actual_metadata = copy.deepcopy(batch.metadata)
actual_metadata.pop("path")
actual_metadata.pop("year")
actual_metadata.pop("month")
assert len(actual_metadata)
assert actual_metadata == substituted_batch_metadata
@pytest.mark.parametrize(
("sort_ascending", "expected_metadata"),
[
(True, {"year": "2020", "month": "12", "path": "yellow_tripdata_sample_2020-12.csv"}),
(False, {"year": "2018", "month": "01", "path": "yellow_tripdata_sample_2018-01.csv"}),
],
)
@pytest.mark.unit
def test_get_batch_respects_order_ascending(
pandas_filesystem_datasource: PandasFilesystemDatasource,
sort_ascending: bool,
expected_metadata: dict,
) -> None:
asset = pandas_filesystem_datasource.add_csv_asset(name="csv_asset")
regex = r"yellow_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv"
batch_def = asset.add_batch_definition_monthly(
name="batch def", regex=regex, sort_ascending=sort_ascending
)
batch = batch_def.get_batch()
assert batch.metadata == expected_metadata
@pytest.mark.unit
def test_raises_if_no_matching_batches(
pandas_filesystem_datasource: PandasFilesystemDatasource,
) -> None:
asset = pandas_filesystem_datasource.add_csv_asset(name="csv_asset")
regex = r"yellow_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv"
batch_def = asset.add_batch_definition_monthly(name="batch def", regex=regex)
with pytest.raises(NoAvailableBatchesError):
batch_def.get_batch(batch_parameters={"year": "1995", "month": "01"})
| TestDynamicPandasAssets |
python | spack__spack | lib/spack/spack/verify_libraries.py | {
"start": 2578,
"end": 7328
} | class ____(BaseDirectoryVisitor):
def __init__(self, allow_unresolved_patterns: List[str]) -> None:
self.problems: Dict[str, Problem] = {}
self._allow_unresolved_regex = re.compile(
"|".join(fnmatch.translate(x) for x in allow_unresolved_patterns)
)
def allow_unresolved(self, needed: bytes) -> bool:
try:
name = needed.decode("utf-8")
except UnicodeDecodeError:
return False
return bool(self._allow_unresolved_regex.match(name))
def visit_file(self, root: str, rel_path: str, depth: int) -> None:
# We work with byte strings for paths.
path = os.path.join(root, rel_path).encode("utf-8")
# For $ORIGIN interpolation: should not have trailing dir seperator.
origin = os.path.dirname(path)
# Retrieve the needed libs + rpaths.
try:
with open(path, "rb") as f:
parsed_elf = elf.parse_elf(f, interpreter=False, dynamic_section=True)
except (OSError, elf.ElfParsingError):
# Not dealing with an invalid ELF file.
return
# If there's no needed libs all is good
if not parsed_elf.has_needed:
return
# Get the needed libs and rpaths (notice: byte strings)
# Don't force an encoding cause paths are just a bag of bytes.
needed_libs = parsed_elf.dt_needed_strs
rpaths = parsed_elf.dt_rpath_str.split(b":") if parsed_elf.has_rpath else []
# We only interpolate $ORIGIN, not $LIB and $PLATFORM, they're not really
# supported in general. Also remove empty paths.
rpaths = [x.replace(b"$ORIGIN", origin) for x in rpaths if x]
# Do not allow relative rpaths (they are relative to the current working directory)
rpaths, relative_rpaths = stable_partition(rpaths, os.path.isabs)
# If there's a / in the needed lib, it's opened directly, otherwise it needs
# a search.
direct_libs, search_libs = stable_partition(needed_libs, lambda x: b"/" in x)
# Do not allow relative paths in direct libs (they are relative to the current working
# directory)
direct_libs, unresolved = stable_partition(direct_libs, os.path.isabs)
resolved: Dict[bytes, bytes] = {}
for lib in search_libs:
if self.allow_unresolved(lib):
continue
for rpath in rpaths:
candidate = os.path.join(rpath, lib)
if candidate_matches(parsed_elf, candidate):
resolved[lib] = candidate
break
else:
unresolved.append(lib)
# Check if directly opened libs are compatible
for lib in direct_libs:
if candidate_matches(parsed_elf, lib):
resolved[lib] = lib
else:
unresolved.append(lib)
if unresolved or relative_rpaths:
self.problems[rel_path] = Problem(resolved, unresolved, relative_rpaths)
def visit_symlinked_file(self, root: str, rel_path: str, depth: int) -> None:
pass
def before_visit_dir(self, root: str, rel_path: str, depth: int) -> bool:
# There can be binaries in .spack/test which shouldn't be checked.
if rel_path == ".spack":
return False
return True
def before_visit_symlinked_dir(self, root: str, rel_path: str, depth: int) -> bool:
return False
def write(self, output: IO[str], *, indent=0, brief: bool = False) -> None:
indent_str = " " * indent
for path, problem in self.problems.items():
output.write(indent_str)
output.write(path)
output.write("\n")
if not brief:
for needed, full_path in problem.resolved.items():
output.write(indent_str)
output.write(" ")
if needed == full_path:
output.write(_decode_or_raw(needed))
else:
output.write(f"{_decode_or_raw(needed)} => {_decode_or_raw(full_path)}")
output.write("\n")
for not_found in problem.unresolved:
output.write(indent_str)
output.write(f" {_decode_or_raw(not_found)} => not found\n")
for relative_rpath in problem.relative_rpaths:
output.write(indent_str)
output.write(f" {_decode_or_raw(relative_rpath)} => relative rpath\n")
def _decode_or_raw(byte_str: bytes) -> str:
try:
return byte_str.decode("utf-8")
except UnicodeDecodeError:
return f"{byte_str!r}"
| ResolveSharedElfLibDepsVisitor |
python | doocs__leetcode | solution/1300-1399/1345.Jump Game IV/Solution.py | {
"start": 0,
"end": 579
} | class ____:
def minJumps(self, arr: List[int]) -> int:
g = defaultdict(list)
for i, x in enumerate(arr):
g[x].append(i)
q = deque([0])
vis = {0}
ans = 0
while 1:
for _ in range(len(q)):
i = q.popleft()
if i == len(arr) - 1:
return ans
for j in (i + 1, i - 1, *g.pop(arr[i], [])):
if 0 <= j < len(arr) and j not in vis:
q.append(j)
vis.add(j)
ans += 1
| Solution |
python | getsentry__sentry | tests/sentry/issues/auto_source_code_config/test_process_event.py | {
"start": 24803,
"end": 25859
} | class ____(LanguageSpecificDeriveCodeMappings):
platform = "php"
frames = [
{"in_app": True, "filename": "/sentry/capybara.php"},
{"in_app": True, "filename": "/sentry/p/kanga.php"},
{"in_app": False, "filename": "/sentry/p/vendor/sentry/src/functions.php"},
]
def test_auto_source_code_config_basic_php(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["sentry/p/kanga.php"]},
frames=[self.frame("/sentry/p/kanga.php", True)],
platform=self.platform,
expected_new_code_mappings=[self.code_mapping("/", "")],
)
def test_auto_source_code_config_different_roots_php(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["src/sentry/p/kanga.php"]},
frames=[self.frame("/sentry/p/kanga.php", True)],
platform=self.platform,
expected_new_code_mappings=[self.code_mapping("/sentry/", "src/sentry/")],
)
| TestPhpDeriveCodeMappings |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/shape_output_test.py | {
"start": 2823,
"end": 3285
} | class ____(ShapeOutputTest):
"""Same as the previous test, but with a single input profile."""
def setUp(self):
super().setUp()
self.DisableNonTrtOptimizers()
def GetParams(self):
return self.BuildParamsWithMask(
self.GraphFn,
dtypes.float32, [[2, 2, 5, 3]], [[4]],
extra_inputs=[],
extra_outputs=[],
input_mask=[[False, True, True, True]],
output_mask=[[True]])
| ShapeOutputWithSingleInputProfile |
python | docker__docker-py | docker/errors.py | {
"start": 2618,
"end": 2655
} | class ____(APIError):
pass
| NotFound |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 115394,
"end": 120892
} | class ____:
def test_slice_scalar_assign(self):
A = self.spcreator((5, 5))
B = np.zeros((5, 5))
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
for C in [A, B]:
C[0:1,1] = 1
C[3:0,0] = 4
C[3:4,0] = 9
C[0,4:] = 1
C[3::-1,4:] = 9
assert_array_equal(A.toarray(), B)
def test_slice_assign_2(self):
n, m = (5, 10)
def _test_set(i, j):
msg = f"i={i!r}; j={j!r}"
A = self.spcreator((n, m))
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
A[i, j] = 1
B = np.zeros((n, m))
B[i, j] = 1
assert_array_almost_equal(A.toarray(), B, err_msg=msg)
# [i,1:2]
for i, j in [(2, slice(3)), (2, slice(None, 10, 4)), (2, slice(5, -2)),
(array(2), slice(5, -2))]:
_test_set(i, j)
def test_self_self_assignment(self):
# Tests whether a row of one sparse array can be assigned to another.
B = self.spcreator((4,3))
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
B[0,0] = 2
B[1,2] = 7
B[2,1] = 3
B[3,0] = 10
A = B / 10
B[0,:] = A[0,:]
assert_array_equal(A[0,:].toarray(), B[0,:].toarray())
A = B / 10
B[:,:] = A[:1,:1]
assert_array_equal(np.zeros((4,3)) + A[0,0], B.toarray())
A = B / 10
B[:-1,0] = A[None,0,:].T
assert_array_equal(A[0,:].toarray().T, B[:-1,0].toarray())
def test_slice_assignment(self):
B = self.spcreator((4,3))
expected = array([[10,0,0],
[0,0,6],
[0,14,0],
[0,0,0]])
block = [[1,0],[0,4]]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
B[0,0] = 5
B[1,2] = 3
B[2,1] = 7
B[:,:] = B+B
assert_array_equal(B.toarray(), expected)
B[:2,:2] = self.csc_container(array(block))
assert_array_equal(B.toarray()[:2, :2], block)
def test_sparsity_modifying_assignment(self):
B = self.spcreator((4,3))
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
B[0,0] = 5
B[1,2] = 3
B[2,1] = 7
B[3,0] = 10
B[:3] = self.csr_container(np.eye(3))
expected = array([[1,0,0],[0,1,0],[0,0,1],[10,0,0]])
assert_array_equal(B.toarray(), expected)
def test_set_slice(self):
A = self.spcreator((5,10))
B = array(zeros((5, 10), float))
s_ = np.s_
slices = [s_[:2], s_[1:2], s_[3:], s_[3::2],
s_[8:3:-1], s_[4::-2], s_[:5:-1],
0, 1, s_[:], s_[1:5], -1, -2, -5,
array(-1), np.int8(-3)]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
for j, a in enumerate(slices):
A[a] = j
B[a] = j
assert_array_equal(A.toarray(), B, repr(a))
for i, a in enumerate(slices):
for j, b in enumerate(slices):
A[a,b] = 10*i + 1000*(j+1)
B[a,b] = 10*i + 1000*(j+1)
assert_array_equal(A.toarray(), B, repr((a, b)))
A[0, 1:10:2] = range(1, 10, 2)
B[0, 1:10:2] = range(1, 10, 2)
assert_array_equal(A.toarray(), B)
A[1:5:2, 0] = np.arange(1, 5, 2)[:, None]
B[1:5:2, 0] = np.arange(1, 5, 2)[:]
assert_array_equal(A.toarray(), B)
# The next commands should raise exceptions
assert_raises(ValueError, A.__setitem__, (0, 0), list(range(100)))
assert_raises(ValueError, A.__setitem__, (0, 0), arange(100))
assert_raises(ValueError, A.__setitem__, (0, slice(None)),
list(range(100)))
assert_raises(ValueError, A.__setitem__, (slice(None), 1),
list(range(100)))
assert_raises(ValueError, A.__setitem__, (slice(None), 1), A.copy())
assert_raises(ValueError, A.__setitem__,
([[1, 2, 3], [0, 3, 4]], [1, 2, 3]), [1, 2, 3, 4])
assert_raises(ValueError, A.__setitem__,
([[1, 2, 3], [0, 3, 4], [4, 1, 3]],
[[1, 2, 4], [0, 1, 3]]), [2, 3, 4])
assert_raises(ValueError, A.__setitem__, (slice(4), 0),
[[1, 2], [3, 4]])
def test_assign_empty(self):
A = self.spcreator(np.ones((2, 3)))
B = self.spcreator((1, 2))
# Note: This is not like NumPy!! Incoming shape needs to be (2,) for NumPy
# we are more lenient to accommodate vectors in 2d format as input
A[1, :2] = B
assert_array_equal(A.toarray(), [[1, 1, 1], [0, 0, 1]])
def test_assign_1d_slice(self):
A = self.spcreator(np.ones((3, 3)))
x = np.zeros(3)
A[:, 0] = x
A[1, :] = x
assert_array_equal(A.toarray(), [[0, 1, 1], [0, 0, 0], [0, 1, 1]])
| _TestSlicingAssign |
python | huggingface__transformers | src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | {
"start": 14090,
"end": 14953
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LayoutLMv3SelfAttention(config)
self.output = LayoutLMv3SelfOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
self_outputs = self.self(
hidden_states,
attention_mask,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.layoutlmv2.modeling_layoutlmv2.LayoutLMv2Layer with LayoutLMv2->LayoutLMv3
| LayoutLMv3Attention |
python | huggingface__transformers | src/transformers/models/mra/modeling_mra.py | {
"start": 47726,
"end": 50438
} | class ____(MraPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.mra = MraModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mra(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| MraForTokenClassification |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_permissions.py | {
"start": 258,
"end": 366
} | class ____(APITestCase):
endpoint = "sentry-api-0-user-permissions"
@control_silo_test
| UserPermissionsTest |
python | huggingface__transformers | src/transformers/models/gemma3n/modular_gemma3n.py | {
"start": 34412,
"end": 35231
} | class ____(Gemma3RMSNorm):
def __init__(self, dim: int, eps: float = 1e-6, with_scale: bool = True):
super().__init__(dim, eps=eps)
del self.weight
self.with_scale = with_scale
if self.with_scale:
self.weight = nn.Parameter(torch.ones(dim))
else:
self.register_buffer("weight", torch.tensor(1.0), persistent=False)
def _norm(self, x):
return x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Llama does x.to(float16) * w whilst Gemma2 is (x * w).to(float16)
# See https://github.com/huggingface/transformers/pull/29402
output = self._norm(x.float()) * self.weight.float()
return output.type_as(x)
# ==== Audio Encoder ====
| Gemma3nRMSNorm |
python | bokeh__bokeh | src/bokeh/protocol/messages/ok.py | {
"start": 1481,
"end": 2492
} | class ____(Message[Empty]):
''' Define the ``OK`` message for acknowledging successful handling of a
previous message.
The ``content`` fragment of for this message is empty.
'''
msgtype = 'OK'
@classmethod
def create(cls, request_id: ID, **metadata: Any) -> ok:
''' Create an ``OK`` message
Args:
request_id (str) :
The message ID for the message the precipitated the OK.
Any additional keyword arguments will be put into the message
``metadata`` fragment as-is.
'''
header = cls.create_header(request_id=request_id)
return cls(header, metadata, Empty())
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| ok |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.