language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/930. Binary Subarrays With Sum/930-2.py | {
"start": 0,
"end": 517
} | class ____:
def numSubarraysWithSum(self, nums: list[int], goal: int) -> int:
def numSubarraysWithSumAtMost(goal: int) -> int:
res = 0
count = 0
l = 0
r = 0
while r < len(nums):
count += nums[r]
r += 1
while l < r and count > goal:
count -= nums[l]
l += 1
# nums[l..r), nums[l + 1..r), ..., nums[r - 1]
res += r - l
return res
return numSubarraysWithSumAtMost(goal) - numSubarraysWithSumAtMost(goal - 1)
| Solution |
python | pypa__warehouse | tests/unit/organizations/test_models.py | {
"start": 3480,
"end": 10803
} | class ____:
def test_customer_name(self, db_session):
organization = DBOrganizationFactory.create(
name="pypi", display_name="The Python Package Index"
)
assert (
organization.customer_name()
== "PyPI Organization - The Python Package Index (pypi)"
)
assert (
organization.customer_name("Test PyPI")
== "Test PyPI Organization - The Python Package Index (pypi)"
)
def test_acl(self, db_session):
organization = DBOrganizationFactory.create()
owner1 = DBOrganizationRoleFactory.create(organization=organization)
owner2 = DBOrganizationRoleFactory.create(organization=organization)
billing_mgr1 = DBOrganizationRoleFactory.create(
organization=organization, role_name=OrganizationRoleType.BillingManager
)
billing_mgr2 = DBOrganizationRoleFactory.create(
organization=organization, role_name=OrganizationRoleType.BillingManager
)
account_mgr1 = DBOrganizationRoleFactory.create(
organization=organization, role_name=OrganizationRoleType.Manager
)
account_mgr2 = DBOrganizationRoleFactory.create(
organization=organization, role_name=OrganizationRoleType.Manager
)
member1 = DBOrganizationRoleFactory.create(
organization=organization, role_name=OrganizationRoleType.Member
)
member2 = DBOrganizationRoleFactory.create(
organization=organization, role_name=OrganizationRoleType.Member
)
acls = [
item for location in lineage(organization) for item in location.__acl__()
]
assert acls == [
(
Allow,
"group:admins",
(
Permissions.AdminOrganizationsRead,
Permissions.AdminOrganizationsWrite,
Permissions.AdminOrganizationsNameWrite,
),
),
(Allow, "group:moderators", Permissions.AdminOrganizationsRead),
] + sorted(
[
(
Allow,
f"user:{owner1.user.id}",
[
Permissions.OrganizationsRead,
Permissions.OrganizationTeamsRead,
Permissions.OrganizationsManage,
Permissions.OrganizationTeamsManage,
Permissions.OrganizationsBillingManage,
Permissions.OrganizationProjectsAdd,
Permissions.OrganizationProjectsRemove,
],
),
(
Allow,
f"user:{owner2.user.id}",
[
Permissions.OrganizationsRead,
Permissions.OrganizationTeamsRead,
Permissions.OrganizationsManage,
Permissions.OrganizationTeamsManage,
Permissions.OrganizationsBillingManage,
Permissions.OrganizationProjectsAdd,
Permissions.OrganizationProjectsRemove,
],
),
],
key=lambda x: x[1],
) + sorted(
[
(
Allow,
f"user:{billing_mgr1.user.id}",
[
Permissions.OrganizationsRead,
Permissions.OrganizationTeamsRead,
Permissions.OrganizationsBillingManage,
],
),
(
Allow,
f"user:{billing_mgr2.user.id}",
[
Permissions.OrganizationsRead,
Permissions.OrganizationTeamsRead,
Permissions.OrganizationsBillingManage,
],
),
],
key=lambda x: x[1],
) + sorted(
[
(
Allow,
f"user:{account_mgr1.user.id}",
[
Permissions.OrganizationsRead,
Permissions.OrganizationTeamsRead,
Permissions.OrganizationTeamsManage,
Permissions.OrganizationProjectsAdd,
],
),
(
Allow,
f"user:{account_mgr2.user.id}",
[
Permissions.OrganizationsRead,
Permissions.OrganizationTeamsRead,
Permissions.OrganizationTeamsManage,
Permissions.OrganizationProjectsAdd,
],
),
],
key=lambda x: x[1],
) + sorted(
[
(
Allow,
f"user:{member1.user.id}",
[Permissions.OrganizationsRead, Permissions.OrganizationTeamsRead],
),
(
Allow,
f"user:{member2.user.id}",
[Permissions.OrganizationsRead, Permissions.OrganizationTeamsRead],
),
],
key=lambda x: x[1],
)
def test_record_event_with_geoip(self, db_request):
"""
Test to cover condition when record_event is called with geoip_info as
part of the inbound request.
Possibly could be removed once more comprehensive tests are in place,
but nothing explicitly covers `HasEvents.record_event`
"""
db_request.ip_address.geoip_info = {"country_name": "United States"}
organization = DBOrganizationFactory.create()
organization.record_event(
tag="",
request=db_request,
additional={},
)
event = organization.events[0]
assert event.additional == {
"organization_name": organization.name,
"geoip_info": {"country_name": "United States"},
}
assert event.location_info == "United States"
def test_location_info_without_geoip(self, db_request):
organization = DBOrganizationFactory.create()
organization.record_event(
tag="",
request=db_request,
additional={},
)
event = organization.events[0]
assert event.additional == {
"organization_name": organization.name,
}
assert event.location_info == db_request.ip_address
def test_location_info_with_partial(self, db_request):
db_request.ip_address.geoip_info = {"country_code3": "USA"}
organization = DBOrganizationFactory.create()
organization.record_event(
tag="",
request=db_request,
additional={},
)
event = organization.events[0]
assert event.additional == {
"organization_name": organization.name,
"geoip_info": {"country_code3": "USA"},
}
assert event.location_info == db_request.ip_address
| TestOrganization |
python | ApeWorX__ape | src/ape/api/explorers.py | {
"start": 275,
"end": 2207
} | class ____(BaseInterfaceModel):
"""
An API class representing a blockchain explorer for a particular network
in a particular ecosystem.
"""
name: str # Plugin name
network: NetworkAPI
@abstractmethod
def get_address_url(self, address: "AddressType") -> str:
"""
Get an address URL, such as for a transaction.
Args:
address (:class:`~ape.types.address.AddressType`): The address.
Returns:
str: The URL.
"""
@abstractmethod
def get_transaction_url(self, transaction_hash: str) -> str:
"""
Get the transaction URL for the given transaction.
Args:
transaction_hash (str): The transaction hash.
Returns:
str: The URL.
"""
@abstractmethod
def get_contract_type(self, address: "AddressType") -> Optional["ContractType"]:
"""
Get the contract type for a given address if it has been published to this explorer.
Args:
address (:class:`~ape.types.address.AddressType`): The contract address.
Returns:
Optional[``ContractType``]: If not published, returns ``None``.
"""
@abstractmethod
def publish_contract(self, address: "AddressType"):
"""
Publish a contract to the explorer.
Args:
address (:class:`~ape.types.address.AddressType`): The address of the deployed contract.
"""
@classmethod
def supports_chain(cls, chain_id: int) -> bool:
"""
Returns ``True`` when the given chain ID is claimed to be
supported by this explorer. Adhoc / custom networks rely on
this feature to have automatic-explorer support. Explorer
plugins should override this.
Args:
chain_id (int): The chain ID to check.
Returns:
bool
"""
return False
| ExplorerAPI |
python | walkccc__LeetCode | solutions/150. Evaluate Reverse Polish Notation/150.py | {
"start": 0,
"end": 435
} | class ____:
def evalRPN(self, tokens: list[str]) -> int:
stack = []
op = {
'+': lambda a, b: a + b,
'-': lambda a, b: a - b,
'*': lambda a, b: a * b,
'/': lambda a, b: int(a / b),
}
for token in tokens:
if token in op:
b = stack.pop()
a = stack.pop()
stack.append(op[token](a, b))
else:
stack.append(int(token))
return stack.pop()
| Solution |
python | walkccc__LeetCode | solutions/1977. Number of Ways to Separate Numbers/1977.py | {
"start": 0,
"end": 1532
} | class ____:
def numberOfCombinations(self, num: str) -> int:
if num[0] == '0':
return 0
MOD = 1_000_000_007
n = len(num)
# dp[i][k] := the number of possible lists of integers ending in num[i]
# with the length of the last number being 1..k
dp = [[0] * (n + 1) for _ in range(n)]
# lcs[i][j] := the number of the same digits in num[i..n) and num[j..n)
lcs = [[0] * (n + 1) for _ in range(n + 1)]
for i in range(n - 1, -1, -1):
for j in range(i + 1, n):
if num[i] == num[j]:
lcs[i][j] = lcs[i + 1][j + 1] + 1
for i in range(n):
for k in range(1, i + 2):
dp[i][k] += dp[i][k - 1]
dp[i][k] %= MOD
# The last number is num[s..i].
s = i - k + 1
if num[s] == '0':
# the number of possible lists of integers ending in num[i] with the
# length of the last number being k
continue
if s == 0: # the whole string
dp[i][k] += 1
continue
if s < k:
# The length k is not enough, so add the number of possible lists of
# integers in num[0..s - 1].
dp[i][k] += dp[s - 1][s]
continue
l = lcs[s - k][s]
if l >= k or num[s - k + l] <= num[s + l]:
# Have enough length k and num[s - k..s - 1] <= num[j..i].
dp[i][k] += dp[s - 1][k]
else:
# Have enough length k but num[s - k..s - 1] > num[j..i].
dp[i][k] += dp[s - 1][k - 1]
return dp[n - 1][n] % MOD
| Solution |
python | huggingface__transformers | src/transformers/models/owlvit/modeling_owlvit.py | {
"start": 6806,
"end": 9683
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to retrieve the
unnormalized bounding boxes.
text_embeds (`torch.FloatTensor` of shape `(batch_size, num_max_text_queries, output_dim`):
The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
image embeddings for each patch.
class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total
number of patches is (image_size / patch_size)**2.
text_model_output (tuple[`BaseModelOutputWithPooling`]):
The output of the [`OwlViTTextModel`].
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`OwlViTVisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
class_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPooling = None
def to_tuple(self) -> tuple[Any]:
return tuple(
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
for k in self.keys()
)
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`OwlViTForObjectDetection.image_guided_detection`].
"""
)
| OwlViTObjectDetectionOutput |
python | ansible__ansible | packaging/release.py | {
"start": 3036,
"end": 3636
} | class ____(Exception):
"""Results from a failed process."""
def __init__(self, message: str, cmd: tuple[str, ...], status: int, stdout: str | None, stderr: str | None) -> None:
if stdout and (stdout := stdout.strip()):
message += f"\n>>> Standard Output\n{stdout}"
if stderr and (stderr := stderr.strip()):
message += f"\n>>> Standard Error\n{stderr}"
super().__init__(message)
self.cmd = cmd
self.status = status
self.stdout = stdout
self.stderr = stderr
@dataclasses.dataclass(frozen=True)
| CalledProcessError |
python | google__pytype | pytype/abstract/function.py | {
"start": 23020,
"end": 33296
} | class ____:
"""Represents the parameters of a function call.
Attributes:
posargs: The positional arguments. A tuple of cfg.Variable.
namedargs: The keyword arguments. A dictionary, mapping strings to
cfg.Variable.
starargs: The *args parameter, or None.
starstarargs: The **kwargs parameter, or None.
"""
posargs: tuple[cfg.Variable, ...]
namedargs: dict[str, cfg.Variable] = attrs.field(
converter=_convert_namedargs, default=None
)
starargs: cfg.Variable | None = None
starstarargs: cfg.Variable | None = None
def has_namedargs(self) -> bool:
return bool(self.namedargs)
def has_non_namedargs(self) -> bool:
return bool(self.posargs or self.starargs or self.starstarargs)
def is_empty(self) -> bool:
return not (self.has_namedargs() or self.has_non_namedargs())
def starargs_as_tuple(
self, node: cfg.CFGNode, ctx: "context.Context"
) -> tuple[Any, ...]:
try:
args: Any | None = (
self.starargs
and abstract_utils.get_atomic_python_constant(self.starargs, tuple)
)
except abstract_utils.ConversionError:
args = None
if not args:
return args # pytype: disable=bad-return-type
return tuple(
var if var.bindings else ctx.convert.empty.to_variable(node)
for var in args
)
def starstarargs_as_dict(self):
"""Return **args as a python dict."""
# NOTE: We can't use get_atomic_python_constant here because starstarargs
# could have is_concrete=False.
if not self.starstarargs or len(self.starstarargs.data) != 1:
return None
(kwdict,) = self.starstarargs.data
if not isinstance(kwdict, _abstract.Dict):
return None
return kwdict.pyval
def _expand_typed_star(
self,
node: cfg.CFGNode,
star: cfg.Variable,
count: int,
ctx: "context.Context",
) -> list[cfg.Variable]:
"""Convert *xs: Sequence[T] -> [T, T, ...]."""
if not count:
return []
p = abstract_utils.merged_type_parameter(node, star, abstract_utils.T)
if not p.data:
p = ctx.new_unsolvable(node)
return [p.AssignToNewVariable(node) for _ in range(count)]
def _unpack_and_match_args(
self,
node: cfg.CFGNode,
ctx: "context.Context",
match_signature: Signature,
starargs_tuple: tuple[cfg.Variable, ...],
) -> tuple[tuple[cfg.Variable, ...], cfg.Variable | None]:
"""Match args against a signature with unpacking."""
posargs = self.posargs
namedargs = self.namedargs
# As we have the function signature we will attempt to adjust the
# starargs into the missing posargs.
pre = []
post = []
stars = collections.deque(starargs_tuple)
while stars and not abstract_utils.is_var_splat(stars[0]):
pre.append(stars.popleft())
while stars and not abstract_utils.is_var_splat(stars[-1]):
post.append(stars.pop())
post.reverse()
n_matched = len(posargs) + len(pre) + len(post)
required_posargs = 0
for p in match_signature.param_names:
if p in namedargs or p in match_signature.defaults:
break
required_posargs += 1
posarg_delta = required_posargs - n_matched
if stars and not post:
star = stars[-1]
if match_signature.varargs_name:
# If the invocation ends with `*args`, return it to match against *args
# in the function signature. For f(<k args>, *xs, ..., *ys), transform
# to f(<k args>, *ys) since ys is an indefinite tuple anyway and will
# match against all remaining posargs.
return posargs + tuple(pre), abstract_utils.unwrap_splat(star)
else:
# If we do not have a `*args` in match_signature, just expand the
# terminal splat to as many args as needed and then drop it.
mid = self._expand_typed_star(node, star, posarg_delta, ctx)
return posargs + tuple(pre + mid), None
elif posarg_delta <= len(stars):
# We have too many args; don't do *xs expansion. Go back to matching from
# the start and treat every entry in starargs_tuple as length 1.
n_params = len(match_signature.param_names)
all_args = posargs + starargs_tuple
if not match_signature.varargs_name:
# If the function sig has no *args, return everything in posargs
pos = _splats_to_any(all_args, ctx)
return pos, None
# Don't unwrap splats here because f(*xs, y) is not the same as f(xs, y).
# TODO(mdemello): Ideally, since we are matching call f(*xs, y) against
# sig f(x, y) we should raise an error here.
pos = _splats_to_any(all_args[:n_params], ctx)
star = []
for var in all_args[n_params:]:
if abstract_utils.is_var_splat(var):
star.append(
abstract_utils.merged_type_parameter(node, var, abstract_utils.T)
)
else:
star.append(var)
if star:
return pos, ctx.convert.tuple_to_value(star).to_variable(node)
else:
return pos, None
elif stars:
if len(stars) == 1:
# Special case (<pre>, *xs) and (*xs, <post>) to fill in the type of xs
# in every remaining arg.
mid = self._expand_typed_star(node, stars[0], posarg_delta, ctx)
else:
# If we have (*xs, <k args>, *ys) remaining, and more than k+2 params to
# match, don't try to match the intermediate params to any range, just
# match all k+2 to Any
mid = [ctx.new_unsolvable(node) for _ in range(posarg_delta)]
return posargs + tuple(pre + mid + post), None
else:
# We have **kwargs but no *args in the invocation
return posargs + tuple(pre), None
def simplify(
self,
node: cfg.CFGNode,
ctx: "context.Context",
match_signature: Signature | None = None,
) -> "Args":
"""Try to insert part of *args, **kwargs into posargs / namedargs."""
# TODO(rechen): When we have type information about *args/**kwargs,
# we need to check it before doing this simplification.
posargs = self.posargs
namedargs = self.namedargs
starargs = self.starargs
starstarargs = self.starstarargs
# Unpack starstarargs into namedargs. We need to do this first so we can see
# what posargs are still required.
starstarargs_as_dict = self.starstarargs_as_dict()
if starstarargs_as_dict is not None:
# Unlike varargs below, we do not adjust starstarargs into namedargs when
# the function signature has matching param_names because we have not
# found a benefit in doing so.
if namedargs is None:
namedargs = {}
abstract_utils.update_args_dict(namedargs, starstarargs_as_dict, node)
# We have pulled out all the named args from the function call, so we need
# to delete them from starstarargs. If the original call contained
# **kwargs, starstarargs will have is_concrete set to False, so
# preserve it as an abstract dict. If not, we just had named args packed
# into starstarargs, so set starstarargs to None.
assert starstarargs is not None
kwdict = starstarargs.data[0]
if isinstance(kwdict, _abstract.Dict) and not kwdict.is_concrete:
cls = kwdict.cls
if isinstance(cls, _abstract.PyTDClass):
# If cls is not already parameterized with the key and value types, we
# parameterize it now to preserve them.
params = {
name: ctx.convert.merge_classes(
kwdict.get_instance_type_parameter(name, node).data
)
for name in (abstract_utils.K, abstract_utils.V)
}
cls = _abstract.ParameterizedClass(ctx.convert.dict_type, params, ctx)
starstarargs = cls.instantiate(node)
else:
starstarargs = None
starargs_as_tuple = self.starargs_as_tuple(node, ctx)
if starargs_as_tuple is not None:
if match_signature:
posargs, starargs = self._unpack_and_match_args(
node, ctx, match_signature, starargs_as_tuple
)
elif starargs_as_tuple and abstract_utils.is_var_splat(
starargs_as_tuple[-1]
):
# If the last arg is an indefinite iterable keep it in starargs. Convert
# any other splats to Any.
# TODO(mdemello): If there are multiple splats should we just fall
# through to the next case (setting them all to Any), and only hit this
# case for a *single* splat in terminal position?
posargs = self.posargs + _splats_to_any(starargs_as_tuple[:-1], ctx)
starargs = abstract_utils.unwrap_splat(starargs_as_tuple[-1])
else:
# Don't try to unpack iterables in any other position since we don't
# have a signature to match. Just set all splats to Any.
posargs = self.posargs + _splats_to_any(starargs_as_tuple, ctx)
starargs = None
simplify = lambda var: abstract_utils.simplify_variable(var, node, ctx)
return Args(
tuple(simplify(posarg) for posarg in posargs),
{k: simplify(namedarg) for k, namedarg in namedargs.items()},
simplify(starargs),
simplify(starstarargs),
)
def get_variables(self) -> list[cfg.Variable]:
variables = list(self.posargs) + list(self.namedargs.values())
if self.starargs is not None:
variables.append(self.starargs)
if self.starstarargs is not None:
variables.append(self.starstarargs)
return variables
def replace_posarg(self, pos: int, val: cfg.Variable) -> "Args":
new_posargs = self.posargs[:pos] + (val,) + self.posargs[pos + 1 :]
return self.replace(posargs=new_posargs)
def replace_namedarg(self, name: str, val: cfg.Variable) -> "Args":
new_namedargs = dict(self.namedargs)
new_namedargs[name] = val
return self.replace(namedargs=new_namedargs)
def delete_namedarg(self, name: str) -> "Args":
new_namedargs = {k: v for k, v in self.namedargs.items() if k != name}
return self.replace(namedargs=new_namedargs)
def replace(self, **kwargs) -> "Args":
return attrs.evolve(self, **kwargs)
def has_opaque_starargs_or_starstarargs(self) -> bool:
return any(
arg and not isinstance(arg, _abstract.PythonConstant)
for arg in (self.starargs, self.starstarargs)
)
| Args |
python | Pylons__pyramid | tests/test_scripts/test_pshell.py | {
"start": 13862,
"end": 14055
} | class ____:
def __init__(self, entry_points):
self._entry_points = entry_points
def entry_points(self):
return DummyEntryPoints(self._entry_points)
| DummyImportlibMetadata |
python | realpython__materials | django-gunicorn-nginx/myapp/apps.py | {
"start": 36,
"end": 142
} | class ____(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "myapp"
| MyappConfig |
python | doocs__leetcode | solution/1200-1299/1274.Number of Ships in a Rectangle/Solution.py | {
"start": 286,
"end": 1088
} | class ____:
def countShips(self, sea: "Sea", topRight: "Point", bottomLeft: "Point") -> int:
def dfs(topRight, bottomLeft):
x1, y1 = bottomLeft.x, bottomLeft.y
x2, y2 = topRight.x, topRight.y
if x1 > x2 or y1 > y2:
return 0
if not sea.hasShips(topRight, bottomLeft):
return 0
if x1 == x2 and y1 == y2:
return 1
midx = (x1 + x2) >> 1
midy = (y1 + y2) >> 1
a = dfs(topRight, Point(midx + 1, midy + 1))
b = dfs(Point(midx, y2), Point(x1, midy + 1))
c = dfs(Point(midx, midy), bottomLeft)
d = dfs(Point(x2, midy), Point(midx + 1, y1))
return a + b + c + d
return dfs(topRight, bottomLeft)
| Solution |
python | mozilla__bleach | bleach/_vendor/html5lib/treewalkers/etree_lxml.py | {
"start": 2980,
"end": 6357
} | class ____(base.NonRecursiveTreeWalker):
def __init__(self, tree):
# pylint:disable=redefined-variable-type
if isinstance(tree, list):
self.fragmentChildren = set(tree)
tree = FragmentRoot(tree)
else:
self.fragmentChildren = set()
tree = Root(tree)
base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = _ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
return base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (base.DOCUMENT,)
elif isinstance(node, Doctype):
return base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
return base.TEXT, ensure_str(node.obj)
elif node.tag == etree.Comment:
return base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = OrderedDict()
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), "Text nodes have no children"
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
return node
# else: fallback to "normal" processing
elif node in self.fragmentChildren:
return None
return node.getparent()
| TreeWalker |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/autograd_cache.py | {
"start": 20784,
"end": 42640
} | class ____(GuardedCache[GenericAOTAutogradResult]):
"""
Caches the results of running AOTAutograd. This class mostly handles the save and load logic, whereas
AOTAutogradResult handles the wrapping/unwrapping logic.
Cache Inputs (AOTAutogradCacheDetails)
- AOTAutogradCache takes in the following inputs, which are analogous to inputs given
to AOTAutograd by dynamo:
- A fx graph module generated by dynamo
- A list of args, which consists of:
- Symint inputs to the graph, generated by dynamo
- The **real tensor** inputs, which inductor uses for cudagraphs
- Notably, the real tensor inputs don't have symints in their metadata.
AOTAutograd then retraces those real tensor arguments into FakeTensors later during execution.
- A set of global configurations that affect AOTAutograd or Inductor behavior.
It then generates a cache key given these values. Notably, this means AOTAutogradCache currently
specializes on the sizes and strides of the real tensor inputs when dynamic shapes are turned on.
In a later PR, we'll likely generate the cache key based on the FakeTensors AOTAutograd generates
based on the real tensor inputs, which can contain symints.
# Cache Outputs (AOTAutogradResult)
- AOTAutogradCache caches the following values:
- The compiled forward and backward functions from inductor, via keys to the FXGraphCache
- Metadata to reconstruct the AOTModule from the compiled inductor artifacts
- See AOTAutogradResult for more info
[Note: Caching guards generated by AOTAutograd and Inductor]
AOTAutograd and inductor both can introduce new guards to the shape environment. FXGraphCache saves guards with each
compiled graph inductor generates. On a cache hit, AOTAutograd reloads the compiled forward and backward functions
from FXGraphCache, giving it new symint arguments from the input args.
FXGraphCache uses those symints and its saved guards to repopulate the ShapeEnv with guards.
**No new guards are generated into the shape env after inductor finishes compiling**, so the guards
saved by inductor are sufficient for correctness for both AOTAutograd and Inductor's caches.
"""
@staticmethod
def clear():
"""Clear the cache"""
try:
shutil.rmtree(AOTAutogradCache._get_tmp_dir())
except FileNotFoundError:
pass
@staticmethod
def try_load(
mod: Union[torch.fx.GraphModule, torch._dynamo.utils.GmWrapper],
args,
aot_config: AOTConfig,
cudagraphs: BoxedBool,
boxed_forward_device_index: Optional[BoxedDeviceIndex],
local: bool,
remote: bool,
) -> Optional[Callable]:
"""
Load a result from the cache, and reconstruct a runtime wrapper around the object
"""
gm = mod.gm if isinstance(mod, torch._dynamo.utils.GmWrapper) else mod
with sanitize_gm_for_cache(gm):
compiled_fn = None
cache_info: dict[str, Any] = {}
cache_key = None
debug_lines: list[str] = []
cache_event_time = time.time_ns()
cache_state = None
fx_config: _CompileFxKwargs = {
"cudagraphs": cudagraphs,
"boxed_forward_device_index": boxed_forward_device_index,
}
try:
cache_key, debug_lines = autograd_cache_key(
gm, args, aot_config, fx_config
)
result: Optional[tuple[GenericAOTAutogradResult, bytes]] = (
AOTAutogradCache._lookup(
cache_key, local, remote, args, cache_info, aot_config
)
)
if result is not None:
(entry, pickled_content) = result
compiled_fn = entry.wrap_post_compile(args, aot_config, fx_config)
# Make the compiled_fn serializable, where the serialize function just
# makes a copy of the original entry before post compile via the pickled content
compiled_fn = SerializableCompiledFunction(
compiled_fn, lambda: pickle.loads(pickled_content)
)
log.info("AOTAutograd cache hit for key %s", cache_key)
counters["aot_autograd"]["autograd_cache_hit"] += 1
cache_state = "hit"
cache_event_time = time.time_ns()
forward_time_saved = entry.forward_time_taken_ns // 1e6
backward_time_saved = entry.backward_time_taken_ns // 1e6
cache_info.update(
{
"forward_time_saved_ms": forward_time_saved,
"backward_time_saved_ms": backward_time_saved,
"time_saved_ms": forward_time_saved + backward_time_saved,
}
)
time_saved_ns = (
entry.forward_time_taken_ns + entry.backward_time_taken_ns
)
# TODO: should we use the same field for remote cache time saved for both
# FXGraphCache and AOTAutogradCache?
# get_metrics_context().increment(...)
if (
ephemeral_increase
:= add_ephemeral_timeout_increase_for_distributed(time_saved_ns)
) != 0:
cache_info["ephemeral_timeout_increase"] = ephemeral_increase
if compiled_fn is None:
log.info("AOTAutograd cache miss for key %s", cache_key)
counters["aot_autograd"]["autograd_cache_miss"] += 1
cache_state = "miss"
cache_event_time = time.time_ns()
# Count missing the FXGraphCache as a miss not a bypass
except FXGraphCacheMiss as e:
counters["aot_autograd"]["autograd_cache_miss"] += 1
cache_state = "miss"
if (
config.strict_autograd_cache
or torch._dynamo.config.strict_precompile
):
raise e
# Most often this is BypassAOTAutogradCache, but
# if there's ever different reason we can't cache,
# we still never want to hard throw an exception, since
# we can always fallback to a cache bypass.
# As an example, if the user calls autograd via
# standalone inductor, we will sometimes get a GraphModule
# that doesn't actually have a `.graph` on it. Instead
# of checking every single case, we safely catch the exception
# in those cases.
except Exception as e:
cache_key = None
counters["aot_autograd"]["autograd_cache_bypass"] += 1
log.info("Bypassing autograd cache due to: %s", e) # noqa: G200
cache_state = "bypass"
cache_event_time = time.time_ns()
cache_info["cache_bypass_reason"] = str(e)
cache_info["cache_bypass_exception_type"] = type(e).__name__
cache_info["cache_bypass_traceback"] = traceback.format_exc().split(
"\n"
)
# TODO: this gets logged implicitly by cache_bypass_reason,
# and here we explicitly log it into tlparse.
# We may want to log this as an extra column in Scuba, though.
cache_info["cache_bypass_hard_exception"] = not isinstance(
e, BypassAOTAutogradCache
)
if remote:
log_cache_bypass("bypass_aot_autograd", str(e))
if (
config.strict_autograd_cache
or torch._dynamo.config.strict_precompile
):
raise e
if compiled_fn is None:
# Set the cache key so we can save a cache result later
symints = AOTAutogradCache._filter_backed_symints(args)
if cache_key is not None:
aot_config.cache_info = AOTAutogradCacheInfo(
cache_key,
time.time_ns(),
forward_symints=symints,
)
cache_info.update(
{
"key": cache_key,
"cache_state": cache_state,
"components": debug_lines,
}
)
if chromium_event_log_active():
CompileEventLogger.instant(
f"autograd_cache_{cache_state}",
metadata=cache_info,
time_ns=cache_event_time,
)
CompileEventLogger.try_add_pt2_compile(
"backend_compile",
cache_state=cache_state,
cache_event_time=cache_event_time,
key=cache_info.get("key"),
components=cache_info.get("components"),
cache_bypass_reason=cache_info.get("cache_bypass_reason"),
remote_cache_enabled=remote,
local_cache_enabled=local,
)
torch._logging.trace_structured(
"artifact",
metadata_fn=lambda: {
"name": f"aotautograd_cache_{cache_state}",
"encoding": "json",
},
payload_fn=lambda: json.dumps(cache_info),
)
return compiled_fn
@classmethod
def generate_guards_expression(
cls: type[AOTAutogradCache], cache_info: AOTAutogradCacheInfo
) -> Optional[str]:
shape_env = cls._get_shape_env()
assert shape_env is not None
symints = cache_info.forward_symints
guards = shape_env.get_pruned_guards(symints)
return shape_env.produce_guards_expression(placeholders=symints, guards=guards)
@classmethod
def _get_tmp_dir(cls: type[AOTAutogradCache]) -> str:
"""
Get the toplevel temporary directory for storing compiled graphs.
"""
return os.path.join(cache_dir(), "aotautograd")
@classmethod
def _get_tmp_dir_for_key(cls: type[AOTAutogradCache], key) -> str:
"""
Get the toplevel temporary directory for storing compiled graphs.
"""
return os.path.join(cls._get_tmp_dir(), key)
@staticmethod
def evaluate_guards(guard_expr: str, hints: Union[list[int], list[torch.SymInt]]):
if torch._inductor.config.unsafe_skip_cache_dynamic_shape_guards:
return True
shape_env = AOTAutogradCache._get_shape_env()
assert shape_env is not None
result = shape_env.evaluate_guards_expression(guard_expr, hints)
return result
@staticmethod
def _lookup(
key: str,
local: bool,
remote: bool,
args: list[Any],
cache_info: dict[str, Any],
aot_config: Optional[AOTConfig],
) -> Optional[tuple[GenericAOTAutogradResult, bytes]]:
"""Given a key generated by AOTAutogradCachePickler, look up its location in the cache."""
remote_cache: Optional[RemoteCache[JsonDataTy]] = None
if remote:
remote_cache = AOTAutogradCache.get_remote_cache()
symints = AOTAutogradCache._filter_backed_symints(args)
hints = [hint_int(s) for s in symints]
entry = None
pickled_content = None
try:
(
entry,
pickled_content,
guard_info,
) = AOTAutogradCache.find_guarded_entry(
key, local, remote_cache, AOTAutogradCache.evaluate_guards, hints
)
if entry is None and guard_info["cache_status_detailed"] == "guard_miss":
counters["aot_autograd"]["autograd_cache_guard_miss"] += 1
cache_info.update(guard_info)
if pickled_content is not None:
CacheArtifactManager.record_artifact(
AOTAutogradCacheArtifact.type(), key, pickled_content
)
if (
should_bundle_autograd_cache()
and aot_config is not None
and aot_config.precompile_backend_id is not None
):
# NB: We don't want to use the cached aot_config.precompile_backend_id
# 1. because we set it to None on save 2. even if we didn't, this new run
# that cache hit has a *new* backend id associated with it.
PrecompileContext.record_artifact(
BundledAOTAutogradCacheArtifact(
aot_config.precompile_backend_id, entry
),
)
except Exception as e:
log.info("AOTAutograd cache unable to load compiled graph: %s", e) # noqa: G200
if config.strict_autograd_cache:
raise e
if entry is not None:
assert pickled_content is not None
return (entry, pickled_content)
else:
return None
@staticmethod
def _write_to_local_cache(key: str, content: bytes):
"""Write an entry to the local cache."""
subdir = AOTAutogradCache._get_tmp_dir_for_key(key)
if not os.path.exists(subdir):
os.makedirs(subdir, exist_ok=True)
# Use a hash of the serialized entry to get a unique file
# name. The specific name doesn't matter since a lookup involves
# iterating over all entries in the parent subdir.
path = os.path.join(subdir, sha256_hash(content))
log.info("Writing AOTAutograd cache entry to %s", path)
write_atomic(path, content)
@staticmethod
def save(key: str, entry: GenericAOTAutogradResult, remote: bool):
"""Save a single entry into the cache."""
try:
entry.pre_save()
content = pickle.dumps(entry)
CacheArtifactManager.record_artifact(
AOTAutogradCacheArtifact.type(), key, content
)
if (
should_bundle_autograd_cache()
and entry.sanitized_aot_config.precompile_backend_id is not None
):
precompile_key = entry.sanitized_aot_config.precompile_backend_id
artifact = BundledAOTAutogradCacheArtifact(precompile_key, entry)
# Now that we're saving it, the precompile_backend_id field is no longer
# useful, remove it from the entry.
entry.sanitized_aot_config.precompile_backend_id = None
PrecompileContext.record_artifact(artifact)
AOTAutogradCache._write_to_local_cache(key, content)
counters["aot_autograd"]["autograd_cache_saved"] += 1
except BypassAOTAutogradCache as e:
counters["aot_autograd"]["autograd_cache_bypass"] += 1
log.info("Bypassing autograd cache due to: %s", e) # noqa: G200
if remote:
log_cache_bypass("bypass_aot_autograd", str(e))
return None
except Exception as e:
log.info("AOTAutograd cache unable to serialize compiled graph: %s", e) # noqa: G200
if remote:
log_cache_bypass(
"bypass_aot_autograd", "Unable to serialize: " + str(e)
)
if config.strict_autograd_cache:
raise e
return None
if remote:
remote_cache: Optional[RemoteCache[JsonDataTy]] = (
AOTAutogradCache.get_remote_cache()
)
if remote_cache is not None:
time_taken_ms = int(
(entry.forward_time_taken_ns + entry.backward_time_taken_ns) // 1e6
)
cache_data: JsonDataTy = {
"data": base64.b64encode(content).decode("ascii"),
"time_taken_ms": time_taken_ms,
}
remote_cache.put(key, cache_data)
@staticmethod
@functools.cache
def get_remote_cache() -> Optional[RemoteCache[JsonDataTy]]:
"""
Attempts to load the remote cache, returns None on error.
"""
cache_id = "autograd-experimental"
return create_cache(
cache_id,
config.is_fbcode(),
"FbRemoteAOTAutogradCache",
"RemoteAOTAutogradCache",
)
@staticmethod
def make_entry(
compiled_fw_func: OutputCode,
compiled_bw_func: Optional[OutputCode],
aot_joint_graph_str: Optional[str],
aot_forward_graph_str: Optional[str],
aot_backward_graph_str: Optional[str],
runtime_metadata: ViewAndMutationMeta,
dispatch_wrappers: list[CompilerWrapper],
maybe_subclass_meta: Optional[SubclassMeta],
num_fw_outs_saved_for_bw: Optional[int],
indices_of_inps_to_detach: list[int],
forward_time_taken_ns: int,
backward_time_taken_ns: int,
sanitized_aot_config: AOTConfig,
guards_expr: Optional[str],
backward_state_indices: Optional[list[int]],
num_symints_saved_for_bw: Optional[int],
serialized_bw_module: Optional[SerializedGraphModule],
) -> GenericAOTAutogradResult:
if should_bundle_autograd_cache():
# Helper function to unwrap all the wrappers we added during aotdispatch
# They get reapplied on cache load
def unwrap_output_code(obj):
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
assert isinstance(obj, OutputCode)
return obj
compiled_fw_graph = unwrap_output_code(compiled_fw_func)
bundled_compiled_forward = BundledCompiledForward(compiled_fw_graph)
bundled_compiled_backward = None
if compiled_bw_func is not None:
assert backward_state_indices is not None
assert num_symints_saved_for_bw is not None
compiled_bw_graph = unwrap_output_code(compiled_bw_func)
bundled_compiled_backward = BundledCompiledBackward(
compiled_bw_graph, backward_state_indices, num_symints_saved_for_bw
)
return BundledAOTAutogradResult(
compiled_fw=bundled_compiled_forward,
compiled_bw=bundled_compiled_backward,
aot_joint_graph_str=aot_joint_graph_str,
aot_forward_graph_str=aot_forward_graph_str,
aot_backward_graph_str=aot_backward_graph_str,
runtime_metadata=runtime_metadata,
dispatch_wrappers=dispatch_wrappers,
maybe_subclass_meta=maybe_subclass_meta,
num_fw_outs_saved_for_bw=num_fw_outs_saved_for_bw,
indices_of_inps_to_detach=indices_of_inps_to_detach,
forward_time_taken_ns=forward_time_taken_ns,
backward_time_taken_ns=backward_time_taken_ns,
sanitized_aot_config=sanitized_aot_config,
guards_expr=guards_expr,
serialized_bw_module=serialized_bw_module,
)
else:
fw_key = getattr(compiled_fw_func, "_fx_graph_cache_key", None)
fw_debug_lines = getattr(
compiled_fw_func, "_fx_graph_cache_debug_lines", []
)
assert fw_key is not None
compiled_forward = CompiledForward(
fx_graph_cache_info=(fw_key, fw_debug_lines),
fx_graph_guard_expr=getattr(compiled_fw_func, "guards_expr", None),
)
compiled_backward = None
if compiled_bw_func is not None:
bw_key = getattr(compiled_bw_func, "_fx_graph_cache_key", None)
bw_debug_lines = getattr(
compiled_bw_func, "_fx_graph_cache_debug_lines", []
)
assert bw_key is not None
assert backward_state_indices is not None
assert num_symints_saved_for_bw is not None
compiled_backward = CompiledBackward(
fx_graph_cache_info=(bw_key, bw_debug_lines),
fx_graph_guard_expr=getattr(compiled_bw_func, "guards_expr", None),
backward_state_indices=backward_state_indices,
num_symints_saved_for_bw_=num_symints_saved_for_bw,
)
return AOTAutogradResult(
compiled_fw=compiled_forward,
compiled_bw=compiled_backward,
aot_joint_graph_str=aot_joint_graph_str,
aot_forward_graph_str=aot_forward_graph_str,
aot_backward_graph_str=aot_backward_graph_str,
runtime_metadata=runtime_metadata,
dispatch_wrappers=dispatch_wrappers,
maybe_subclass_meta=maybe_subclass_meta,
num_fw_outs_saved_for_bw=num_fw_outs_saved_for_bw,
indices_of_inps_to_detach=indices_of_inps_to_detach,
forward_time_taken_ns=forward_time_taken_ns,
backward_time_taken_ns=backward_time_taken_ns,
sanitized_aot_config=sanitized_aot_config,
guards_expr=guards_expr,
serialized_bw_module=serialized_bw_module,
)
| AOTAutogradCache |
python | apache__airflow | providers/apache/spark/tests/unit/apache/spark/hooks/test_spark_jdbc_script.py | {
"start": 1332,
"end": 7454
} | class ____:
jdbc_arguments = [
"-cmdType",
"spark_to_jdbc",
"-url",
"jdbc:postgresql://localhost:5432/default",
"-user",
"user",
"-password",
"supersecret",
"-metastoreTable",
"hiveMcHiveFace",
"-jdbcTable",
"tableMcTableFace",
"-jdbcDriver",
"org.postgresql.Driver",
"-jdbcTruncate",
"false",
"-saveMode",
"append",
"-saveFormat",
"parquet",
"-batchsize",
"100",
"-fetchsize",
"200",
"-name",
"airflow-spark-jdbc-script-test",
"-numPartitions",
"10",
"-partitionColumn",
"columnMcColumnFace",
"-lowerBound",
"10",
"-upperBound",
"20",
"-createTableColumnTypes",
"columnMcColumnFace INTEGER(100), name CHAR(64),comments VARCHAR(1024)",
]
default_arguments = {
"cmd_type": "spark_to_jdbc",
"url": "jdbc:postgresql://localhost:5432/default",
"user": "user",
"password": "supersecret",
"metastore_table": "hiveMcHiveFace",
"jdbc_table": "tableMcTableFace",
"jdbc_driver": "org.postgresql.Driver",
"truncate": "false",
"save_mode": "append",
"save_format": "parquet",
"batch_size": "100",
"fetch_size": "200",
"name": "airflow-spark-jdbc-script-test",
"num_partitions": "10",
"partition_column": "columnMcColumnFace",
"lower_bound": "10",
"upper_bound": "20",
"create_table_column_types": "columnMcColumnFace INTEGER(100), name CHAR(64),comments VARCHAR(1024)",
}
def test_parse_arguments(self):
# When
parsed_arguments = _parse_arguments(args=self.jdbc_arguments)
# Then
for argument_name, argument_value in self.default_arguments.items():
assert getattr(parsed_arguments, argument_name) == argument_value
@mock.patch("airflow.providers.apache.spark.hooks.spark_jdbc_script.spark_write_to_jdbc")
def test_run_spark_write_to_jdbc(self, mock_spark_write_to_jdbc, mock_spark_session):
# Given
arguments = _parse_arguments(["-cmdType", SPARK_WRITE_TO_JDBC] + self.jdbc_arguments[2:])
spark_session = mock_spark_session.builder.appName(arguments.name).enableHiveSupport().getOrCreate()
# When
_run_spark(arguments=arguments)
# Then
mock_spark_write_to_jdbc.assert_called_once_with(
spark_session,
arguments.url,
arguments.user,
arguments.password,
arguments.metastore_table,
arguments.jdbc_table,
arguments.jdbc_driver,
arguments.truncate,
arguments.save_mode,
arguments.batch_size,
arguments.num_partitions,
arguments.create_table_column_types,
)
@mock.patch("airflow.providers.apache.spark.hooks.spark_jdbc_script.spark_read_from_jdbc")
def test_run_spark_read_from_jdbc(self, mock_spark_read_from_jdbc, mock_spark_session):
# Given
arguments = _parse_arguments(["-cmdType", SPARK_READ_FROM_JDBC] + self.jdbc_arguments[2:])
spark_session = mock_spark_session.builder.appName(arguments.name).enableHiveSupport().getOrCreate()
# When
_run_spark(arguments=arguments)
# Then
mock_spark_read_from_jdbc.assert_called_once_with(
spark_session,
arguments.url,
arguments.user,
arguments.password,
arguments.metastore_table,
arguments.jdbc_table,
arguments.jdbc_driver,
arguments.save_mode,
arguments.save_format,
arguments.fetch_size,
arguments.num_partitions,
arguments.partition_column,
arguments.lower_bound,
arguments.upper_bound,
)
@pytest.mark.system
@mock.patch.object(DataFrameWriter, "save")
def test_spark_write_to_jdbc(self, mock_writer_save):
# Given
arguments = _parse_arguments(self.jdbc_arguments)
spark_session = _create_spark_session(arguments)
spark_session.sql(f"CREATE TABLE IF NOT EXISTS {arguments.metastore_table} (key INT)")
# When
spark_write_to_jdbc(
spark_session=spark_session,
url=arguments.url,
user=arguments.user,
password=arguments.password,
metastore_table=arguments.metastore_table,
jdbc_table=arguments.jdbc_table,
driver=arguments.jdbc_driver,
truncate=arguments.truncate,
save_mode=arguments.save_mode,
batch_size=arguments.batch_size,
num_partitions=arguments.num_partitions,
create_table_column_types=arguments.create_table_column_types,
)
# Then
mock_writer_save.assert_called_once_with(mode=arguments.save_mode)
@pytest.mark.system
@mock.patch.object(DataFrameReader, "load")
def test_spark_read_from_jdbc(self, mock_reader_load):
# Given
arguments = _parse_arguments(self.jdbc_arguments)
spark_session = _create_spark_session(arguments)
spark_session.sql(f"CREATE TABLE IF NOT EXISTS {arguments.metastore_table} (key INT)")
# When
spark_read_from_jdbc(
spark_session,
arguments.url,
arguments.user,
arguments.password,
arguments.metastore_table,
arguments.jdbc_table,
arguments.jdbc_driver,
arguments.save_mode,
arguments.save_format,
arguments.fetch_size,
arguments.num_partitions,
arguments.partition_column,
arguments.lower_bound,
arguments.upper_bound,
)
# Then
mock_reader_load().write.saveAsTable.assert_called_once_with(
arguments.metastore_table, format=arguments.save_format, mode=arguments.save_mode
)
| TestSparkJDBCScrip |
python | tiangolo__fastapi | docs_src/header_param_models/tutorial003_py310.py | {
"start": 86,
"end": 377
} | class ____(BaseModel):
host: str
save_data: bool
if_modified_since: str | None = None
traceparent: str | None = None
x_tag: list[str] = []
@app.get("/items/")
async def read_items(headers: CommonHeaders = Header(convert_underscores=False)):
return headers
| CommonHeaders |
python | falconry__falcon | tests/test_middleware.py | {
"start": 302,
"end": 529
} | class ____:
def process_response(self, req, resp, resource, req_succeeded):
self.req = req
self.resp = resp
self.resource = resource
self.req_succeeded = req_succeeded
| CaptureResponseMiddleware |
python | numba__numba | numba/tests/test_gdb_bindings.py | {
"start": 7737,
"end": 8317
} | class ____(TestCase):
def test_call_gdb(self):
def nop_compiler(x):
return x
for compiler in [nop_compiler, jit(forceobj=True), njit]:
for meth in [gdb, gdb_init]:
def python_func():
meth()
with self.assertRaises(errors.TypingError) as raises:
compiler(python_func)()
msg = "gdb support is only available on unix-like systems"
self.assertIn(msg, str(raises.exception))
if __name__ == '__main__':
unittest.main()
| TestGdbExceptions |
python | google__flatbuffers | tests/py_test.py | {
"start": 5193,
"end": 13736
} | class ____(unittest.TestCase):
"""Tests the generated object based API."""
def test_consistency_with_repeated_pack_and_unpack(self):
"""Checks the serialization and deserialization between a buffer and
its python object. It tests in the same way as the C++ object API test,
ObjectFlatBuffersTest in test.cpp.
"""
buf, off = make_monster_from_generated_code()
# Turns a buffer into Python object (T class).
monster1 = _MONSTER.Monster.GetRootAs(buf, off)
monsterT1 = _MONSTER.MonsterT.InitFromObj(monster1)
for sizePrefix in [True, False]:
# Re-serialize the data into a buffer.
b1 = flatbuffers.Builder(0)
if sizePrefix:
b1.FinishSizePrefixed(monsterT1.Pack(b1))
else:
b1.Finish(monsterT1.Pack(b1))
CheckReadBuffer(b1.Bytes, b1.Head(), sizePrefix)
# Deserializes the buffer into Python object again.
monster2 = _MONSTER.Monster.GetRootAs(b1.Bytes, b1.Head())
# Re-serializes the data into a buffer for one more time.
monsterT2 = _MONSTER.MonsterT.InitFromObj(monster2)
for sizePrefix in [True, False]:
# Re-serializes the data into a buffer
b2 = flatbuffers.Builder(0)
if sizePrefix:
b2.FinishSizePrefixed(monsterT2.Pack(b2))
else:
b2.Finish(monsterT2.Pack(b2))
CheckReadBuffer(b2.Bytes, b2.Head(), sizePrefix)
def test_default_values_with_pack_and_unpack(self):
"""Serializes and deserializes between a buffer with default values (no
specific values are filled when the buffer is created) and its python
object.
"""
# Creates a flatbuffer with default values.
b1 = flatbuffers.Builder(0)
_MONSTER.MonsterStart(b1)
gen_mon = _MONSTER.MonsterEnd(b1)
b1.Finish(gen_mon)
# Converts the flatbuffer into the object class.
monster1 = _MONSTER.Monster.GetRootAs(b1.Bytes, b1.Head())
monsterT1 = _MONSTER.MonsterT.InitFromObj(monster1)
# Packs the object class into another flatbuffer.
b2 = flatbuffers.Builder(0)
b2.Finish(monsterT1.Pack(b2))
monster2 = _MONSTER.Monster.GetRootAs(b2.Bytes, b2.Head())
# Checks the default values.
self.assertTrue(monster2.Pos() is None)
self.assertEqual(monster2.Mana(), 150)
self.assertEqual(monster2.Hp(), 100)
self.assertTrue(monster2.Name() is None)
self.assertEqual(monster2.Inventory(0), 0)
self.assertEqual(monster2.InventoryAsNumpy(), 0)
self.assertEqual(monster2.InventoryLength(), 0)
self.assertTrue(monster2.InventoryIsNone())
self.assertEqual(monster2.Color(), 8)
self.assertEqual(monster2.TestType(), 0)
self.assertTrue(monster2.Test() is None)
self.assertTrue(monster2.Test4(0) is None)
self.assertEqual(monster2.Test4Length(), 0)
self.assertTrue(monster2.Test4IsNone())
self.assertEqual(monster2.Testarrayofstring(0), '')
self.assertEqual(monster2.TestarrayofstringLength(), 0)
self.assertTrue(monster2.TestarrayofstringIsNone())
self.assertTrue(monster2.Testarrayoftables(0) is None)
self.assertEqual(monster2.TestarrayoftablesLength(), 0)
self.assertTrue(monster2.TestarrayoftablesIsNone())
self.assertTrue(monster2.Enemy() is None)
self.assertEqual(monster2.Testnestedflatbuffer(0), 0)
self.assertEqual(monster2.TestnestedflatbufferAsNumpy(), 0)
self.assertEqual(monster2.TestnestedflatbufferLength(), 0)
self.assertTrue(monster2.TestnestedflatbufferIsNone())
self.assertTrue(monster2.Testempty() is None)
self.assertFalse(monster2.Testbool())
self.assertEqual(monster2.Testhashs32Fnv1(), 0)
self.assertEqual(monster2.Testhashu32Fnv1(), 0)
self.assertEqual(monster2.Testhashs64Fnv1(), 0)
self.assertEqual(monster2.Testhashu64Fnv1(), 0)
self.assertEqual(monster2.Testhashs32Fnv1a(), 0)
self.assertEqual(monster2.Testhashu32Fnv1a(), 0)
self.assertEqual(monster2.Testhashs64Fnv1a(), 0)
self.assertEqual(monster2.Testhashu64Fnv1a(), 0)
self.assertEqual(monster2.Testarrayofbools(0), 0)
self.assertEqual(monster2.TestarrayofboolsAsNumpy(), 0)
self.assertEqual(monster2.TestarrayofboolsLength(), 0)
self.assertTrue(monster2.TestarrayofboolsIsNone())
self.assertEqual(monster2.Testf(), 3.14159)
self.assertEqual(monster2.Testf2(), 3.0)
self.assertEqual(monster2.Testf3(), 0.0)
self.assertEqual(monster2.Testarrayofstring2(0), '')
self.assertEqual(monster2.Testarrayofstring2Length(), 0)
self.assertTrue(monster2.Testarrayofstring2IsNone())
self.assertTrue(monster2.Testarrayofsortedstruct(0) is None)
self.assertEqual(monster2.TestarrayofsortedstructLength(), 0)
self.assertTrue(monster2.TestarrayofsortedstructIsNone())
self.assertEqual(monster2.Flex(0), 0)
self.assertEqual(monster2.FlexAsNumpy(), 0)
self.assertEqual(monster2.FlexLength(), 0)
self.assertTrue(monster2.FlexIsNone())
self.assertTrue(monster2.Test5(0) is None)
self.assertEqual(monster2.Test5Length(), 0)
self.assertTrue(monster2.Test5IsNone())
self.assertEqual(monster2.VectorOfLongs(0), 0)
self.assertEqual(monster2.VectorOfLongsAsNumpy(), 0)
self.assertEqual(monster2.VectorOfLongsLength(), 0)
self.assertTrue(monster2.VectorOfLongsIsNone())
self.assertEqual(monster2.VectorOfDoubles(0), 0)
self.assertEqual(monster2.VectorOfDoublesAsNumpy(), 0)
self.assertEqual(monster2.VectorOfDoublesLength(), 0)
self.assertTrue(monster2.VectorOfDoublesIsNone())
self.assertTrue(monster2.ParentNamespaceTest() is None)
self.assertTrue(monster2.VectorOfReferrables(0) is None)
self.assertEqual(monster2.VectorOfReferrablesLength(), 0)
self.assertTrue(monster2.VectorOfReferrablesIsNone())
self.assertEqual(monster2.SingleWeakReference(), 0)
self.assertEqual(monster2.VectorOfWeakReferences(0), 0)
self.assertEqual(monster2.VectorOfWeakReferencesAsNumpy(), 0)
self.assertEqual(monster2.VectorOfWeakReferencesLength(), 0)
self.assertTrue(monster2.VectorOfWeakReferencesIsNone())
self.assertTrue(monster2.VectorOfStrongReferrables(0) is None)
self.assertEqual(monster2.VectorOfStrongReferrablesLength(), 0)
self.assertTrue(monster2.VectorOfStrongReferrablesIsNone())
self.assertEqual(monster2.CoOwningReference(), 0)
self.assertEqual(monster2.VectorOfCoOwningReferences(0), 0)
self.assertEqual(monster2.VectorOfCoOwningReferencesAsNumpy(), 0)
self.assertEqual(monster2.VectorOfCoOwningReferencesLength(), 0)
self.assertTrue(monster2.VectorOfCoOwningReferencesIsNone())
self.assertEqual(monster2.NonOwningReference(), 0)
self.assertEqual(monster2.VectorOfNonOwningReferences(0), 0)
self.assertEqual(monster2.VectorOfNonOwningReferencesAsNumpy(), 0)
self.assertEqual(monster2.VectorOfNonOwningReferencesLength(), 0)
self.assertTrue(monster2.VectorOfNonOwningReferencesIsNone())
self.assertEqual(monster2.AnyUniqueType(), 0)
self.assertTrue(monster2.AnyUnique() is None)
self.assertEqual(monster2.AnyAmbiguousType(), 0)
self.assertTrue(monster2.AnyAmbiguous() is None)
self.assertEqual(monster2.VectorOfEnums(0), 0)
self.assertEqual(monster2.VectorOfEnumsAsNumpy(), 0)
self.assertEqual(monster2.VectorOfEnumsLength(), 0)
self.assertTrue(monster2.VectorOfEnumsIsNone())
def test_optional_scalars_with_pack_and_unpack(self):
"""Serializes and deserializes between a buffer with optional values (no
specific values are filled when the buffer is created) and its python
object.
"""
# Creates a flatbuffer with optional values.
b1 = flatbuffers.Builder(0)
optional_scalars.ScalarStuff.ScalarStuffStart(b1)
gen_opt = optional_scalars.ScalarStuff.ScalarStuffEnd(b1)
b1.Finish(gen_opt)
# Converts the flatbuffer into the object class.
opts1 = optional_scalars.ScalarStuff.ScalarStuff.GetRootAs(
b1.Bytes, b1.Head()
)
optsT1 = optional_scalars.ScalarStuff.ScalarStuffT.InitFromObj(opts1)
# Packs the object class into another flatbuffer.
b2 = flatbuffers.Builder(0)
b2.Finish(optsT1.Pack(b2))
opts2 = optional_scalars.ScalarStuff.ScalarStuff.GetRootAs(
b2.Bytes, b2.Head()
)
optsT2 = optional_scalars.ScalarStuff.ScalarStuffT.InitFromObj(opts2)
# Checks the default values.
self.assertTrue(opts2.JustI8() == 0)
self.assertTrue(opts2.MaybeF32() is None)
self.assertTrue(opts2.DefaultBool() is True)
self.assertTrue(optsT2.justU16 == 0)
self.assertTrue(optsT2.maybeEnum is None)
self.assertTrue(optsT2.defaultU64 == 42)
| TestObjectBasedAPI |
python | huggingface__transformers | src/transformers/pipelines/text_classification.py | {
"start": 591,
"end": 1638
} | class ____(ExplicitEnum):
SIGMOID = "sigmoid"
SOFTMAX = "softmax"
NONE = "none"
@add_end_docstrings(
build_pipeline_init_args(has_tokenizer=True),
r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output. In case of regression tasks, will not
apply any function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.""",
)
| ClassificationFunction |
python | pypa__pip | src/pip/_vendor/pygments/formatters/__init__.py | {
"start": 4847,
"end": 5385
} | class ____(types.ModuleType):
"""Automatically import formatters."""
def __getattr__(self, name):
info = FORMATTERS.get(name)
if info:
_load_formatters(info[0])
cls = _formatter_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
| _automodule |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 96067,
"end": 96520
} | class ____(str, Enum):
"""
* `majority` - send N/2+1 random request and return points, which present on all of them * `quorum` - send requests to all nodes and return points which present on majority of nodes * `all` - send requests to all nodes and return points which present on all nodes
"""
def __str__(self) -> str:
return str(self.value)
MAJORITY = "majority"
QUORUM = "quorum"
ALL = "all"
| ReadConsistencyType |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/recursiveTypeAlias16.py | {
"start": 213,
"end": 315
} | class ____(Generic[T]):
pass
TA1 = A["TA2[U]"] | B["TA2[U]"]
TA2 = TA1[U] | C[TA1[U]]
TA3 = TA2[U]
| C |
python | simonw__datasette | datasette/filters.py | {
"start": 6627,
"end": 6949
} | class ____:
def __init__(
self, where_clauses, params=None, human_descriptions=None, extra_context=None
):
self.where_clauses = where_clauses
self.params = params or {}
self.human_descriptions = human_descriptions or []
self.extra_context = extra_context or {}
| FilterArguments |
python | PyCQA__pylint | tests/functional/u/unused/unused_private_member.py | {
"start": 3603,
"end": 4506
} | class ____:
# pylint: disable=protected-access, no-member, unreachable
def __new__(cls, func, *args):
if args:
true_obj = super(FalsePositive4668, cls).__new__(cls)
true_obj.func = func
true_obj.__args = args # Do not emit message here
return true_obj
false_obj = super(FalsePositive4668, cls).__new__(cls)
false_obj.func = func
false_obj.__args = args # Do not emit message here
false_obj.__secret_bool = False
false_obj.__unused = None # [unused-private-member]
return false_obj
# unreachable but non-Name return value
return 3+4
def exec(self):
print(self.__secret_bool)
return self.func(*self.__args)
# https://github.com/pylint-dev/pylint/issues/4673
# Nested functions shouldn't cause a false positive if they are properly used
| FalsePositive4668 |
python | tiangolo__fastapi | fastapi/openapi/models.py | {
"start": 2557,
"end": 2726
} | class ____(BaseModelWithConfig):
enum: Annotated[Optional[List[str]], Field(min_length=1)] = None
default: str
description: Optional[str] = None
| ServerVariable |
python | getsentry__sentry | src/sentry/integrations/messaging/linkage.py | {
"start": 11391,
"end": 12880
} | class ____(IdentityLinkageView, ABC):
@property
def confirmation_template(self) -> str:
return "sentry/auth-unlink-identity.html"
@property
def no_identity_template(self) -> str | None:
"""Optional page to show if identities were not found."""
return None
@property
def filter_by_user_id(self) -> bool:
# TODO: Is it okay to just make this True everywhere?
return False
@property
def metrics_operation_key(self) -> str:
return "unlink_identity_view"
def persist_identity(
self, idp: IdentityProvider | None, external_id: str, request: HttpRequest
) -> HttpResponse | None:
if isinstance(request.user, AnonymousUser):
raise TypeError("Cannot link identity without a logged-in user")
try:
identities = Identity.objects.filter(external_id=external_id)
if idp is not None:
identities = identities.filter(idp=idp)
if self.filter_by_user_id:
identities = identities.filter(user_id=request.user.id)
if self.no_identity_template and not identities:
return render_to_response(self.no_identity_template, request=request, context={})
identities.delete()
except IntegrityError:
tag = f"{self.provider_slug}.unlink.integrity-error"
logger.exception(tag)
raise Http404
return None
@region_silo_view
| UnlinkIdentityView |
python | sqlalchemy__sqlalchemy | test/orm/test_backref_mutations.py | {
"start": 16745,
"end": 18110
} | class ____(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
Address, addresses, users, User = (
cls.classes.Address,
cls.tables.addresses,
cls.tables.users,
cls.classes.User,
)
cls.mapper_registry.map_imperatively(Address, addresses)
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"address": relationship(
Address,
uselist=False,
backref=backref(
"user",
single_parent=True,
cascade="all, delete-orphan",
),
)
},
)
def test_m2o_event(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session(future=True)
a1 = Address(email_address="address1")
u1 = User(name="jack", address=a1)
sess.add(u1)
sess.commit()
sess.expunge(u1)
u2 = User(name="ed")
# the _SingleParent extension sets the backref get to "active" !
# u1 gets loaded and deleted
sess.add(u2)
u2.address = a1
sess.commit()
assert sess.query(User).count() == 1
| O2OScalarOrphanTest |
python | networkx__networkx | networkx/generators/tests/test_atlas.py | {
"start": 229,
"end": 698
} | class ____:
"""Unit tests for the :func:`~networkx.graph_atlas` function."""
def test_index_too_small(self):
with pytest.raises(ValueError):
graph_atlas(-1)
def test_index_too_large(self):
with pytest.raises(ValueError):
graph_atlas(NUM_GRAPHS)
def test_graph(self):
G = graph_atlas(6)
assert nodes_equal(G.nodes(), range(3))
assert edges_equal(G.edges(), [(0, 1), (0, 2)])
| TestAtlasGraph |
python | tensorflow__tensorflow | tensorflow/python/tools/api/generator2/extractor/extractor_test.py | {
"start": 2746,
"end": 7114
} | class ____(): # 14
pass # 15
@api_export.tf_export("e", "e_v2", v1=[]) # 16
def _e(): # 17
pass # 18
tf_export(v1=["f", "f_alias"])( # 19
dispatch.dispatch(deprecation(_f)) # 20
) # 21
@other_export("not-exported") # 22
def _not_exported(): # 23
pass # 24
""",
)
self.assertEqual(
exporter,
exported_api.ExportedApi(
symbols=[
exported_api.ExportedSymbol(
file_name='test.py',
line_no=10,
symbol_name='_a',
v1_apis=('tf.a',),
v2_apis=('tf.a',),
),
exported_api.ExportedSymbol(
file_name='test.py',
line_no=11,
symbol_name='_b',
v1_apis=('tf.v1_b',),
v2_apis=('tf.b',),
),
exported_api.ExportedSymbol(
file_name='test.py',
line_no=12,
symbol_name='_c',
v1_apis=('tf.c',),
v2_apis=('tf.c',),
),
exported_api.ExportedSymbol(
file_name='test.py',
line_no=13,
symbol_name='_D',
v1_apis=('tf.d',),
v2_apis=('tf.d',),
),
exported_api.ExportedSymbol(
file_name='test.py',
line_no=16,
symbol_name='_e',
v1_apis=(),
v2_apis=('tf.e', 'tf.e_v2'),
),
exported_api.ExportedSymbol(
file_name='test.py',
line_no=19,
symbol_name='_f',
v1_apis=('tf.f', 'tf.f_alias'),
v2_apis=(),
),
],
),
)
def test_exported_symbol_not_at_top_level(self):
exporter = exported_api.ExportedApi()
p = extractor.Parser(
exporter,
decorator='tf.tf_export',
api_name='tf',
)
self.assertRaisesRegex(
extractor.BadExportError,
'test.py:4',
lambda: p.process( # pylint: disable=g-long-lambda
'test.py',
"""# 1
from tf import tf_export # 2
def method(): # 3
tf_export("a")(a) # 4
""",
),
)
def test_exported_symbol_not_applied(self):
exporter = exported_api.ExportedApi()
p = extractor.Parser(
exporter,
decorator='tf.tf_export',
api_name='tf',
)
self.assertRaisesRegex(
extractor.BadExportError,
'test.py:3',
lambda: p.process( # pylint: disable=g-long-lambda
'test.py',
"""# 1
from tf import tf_export # 2
tf_export("a") # 3
""",
),
)
def test_exported_symbol_non_literal_args(self):
exporter = exported_api.ExportedApi()
p = extractor.Parser(
exporter,
decorator='tf.tf_export',
api_name='tf',
)
self.assertRaisesRegex(
extractor.BadExportError,
'test.py:3',
lambda: p.process( # pylint: disable=g-long-lambda
'test.py',
"""# 1
from tf import tf_export # 2
tf_export(a)(b) # 3
""",
),
)
def test_exported_symbol_unknown_args(self):
exporter = exported_api.ExportedApi()
p = extractor.Parser(
exporter,
decorator='tf.tf_export',
api_name='tf',
)
self.assertRaisesRegex(
extractor.BadExportError,
'test.py:3',
lambda: p.process( # pylint: disable=g-long-lambda
'test.py',
"""# 1
from tf import tf_export # 2
tf_export(a)(b) # 3
""",
),
)
def test_exported_symbol_includes_module(self):
exporter = exported_api.ExportedApi()
p = extractor.Parser(
exporter,
decorator='tf.tf_export',
api_name='tf',
)
self.assertRaisesRegex(
extractor.BadExportError,
'test.py:3',
lambda: p.process( # pylint: disable=g-long-lambda
'test.py',
"""# 1
from tf import tf_export # 2
tf_export(a)(x.b) # 3
""",
),
)
if __name__ == '__main__':
absltest.main()
| _D |
python | kamyu104__LeetCode-Solutions | Python/number-of-divisible-triplet-sums.py | {
"start": 591,
"end": 1089
} | class ____(object):
def divisibleTripletCount(self, nums, d):
"""
:type nums: List[int]
:type d: int
:rtype: int
"""
result = 0
cnt = collections.Counter()
for i in xrange(len(nums)):
if nums[i]%d in cnt:
result += cnt[nums[i]%d]
for j in xrange(i):
cnt[-(nums[i]+nums[j])%d] += 1
return result
# Time: O(n^2)
# Space: O(n)
import collections
# freq table
| Solution2 |
python | python-pillow__Pillow | Tests/test_image_resample.py | {
"start": 10004,
"end": 14223
} | class ____:
def make_levels_case(self, mode: str) -> Image.Image:
i = Image.new(mode, (256, 16))
px = i.load()
assert px is not None
for y in range(i.size[1]):
for x in range(i.size[0]):
pix = [x] * len(mode)
pix[-1] = 255 - y * 16
px[x, y] = tuple(pix)
return i
def run_levels_case(self, i: Image.Image) -> None:
px = i.load()
assert px is not None
for y in range(i.size[1]):
used_colors = set()
for x in range(i.size[0]):
value = px[x, y]
assert isinstance(value, tuple)
used_colors.add(value[0])
assert 256 == len(used_colors), (
"All colors should be present in resized image. "
f"Only {len(used_colors)} on line {y}."
)
@pytest.mark.xfail(reason="Current implementation isn't precise enough")
def test_levels_rgba(self) -> None:
case = self.make_levels_case("RGBA")
self.run_levels_case(case.resize((512, 32), Image.Resampling.BOX))
self.run_levels_case(case.resize((512, 32), Image.Resampling.BILINEAR))
self.run_levels_case(case.resize((512, 32), Image.Resampling.HAMMING))
self.run_levels_case(case.resize((512, 32), Image.Resampling.BICUBIC))
self.run_levels_case(case.resize((512, 32), Image.Resampling.LANCZOS))
@pytest.mark.xfail(reason="Current implementation isn't precise enough")
def test_levels_la(self) -> None:
case = self.make_levels_case("LA")
self.run_levels_case(case.resize((512, 32), Image.Resampling.BOX))
self.run_levels_case(case.resize((512, 32), Image.Resampling.BILINEAR))
self.run_levels_case(case.resize((512, 32), Image.Resampling.HAMMING))
self.run_levels_case(case.resize((512, 32), Image.Resampling.BICUBIC))
self.run_levels_case(case.resize((512, 32), Image.Resampling.LANCZOS))
def make_dirty_case(
self, mode: str, clean_pixel: tuple[int, ...], dirty_pixel: tuple[int, ...]
) -> Image.Image:
i = Image.new(mode, (64, 64), dirty_pixel)
px = i.load()
assert px is not None
xdiv4 = i.size[0] // 4
ydiv4 = i.size[1] // 4
for y in range(ydiv4 * 2):
for x in range(xdiv4 * 2):
px[x + xdiv4, y + ydiv4] = clean_pixel
return i
def run_dirty_case(self, i: Image.Image, clean_pixel: tuple[int, ...]) -> None:
px = i.load()
assert px is not None
for y in range(i.size[1]):
for x in range(i.size[0]):
value = px[x, y]
assert isinstance(value, tuple)
if value[-1] != 0 and value[:-1] != clean_pixel:
message = (
f"pixel at ({x}, {y}) is different:\n{value}\n{clean_pixel}"
)
assert value[:3] == clean_pixel, message
def test_dirty_pixels_rgba(self) -> None:
case = self.make_dirty_case("RGBA", (255, 255, 0, 128), (0, 0, 255, 0))
self.run_dirty_case(case.resize((20, 20), Image.Resampling.BOX), (255, 255, 0))
self.run_dirty_case(
case.resize((20, 20), Image.Resampling.BILINEAR), (255, 255, 0)
)
self.run_dirty_case(
case.resize((20, 20), Image.Resampling.HAMMING), (255, 255, 0)
)
self.run_dirty_case(
case.resize((20, 20), Image.Resampling.BICUBIC), (255, 255, 0)
)
self.run_dirty_case(
case.resize((20, 20), Image.Resampling.LANCZOS), (255, 255, 0)
)
def test_dirty_pixels_la(self) -> None:
case = self.make_dirty_case("LA", (255, 128), (0, 0))
self.run_dirty_case(case.resize((20, 20), Image.Resampling.BOX), (255,))
self.run_dirty_case(case.resize((20, 20), Image.Resampling.BILINEAR), (255,))
self.run_dirty_case(case.resize((20, 20), Image.Resampling.HAMMING), (255,))
self.run_dirty_case(case.resize((20, 20), Image.Resampling.BICUBIC), (255,))
self.run_dirty_case(case.resize((20, 20), Image.Resampling.LANCZOS), (255,))
| TestCoreResampleAlphaCorrect |
python | walkccc__LeetCode | solutions/1243. Array Transformation/1243.py | {
"start": 0,
"end": 358
} | class ____:
def transformArray(self, arr: list[int]) -> list[int]:
if len(arr) < 3:
return arr
ans = []
while ans != arr:
ans = arr[:]
for i in range(1, len(arr) - 1):
if ans[i - 1] > ans[i] < ans[i + 1]:
arr[i] += 1
elif ans[i - 1] < ans[i] > ans[i + 1]:
arr[i] -= 1
return ans
| Solution |
python | getsentry__sentry | tests/sentry/tasks/test_llm_issue_detection.py | {
"start": 10693,
"end": 13564
} | class ____(APITransactionTestCase, SnubaTestCase, SpanTestCase):
def setUp(self) -> None:
super().setUp()
self.ten_mins_ago = before_now(minutes=10)
def test_get_evidence_trace_for_llm_detection(self) -> None:
transaction_name = "api/users/profile"
# Create multiple traces with different span counts
traces_data = [
(5, "trace-medium", 0),
(2, "trace-small", 10),
(8, "trace-large", 20),
]
spans = []
trace_ids = []
expected_trace_id = None
for span_count, trace_suffix, start_offset_minutes in traces_data:
trace_id = uuid.uuid4().hex
trace_ids.append(trace_id)
if trace_suffix == "trace-medium":
expected_trace_id = trace_id
for i in range(span_count):
span = self.create_span(
{
"description": f"span-{i}-{trace_suffix}",
"sentry_tags": {"transaction": transaction_name},
"trace_id": trace_id,
"parent_span_id": None if i == 0 else f"parent-{i-1}",
"is_segment": i == 0,
},
start_ts=self.ten_mins_ago + timedelta(minutes=start_offset_minutes + i),
)
spans.append(span)
self.store_spans(spans, is_eap=True)
# Call the LLM detection function
result = get_evidence_trace_for_llm_detection(transaction_name, self.project.id)
# Verify basic structure
assert result is not None
assert result.transaction_name == transaction_name
assert result.project_id == self.project.id
assert result.trace_id in trace_ids
assert result.trace_id == expected_trace_id
assert result.total_spans == 5
assert len(result.spans) == 5
# Verify it's EvidenceTraceData with EvidenceSpan objects
assert isinstance(result, EvidenceTraceData)
for result_span in result.spans:
assert isinstance(result_span, EvidenceSpan)
assert result_span.span_id is not None
assert result_span.description is not None
assert result_span.description.startswith("span-")
assert "trace-medium" in result_span.description
assert hasattr(result_span, "op")
assert hasattr(result_span, "exclusive_time")
assert hasattr(result_span, "data")
assert result_span.data is not None
assert "duration" in result_span.data
assert "status" in result_span.data
# Verify parent-child relationships are preserved
root_spans = [s for s in result.spans if s.parent_span_id is None]
assert len(root_spans) == 1
| TestGetEvidenceTraceForLLMDetection |
python | realpython__materials | python-selenium/src/bandcamp/web/locators.py | {
"start": 370,
"end": 653
} | class ____:
PLAY_BUTTON = (By.CSS_SELECTOR, "button.play-pause-button")
URL = (By.CSS_SELECTOR, "div.meta p a")
ALBUM = (By.CSS_SELECTOR, "div.meta p a strong")
GENRE = (By.CSS_SELECTOR, "div.meta p.genre")
ARTIST = (By.CSS_SELECTOR, "div.meta p a span")
| TrackLocator |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/embedding_ops_test.py | {
"start": 27853,
"end": 39957
} | class ____(test.TestCase, parameterized.TestCase):
def _RandomIdsAndWeights(self, batch_size, vocab_size, ragged=False):
max_val_per_entry = 6
vals_per_batch_entry = np.random.randint(
1, max_val_per_entry, size=batch_size)
num_vals = np.sum(vals_per_batch_entry)
ids = np.random.randint(vocab_size, size=num_vals)
weights = 1 + np.random.rand(num_vals)
indices = []
for batch_entry, num_val in enumerate(vals_per_batch_entry):
for val_index in range(num_val):
indices.append([batch_entry, val_index])
shape = [batch_size, max_val_per_entry]
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
if ragged:
sp_ids = ragged_tensor.RaggedTensor.from_sparse(sp_ids)
sp_weights = ragged_tensor.RaggedTensor.from_sparse(sp_weights)
return sp_ids, sp_weights, ids, weights, vals_per_batch_entry
def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
grouped_vals = []
index = 0
for num_val in vals_per_batch_entry:
grouped_vals.append(list(vals[index:(index + num_val)]))
index += num_val
return grouped_vals
@parameterized.parameters(
itertools.product(
[1, 5],
["sum", "mean", "sqrtn"],
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64],
[True, False],
[True, False],
[True, False],
)
)
@test_util.run_deprecated_v1
def testEmbeddingLookupSparse(
self,
num_shards,
combiner,
dtype,
ignore_weights,
ragged,
allow_fast_lookup,
):
vocab_size = 13
batch_size = 10
param_shape = [2, 5]
expected_lookup_result_shape = [None] + param_shape
sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
self._RandomIdsAndWeights(batch_size, vocab_size, ragged)
)
grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
grouped_ignored_weights = self._GroupByBatchEntry(
np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry
)
with self.cached_session():
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype
)
embedding_sum = embedding_ops.embedding_lookup_sparse(
p,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner,
allow_fast_lookup=allow_fast_lookup,
)
self.assertEqual(
embedding_sum.get_shape().as_list(), expected_lookup_result_shape
)
self.assertEqual(embedding_sum.dtype, dtype)
tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)
np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
params,
grouped_ids,
num_shards,
vocab_size,
weight_vals=grouped_ignored_weights
if ignore_weights
else grouped_weights,
)
if combiner == "mean":
np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
if combiner == "sqrtn":
np_embedding_sum /= np.reshape(
np.sqrt(np_weight_sq_sum), (batch_size, 1, 1)
)
rtol = 1e-6
if dtype == dtypes.bfloat16:
rtol = 1e-2
elif dtype == dtypes.float16:
rtol = 1e-3
atol = rtol
self.assertAllClose(np_embedding_sum, tf_embedding_sum, rtol, atol)
@parameterized.parameters(
itertools.product(["sum", "mean", "sqrtn"], [True, False], [True, False])
)
def testMissingInSparseIds(self, combiner, ragged, allow_fast_lookup):
# Github issue, 36359
with self.test_session():
x = array_ops.ones((4, 5))
indices = [[1, 0], [3, 0]]
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant([0, 2], dtypes.int32),
constant_op.constant([4, 1], dtypes.int64),
)
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant([1, 1], dtypes.float32),
constant_op.constant([4, 1], dtypes.int64),
)
if ragged:
sp_ids = ragged_tensor.RaggedTensor.from_sparse(sp_ids)
sp_weights = ragged_tensor.RaggedTensor.from_sparse(sp_weights)
embedding_sum = embedding_ops.embedding_lookup_sparse(
x,
sp_ids,
sp_weights,
combiner=combiner,
allow_fast_lookup=allow_fast_lookup,
)
tf_embedding_sum = ops.convert_to_tensor(embedding_sum)
self.assertAllClose(tf_embedding_sum[0], np.zeros(5))
self.assertAllClose(tf_embedding_sum[1], np.ones(5))
self.assertAllClose(tf_embedding_sum[2], np.zeros(5))
self.assertAllClose(tf_embedding_sum[3], np.ones(5))
@parameterized.parameters(
itertools.product(
[1, 3],
["sum", "mean", "sqrtn"],
[dtypes.float32, dtypes.float64],
[True, False],
[True, False],
[True, False],
)
)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookupSparse(
self,
num_shards,
combiner,
dtype,
ignore_weights,
ragged,
allow_fast_lookup,
):
vocab_size = 12
batch_size = 4
param_shape = [2, 3]
sp_ids, sp_weights, _, _, _ = self._RandomIdsAndWeights(
batch_size, vocab_size, ragged
)
with self.cached_session():
x, params, _ = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype
)
y = embedding_ops.embedding_lookup_sparse(
x,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner,
allow_fast_lookup=allow_fast_lookup,
)
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value
)
self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)
@parameterized.parameters(itertools.product([True, False], [True, False]))
@test_util.run_deprecated_v1
def testIncompatibleShapes(self, ragged, allow_fast_lookup):
with self.cached_session():
x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
indices = [[0, 0], [0, 1], [1, 0]]
indices_weights = [[0, 0], [0, 1]]
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant([0, 1, 2], dtypes.int32),
constant_op.constant([2, 2], dtypes.int64),
)
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices_weights, dtypes.int64),
constant_op.constant([12.0, 5.0], dtypes.float32),
constant_op.constant([1, 2], dtypes.int64),
)
if ragged:
sp_ids = ragged_tensor.RaggedTensor.from_sparse(sp_ids)
sp_weights = ragged_tensor.RaggedTensor.from_sparse(sp_weights)
with self.assertRaises(ValueError):
embedding_ops.embedding_lookup_sparse(
x,
sp_ids,
sp_weights,
combiner="mean",
allow_fast_lookup=allow_fast_lookup,
)
@test_util.run_deprecated_v1
def test_incompatible_types(self):
with self.cached_session():
x = array_ops.ones((4, 5))
indices = [[1, 0], [3, 0]]
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant([0, 2], dtypes.int32),
constant_op.constant([4, 1], dtypes.int64),
)
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant([1, 1], dtypes.float32),
constant_op.constant([4, 1], dtypes.int64),
)
sp_weights = ragged_tensor.RaggedTensor.from_sparse(sp_weights)
self.assertRaises(
TypeError,
embedding_ops.embedding_lookup_sparse,
x,
sp_ids,
sp_weights,
)
def _SortByKey(self, keys, vals):
perm = sort_ops.argsort(keys)
return array_ops.gather(keys, perm), array_ops.gather(vals, perm)
def _ExpectedSparseGradient(
self, nnz, param_shape, np_type, sp_ids, sp_weights, combiner
):
"""Returns the expected indices and values corresponding to the (sparse)
gradient of a sparse embedding lookup.
"""
expected_values = np.ones([nnz] + param_shape, dtype=np_type)
segment_ids = sp_ids.indices[:, 0]
ignore_weights = sp_weights is None
weights = (
array_ops.ones(nnz, dtype=dtypes.float32)
if ignore_weights
else sp_weights.values
)
if combiner == "sqrtn":
weights = weights**2
segment_weights = math_ops.segment_sum(weights, segment_ids)
if combiner != "sum":
grad_scale = 1.0 / array_ops.gather(segment_weights, segment_ids)
if combiner == "sqrtn":
grad_scale = math_ops.sqrt(grad_scale)
expected_values *= grad_scale[:, None]
if not ignore_weights:
expected_values *= sp_weights.values[:, None]
expected_indices = sp_ids.values
# Sort and deduplicate the indices in the expected sparse tensor.
expected_indices, expected_values = self._SortByKey(
expected_indices, expected_values
)
expected_indices, unique_mapping = array_ops.unique(expected_indices)
expected_values = math_ops.segment_sum(expected_values, unique_mapping)
return expected_indices, expected_values
def testResourceVariableGradientEmbeddingLookupSparse(self):
"""Explicitly checks the gradient of a sparse embedding lookup with
ResourceVariable input.
"""
vocab_size = 128
batch_size = 32
param_shape = [16]
sp_ids, sp_weights, _, _, _ = self._RandomIdsAndWeights(
batch_size, vocab_size
)
for combiner, dtype, ignore_weights in itertools.product(
["sum", "mean", "sqrtn"],
[dtypes.float32, dtypes.float64],
[True, False],
):
with self.test_session(), forward_compat.forward_compatibility_horizon(
2023, 9, 26
):
x_shape = [vocab_size] + param_shape
np_type = "f" if dtype == dtypes.float32 else "d"
x = np.random.uniform(size=x_shape).astype(np_type) + 1
x = resource_variable_ops.ResourceVariable(x)
self.evaluate(variables.global_variables_initializer())
def forward(x_):
y_ = embedding_ops.embedding_lookup_sparse(
x_,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner,
)
return y_
with gradients.GradientTape() as g:
y = forward(x)
dx = g.gradient(y, x)
self.assertAllEqual(dx.dense_shape, x_shape)
actual_indices, actual_values = dx.indices, dx.values
# The sort order of the output is not guaranteed, so we must sort it
# into a consistent order before comparing.
actual_indices, actual_values = self._SortByKey(
actual_indices, actual_values
)
nnz = sp_ids.values.get_shape()[0]
expected_indices, expected_values = self._ExpectedSparseGradient(
nnz,
param_shape,
np_type,
sp_ids,
None if ignore_weights else sp_weights,
combiner,
)
self.assertAllEqual(actual_indices, expected_indices)
self.assertAllClose(actual_values, expected_values)
| EmbeddingLookupSparseTest |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0047_build_default_triggered.py | {
"start": 149,
"end": 979
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0046_identifier_null"),
]
operations = [
migrations.AlterField(
model_name="build",
name="state",
field=models.CharField(
choices=[
("triggered", "Triggered"),
("cloning", "Cloning"),
("installing", "Installing"),
("building", "Building"),
("uploading", "Uploading"),
("finished", "Finished"),
("cancelled", "Cancelled"),
],
db_index=True,
default="triggered",
max_length=55,
verbose_name="State",
),
),
]
| Migration |
python | doocs__leetcode | solution/1300-1399/1374.Generate a String With Characters That Have Odd Counts/Solution.py | {
"start": 0,
"end": 121
} | class ____:
def generateTheString(self, n: int) -> str:
return 'a' * n if n & 1 else 'a' * (n - 1) + 'b'
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 186116,
"end": 188354
} | class ____(Request):
"""
Delete models from task
:param task: ID of the task
:type task: str
:param models: The list of models to delete
:type models: Sequence[dict]
"""
_service = "tasks"
_action = "delete_models"
_version = "2.20"
_schema = {
"definitions": {"model_type_enum": {"enum": ["input", "output"], "type": "string"}},
"properties": {
"models": {
"description": "The list of models to delete",
"items": {
"properties": {
"name": {
"description": "The task model name",
"type": "string",
},
"type": {
"$ref": "#/definitions/model_type_enum",
"description": "The task model type",
},
},
"required": ["name", "type"],
"type": "object",
},
"type": "array",
},
"task": {"description": "ID of the task", "type": "string"},
},
"required": ["task", "models"],
"type": "object",
}
def __init__(self, task: str, models: List[dict], **kwargs: Any) -> None:
super(DeleteModelsRequest, self).__init__(**kwargs)
self.task = task
self.models = models
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("models")
def models(self) -> List[dict]:
return self._property_models
@models.setter
def models(self, value: List[dict]) -> None:
if value is None:
self._property_models = None
return
self.assert_isinstance(value, "models", (list, tuple))
self.assert_isinstance(value, "models", (dict,), is_array=True)
self._property_models = value
| DeleteModelsRequest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/fastapi/FAST001.py | {
"start": 141,
"end": 2421
} | class ____(BaseModel):
name: str
# Errors
@app.post("/items/", response_model=Item)
async def create_item(item: Item) -> Item:
return item
@app.post("/items/", response_model=list[Item])
async def create_item(item: Item) -> list[Item]:
return item
@app.post("/items/", response_model=List[Item])
async def create_item(item: Item) -> List[Item]:
return item
@app.post("/items/", response_model=Dict[str, Item])
async def create_item(item: Item) -> Dict[str, Item]:
return item
@app.post("/items/", response_model=str)
async def create_item(item: Item) -> str:
return item
@app.get("/items/", response_model=Item)
async def create_item(item: Item) -> Item:
return item
@app.get("/items/", response_model=Item)
@app.post("/items/", response_model=Item)
async def create_item(item: Item) -> Item:
return item
@router.get("/items/", response_model=Item)
async def create_item(item: Item) -> Item:
return item
# OK
async def create_item(item: Item) -> Item:
return item
@app("/items/", response_model=Item)
async def create_item(item: Item) -> Item:
return item
@cache
async def create_item(item: Item) -> Item:
return item
@app.post("/items/", response_model=str)
async def create_item(item: Item) -> Item:
return item
@app.post("/items/")
async def create_item(item: Item) -> Item:
return item
@app.post("/items/", response_model=str)
async def create_item(item: Item):
return item
@app.post("/items/", response_model=list[str])
async def create_item(item: Item) -> Dict[str, Item]:
return item
@app.post("/items/", response_model=list[str])
async def create_item(item: Item) -> list[str, str]:
return item
@app.post("/items/", response_model=Dict[str, int])
async def create_item(item: Item) -> Dict[str, str]:
return item
app = None
@app.post("/items/", response_model=Item)
async def create_item(item: Item) -> Item:
return item
# Routes might be defined inside functions
def setup_app(app_arg: FastAPI, non_app: str) -> None:
# Error
@app_arg.get("/", response_model=str)
async def get_root() -> str:
return "Hello World!"
# Ok
@non_app.get("/", response_model=str)
async def get_root() -> str:
return "Hello World!"
| Item |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py | {
"start": 13471,
"end": 13637
} | class ____(AdsInsights):
breakdowns = ["publisher_platform", "device_platform"]
action_breakdowns = ["action_type"]
| AdsInsightsDeliveryPlatformAndDevicePlatform |
python | tensorflow__tensorflow | tensorflow/lite/testing/generate_examples_lib.py | {
"start": 14881,
"end": 15902
} | class ____:
"""State of multiple set generation process.
This state class stores the information needed when generating the examples
for multiple test set. The stored informations are open archive object to be
shared, information on test target for current iteration of generation,
accumulated generation results.
"""
def __init__(self):
# Open archive.
self.archive = None
# Test name for current generation.
self.test_name = None
# Label base path containing the test name.
# Each of the test data path in the zip archive is derived from this path.
# If this path is "a/b/c/d.zip", an example of generated test data path
# is "a/b/c/d_input_type=tf.float32,input_shape=[2,2].inputs".
# The test runner interpretes the test name of this path as "d".
# Label base path also should finish with ".zip".
self.label_base_path = None
# Zip manifests.
self.zip_manifest = []
# Number of all parameters accumulated.
self.parameter_count = 0
| MultiGenState |
python | PyCQA__pylint | tests/functional/m/membership_protocol.py | {
"start": 987,
"end": 1270
} | class ____:
def __getitem__(self, key):
if key < 10:
return 2 ** key
else:
raise IndexError("bad index")
64 in OldStyleIterable()
# do not emit warning if class has unknown bases
from some_missing_module import ImportedClass
| OldStyleIterable |
python | pytorch__pytorch | torch/backends/_nnapi/serializer.py | {
"start": 3626,
"end": 3903
} | class ____(NamedTuple):
"""Configuration arguments for a convolution."""
kernel_h: int
kernel_w: int
stride_h: int
stride_w: int
pad_t: int
pad_b: int
pad_l: int
pad_r: int
dilation_h: int
dilation_w: int
group: int
| ConvPoolArgs2d |
python | sanic-org__sanic | guide/webapp/display/page/page.py | {
"start": 772,
"end": 5170
} | class ____:
path: Path
content: str
meta: PageMeta = field(default_factory=PageMeta)
_relative_path: Path | None = None
next_page: Page | None = None
previous_page: Page | None = None
anchors: list[str] = field(default_factory=list)
DEFAULT_LANGUAGE = _DEFAULT
def get_layout(self) -> type[BaseLayout]:
return _LAYOUTS_CACHE[self.meta.layout]
@property
def relative_path(self) -> Path:
if self._relative_path is None:
raise RuntimeError("Page not initialized")
return self._relative_path
@classmethod
def get(
cls, language: str, path: str
) -> tuple[Page | None, Page | None, Page | None]:
if path.endswith("/") or not path:
path += "index.html"
if not path.endswith(".md"):
path = path.removesuffix(".html") + ".md"
if language == "api":
path = f"/api/{path}"
return _PAGE_CACHE.get(language, {}).get(path, (None, None, None))
@classmethod
def load_pages(cls, base_path: Path, page_order: list[str]) -> list[Page]:
output: list[Page] = []
for path in base_path.glob("**/*.md"):
relative = path.relative_to(base_path)
language = relative.parts[0]
name = "/".join(relative.parts[1:])
page = cls._load_page(path)
output.append(page)
page._relative_path = relative
_PAGE_CACHE.setdefault(language, {})[name] = (
None,
page,
None,
)
_PAGE_CACHE["api"] = {}
for language, pages in _PAGE_CACHE.items():
for name, (_, current, _) in pages.items():
previous_page = None
next_page = None
try:
index = page_order.index(name)
except ValueError:
continue
try:
if index > 0:
previous_page = pages[page_order[index - 1]][1]
except KeyError:
pass
try:
if index < len(page_order) - 1:
next_page = pages[page_order[index + 1]][1]
except KeyError:
pass
pages[name] = (previous_page, current, next_page)
previous_page = None
next_page = None
api_pages = cls._load_api_pages()
filtered_order = [ref for ref in page_order if ref in api_pages]
for idx, ref in enumerate(filtered_order):
current_page = api_pages[ref]
previous_page = None
next_page = None
try:
if idx > 0:
previous_page = api_pages[filtered_order[idx - 1]]
except KeyError:
pass
try:
if idx < len(filtered_order) - 1:
next_page = api_pages[filtered_order[idx + 1]]
except KeyError:
pass
_PAGE_CACHE["api"][ref] = (previous_page, current_page, next_page)
return output
@staticmethod
def _load_page(path: Path) -> Page:
raw = path.read_text()
metadata, raw_content = parse(raw)
content = render_markdown(raw_content)
page = Page(
path=path,
content=content,
meta=PageMeta(**metadata),
)
if not page.meta.title:
page.meta.title = page.path.stem.replace("-", " ").title()
for line in raw.splitlines():
if line.startswith("##") and not line.startswith("###"):
line = line.lstrip("#").strip()
page.anchors.append(line)
return page
@staticmethod
def _load_api_pages() -> dict[str, Page]:
docstring_content = organize_docobjects("sanic")
output: dict[str, Page] = {}
for module, content in docstring_content.items():
path = Path(module)
page = Page(
path=path,
content=content,
meta=PageMeta(
title=path.stem,
description="",
layout="main",
),
)
page._relative_path = Path(f"./{module}")
output[module] = page
return output
| Page |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1158807,
"end": 1162146
} | class ____(sgqlc.types.Type, Node):
"""An identity provider configured to provision identities for an
enterprise. Visible to enterprise owners or enterprise owners'
personal access tokens (classic) with read:enterprise or
admin:enterprise scope.
"""
__schema__ = github_schema
__field_names__ = (
"digest_method",
"enterprise",
"external_identities",
"idp_certificate",
"issuer",
"recovery_codes",
"signature_method",
"sso_url",
)
digest_method = sgqlc.types.Field(SamlDigestAlgorithm, graphql_name="digestMethod")
"""The digest algorithm used to sign SAML requests for the identity
provider.
"""
enterprise = sgqlc.types.Field(Enterprise, graphql_name="enterprise")
"""The enterprise this identity provider belongs to."""
external_identities = sgqlc.types.Field(
sgqlc.types.non_null(ExternalIdentityConnection),
graphql_name="externalIdentities",
args=sgqlc.types.ArgDict(
(
("members_only", sgqlc.types.Arg(Boolean, graphql_name="membersOnly", default=None)),
("login", sgqlc.types.Arg(String, graphql_name="login", default=None)),
("user_name", sgqlc.types.Arg(String, graphql_name="userName", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""ExternalIdentities provisioned by this identity provider.
Arguments:
* `members_only` (`Boolean`): Filter to external identities with
valid org membership only
* `login` (`String`): Filter to external identities with the users
login
* `user_name` (`String`): Filter to external identities with the
users userName/NameID attribute
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
idp_certificate = sgqlc.types.Field(X509Certificate, graphql_name="idpCertificate")
"""The x509 certificate used by the identity provider to sign
assertions and responses.
"""
issuer = sgqlc.types.Field(String, graphql_name="issuer")
"""The Issuer Entity ID for the SAML identity provider."""
recovery_codes = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="recoveryCodes")
"""Recovery codes that can be used by admins to access the enterprise
if the identity provider is unavailable.
"""
signature_method = sgqlc.types.Field(SamlSignatureAlgorithm, graphql_name="signatureMethod")
"""The signature algorithm used to sign SAML requests for the
identity provider.
"""
sso_url = sgqlc.types.Field(URI, graphql_name="ssoUrl")
"""The URL endpoint for the identity provider's SAML SSO."""
| EnterpriseIdentityProvider |
python | pytorch__pytorch | torch/_inductor/shape_propagation.py | {
"start": 232,
"end": 2241
} | class ____(Protocol):
@property
def shape(self) -> BlockShapeType: ...
ShapeArg = Union[ShapeVar, torch.types.Number, str, OpsValue, torch.dtype]
# Inputs need to be cacheable (e.g., not a CSEVar) in order for the cache to be effective
# So first decompose CSEVars -> tuple before calling this
@functools.lru_cache(None)
def get_broadcasted_shape(a: BlockShapeType, b: BlockShapeType) -> BlockShapeType:
assert isinstance(a, Sequence)
assert isinstance(b, Sequence)
if len(a) > len(b):
return get_broadcasted_shape(a, (*[1] * (len(a) - len(b)), *b))
elif len(a) < len(b):
b, a = a, b
return get_broadcasted_shape(a, (*[1] * (len(a) - len(b)), *b))
else:
def _get_broadcasted_dim(
d1: Union[int, str], d2: Union[int, str]
) -> Union[int, str]:
if str(d1) == "1":
return d2
elif str(d2) == "1":
return d1
assert str(d1) == str(d2)
return d1
return tuple(_get_broadcasted_dim(d1, d2) for d1, d2 in zip(a, b))
def broadcast_shapes_for_args(args: Sequence[ShapeArg]) -> BlockShapeType:
result_shape: BlockShapeType = None
for arg in args:
if hasattr(arg, "shape"):
shape = arg.shape
if shape is None:
return None
elif result_shape is None:
result_shape = tuple(shape)
else:
result_shape = get_broadcasted_shape(result_shape, tuple(shape))
elif isinstance(arg, (int, float)):
if result_shape is None:
result_shape = ()
elif isinstance(arg, torch.dtype):
continue
else:
from torch._inductor.loop_body import LoopBody, LoopBodyBlock
if isinstance(arg, (LoopBodyBlock, LoopBody, OpsValue)):
# TODO: fix me
return None
raise TypeError(f"Unknown type: {type(arg)}")
return result_shape
| ShapeVar |
python | walkccc__LeetCode | solutions/407. Trapping Rain Water II/407.py | {
"start": 0,
"end": 1111
} | class ____:
def trapRainWater(self, heightMap: list[list[int]]) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(heightMap)
n = len(heightMap[0])
ans = 0
minHeap = []
seen = set()
for i in range(m):
heapq.heappush(minHeap, (heightMap[i][0], i, 0))
heapq.heappush(minHeap, (heightMap[i][n - 1], i, n - 1))
seen.add((i, 0))
seen.add((i, n - 1))
for j in range(1, n - 1):
heapq.heappush(minHeap, (heightMap[0][j], 0, j))
heapq.heappush(minHeap, (heightMap[m - 1][j], m - 1, j))
seen.add((0, j))
seen.add((m - 1, j))
while minHeap:
h, i, j = heapq.heappop(minHeap)
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
if (x, y) in seen:
continue
if heightMap[x][y] < h:
ans += h - heightMap[x][y]
# Fill water in grid[x][y].
heapq.heappush(minHeap, (h, x, y))
else:
heapq.heappush(minHeap, (heightMap[x][y], x, y))
seen.add((x, y))
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/closest-node-to-path-in-tree.py | {
"start": 6115,
"end": 7194
} | class ____(object): # Time: O(N), Space: O(N), N is the number of nodes
def __init__(self, children): # modified
def preprocess(curr, parent):
# depth of the node i
D[curr] = 1 if parent == -1 else D[parent]+1
# ancestors of the node i
P[curr] = parent
def divide(curr, parent):
for i in reversed(xrange(len(children[curr]))):
child = children[curr][i]
if child == parent:
continue
stk.append(partial(divide, child, curr))
stk.append(partial(preprocess, curr, parent))
N = len(children)
D, P = [0]*N, [0]*N
stk = []
stk.append(partial(divide, 0, -1))
while stk:
stk.pop()()
self.D, self.P = D, P
def lca(self, a, b): # Time: O(h)
while self.D[a] > self.D[b]:
a = self.P[a]
while self.D[a] < self.D[b]:
b = self.P[b]
while a != b:
a, b = self.P[a], self.P[b]
return a
# lca
| TreeInfos3 |
python | python-markdown__markdown | markdown/extensions/fenced_code.py | {
"start": 1642,
"end": 8300
} | class ____(Preprocessor):
""" Find and extract fenced code blocks. """
FENCED_BLOCK_RE = re.compile(
dedent(r'''
(?P<fence>^(?:~{3,}|`{3,}))[ ]* # opening fence
((\{(?P<attrs>[^\n]*)\})| # (optional {attrs} or
(\.?(?P<lang>[\w#.+-]*)[ ]*)? # optional (.)lang
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot)[ ]*)?) # optional hl_lines)
\n # newline (end of opening fence)
(?P<code>.*?)(?<=\n) # the code block
(?P=fence)[ ]*$ # closing fence
'''),
re.MULTILINE | re.DOTALL | re.VERBOSE
)
def __init__(self, md: Markdown, config: dict[str, Any]):
super().__init__(md)
self.config = config
self.checked_for_deps = False
self.codehilite_conf: dict[str, Any] = {}
self.use_attr_list = False
# List of options to convert to boolean values
self.bool_options = [
'linenums',
'guess_lang',
'noclasses',
'use_pygments'
]
def run(self, lines: list[str]) -> list[str]:
""" Match and store Fenced Code Blocks in the `HtmlStash`. """
# Check for dependent extensions
if not self.checked_for_deps:
for ext in self.md.registeredExtensions:
if isinstance(ext, CodeHiliteExtension):
self.codehilite_conf = ext.getConfigs()
if isinstance(ext, AttrListExtension):
self.use_attr_list = True
self.checked_for_deps = True
text = "\n".join(lines)
index = 0
while 1:
m = self.FENCED_BLOCK_RE.search(text, index)
if m:
lang, id, classes, config = None, '', [], {}
if m.group('attrs'):
attrs, remainder = get_attrs_and_remainder(m.group('attrs'))
if remainder: # Does not have correctly matching curly braces, so the syntax is invalid.
index = m.end('attrs') # Explicitly skip over this, to prevent an infinite loop.
continue
id, classes, config = self.handle_attrs(attrs)
if len(classes):
lang = classes.pop(0)
else:
if m.group('lang'):
lang = m.group('lang')
if m.group('hl_lines'):
# Support `hl_lines` outside of `attrs` for backward-compatibility
config['hl_lines'] = parse_hl_lines(m.group('hl_lines'))
# If `config` is not empty, then the `codehighlite` extension
# is enabled, so we call it to highlight the code
if self.codehilite_conf and self.codehilite_conf['use_pygments'] and config.get('use_pygments', True):
local_config = self.codehilite_conf.copy()
local_config.update(config)
# Combine classes with `cssclass`. Ensure `cssclass` is at end
# as Pygments appends a suffix under certain circumstances.
# Ignore ID as Pygments does not offer an option to set it.
if classes:
local_config['css_class'] = '{} {}'.format(
' '.join(classes),
local_config['css_class']
)
highliter = CodeHilite(
m.group('code'),
lang=lang,
style=local_config.pop('pygments_style', 'default'),
**local_config
)
code = highliter.hilite(shebang=False)
else:
id_attr = lang_attr = class_attr = kv_pairs = ''
if lang:
prefix = self.config.get('lang_prefix', 'language-')
lang_attr = f' class="{prefix}{_escape_attrib_html(lang)}"'
if classes:
class_attr = f' class="{_escape_attrib_html(" ".join(classes))}"'
if id:
id_attr = f' id="{_escape_attrib_html(id)}"'
if self.use_attr_list and config and not config.get('use_pygments', False):
# Only assign key/value pairs to code element if `attr_list` extension is enabled, key/value
# pairs were defined on the code block, and the `use_pygments` key was not set to `True`. The
# `use_pygments` key could be either set to `False` or not defined. It is omitted from output.
kv_pairs = ''.join(
f' {k}="{_escape_attrib_html(v)}"' for k, v in config.items() if k != 'use_pygments'
)
code = self._escape(m.group('code'))
code = f'<pre{id_attr}{class_attr}><code{lang_attr}{kv_pairs}>{code}</code></pre>'
placeholder = self.md.htmlStash.store(code)
text = f'{text[:m.start()]}\n{placeholder}\n{text[m.end():]}'
# Continue from after the replaced text in the next iteration.
index = m.start() + 1 + len(placeholder)
else:
break
return text.split("\n")
def handle_attrs(self, attrs: Iterable[tuple[str, str]]) -> tuple[str, list[str], dict[str, Any]]:
""" Return tuple: `(id, [list, of, classes], {configs})` """
id = ''
classes = []
configs = {}
for k, v in attrs:
if k == 'id':
id = v
elif k == '.':
classes.append(v)
elif k == 'hl_lines':
configs[k] = parse_hl_lines(v)
elif k in self.bool_options:
configs[k] = parseBoolValue(v, fail_on_errors=False, preserve_none=True)
else:
configs[k] = v
return id, classes, configs
def _escape(self, txt: str) -> str:
""" basic html escaping """
txt = txt.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
txt = txt.replace('"', '"')
return txt
def makeExtension(**kwargs): # pragma: no cover
return FencedCodeExtension(**kwargs)
| FencedBlockPreprocessor |
python | google__flatbuffers | tests/py_flexbuffers_test.py | {
"start": 8749,
"end": 36735
} | class ____(unittest.TestCase):
"""Tests to check FlexBuffer decoding functions.
Common variable names used in the tests for compactness:
bw: byte_width
ebw: element_byte_width
kbw: key_byte_width
vbw: value_byte_width
tbw: type_byte_width
Having '_ignored' suffix means that variable doesn't affect the constructed
byte buffer size.
"""
def test_null(self):
for bw in 1, 2, 4, 8:
for ebw_ignored in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw_ignored=ebw_ignored):
data = bytes([
*uint_bytes(0, bw),
packed_type(Type.NULL, ebw_ignored),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsNull)
self.assertEqual(root.AsBool, False)
self.assertEqual(root.AsInt, 0)
self.assertEqual(root.AsFloat, 0.0)
for prop in (
type(root).AsKey,
type(root).AsString,
type(root).AsBlob,
type(root).AsVector,
type(root).AsTypedVector,
type(root).AsFixedTypedVector,
type(root).AsMap,
):
with self.assertRaises(TypeError):
prop.fget(root)
self.assertEqual(root.Value, None)
self.assertIsNone(flexbuffers.Loads(data))
def test_bool(self):
for value in False, True:
for bw in 1, 2, 4, 8:
for ebw_ignored in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw_ignored=ebw_ignored):
data = bytes([
*uint_bytes(int(value), bw),
packed_type(Type.BOOL, ebw_ignored),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsBool)
self.assertEqual(root.AsBool, value)
self.assertEqual(root.AsInt, int(value))
self.assertEqual(root.AsFloat, float(value))
for prop in (
type(root).AsKey,
type(root).AsString,
type(root).AsBlob,
type(root).AsVector,
type(root).AsTypedVector,
type(root).AsFixedTypedVector,
type(root).AsMap,
):
with self.assertRaises(TypeError):
prop.fget(root)
self.assertEqual(root.Value, value)
self.assertEqual(flexbuffers.Loads(data), value)
def test_mutate_bool(self):
root = flexbuffers.GetRoot(flexbuffers.Dumps(True))
self.assertTrue(root.IsBool)
self.assertTrue(root.AsBool)
self.assertTrue(root.MutateBool(False))
self.assertTrue(root.IsBool)
self.assertFalse(root.AsBool)
self.assertTrue(root.MutateBool(True))
self.assertTrue(root.IsBool)
self.assertTrue(root.AsBool)
def _check_int(self, data, value):
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsInt)
self.assertEqual(root.AsInt, value)
self.assertEqual(root.AsBool, bool(value))
self.assertEqual(root.AsFloat, float(value))
for prop in (
type(root).AsKey,
type(root).AsString,
type(root).AsBlob,
type(root).AsVector,
type(root).AsTypedVector,
type(root).AsFixedTypedVector,
type(root).AsMap,
):
with self.assertRaises(TypeError):
prop.fget(root)
self.assertEqual(root.Value, value)
self.assertEqual(flexbuffers.Loads(data), value)
def test_int(self):
for value in (0, 1, -1, 15, -17, *INT_MIN_MAX_VALUES):
for bw in int_sizes(value):
for ebw_ignored in 1, 2, 4, 8:
with self.subTest(value=value, bw=bw, ebw_ignored=ebw_ignored):
data = bytes([
*int_bytes(value, bw),
packed_type(Type.INT, ebw_ignored),
bw,
])
self._check_int(data, value)
def test_indirect_int(self):
for value in (0, 1, -1, 15, -17, *INT_MIN_MAX_VALUES):
for bw in 1, 2, 4, 8:
for ebw in int_sizes(value):
with self.subTest(value=value, bw=bw, ebw=ebw):
data = bytes([
# Int
*int_bytes(value, ebw),
# Root
*uint_bytes(ebw, bw),
packed_type(Type.INDIRECT_INT, ebw),
bw,
])
self._check_int(data, value)
def test_uint(self):
for value in (1, *UINT_MIN_MAX_VALUES):
for bw in uint_sizes(value):
for ebw_ignored in 1, 2, 4, 8:
with self.subTest(value=value, bw=bw, ebw_ignored=ebw_ignored):
data = bytes([
*uint_bytes(value, bw),
packed_type(Type.UINT, ebw_ignored),
bw,
])
self._check_int(data, value)
def test_inidirect_uint(self):
for value in (1, *UINT_MIN_MAX_VALUES):
for bw in 1, 2, 4, 8:
for ebw in uint_sizes(value):
with self.subTest(value=value, bw=bw, ebw=ebw):
data = bytes([
# UInt
*uint_bytes(value, ebw),
# Root
*uint_bytes(ebw, bw),
packed_type(Type.INDIRECT_UINT, ebw),
bw,
])
self._check_int(data, value)
def test_mutate_ints(self):
# Signed
for type_ in Type.INT, Type.INDIRECT_INT:
with self.subTest(type=type_):
root = flexbuffers.GetRoot(encode_type(type_, 56))
self.assertEqual(root.AsInt, 56)
for new_value in 0, 1, -1, -128, 127:
self.assertTrue(root.MutateInt(new_value))
self.assertEqual(root.AsInt, new_value)
for new_value in -129, 128:
self.assertFalse(root.MutateInt(new_value))
# Unsigned
for type_ in Type.UINT, Type.INDIRECT_UINT:
with self.subTest(type=type_):
root = flexbuffers.GetRoot(encode_type(type_, 1))
self.assertEqual(root.AsInt, 1)
for new_value in 0, 1, 255:
self.assertTrue(root.MutateInt(new_value))
self.assertEqual(root.AsInt, new_value)
self.assertFalse(root.MutateInt(256))
# Inside vector
fbb = flexbuffers.Builder()
fbb.VectorFromElements([13, 0, -15])
data = fbb.Finish()
self.assertEqual(flexbuffers.Loads(data), [13, 0, -15])
self.assertTrue(flexbuffers.GetRoot(data).AsVector[0].MutateInt(0))
self.assertTrue(flexbuffers.GetRoot(data).AsVector[1].MutateInt(-7))
self.assertTrue(flexbuffers.GetRoot(data).AsVector[2].MutateInt(45))
self.assertEqual(flexbuffers.Loads(data), [0, -7, 45])
# Inside map
fbb = flexbuffers.Builder()
fbb.MapFromElements({'x': -7, 'y': 46})
data = fbb.Finish()
self.assertEqual(flexbuffers.Loads(data), {'x': -7, 'y': 46})
self.assertTrue(flexbuffers.GetRoot(data).AsMap['x'].MutateInt(14))
self.assertTrue(flexbuffers.GetRoot(data).AsMap['y'].MutateInt(-1))
self.assertEqual(flexbuffers.Loads(data), {'x': 14, 'y': -1})
def _check_float(self, data, value):
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsFloat)
self.assertAlmostEqual(root.AsFloat, value)
for prop in (
type(root).AsKey,
type(root).AsString,
type(root).AsBlob,
type(root).AsVector,
type(root).AsTypedVector,
type(root).AsFixedTypedVector,
type(root).AsMap,
):
with self.assertRaises(TypeError):
prop.fget(root)
self.assertAlmostEqual(root.Value, value)
self.assertAlmostEqual(flexbuffers.Loads(data), value)
def test_float(self):
for value in -1.0, 0.0, 1.0, 3.141592, 1.5e6:
for bw in 4, 8:
for ebw_ignored in 1, 2, 4, 8:
with self.subTest(value=value, bw=bw, ebw_ignored=ebw_ignored):
data = bytes([
*float_bytes(value, bw),
packed_type(Type.FLOAT, ebw_ignored),
bw,
])
self._check_float(data, value)
def test_indirect_float(self):
for value in -1.0, 0.0, 1.0, 3.141592, 1.5e6:
for bw in 1, 2, 4, 8:
for ebw in 4, 8:
with self.subTest(value=value, bw=bw, ebw=ebw):
data = bytes([
# Float
*float_bytes(value, ebw),
# Root
*uint_bytes(ebw, bw),
packed_type(Type.INDIRECT_FLOAT, ebw),
bw,
])
self._check_float(data, value)
def test_mutate_float(self):
for type_ in Type.FLOAT, Type.INDIRECT_FLOAT:
for bw in 4, 8:
value = 3.141592
root = flexbuffers.GetRoot(encode_type(type_, value, bw))
self.assertAlmostEqual(root.AsFloat, value)
value = 2.71828
self.assertTrue(root.MutateFloat(value))
self.assertAlmostEqual(root.AsFloat, value, places=5)
# Inside vector
data = flexbuffers.Dumps([2.4, 1.5, -7.2])
self.assertTrue(flexbuffers.GetRoot(data).AsVector[0].MutateFloat(0.0))
self.assertTrue(flexbuffers.GetRoot(data).AsVector[1].MutateFloat(15.2))
self.assertTrue(flexbuffers.GetRoot(data).AsVector[2].MutateFloat(-5.1))
for a, b in zip(flexbuffers.Loads(data), [0.0, 15.2, -5.1]):
self.assertAlmostEqual(a, b)
def test_string(self):
for value in 'red', 'green', 'blue', 'flatbuffers + flexbuffers':
value_bytes = value.encode('utf-8')
for bw in 1, 2, 4, 8:
for lbw in 1, 2, 4, 8:
with self.subTest(bw=bw, lbw=lbw):
data = bytes([
# String
*uint_bytes(len(value_bytes), lbw),
*value_bytes,
0,
# Root
*uint_bytes(len(value_bytes) + 1, bw), # offset
packed_type(Type.STRING, lbw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsString)
self.assertEqual(root.AsString, value)
self.assertEqual(root.Value, value)
self.assertEqual(root.AsInt, len(value))
self.assertEqual(flexbuffers.Loads(data), value)
def test_mutate_string(self):
data = encode_type(Type.STRING, '12345')
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsString)
self.assertEqual(root.AsString, '12345')
self.assertFalse(root.MutateString('543210'))
self.assertTrue(root.MutateString('54321'))
self.assertTrue(root.IsString)
self.assertEqual(root.AsString, '54321')
self.assertTrue(root.MutateString('543'))
self.assertTrue(root.IsString)
self.assertEqual(root.AsString, '543')
self.assertFalse(root.MutateString('54321'))
def test_empty_blob(self):
for bw in 1, 2, 4, 8:
for lbw in 1, 2, 4, 8:
with self.subTest(bw=bw, lbw=lbw):
data = bytes([
# Blob
*uint_bytes(0, lbw),
# Root
*uint_bytes(0, bw),
packed_type(Type.BLOB, lbw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsBlob)
self.assertEqual(root.AsBlob, bytes())
self.assertEqual(root.Value, bytes())
self.assertEqual(flexbuffers.Loads(data), bytes())
def test_blob(self):
for blob in [], [215], [23, 75, 124, 0, 45, 15], 255 * [0]:
for bw in 1, 2, 4, 8:
for lbw in 1, 2, 4, 8:
with self.subTest(blob=blob, bw=bw, lbw=lbw):
data = bytes([
# Blob
*uint_bytes(len(blob), lbw),
*blob,
# Root
*uint_bytes(len(blob), bw),
packed_type(Type.BLOB, lbw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsBlob)
self.assertEqual(root.AsBlob, bytes(blob))
self.assertEqual(root.Value, bytes(blob))
self.assertEqual(flexbuffers.Loads(data), bytes(blob))
def test_key(self):
for value in '', 'x', 'color':
for bw in 1, 2, 4, 8:
with self.subTest(value=value, bw=bw):
value_bytes = value.encode('ascii')
data = bytes([
# Key
*value_bytes,
0,
# Root
*uint_bytes(len(value_bytes) + 1, bw),
packed_type(Type.KEY, 1),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsKey)
self.assertEqual(root.AsKey, value)
self.assertEqual(root.Value, value)
self.assertEqual(flexbuffers.Loads(data), value)
def _check_fixed_typed_vector(self, data, vector, type_):
self.assertEqual(flexbuffers.Loads(data), vector)
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsFixedTypedVector)
v = root.AsFixedTypedVector
self.assertEqual(len(v), len(vector))
self.assertIs(v.ElementType, type_)
self.assertEqual([e.Value for e in v], vector)
self.assertSequenceEqual(v.Value, vector)
self.assertEqual(root.AsInt, len(vector))
def test_fixed_typed_vector_float(self):
for type_, vector in (
(Type.VECTOR_FLOAT2, [-75.0, 34.89]),
(Type.VECTOR_FLOAT3, [-75.0, 34.89, 12.0]),
(Type.VECTOR_FLOAT4, [-75.0, 34.89, -1.0, 1.0]),
):
for bw in 1, 2, 4, 8:
for ebw in 4, 8:
with self.subTest(type=type_, vector=vector, bw=bw, ebw=ebw):
data = bytes([
# FixedTypedVector
*b''.join(float_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(len(vector) * ebw, bw),
packed_type(type_, ebw),
bw,
])
for a, b in zip(flexbuffers.Loads(data), vector):
self.assertAlmostEqual(a, b, places=2)
def test_fixed_typed_vector_int(self):
for type_, vector in (
(Type.VECTOR_INT2, [0, -13]),
(Type.VECTOR_INT3, [127, 0, -13]),
(Type.VECTOR_INT4, [127, 0, -13, 0]),
):
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(type=type_, vector=vector, bw=bw, ebw=ebw):
data = bytes([
# FixedTypeVector
*b''.join(int_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(ebw * len(vector), bw),
packed_type(type_, ebw),
bw,
])
self._check_fixed_typed_vector(data, vector, Type.INT)
def test_fixed_typed_vector_uint(self):
for type_, vector in (
(Type.VECTOR_UINT2, [0, 13]),
(Type.VECTOR_UINT3, [127, 0, 13]),
(Type.VECTOR_UINT4, [127, 0, 13, 0]),
):
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(type=type_, vector=vector, bw=bw, ebw=ebw):
data = bytes([
# FixedTypeVector
*b''.join(uint_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(ebw * len(vector), bw),
packed_type(type_, ebw),
bw,
])
self._check_fixed_typed_vector(data, vector, Type.UINT)
def _check_typed_vector(self, data, vector, type_):
self.assertEqual(flexbuffers.Loads(data), vector)
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsTypedVector)
v = root.AsTypedVector
self.assertIs(v.ElementType, type_)
self.assertEqual(len(v), len(vector))
self.assertEqual([e.Value for e in v], vector)
self.assertSequenceEqual(v.Value, vector)
self.assertEqual(root.AsInt, len(vector))
def test_empty_typed_vector(self):
for type_ in (
Type.VECTOR_BOOL,
Type.VECTOR_INT,
Type.VECTOR_UINT,
Type.VECTOR_FLOAT,
Type.VECTOR_KEY,
Type.VECTOR_STRING_DEPRECATED,
):
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(type=type_, bw=bw, ebw=ebw):
data = bytes([
# TypedVector[type_]
*uint_bytes(0, ebw),
# Root
*uint_bytes(0, bw),
packed_type(type_, ebw),
bw,
])
element_type = Type.ToTypedVectorElementType(type_)
if element_type == Type.STRING:
element_type = Type.KEY
self._check_typed_vector(data, [], element_type)
def test_typed_vector_bool(self):
vector = [True, False, False, False, True]
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# TypedVector[Type.BOOL]
*uint_bytes(len(vector), ebw),
*b''.join(uint_bytes(int(e), ebw) for e in vector),
# Root
*uint_bytes(len(vector) * ebw, bw),
packed_type(Type.VECTOR_BOOL, ebw),
bw,
])
self._check_typed_vector(data, vector, Type.BOOL)
def test_typed_vector_int(self):
vector = [-100, 200, -300]
for bw in 1, 2, 4, 8:
for ebw in 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# TypedVector[Type.INT]
*uint_bytes(len(vector), ebw),
*b''.join(int_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(len(vector) * ebw, bw),
packed_type(Type.VECTOR_INT, ebw),
bw,
])
self._check_typed_vector(data, vector, Type.INT)
def test_typed_vector_uint(self):
vector = [100, 200, 300, 400, 0]
for bw in 1, 2, 4, 8:
for ebw in 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# TypedVector[Type.UINT]
*uint_bytes(len(vector), ebw),
*b''.join(int_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(len(vector) * ebw, bw),
packed_type(Type.VECTOR_UINT, ebw),
bw,
])
self._check_typed_vector(data, vector, Type.UINT)
def test_typed_vector_float(self):
vector = [3.64, -6.36, 3.14, 634.0, -42.0]
for bw in 1, 2, 4, 8:
for ebw in 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# TypedVector[Type.FLOAT]
*uint_bytes(len(vector), ebw),
*b''.join(float_bytes(e, ebw) for e in vector),
# Root
*uint_bytes(ebw * len(vector), bw),
packed_type(Type.VECTOR_FLOAT, ebw),
bw,
])
for a, b in zip(flexbuffers.Loads(data), vector):
self.assertAlmostEqual(a, b, places=2)
def test_typed_vector_key(self):
vector = ['red', 'green', 'blue']
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# Keys
*key_bytes(vector[0]),
*key_bytes(vector[1]),
*key_bytes(vector[2]),
# TypedVector[Type.KEY]
*uint_bytes(len(vector), ebw),
*uint_bytes(15 + 1 * ebw, ebw), # offset to vector[0]
*uint_bytes(11 + 2 * ebw, ebw), # offset to vector[1]
*uint_bytes(5 + 3 * ebw, ebw), # offset to vector[2]
# Root
*uint_bytes(len(vector) * ebw, bw), # offset to vector
packed_type(Type.VECTOR_KEY, ebw),
bw,
])
self._check_typed_vector(data, vector, Type.KEY)
def test_typed_vector_string(self):
vector = ['red', 'green', 'blue']
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw):
data = bytes([
# Strings
*str_bytes(vector[0], 1), # 5 bytes
*str_bytes(vector[1], 1), # 7 bytes
*str_bytes(vector[2], 1), # 6 bytes
# TypedVector[Type.STRING]
*uint_bytes(len(vector), ebw),
*uint_bytes(17 + 1 * ebw, ebw), # offset to vector[0]
*uint_bytes(12 + 2 * ebw, ebw), # offset to vector[1]
*uint_bytes(5 + 3 * ebw, ebw), # offset to vector[2]
# Root
*uint_bytes(len(vector) * ebw, bw), # offset to vector
packed_type(Type.VECTOR_STRING_DEPRECATED, ebw),
bw,
])
# We have to pass Type.KEY because of Type.VECTOR_STRING_DEPRECATED.
self._check_typed_vector(data, vector, Type.KEY)
def test_typed_vector_string_deprecated(self):
# Check FlexBuffersDeprecatedTest() inside test.cpp for details.
vector = [300 * 'A', 'test']
fbb = flexbuffers.Builder()
with fbb.TypedVector():
for e in vector:
fbb.String(e)
data = fbb.Finish()
# We have to pass Type.KEY because of Type.VECTOR_STRING_DEPRECATED.
self._check_typed_vector(data, vector, Type.KEY)
def test_typed_vector_invalid(self):
fbb = flexbuffers.Builder()
with self.assertRaises(RuntimeError):
fbb.TypedVectorFromElements(['string', 423])
def test_empty_vector(self):
for bw in 1, 2, 4, 8:
for ebw in 1, 2, 4, 8:
data = bytes([
*uint_bytes(0, ebw),
# Root
*uint_bytes(0, bw),
packed_type(Type.VECTOR, ebw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsVector)
self.assertEqual(len(root.AsVector), 0)
self.assertEqual(flexbuffers.Loads(data), [])
def test_vector1(self):
vector = [300, 400, 500]
for bw in 1, 2, 4, 8:
for ebw in 2, 4, 8:
for tbw_ignored in 1, 2, 4, 8:
with self.subTest(bw=bw, ebw=ebw, ignore=tbw_ignored):
data = bytes([
# Vector length
*uint_bytes(len(vector), ebw),
# Vector elements
*int_bytes(vector[0], ebw),
*int_bytes(vector[1], ebw),
*int_bytes(vector[2], ebw),
# Vector types
packed_type(Type.INT, tbw_ignored),
packed_type(Type.INT, tbw_ignored),
packed_type(Type.INT, tbw_ignored),
# Root
*uint_bytes(ebw * len(vector) + len(vector), bw),
packed_type(Type.VECTOR, ebw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsVector)
self.assertFalse(root.IsMap)
v = root.AsVector
self.assertEqual(len(v), len(vector))
for i in range(len(v)):
self.assertTrue(v[i].IsInt)
self.assertEqual(v[i].AsInt, vector[i])
for i, e in enumerate(v):
self.assertTrue(e.IsInt)
self.assertEqual(e.AsInt, vector[i])
with self.assertRaises(IndexError):
v[-1].AsInt # pylint: disable=pointless-statement
with self.assertRaises(IndexError):
v[3].AsInt # pylint: disable=pointless-statement
with self.assertRaises(TypeError):
root.AsMap # pylint: disable=pointless-statement
self.assertEqual(root.AsInt, len(vector))
self.assertEqual(root.AsFloat, float(len(vector)))
self.assertEqual(flexbuffers.Loads(data), vector)
def test_vector2(self):
vector = [1984, 'August', True]
for bw in 1, 2, 4, 8:
with self.subTest(bw=bw):
data = bytes([
*str_bytes(vector[1], 1),
# Vector
*uint_bytes(len(vector), 2),
*int_bytes(vector[0], 2),
*uint_bytes(11, 2), # offset to 'August'
*uint_bytes(int(vector[2]), 2),
packed_type(Type.INT, 2),
packed_type(Type.STRING, 1),
packed_type(Type.BOOL, 2),
# Root
*uint_bytes(2 * len(vector) + len(vector), bw), # offset to vector
packed_type(Type.VECTOR, 2),
bw,
])
self.assertEqual(flexbuffers.Loads(data), vector)
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsVector)
v = root.AsVector
self.assertTrue(v[0].IsInt)
self.assertEqual(v[0].AsInt, 1984)
self.assertTrue(v[1].IsString)
self.assertEqual(v[1].AsString, 'August')
self.assertTrue(v[2].IsBool)
self.assertTrue(v[2].AsBool)
self.assertEqual(v.Value, vector)
self.assertEqual(root.AsInt, len(vector))
def test_empty_map(self):
for bw in 1, 2, 4, 8:
for kbw in 1, 2, 4, 8:
for vbw in 1, 2, 4, 8:
data = bytes([
*uint_bytes(0, kbw), # Keys length
*uint_bytes(0, vbw),
*uint_bytes(kbw, vbw),
*uint_bytes(0, vbw), # Values length
# Root
*uint_bytes(0, bw),
packed_type(Type.MAP, vbw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsMap)
self.assertEqual(len(root.AsMap), 0)
self.assertEqual(flexbuffers.Loads(data), {})
def test_map(self):
value = {'foo': 13, 'bar': 14}
for bw in 1, 2, 4, 8:
for kbw in 1, 2, 4, 8:
for vbw in 1, 2, 4, 8:
with self.subTest(kbw=kbw, vbw=vbw, bw=bw):
data = bytes([
*key_bytes('foo'), # 4 bytes
*key_bytes('bar'), # 4 bytes
# Map
*uint_bytes(len(value), kbw),
*uint_bytes(4 + 1 * kbw, kbw), # offset to 'bar'
*uint_bytes(8 + 2 * kbw, kbw), # offset to 'foo'
*uint_bytes(len(value) * kbw, vbw), # offset to keys
*uint_bytes(kbw, vbw),
*uint_bytes(len(value), vbw),
*int_bytes(value['bar'], vbw),
*int_bytes(value['foo'], vbw),
packed_type(Type.INT, vbw),
packed_type(Type.INT, vbw),
# Root
*uint_bytes(
vbw * len(value) + len(value), bw
), # offset to values
packed_type(Type.MAP, vbw),
bw,
])
root = flexbuffers.GetRoot(data)
self.assertTrue(root.IsMap)
m = root.AsMap
self.assertEqual(len(m), 2)
self.assertEqual(m[0].AsInt, 14)
self.assertEqual(m[1].AsInt, 13)
self.assertEqual(m['bar'].AsInt, 14)
self.assertEqual(m['foo'].AsInt, 13)
for invalid_key in 'a', 'b', 'no':
with self.assertRaises(KeyError):
m[invalid_key] # pylint: disable=pointless-statement
values = m.Values
self.assertEqual(len(values), 2)
self.assertEqual(values[0].AsInt, 14)
self.assertEqual(values[1].AsInt, 13)
keys = m.Keys
self.assertEqual(len(keys), 2)
self.assertEqual(len(keys[0].AsKey), 3)
self.assertEqual(keys[0].AsKey, 'bar')
self.assertEqual(len(keys[1].AsKey), 3)
self.assertEqual(keys[1].AsKey, 'foo')
keys = [key.AsKey for key in keys]
self.assertEqual(sorted(keys), keys)
self.assertEqual(root.AsInt, len(value))
self.assertEqual(flexbuffers.Loads(data), value)
def test_alignment(self):
value = ['test', 7]
data = bytes([
*key_bytes('test'), # 5 bytes: 'test' and \0
0,
0,
0, # 3 bytes: alignment
# Vector
*uint_bytes(len(value), byte_width=8),
*uint_bytes(16, byte_width=8),
*uint_bytes(7, byte_width=8),
packed_type(Type.KEY, 1),
packed_type(Type.INT, 8),
# Root
*uint_bytes(8 * len(value) + len(value), 1),
packed_type(Type.VECTOR, 8),
1,
])
self.assertEqual(flexbuffers.Loads(data), value)
| DecoderTest |
python | pytest-dev__pytest-xdist | src/xdist/workermanage.py | {
"start": 950,
"end": 7402
} | class ____:
EXIT_TIMEOUT = 10
DEFAULT_IGNORES = [".*", "*.pyc", "*.pyo", "*~"]
def __init__(
self,
config: pytest.Config,
specs: Sequence[execnet.XSpec | str] | None = None,
defaultchdir: str = "pyexecnetcache",
) -> None:
self.config = config
self.trace = self.config.trace.get("nodemanager")
self.testrunuid = self.config.getoption("testrunuid")
if self.testrunuid is None:
self.testrunuid = uuid.uuid4().hex
self.group = execnet.Group(execmodel="main_thread_only")
for proxy_spec in self._getpxspecs():
# Proxy gateways do not run workers, and are meant to be passed with the `via` attribute
# to additional gateways.
# They are useful for running multiple workers on remote machines.
if getattr(proxy_spec, "id", None) is None:
raise pytest.UsageError(
f"Proxy gateway {proxy_spec} must include an id"
)
self.group.makegateway(proxy_spec)
if specs is None:
specs = self._gettxspecs()
self.specs: list[execnet.XSpec] = []
for spec in specs:
if not isinstance(spec, execnet.XSpec):
spec = execnet.XSpec(spec)
if getattr(spec, "execmodel", None) != "main_thread_only":
spec = execnet.XSpec(f"execmodel=main_thread_only//{spec}")
if not spec.chdir and not spec.popen:
spec.chdir = defaultchdir
self.group.allocate_id(spec)
self.specs.append(spec)
self.roots = self._getrsyncdirs()
self.rsyncoptions = self._getrsyncoptions()
self._rsynced_specs: set[tuple[Any, Any]] = set()
def rsync_roots(self, gateway: execnet.Gateway) -> None:
"""Rsync the set of roots to the node's gateway cwd."""
if self.roots:
for root in self.roots:
self.rsync(gateway, root, **self.rsyncoptions)
def setup_nodes(
self,
putevent: Callable[[tuple[str, dict[str, Any]]], None],
) -> list[WorkerController]:
self.config.hook.pytest_xdist_setupnodes(config=self.config, specs=self.specs)
self.trace("setting up nodes")
return [self.setup_node(spec, putevent) for spec in self.specs]
def setup_node(
self,
spec: execnet.XSpec,
putevent: Callable[[tuple[str, dict[str, Any]]], None],
) -> WorkerController:
if getattr(spec, "execmodel", None) != "main_thread_only":
spec = execnet.XSpec(f"execmodel=main_thread_only//{spec}")
gw = self.group.makegateway(spec)
self.config.hook.pytest_xdist_newgateway(gateway=gw)
self.rsync_roots(gw)
node = WorkerController(self, gw, self.config, putevent)
# Keep the node alive.
gw.node = node # type: ignore[attr-defined]
node.setup()
self.trace("started node %r" % node)
return node
def teardown_nodes(self) -> None:
self.group.terminate(self.EXIT_TIMEOUT)
def _gettxspecs(self) -> list[execnet.XSpec]:
return [execnet.XSpec(x) for x in parse_tx_spec_config(self.config)]
def _getpxspecs(self) -> list[execnet.XSpec]:
return [execnet.XSpec(x) for x in self.config.getoption("px")]
def _getrsyncdirs(self) -> list[Path]:
for spec in self.specs:
if not spec.popen or spec.chdir:
break
else:
return []
import _pytest
import pytest
def get_dir(p: str) -> str:
"""Return the directory path if p is a package or the path to the .py file otherwise."""
stripped = p.rstrip("co")
if os.path.basename(stripped) == "__init__.py":
return os.path.dirname(p)
else:
return stripped
pytestpath = get_dir(pytest.__file__)
pytestdir = get_dir(_pytest.__file__)
config = self.config
candidates = [pytestpath, pytestdir]
candidates += config.option.rsyncdir
rsyncroots = config.getini("rsyncdirs")
if rsyncroots:
candidates.extend(rsyncroots)
roots = []
for root in candidates:
root_path = Path(root).resolve()
if not root_path.exists():
raise pytest.UsageError(f"rsyncdir doesn't exist: {root!r}")
if root_path not in roots:
roots.append(root_path)
return roots
def _getrsyncoptions(self) -> dict[str, Any]:
"""Get options to be passed for rsync."""
ignores = list(self.DEFAULT_IGNORES)
ignores += [str(path) for path in self.config.option.rsyncignore]
ignores += [str(path) for path in self.config.getini("rsyncignore")]
return {
"ignores": ignores,
"verbose": getattr(self.config.option, "verbose", 0),
}
def rsync(
self,
gateway: execnet.Gateway,
source: str | os.PathLike[str],
notify: (
Callable[[str, execnet.XSpec, str | os.PathLike[str]], Any] | None
) = None,
verbose: int = False,
ignores: Sequence[str] | None = None,
) -> None:
"""Perform rsync to remote hosts for node."""
# XXX This changes the calling behaviour of
# pytest_xdist_rsyncstart and pytest_xdist_rsyncfinish to
# be called once per rsync target.
rsync = HostRSync(source, verbose=verbose > 0, ignores=ignores)
spec = gateway.spec
if spec.popen and not spec.chdir:
# XXX This assumes that sources are python-packages
# and that adding the basedir does not hurt.
gateway.remote_exec(
"""
import sys ; sys.path.insert(0, %r)
"""
% os.path.dirname(str(source))
).waitclose()
return
if (spec, source) in self._rsynced_specs:
return
def finished() -> None:
if notify:
notify("rsyncrootready", spec, source)
rsync.add_target_host(gateway, finished=finished)
self._rsynced_specs.add((spec, source))
self.config.hook.pytest_xdist_rsyncstart(source=source, gateways=[gateway])
rsync.send()
self.config.hook.pytest_xdist_rsyncfinish(source=source, gateways=[gateway])
| NodeManager |
python | sympy__sympy | sympy/physics/quantum/gate.py | {
"start": 25805,
"end": 26743
} | class ____(OneQubitGate):
"""The single qubit pi/8 gate.
This gate rotates the phase of the state by pi/4 if the state is ``|1>`` and
does nothing if the state is ``|0>``.
Parameters
----------
target : int
The target qubit this gate will apply to.
Examples
========
"""
is_hermitian = False
gate_name = 'T'
gate_name_latex = 'T'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('T', format)
def _eval_commutator_ZGate(self, other, **hints):
return _S.Zero
def _eval_commutator_PhaseGate(self, other, **hints):
return _S.Zero
# Aliases for gate names.
H = HadamardGate
X = XGate
Y = YGate
Z = ZGate
T = TGate
Phase = S = PhaseGate
#-----------------------------------------------------------------------------
# 2 Qubit Gates
#-----------------------------------------------------------------------------
| TGate |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/choose_from_datasets_test.py | {
"start": 6136,
"end": 7515
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_dataset(self,
num_datasets,
num_elements_per_dataset,
options=None):
datasets = [
dataset_ops.Dataset.range(num_elements_per_dataset)
for _ in range(num_datasets)
]
indices = []
for i in range(num_datasets):
indices = indices + ([i] * num_elements_per_dataset)
shuffled_indices = stateless_random_ops.stateless_shuffle(
np.int64(indices), seed=[1, 2])
choice_dataset = dataset_ops.Dataset.from_tensor_slices(shuffled_indices)
dataset = dataset_ops.Dataset.choose_from_datasets(datasets, choice_dataset)
if options:
dataset = dataset.with_options(options)
return dataset
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True])))
def test(self, verify_fn, symbolic_checkpoint):
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
verify_fn(
self, lambda: self._build_dataset(5, 20, options), num_outputs=100)
if __name__ == "__main__":
test.main()
| ChooseFromDatasetsCheckpointTest |
python | spyder-ide__spyder | spyder/api/widgets/comboboxes.py | {
"start": 1247,
"end": 2410
} | class ____(QStyledItemDelegate):
"""
Delegate to make separators color follow our theme.
Adapted from https://stackoverflow.com/a/33464045/438386
"""
def __init__(self, parent, elide_mode=None):
super().__init__(parent)
self._elide_mode = elide_mode
def paint(self, painter, option, index):
data = index.data(Qt.AccessibleDescriptionRole)
if data and data == "separator":
painter.setPen(QColor(SpyderPalette.COLOR_BACKGROUND_6))
painter.drawLine(
option.rect.left() + AppStyle.MarginSize,
option.rect.center().y(),
option.rect.right() - AppStyle.MarginSize,
option.rect.center().y()
)
return
if self._elide_mode is not None:
option.textElideMode = self._elide_mode
super().paint(painter, option, index)
def sizeHint(self, option, index):
data = index.data(Qt.AccessibleDescriptionRole)
if data and data == "separator":
return QSize(0, 3 * AppStyle.MarginSize)
return super().sizeHint(option, index)
| _SpyderComboBoxDelegate |
python | huggingface__transformers | tests/utils/test_generic.py | {
"start": 5707,
"end": 8104
} | class ____(unittest.TestCase):
def test_cases_no_warning(self):
with warnings.catch_warnings(record=True) as raised_warnings:
warnings.simplefilter("always")
# basic test
@filter_out_non_signature_kwargs()
def func1(a):
return a
result = func1(1)
self.assertEqual(result, 1)
# include extra kwarg
@filter_out_non_signature_kwargs(extra=["extra_arg"])
def func2(a, **kwargs):
return a, kwargs
a, kwargs = func2(1)
self.assertEqual(a, 1)
self.assertEqual(kwargs, {})
a, kwargs = func2(1, extra_arg=2)
self.assertEqual(a, 1)
self.assertEqual(kwargs, {"extra_arg": 2})
# multiple extra kwargs
@filter_out_non_signature_kwargs(extra=["extra_arg", "extra_arg2"])
def func3(a, **kwargs):
return a, kwargs
a, kwargs = func3(2)
self.assertEqual(a, 2)
self.assertEqual(kwargs, {})
a, kwargs = func3(3, extra_arg2=3)
self.assertEqual(a, 3)
self.assertEqual(kwargs, {"extra_arg2": 3})
a, kwargs = func3(1, extra_arg=2, extra_arg2=3)
self.assertEqual(a, 1)
self.assertEqual(kwargs, {"extra_arg": 2, "extra_arg2": 3})
# Check that no warnings were raised
self.assertEqual(len(raised_warnings), 0, f"Warning raised: {[w.message for w in raised_warnings]}")
def test_cases_with_warnings(self):
@filter_out_non_signature_kwargs()
def func1(a):
return a
with self.assertWarns(UserWarning):
func1(1, extra_arg=2)
@filter_out_non_signature_kwargs(extra=["extra_arg"])
def func2(a, **kwargs):
return kwargs
with self.assertWarns(UserWarning):
kwargs = func2(1, extra_arg=2, extra_arg2=3)
self.assertEqual(kwargs, {"extra_arg": 2})
@filter_out_non_signature_kwargs(extra=["extra_arg", "extra_arg2"])
def func3(a, **kwargs):
return kwargs
with self.assertWarns(UserWarning):
kwargs = func3(1, extra_arg=2, extra_arg2=3, extra_arg3=4)
self.assertEqual(kwargs, {"extra_arg": 2, "extra_arg2": 3})
@require_torch
| ValidationDecoratorTester |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_north_dakota_zip.py | {
"start": 767,
"end": 1782
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_north_dakota_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_north_dakota_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidNorthDakotaZip |
python | doocs__leetcode | solution/0100-0199/0137.Single Number II/Solution.py | {
"start": 0,
"end": 325
} | class ____:
def singleNumber(self, nums: List[int]) -> int:
ans = 0
for i in range(32):
cnt = sum(num >> i & 1 for num in nums)
if cnt % 3:
if i == 31:
ans -= 1 << i
else:
ans |= 1 << i
return ans
| Solution |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_collection_membership_files.py | {
"start": 1569,
"end": 2191
} | class ____(
GQLResult
):
node: Optional[FileFragment]
ArtifactCollectionMembershipFiles.model_rebuild()
ArtifactCollectionMembershipFilesProject.model_rebuild()
ArtifactCollectionMembershipFilesProjectArtifactCollection.model_rebuild()
ArtifactCollectionMembershipFilesProjectArtifactCollectionArtifactMembership.model_rebuild()
ArtifactCollectionMembershipFilesProjectArtifactCollectionArtifactMembershipFiles.model_rebuild()
ArtifactCollectionMembershipFilesProjectArtifactCollectionArtifactMembershipFilesEdges.model_rebuild()
| ArtifactCollectionMembershipFilesProjectArtifactCollectionArtifactMembershipFilesEdges |
python | django-compressor__django-compressor | compressor/tests/test_templatetags.py | {
"start": 719,
"end": 7873
} | class ____(TestCase):
def setUp(self):
self.context = {"STATIC_URL": settings.COMPRESS_URL}
def test_empty_tag(self):
template = """{% load compress %}{% compress js %}{% block js %}
{% endblock %}{% endcompress %}"""
self.assertEqual("", render(template, self.context))
def test_css_tag(self):
template = """{% load compress %}{% compress css %}
<link rel="stylesheet" href="{{ STATIC_URL }}css/one.css" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
<link rel="stylesheet" href="{{ STATIC_URL }}css/two.css" type="text/css">
{% endcompress %}"""
out = css_tag("/static/CACHE/css/output.600674ea1d3d.css")
self.assertEqual(out, render(template, self.context))
def test_css_tag_with_block(self):
template = """{% load compress %}{% compress css file block_name %}
<link rel="stylesheet" href="{{ STATIC_URL }}css/one.css" type="text/css">
<style type="text/css">p { border:5px solid blue;}</style>
<link rel="stylesheet" href="{{ STATIC_URL }}css/two.css" type="text/css">
{% endcompress %}"""
out = css_tag("/static/CACHE/css/block_name.a1e074d0c4ac.css")
self.assertEqual(out, render(template, self.context))
def test_missing_rel_leaves_empty_result(self):
template = """{% load compress %}{% compress css %}
<link href="{{ STATIC_URL }}css/one.css" type="text/css">
{% endcompress %}"""
out = ""
self.assertEqual(out, render(template, self.context))
def test_missing_rel_only_on_one_resource(self):
template = """{% load compress %}{% compress css %}
<link href="{{ STATIC_URL }}css/wontmatter.css" type="text/css">
<link rel="stylesheet" href="{{ STATIC_URL }}css/one.css" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
<link rel="stylesheet" href="{{ STATIC_URL }}css/two.css" type="text/css">
{% endcompress %}"""
out = css_tag("/static/CACHE/css/output.600674ea1d3d.css")
self.assertEqual(out, render(template, self.context))
def test_uppercase_rel(self):
template = """{% load compress %}{% compress css %}
<link rel="StyleSheet" href="{{ STATIC_URL }}css/one.css" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
<link rel="StyleSheet" href="{{ STATIC_URL }}css/two.css" type="text/css">
{% endcompress %}"""
out = css_tag("/static/CACHE/css/output.600674ea1d3d.css")
self.assertEqual(out, render(template, self.context))
def test_nonascii_css_tag(self):
template = """{% load compress %}{% compress css %}
<link rel="stylesheet" href="{{ STATIC_URL }}css/nonasc.css" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
{% endcompress %}
"""
out = css_tag("/static/CACHE/css/output.d5444a1ab4a3.css")
self.assertEqual(out, render(template, self.context))
def test_js_tag(self):
template = """{% load compress %}{% compress js %}
<script src="{{ STATIC_URL }}js/one.js" type="text/javascript"></script>
<script type="text/javascript">obj.value = "value";</script>
{% endcompress %}
"""
out = '<script src="/static/CACHE/js/output.8a0fed36c317.js"></script>'
self.assertEqual(out, render(template, self.context))
def test_nonascii_js_tag(self):
template = """{% load compress %}{% compress js %}
<script src="{{ STATIC_URL }}js/nonasc.js" type="text/javascript"></script>
<script type="text/javascript">var test_value = "\u2014";</script>
{% endcompress %}
"""
out = '<script src="/static/CACHE/js/output.8c00f1cf1e0a.js"></script>'
self.assertEqual(out, render(template, self.context))
def test_nonascii_latin1_js_tag(self):
template = """{% load compress %}{% compress js %}
<script src="{{ STATIC_URL }}js/nonasc-latin1.js" type="text/javascript" charset="latin-1"></script>
<script type="text/javascript">var test_value = "\u2014";</script>
{% endcompress %}
"""
out = '<script src="/static/CACHE/js/output.06a98ccfd380.js"></script>'
self.assertEqual(out, render(template, self.context))
def test_compress_tag_with_illegal_arguments(self):
template = """{% load compress %}{% compress pony %}
<script type="pony/application">unicorn</script>
{% endcompress %}"""
self.assertRaises(TemplateSyntaxError, render, template, {})
@override_settings(COMPRESS_DEBUG_TOGGLE="togglecompress")
def test_debug_toggle(self):
template = """{% load compress %}{% compress js %}
<script src="{{ STATIC_URL }}js/one.js" type="text/javascript"></script>
<script type="text/javascript">obj.value = "value";</script>
{% endcompress %}
"""
class MockDebugRequest:
GET = {settings.COMPRESS_DEBUG_TOGGLE: "true"}
context = dict(self.context, request=MockDebugRequest())
out = """<script src="/static/js/one.js" type="text/javascript"></script>
<script type="text/javascript">obj.value = "value";</script>"""
self.assertEqual(out, render(template, context))
def test_inline(self):
template = """{% load compress %}{% compress js inline %}
<script src="{{ STATIC_URL }}js/one.js" type="text/javascript"></script>
<script type="text/javascript">obj.value = "value";</script>
{% endcompress %}{% compress css inline %}
<link rel="stylesheet" href="{{ STATIC_URL }}css/one.css" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
<link rel="stylesheet" href="{{ STATIC_URL }}css/two.css" type="text/css">
{% endcompress %}"""
out_js = '<script>obj={};;obj.value="value";;</script>'
out_css = (
'<style type="text/css">body{background:#990}'
"p{border:5px solid green}"
"body{color:#fff}</style>"
)
self.assertEqual(out_js + out_css, render(template, self.context))
def test_named_compress_tag(self):
template = """{% load compress %}{% compress js inline foo %}
<script type="text/javascript">obj.value = "value";</script>
{% endcompress %}
"""
def listener(sender, **kwargs):
pass
callback = Mock(wraps=listener)
post_compress.connect(callback)
render(template)
args, kwargs = callback.call_args
context = kwargs["context"]
self.assertEqual("foo", context["compressed"]["name"])
def test_sekizai_only_once(self):
template = """{% load sekizai_tags %}{% addtoblock "js" %}
<script type="text/javascript">var tmpl="{% templatetag openblock %} if x == 3 %}x IS 3{% templatetag openblock %} endif %}"</script>
{% endaddtoblock %}{% render_block "js" postprocessor "compressor.contrib.sekizai.compress" %}
"""
out = '<script src="/static/CACHE/js/output.ffc39dec05fd.js"></script>'
self.assertEqual(out, render(template, self.context, SekizaiContext))
| TemplatetagTestCase |
python | explosion__spaCy | spacy/lang/uk/lemmatizer.py | {
"start": 195,
"end": 1716
} | class ____(RussianLemmatizer):
def __init__(
self,
vocab: Vocab,
model: Optional[Model],
name: str = "lemmatizer",
*,
mode: str = "pymorphy3",
overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score,
) -> None:
if mode in {"pymorphy2", "pymorphy2_lookup"}:
try:
from pymorphy2 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Ukrainian lemmatizer mode 'pymorphy2' requires the "
"pymorphy2 library and dictionaries. Install them with: "
"pip install pymorphy2 pymorphy2-dicts-uk"
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk")
elif mode in {"pymorphy3", "pymorphy3_lookup"}:
try:
from pymorphy3 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Ukrainian lemmatizer mode 'pymorphy3' requires the "
"pymorphy3 library and dictionaries. Install them with: "
"pip install pymorphy3 pymorphy3-dicts-uk"
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk")
super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
| UkrainianLemmatizer |
python | getsentry__sentry | src/sentry/sentry_apps/services/app_request/model.py | {
"start": 184,
"end": 525
} | class ____(RpcModel):
date: str
response_code: int
webhook_url: str
organization_id: int | None
event_type: str
error_id: str | None = None
project_id: int | None = None
request_body: str | None = None
request_headers: Mapping[str, str] | None = None
response_body: str | None = None
| RpcSentryAppRequest |
python | streamlit__streamlit | lib/tests/streamlit/connections/snowflake_connection_test.py | {
"start": 1199,
"end": 8533
} | class ____(unittest.TestCase):
def tearDown(self) -> None:
st.cache_data.clear()
@patch(
"snowflake.snowpark.context.get_active_session",
)
@patch(
"streamlit.connections.snowflake_connection.running_in_sis",
MagicMock(return_value=True),
)
def test_uses_active_session_if_in_sis(self, patched_get_active_session):
active_session_mock = MagicMock()
active_session_mock.connection = "some active session"
patched_get_active_session.return_value = active_session_mock
conn = SnowflakeConnection("my_snowflake_connection")
assert conn._instance == "some active session"
@patch(
"streamlit.connections.snowflake_connection.SnowflakeConnection._secrets",
PropertyMock(
return_value=AttrDict({"account": "some_val_1", "some_key": "some_val_2"})
),
)
@patch("snowflake.connector.connect")
def test_uses_streamlit_secrets_if_available(self, patched_connect):
SnowflakeConnection("my_snowflake_connection")
patched_connect.assert_called_once_with(
account="some_val_1", some_key="some_val_2"
)
@patch("snowflake.connector.connect")
def test_uses_config_manager_if_available(self, patched_connect):
SnowflakeConnection("snowflake", some_kwarg="some_value")
patched_connect.assert_called_once_with()
def test_throws_friendly_error_if_no_config_set(self):
with pytest.raises(StreamlitAPIException) as e:
SnowflakeConnection("snowflake")
assert "Missing Snowflake connection configuration." in str(e.value)
@patch(
"streamlit.connections.snowflake_connection.SnowflakeConnection._connect",
MagicMock(),
)
def test_query_caches_value(self):
# Caching functions rely on an active script run ctx
add_script_run_ctx(threading.current_thread(), create_mock_script_run_ctx())
mock_cursor = MagicMock()
mock_cursor.fetch_pandas_all = MagicMock(return_value="i am a dataframe")
conn = SnowflakeConnection("my_snowflake_connection")
conn._instance.cursor.return_value = mock_cursor
assert conn.query("SELECT 1;") == "i am a dataframe"
assert conn.query("SELECT 1;") == "i am a dataframe"
conn._instance.cursor.assert_called_once()
mock_cursor.execute.assert_called_once_with("SELECT 1;", params=None)
@patch(
"streamlit.connections.snowflake_connection.SnowflakeConnection._connect",
MagicMock(),
)
def test_does_not_reset_cache_when_ttl_changes(self):
# Caching functions rely on an active script run ctx
add_script_run_ctx(threading.current_thread(), create_mock_script_run_ctx())
mock_cursor = MagicMock()
mock_cursor.fetch_pandas_all = MagicMock(return_value="i am a dataframe")
conn = SnowflakeConnection("my_snowflake_connection")
conn._instance.cursor.return_value = mock_cursor
conn.query("SELECT 1;", ttl=10)
conn.query("SELECT 2;", ttl=20)
conn.query("SELECT 1;", ttl=10)
conn.query("SELECT 2;", ttl=20)
assert conn._instance.cursor.call_count == 2
assert mock_cursor.execute.call_count == 2
@patch(
"streamlit.connections.snowflake_connection.SnowflakeConnection._connect",
MagicMock(),
)
def test_scopes_caches_by_connection_name(self):
# Caching functions rely on an active script run ctx
add_script_run_ctx(threading.current_thread(), create_mock_script_run_ctx())
mock_cursor = MagicMock()
mock_cursor.fetch_pandas_all = MagicMock(return_value="i am a dataframe")
conn1 = SnowflakeConnection("my_snowflake_connection1")
conn1._instance.cursor.return_value = mock_cursor
conn2 = SnowflakeConnection("my_snowflake_connection2")
conn2._instance.cursor.return_value = mock_cursor
conn1.query("SELECT 1;")
conn1.query("SELECT 1;")
conn2.query("SELECT 1;")
conn2.query("SELECT 1;")
assert conn1._instance.cursor is conn2._instance.cursor
assert conn1._instance.cursor.call_count == 2
assert mock_cursor.execute.call_count == 2
@patch(
"streamlit.connections.snowflake_connection.SnowflakeConnection._connect",
MagicMock(),
)
def test_retry_behavior(self):
mock_cursor = MagicMock()
mock_cursor.fetch_pandas_all = MagicMock(
side_effect=SomeError("oh no", sqlstate="08001")
)
conn = SnowflakeConnection("my_snowflake_connection")
conn._instance.cursor.return_value = mock_cursor
with patch.object(conn, "reset", wraps=conn.reset) as wrapped_reset:
with pytest.raises(SomeError):
conn.query("SELECT 1;")
# Our connection should have been reset after each failed attempt to call
# query.
assert wrapped_reset.call_count == 3
# conn._connect should have been called three times: once in the initial
# connection, then once each after the second and third attempts to call
# query.
assert conn._connect.call_count == 3
@patch(
"streamlit.connections.snowflake_connection.SnowflakeConnection._connect",
MagicMock(),
)
def test_retry_fails_fast_for_programming_errors_with_wrong_sqlstate(self):
mock_cursor = MagicMock()
mock_cursor.fetch_pandas_all = MagicMock(
side_effect=SomeError("oh no", sqlstate="42")
)
conn = SnowflakeConnection("my_snowflake_connection")
conn._instance.cursor.return_value = mock_cursor
with pytest.raises(SomeError):
conn.query("SELECT 1;")
# conn._connect should have just been called once when first creating the
# connection.
assert conn._connect.call_count == 1
@patch(
"streamlit.connections.snowflake_connection.SnowflakeConnection._connect",
MagicMock(),
)
def test_retry_fails_fast_for_general_snowflake_errors(self):
from snowflake.connector.errors import Error as SnowflakeError
mock_cursor = MagicMock()
mock_cursor.fetch_pandas_all = MagicMock(side_effect=SnowflakeError("oh no"))
conn = SnowflakeConnection("my_snowflake_connection")
conn._instance.cursor.return_value = mock_cursor
with pytest.raises(SnowflakeError):
conn.query("SELECT 1;")
# conn._connect should have just been called once when first creating the
# connection.
assert conn._connect.call_count == 1
@patch(
"streamlit.connections.snowflake_connection.SnowflakeConnection._connect",
MagicMock(),
)
def test_retry_fails_fast_for_other_errors(self):
mock_cursor = MagicMock()
mock_cursor.fetch_pandas_all = MagicMock(side_effect=Exception("oh no"))
conn = SnowflakeConnection("my_snowflake_connection")
conn._instance.cursor.return_value = mock_cursor
with pytest.raises(Exception, match="oh no"):
conn.query("SELECT 1;")
# conn._connect should have just been called once when first creating the
# connection.
assert conn._connect.call_count == 1
| SnowflakeConnectionTest |
python | scikit-learn__scikit-learn | asv_benchmarks/benchmarks/ensemble.py | {
"start": 358,
"end": 1365
} | class ____(Predictor, Estimator, Benchmark):
"""
Benchmarks for RandomForestClassifier.
"""
param_names = ["representation", "n_jobs"]
params = (["dense", "sparse"], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, n_jobs = params
if representation == "sparse":
data = _20newsgroups_highdim_dataset()
else:
data = _20newsgroups_lowdim_dataset()
return data
def make_estimator(self, params):
representation, n_jobs = params
n_estimators = 500 if Benchmark.data_size == "large" else 100
estimator = RandomForestClassifier(
n_estimators=n_estimators,
min_samples_split=10,
max_features="log2",
n_jobs=n_jobs,
random_state=0,
)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
| RandomForestClassifierBenchmark |
python | kamyu104__LeetCode-Solutions | Python/make-two-arrays-equal-by-reversing-sub-arrays.py | {
"start": 324,
"end": 557
} | class ____(object):
def canBeEqual(self, target, arr):
"""
:type target: List[int]
:type arr: List[int]
:rtype: bool
"""
target.sort(), arr.sort()
return target == arr
| Solution2 |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 78751,
"end": 78854
} | class ____:
xlDownThenOver = 1 # from enum XlOrder
xlOverThenDown = 2 # from enum XlOrder
| Order |
python | getsentry__sentry | src/sentry/notifications/api/endpoints/user_notification_settings_options.py | {
"start": 839,
"end": 2842
} | class ____(UserEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ALERTS_NOTIFICATIONS
def get(self, request: Request, user: User) -> Response:
"""
Retrieve the notification preferences for a user.
Returns a list of NotificationSettingOption rows.
"""
notification_type = request.GET.get("type")
notifications_settings = NotificationSettingOption.objects.filter(user_id=user.id)
if notification_type:
try:
validate_type(notification_type)
except ParameterValidationError:
return self.respond({"type": ["Invalid type"]}, status=status.HTTP_400_BAD_REQUEST)
notifications_settings = notifications_settings.filter(type=notification_type)
notification_preferences = serialize(
list(notifications_settings), request.user, NotificationSettingsOptionSerializer()
)
return Response(notification_preferences)
def put(self, request: Request, user: User) -> Response:
"""
Update the notification preferences for a user.
Returns the new row of NotificationSettingOption.
"""
serializer = UserNotificationSettingOptionWithValueSerializer(data=request.data)
if not serializer.is_valid():
return self.respond(serializer.errors, status=400)
data = serializer.validated_data
notification_option, _ = NotificationSettingOption.objects.update_or_create(
user_id=user.id,
scope_type=data["scope_type"],
scope_identifier=data["scope_identifier"],
type=data["type"],
defaults={"value": data["value"]},
)
return Response(
serialize(notification_option, request.user, NotificationSettingsOptionSerializer()),
status=status.HTTP_201_CREATED,
)
| UserNotificationSettingsOptionsEndpoint |
python | huggingface__transformers | src/transformers/models/colpali/processing_colpali.py | {
"start": 1547,
"end": 2775
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding": "longest",
},
"images_kwargs": {
"data_format": "channels_first",
"do_convert_rgb": True,
},
"common_kwargs": {"return_tensors": "pt"},
}
IMAGE_TOKEN = "<image>"
EXTRA_TOKENS = [f"<loc{i:0>4}>" for i in range(1024)] + [f"<seg{i:0>3}>" for i in range(128)]
def build_string_from_input(prompt, bos_token, image_seq_len, image_token, num_images):
"""
Builds a string from the input prompt and image tokens.
For example, for the call:
build_string_from_input(
prompt="Prefix str"
bos_token="<s>",
image_seq_len=3,
image_token="<im>",
)
The output will be:
"<im><im><im><s>Initial str"
Args:
prompt (`list[Union[str, ImageInput]]`): The input prompt.
bos_token (`str`): The beginning of sentence token.
image_seq_len (`int`): The length of the image sequence.
image_token (`str`): The image token.
num_images (`int`): Number of images in the prompt.
"""
return f"{image_token * image_seq_len * num_images}{bos_token}{prompt}\n"
| ColPaliProcessorKwargs |
python | ray-project__ray | python/ray/tune/tests/test_tuner.py | {
"start": 2648,
"end": 17997
} | class ____(unittest.TestCase):
"""The e2e test for hparam tuning using Tuner API."""
@pytest.fixture(autouse=True)
def tmp_path(self, tmp_path):
self.tmp_path = tmp_path
def setUp(self):
ray.init()
def tearDown(self):
ray.shutdown()
def test_tuner_with_xgboost_trainer(self):
"""Test a successful run."""
trainer = XGBoostTrainer(
label_column="target",
params={},
datasets={"train": gen_dataset_func_eager()},
)
param_space = {
"scaling_config": ray.train.ScalingConfig(
num_workers=tune.grid_search([1, 2])
),
"datasets": {
"train": tune.grid_search(
[gen_dataset_func(), gen_dataset_func(do_shuffle=True)]
),
},
"params": {
"objective": "binary:logistic",
"tree_method": "approx",
"eval_metric": ["logloss", "error"],
"eta": tune.loguniform(1e-4, 1e-1),
"subsample": tune.uniform(0.5, 1.0),
"max_depth": tune.randint(1, 9),
},
}
tuner = Tuner(
trainable=trainer,
run_config=RunConfig(name="test_tuner"),
param_space=param_space,
tune_config=TuneConfig(mode="min", metric="train-error"),
# limiting the number of trials running at one time.
# As the unit test only has access to 4 CPUs on Buildkite.
_tuner_kwargs={"max_concurrent_trials": 1},
)
results = tuner.fit()
assert len(results) == 4
def test_tuner_with_xgboost_trainer_driver_fail_and_resume(self):
# So that we have some global checkpointing happening.
os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "1"
trainer = XGBoostTrainer(
label_column="target",
params={},
datasets={"train": gen_dataset_func_eager()},
)
# prep_v1 = StandardScaler(["worst radius", "worst area"])
# prep_v2 = StandardScaler(["worst concavity", "worst smoothness"])
param_space = {
"scaling_config": ray.train.ScalingConfig(
num_workers=tune.grid_search([1, 2])
),
"datasets": {
"train": tune.grid_search(
[gen_dataset_func(), gen_dataset_func(do_shuffle=True)]
),
},
"params": {
"objective": "binary:logistic",
"tree_method": "approx",
"eval_metric": ["logloss", "error"],
"eta": tune.loguniform(1e-4, 1e-1),
"subsample": tune.uniform(0.5, 1.0),
"max_depth": tune.randint(1, 9),
},
}
class FailureInjectionCallback(Callback):
"""Inject failure at the configured iteration number."""
def __init__(self, num_iters=10):
self.num_iters = num_iters
def on_step_end(self, iteration, trials, **kwargs):
if iteration == self.num_iters:
print(f"Failing after {self.num_iters} iters.")
raise RuntimeError
tuner = Tuner(
trainable=trainer,
run_config=RunConfig(
name="test_tuner_driver_fail",
storage_path=str(self.tmp_path),
callbacks=[FailureInjectionCallback()],
),
param_space=param_space,
tune_config=TuneConfig(mode="min", metric="train-error"),
# limiting the number of trials running at one time.
# As the unit test only has access to 4 CPUs on Buildkite.
_tuner_kwargs={"max_concurrent_trials": 1},
)
with self.assertRaises(RuntimeError):
tuner.fit()
# Test resume
restore_path = os.path.join(self.tmp_path, "test_tuner_driver_fail")
tuner = Tuner.restore(restore_path, trainable=trainer, param_space=param_space)
# A hack before we figure out RunConfig semantics across resumes.
tuner._local_tuner._run_config.callbacks = None
results = tuner.fit()
assert len(results) == 4
assert not results.errors
def test_tuner_with_torch_trainer(self):
"""Test a successful run using torch trainer."""
# The following two should be tunable.
config = {"lr": 1e-2, "hidden_size": 1, "batch_size": 4, "epochs": 10}
scaling_config = ray.train.ScalingConfig(num_workers=1, use_gpu=False)
trainer = TorchTrainer(
train_loop_per_worker=linear_train_func,
train_loop_config=config,
scaling_config=scaling_config,
)
param_space = {
"scaling_config": ray.train.ScalingConfig(
num_workers=tune.grid_search([1, 2])
),
"train_loop_config": {
"batch_size": tune.grid_search([4, 8]),
"epochs": tune.grid_search([5, 10]),
},
}
tuner = Tuner(
trainable=trainer,
run_config=RunConfig(name="test_tuner"),
param_space=param_space,
tune_config=TuneConfig(mode="min", metric="loss"),
)
results = tuner.fit()
assert len(results) == 8
def test_tuner_run_config_override(self):
trainer = DummyTrainer(run_config=RunConfig(stop={"metric": 4}))
tuner = Tuner(trainer)
assert tuner._local_tuner._run_config.stop == {"metric": 4}
@pytest.mark.parametrize(
"params_expected",
[
(
{"run_config": RunConfig(progress_reporter=CLIReporter())},
lambda kw: isinstance(kw["progress_reporter"], CLIReporter),
),
(
{"tune_config": TuneConfig(reuse_actors=True)},
lambda kw: kw["reuse_actors"] is True,
),
(
{"run_config": RunConfig(log_to_file="some_file")},
lambda kw: kw["log_to_file"] == "some_file",
),
(
{"tune_config": TuneConfig(max_concurrent_trials=3)},
lambda kw: kw["max_concurrent_trials"] == 3,
),
(
{"tune_config": TuneConfig(time_budget_s=60)},
lambda kw: kw["time_budget_s"] == 60,
),
],
)
def test_tuner_api_kwargs(shutdown_only, params_expected):
tuner_params, assertion = params_expected
tuner = Tuner(lambda config: 1, **tuner_params)
caught_kwargs = {}
class MockExperimentAnalysis:
trials = []
def catch_kwargs(**kwargs):
caught_kwargs.update(kwargs)
return MockExperimentAnalysis()
with patch("ray.tune.impl.tuner_internal.run", catch_kwargs):
tuner.fit()
assert assertion(caught_kwargs)
def test_tuner_fn_trainable_invalid_checkpoint_config(shutdown_only):
tuner = Tuner(
lambda config: 1,
run_config=RunConfig(
checkpoint_config=CheckpointConfig(checkpoint_at_end=True)
),
)
with pytest.raises(ValueError):
tuner.fit()
tuner = Tuner(
lambda config: 1,
run_config=RunConfig(
checkpoint_config=CheckpointConfig(checkpoint_frequency=1)
),
)
with pytest.raises(ValueError):
tuner.fit()
def test_tuner_trainer_checkpoint_config(shutdown_only):
custom_training_loop_trainer = DataParallelTrainer(
train_loop_per_worker=lambda config: 1
)
tuner = Tuner(
custom_training_loop_trainer,
run_config=RunConfig(
checkpoint_config=CheckpointConfig(checkpoint_at_end=True)
),
)
with pytest.raises(ValueError):
tuner.fit()
tuner = Tuner(
custom_training_loop_trainer,
run_config=RunConfig(
checkpoint_config=CheckpointConfig(checkpoint_frequency=1)
),
)
with pytest.raises(ValueError):
tuner.fit()
handles_checkpoints_trainer = XGBoostTrainer(
label_column="target",
params={},
datasets={"train": ray.data.from_items(list(range(5)))},
)
tuner = Tuner(
handles_checkpoints_trainer,
run_config=RunConfig(
checkpoint_config=CheckpointConfig(
checkpoint_at_end=True, checkpoint_frequency=1
)
),
)._local_tuner
# Check that validation passes for a Trainer that does handle checkpointing
tuner._get_tune_run_arguments(tuner.converted_trainable)
def test_tuner_fn_trainable_checkpoint_at_end_false(shutdown_only):
tuner = Tuner(
lambda config: 1,
run_config=RunConfig(
checkpoint_config=CheckpointConfig(checkpoint_at_end=False)
),
)
tuner.fit()
def test_tuner_fn_trainable_checkpoint_at_end_none(shutdown_only):
tuner = Tuner(
lambda config: 1,
run_config=RunConfig(
checkpoint_config=CheckpointConfig(checkpoint_at_end=None)
),
)
tuner.fit()
def test_nonserializable_trainable():
import threading
lock = threading.Lock()
# Check that the `inspect_serializability` trace was printed
with pytest.raises(TypeError, match=r".*was found to be non-serializable.*"):
Tuner(lambda config: print(lock))
# TODO: [V2] Delete the `trainer` variant once V1 is fully removed.
def _test_no_chdir(runner_type, runtime_env, use_deprecated_config=False):
# Write a data file that we want to read in our training loop
with open("./read.txt", "w") as f:
f.write("data")
ray.init(num_cpus=4, runtime_env=runtime_env)
def train_func(config):
# Make sure we can access the data from the original working dir
assert os.path.exists("./read.txt") and open("./read.txt", "r").read() == "data"
# Write operations should happen in each trial's independent logdir to
# prevent write conflicts
trial_dir = Path(tune.get_context().get_trial_dir())
trial_dir.joinpath("write.txt").touch()
if runner_type == "trainer":
trainer = DataParallelTrainer(
train_func, scaling_config=ray.train.ScalingConfig(num_workers=2)
)
result = trainer.fit()
results = [result]
elif runner_type == "tuner":
tuner = Tuner(
train_func,
param_space={"id": tune.grid_search(list(range(4)))},
tune_config=(
TuneConfig(chdir_to_trial_dir=False) if use_deprecated_config else None
),
)
results = tuner.fit()
assert not results.errors
else:
raise NotImplementedError(f"Invalid: {runner_type}")
for result in results:
assert os.path.exists(os.path.join(result.path, "write.txt"))
def test_tuner_no_chdir_to_trial_dir_deprecated(shutdown_only, chdir_tmpdir):
"""Test the deprecated `chdir_to_trial_dir` config."""
with pytest.raises(DeprecationWarning):
_test_no_chdir("tuner", {}, use_deprecated_config=True)
@pytest.mark.parametrize("runtime_env", [{}, {"working_dir": "."}])
def test_tuner_no_chdir_to_trial_dir(
shutdown_only, chdir_tmpdir, monkeypatch, runtime_env
):
"""Tests that disabling the env var to keep the working directory the same
works for a Tuner run."""
from ray.train.constants import RAY_CHDIR_TO_TRIAL_DIR
monkeypatch.setenv(RAY_CHDIR_TO_TRIAL_DIR, "0")
_test_no_chdir("tuner", runtime_env)
@pytest.mark.parametrize("runtime_env", [{}, {"working_dir": "."}])
def test_trainer_no_chdir_to_trial_dir(
shutdown_only, chdir_tmpdir, monkeypatch, runtime_env
):
"""Tests that disabling the env var to keep the working directory the same
works for a Trainer run."""
from ray.train.constants import RAY_CHDIR_TO_TRIAL_DIR
monkeypatch.setenv(RAY_CHDIR_TO_TRIAL_DIR, "0")
_test_no_chdir("trainer", runtime_env)
@pytest.mark.parametrize("runtime_env", [{}, {"working_dir": "."}])
def test_tuner_relative_pathing_with_env_vars(
shutdown_only, chdir_tmpdir, tmp_path, runtime_env
):
"""Tests that `TUNE_ORIG_WORKING_DIR` environment variable can be used to access
relative paths to the original working directory.
"""
# Write a data file that we want to read in our training loop
with open("./read.txt", "w") as f:
f.write("data")
# Even if we set our runtime_env `{"working_dir": "."}` to the current directory,
# Tune should still chdir to the trial directory.
ray.init(num_cpus=1, runtime_env=runtime_env)
def train_func(config):
orig_working_dir = Path(os.environ["TUNE_ORIG_WORKING_DIR"])
assert (
str(orig_working_dir) != os.getcwd()
), f"Working directory should have changed from {orig_working_dir}"
# Make sure we can access the data from the original working dir
# Different from above: create an absolute path using the env variable
data_path = orig_working_dir / "read.txt"
assert os.path.exists(data_path) and open(data_path, "r").read() == "data"
# Tune chdirs to the trial working directory
storage = tune.get_context().get_storage()
assert Path(storage.trial_working_directory).resolve() == Path.cwd().resolve()
with open("write.txt", "w") as f:
f.write(f"{config['id']}")
tuner = Tuner(
train_func,
param_space={"id": tune.grid_search(list(range(4)))},
run_config=RunConfig(
storage_path=str(tmp_path),
sync_config=tune.SyncConfig(sync_artifacts=True),
),
)
results = tuner.fit()
assert not results.errors
for result in results:
artifact_data = open(os.path.join(result.path, "write.txt"), "r").read()
assert artifact_data == f"{result.config['id']}"
def test_invalid_param_space(shutdown_only):
"""Check that Tune raises an error on invalid param_space types."""
def trainable(config):
return {"metric": 1}
with pytest.raises(ValueError):
Tuner(trainable, param_space="not allowed")
from ray.tune.tune import _Config
class CustomConfig(_Config):
def to_dict(self) -> dict:
return {"hparam": 1}
with pytest.raises(ValueError):
Tuner(trainable, param_space="not allowed").fit()
with pytest.raises(ValueError):
tune.run(trainable, config="not allowed")
# Dict and custom _Config subclasses are fine
Tuner(trainable, param_space={}).fit()
Tuner(trainable, param_space=CustomConfig()).fit()
tune.run(trainable, config=CustomConfig())
def test_tuner_restore_classmethod():
tuner = Tuner(lambda x: None)
# Calling `tuner.restore()` on an instance should raise an AttributeError
with pytest.raises(AttributeError):
tuner.restore("/", lambda x: None)
# Calling `Tuner.restore()` on the class should work. This will throw a
# FileNotFoundError because no checkpoint exists at that location. Since
# this happens in the downstream restoration code, this means that the
# classmethod check successfully passed.
with pytest.raises(FileNotFoundError):
tuner = Tuner.restore("/invalid", lambda x: None)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__] + sys.argv[1:]))
| TunerTest |
python | apache__airflow | devel-common/src/sphinx_exts/operators_and_hooks_ref.py | {
"start": 15604,
"end": 15987
} | class ____(BaseJinjaReferenceDirective):
"""Generate list of logging handlers"""
def render_content(
self, *, tags: set[str] | None, header_separator: str = DEFAULT_HEADER_SEPARATOR
) -> str:
return _common_render_list_content(
header_separator=header_separator, resource_type="logging", template="logging.rst.jinja2"
)
| LoggingDirective |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/relationships/tutorial001.py | {
"start": 179,
"end": 263
} | class ____(SQLModel):
name: str = Field(index=True)
headquarters: str
| TeamBase |
python | fastapi__sqlmodel | docs_src/tutorial/indexes/tutorial001.py | {
"start": 100,
"end": 1219
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Deadpond")
results = session.exec(statement)
for hero in results:
print(hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 128834,
"end": 129121
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
repair_history: Optional[List[RepairHistoryItem]] = Field(
None, description="The repair history of the run."
)
| RepairHistory |
python | getsentry__sentry | src/sentry/sentry_metrics/indexer/limiters/writes.py | {
"start": 3039,
"end": 7700
} | class ____:
def __init__(self, namespace: str, **options: Mapping[str, str]) -> None:
self.namespace = namespace
self.rate_limiter: RedisSlidingWindowRateLimiter = RedisSlidingWindowRateLimiter(**options)
def _build_quota_key(self, use_case_id: UseCaseID, org_id: OrgId | None = None) -> str:
if org_id is not None:
return f"metrics-indexer-{use_case_id.value}-org-{org_id}"
else:
return f"metrics-indexer-{use_case_id.value}-global"
@metrics.wraps("sentry_metrics.indexer.construct_quotas")
def _construct_quotas(self, use_case_id: UseCaseID) -> Sequence[Quota]:
"""
Construct write limit's quotas based on current sentry options.
This value can potentially cached globally as long as it is invalidated
when sentry.options are.
"""
option_name = USE_CASE_ID_WRITES_LIMIT_QUOTA_OPTIONS.get(
use_case_id, "sentry-metrics.writes-limiter.limits.generic-metrics"
)
return [
Quota(prefix_override=self._build_quota_key(use_case_id), **args)
for args in options.get(f"{option_name}.global")
] + [Quota(prefix_override=None, **args) for args in options.get(f"{option_name}.per-org")]
@metrics.wraps("sentry_metrics.indexer.construct_quota_requests")
def _construct_quota_requests(
self, keys: UseCaseKeyCollection
) -> tuple[Sequence[UseCaseID], Sequence[OrgId], Sequence[RequestedQuota]]:
use_case_ids = []
org_ids = []
requests = []
for use_case_id, key_collection in keys.mapping.items():
quotas = self._construct_quotas(use_case_id)
if not quotas:
continue
for org_id, strings in key_collection.mapping.items():
use_case_ids.append(use_case_id)
org_ids.append(org_id)
requests.append(
RequestedQuota(
prefix=self._build_quota_key(use_case_id, org_id),
requested=len(strings),
quotas=quotas,
)
)
return use_case_ids, org_ids, requests
@metrics.wraps("sentry_metrics.indexer.check_write_limits")
def check_write_limits(
self,
use_case_keys: UseCaseKeyCollection,
) -> RateLimitState:
"""
Takes a UseCaseKeyCollection and applies DB write limits as configured via sentry.options.
Returns a context manager that, upon entering, returns a tuple of:
1. A UseCaseKeyCollection containing all unmapped keys that passed through the
rate limiter.
2. All unmapped keys that did not pass through the rate limiter.
Upon (successful) exit, rate limits are consumed.
"""
use_case_ids, org_ids, requests = self._construct_quota_requests(use_case_keys)
timestamp, grants = self.rate_limiter.check_within_quotas(requests)
accepted_keys = {
use_case_id: {org_id: strings for org_id, strings in key_collection.mapping.items()}
for use_case_id, key_collection in use_case_keys.mapping.items()
}
dropped_strings = []
for use_case_id, org_id, grant in zip(use_case_ids, org_ids, grants):
if len(accepted_keys[use_case_id][org_id]) <= grant.granted:
continue
allowed_strings = set(accepted_keys[use_case_id][org_id])
while len(allowed_strings) > grant.granted:
dropped_strings.append(
DroppedString(
use_case_key_result=UseCaseKeyResult(
use_case_id=use_case_id,
org_id=org_id,
string=allowed_strings.pop(),
id=None,
),
fetch_type=FetchType.RATE_LIMITED,
fetch_type_ext=FetchTypeExt(
is_global=any(
quota.prefix_override is not None for quota in grant.reached_quotas
),
),
)
)
accepted_keys[use_case_id][org_id] = allowed_strings
state = RateLimitState(
_writes_limiter=self,
_namespace=self.namespace,
_requests=requests,
_grants=grants,
_timestamp=timestamp,
accepted_keys=UseCaseKeyCollection(accepted_keys),
dropped_strings=dropped_strings,
)
return state
| WritesLimiter |
python | mwaskom__seaborn | doc/sphinxext/gallery_generator.py | {
"start": 3296,
"end": 10739
} | class ____:
"""Tools for generating an example page from a file"""
def __init__(self, filename, target_dir):
self.filename = filename
self.target_dir = target_dir
self.thumbloc = .5, .5
self.extract_docstring()
with open(filename) as fid:
self.filetext = fid.read()
outfilename = op.join(target_dir, self.rstfilename)
# Only actually run it if the output RST file doesn't
# exist or it was modified less recently than the example
file_mtime = op.getmtime(filename)
if not op.exists(outfilename) or op.getmtime(outfilename) < file_mtime:
self.exec_file()
else:
print(f"skipping {self.filename}")
@property
def dirname(self):
return op.split(self.filename)[0]
@property
def fname(self):
return op.split(self.filename)[1]
@property
def modulename(self):
return op.splitext(self.fname)[0]
@property
def pyfilename(self):
return self.modulename + '.py'
@property
def rstfilename(self):
return self.modulename + ".rst"
@property
def htmlfilename(self):
return self.modulename + '.html'
@property
def pngfilename(self):
pngfile = self.modulename + '.png'
return "_images/" + pngfile
@property
def thumbfilename(self):
pngfile = self.modulename + '_thumb.png'
return pngfile
@property
def sphinxtag(self):
return self.modulename
@property
def pagetitle(self):
return self.docstring.strip().split('\n')[0].strip()
@property
def plotfunc(self):
match = re.search(r"sns\.(.+plot)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+map)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+Grid)\(", self.filetext)
if match:
return match.group(1)
return ""
@property
def components(self):
objects = re.findall(r"sns\.(\w+)\(", self.filetext)
refs = []
for obj in objects:
if obj[0].isupper():
refs.append(f":class:`{obj}`")
else:
refs.append(f":func:`{obj}`")
return ", ".join(refs)
def extract_docstring(self):
""" Extract a module-level docstring
"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iter = lines.__iter__()
tokens = tokenize.generate_tokens(lambda: next(line_iter))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs,
# extract the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')
).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
thumbloc = None
for i, line in enumerate(docstring.split("\n")):
m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
if m:
thumbloc = float(m.group(1)), float(m.group(2))
break
if thumbloc is not None:
self.thumbloc = thumbloc
docstring = "\n".join([l for l in docstring.split("\n")
if not l.startswith("_thumb")])
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row
def exec_file(self):
print(f"running {self.filename}")
plt.close('all')
my_globals = {'pl': plt,
'plt': plt}
execfile(self.filename, my_globals)
fig = plt.gcf()
fig.canvas.draw()
pngfile = op.join(self.target_dir, self.pngfilename)
thumbfile = op.join("example_thumbs", self.thumbfilename)
self.html = f"<img src=../{self.pngfilename}>"
fig.savefig(pngfile, dpi=75, bbox_inches="tight")
cx, cy = self.thumbloc
create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)
def toctree_entry(self):
return f" ./{op.splitext(self.htmlfilename)[0]}\n\n"
def contents_entry(self):
return (".. raw:: html\n\n"
" <div class='thumb align-center'>\n"
" <a href=./{}>\n"
" <img src=../_static/{}>\n"
" <span class='thumb-label'>\n"
" <p>{}</p>\n"
" </span>\n"
" </a>\n"
" </div>\n\n"
"\n\n"
"".format(self.htmlfilename,
self.thumbfilename,
self.plotfunc))
def main(app):
static_dir = op.join(app.builder.srcdir, '_static')
target_dir = op.join(app.builder.srcdir, 'examples')
image_dir = op.join(app.builder.srcdir, 'examples/_images')
thumb_dir = op.join(app.builder.srcdir, "example_thumbs")
source_dir = op.abspath(op.join(app.builder.srcdir, '..', 'examples'))
if not op.exists(static_dir):
os.makedirs(static_dir)
if not op.exists(target_dir):
os.makedirs(target_dir)
if not op.exists(image_dir):
os.makedirs(image_dir)
if not op.exists(thumb_dir):
os.makedirs(thumb_dir)
if not op.exists(source_dir):
os.makedirs(source_dir)
banner_data = []
toctree = ("\n\n"
".. toctree::\n"
" :hidden:\n\n")
contents = "\n\n"
# Write individual example files
for filename in sorted(glob.glob(op.join(source_dir, "*.py"))):
ex = ExampleGenerator(filename, target_dir)
banner_data.append({"title": ex.pagetitle,
"url": op.join('examples', ex.htmlfilename),
"thumb": op.join(ex.thumbfilename)})
shutil.copyfile(filename, op.join(target_dir, ex.pyfilename))
output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
docstring=ex.docstring,
end_line=ex.end_line,
components=ex.components,
fname=ex.pyfilename,
img_file=ex.pngfilename)
with open(op.join(target_dir, ex.rstfilename), 'w') as f:
f.write(output)
toctree += ex.toctree_entry()
contents += ex.contents_entry()
if len(banner_data) < 10:
banner_data = (4 * banner_data)[:10]
# write index file
index_file = op.join(target_dir, 'index.rst')
with open(index_file, 'w') as index:
index.write(INDEX_TEMPLATE.format(sphinx_tag="example_gallery",
toctree=toctree,
contents=contents))
def setup(app):
app.connect('builder-inited', main)
| ExampleGenerator |
python | huggingface__transformers | src/transformers/models/mra/configuration_mra.py | {
"start": 780,
"end": 6240
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MraModel`]. It is used to instantiate an MRA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Mra
[uw-madison/mra-base-512-4](https://huggingface.co/uw-madison/mra-base-512-4) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the Mra model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MraModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 1):
The vocabulary size of the `token_type_ids` passed when calling [`MraModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
block_per_row (`int`, *optional*, defaults to 4):
Used to set the budget for the high resolution scale.
approx_mode (`str`, *optional*, defaults to `"full"`):
Controls whether both low and high resolution approximations are used. Set to `"full"` for both low and
high resolution and `"sparse"` for only low resolution.
initial_prior_first_n_blocks (`int`, *optional*, defaults to 0):
The initial number of blocks for which high resolution is used.
initial_prior_diagonal_n_blocks (`int`, *optional*, defaults to 0):
The number of diagonal blocks for which high resolution is used.
Example:
```python
>>> from transformers import MraConfig, MraModel
>>> # Initializing a Mra uw-madison/mra-base-512-4 style configuration
>>> configuration = MraConfig()
>>> # Initializing a model (with random weights) from the uw-madison/mra-base-512-4 style configuration
>>> model = MraModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mra"
def __init__(
self,
vocab_size=50265,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=1,
initializer_range=0.02,
layer_norm_eps=1e-5,
block_per_row=4,
approx_mode="full",
initial_prior_first_n_blocks=0,
initial_prior_diagonal_n_blocks=0,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.block_per_row = block_per_row
self.approx_mode = approx_mode
self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
__all__ = ["MraConfig"]
| MraConfig |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/functional_modules.py | {
"start": 4551,
"end": 9222
} | class ____(torch.nn.Module):
r"""Wrapper class for quantized operations.
The instance of this class can be used instead of the
``torch.ops.quantized`` prefix. See example usage below.
.. note::
This class does not provide a ``forward`` hook. Instead, you must use
one of the underlying functions (e.g. ``add``).
Examples::
>>> q_add = QFunctional()
>>> # xdoctest: +SKIP
>>> a = torch.quantize_per_tensor(torch.tensor(3.0), 1.0, 0, torch.qint32)
>>> b = torch.quantize_per_tensor(torch.tensor(4.0), 1.0, 0, torch.qint32)
>>> q_add.add(a, b) # Equivalent to ``torch.ops.quantized.add(a, b, 1.0, 0)``
Valid operation names:
- add
- cat
- mul
- add_relu
- add_scalar
- mul_scalar
"""
def __init__(self) -> None:
super().__init__()
self.scale = 1.0
self.zero_point = 0
self.activation_post_process = torch.nn.Identity()
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + "scale"] = torch.tensor(self.scale)
destination[prefix + "zero_point"] = torch.tensor(self.zero_point)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
self.scale = float(state_dict.pop(prefix + "scale"))
self.zero_point = int(state_dict.pop(prefix + "zero_point"))
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
False,
missing_keys,
unexpected_keys,
error_msgs,
)
def _get_name(self):
return "QFunctional"
def extra_repr(self):
return f"scale={self.scale}, zero_point={self.zero_point}"
def forward(self, x):
raise RuntimeError(
"Functional is not intended to use the "
+ "'forward'. Please use the underlying operation"
)
r"""Operation equivalent to ``torch.ops.quantized.add``"""
def add(self, x: Tensor, y: Tensor) -> Tensor:
r = ops.quantized.add(x, y, scale=self.scale, zero_point=self.zero_point)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.ops.quantized.add(Tensor, float)``"""
def add_scalar(self, x: Tensor, y: float) -> Tensor:
r = ops.quantized.add_scalar(x, y)
# Note: this operation is not observed because the observation is not
# needed for the quantized op.
return r
r"""Operation equivalent to ``torch.ops.quantized.mul(Tensor, Tensor)``"""
def mul(self, x: Tensor, y: Tensor) -> Tensor:
r = ops.quantized.mul(x, y, scale=self.scale, zero_point=self.zero_point)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.ops.quantized.mul(Tensor, float)``"""
def mul_scalar(self, x: Tensor, y: float) -> Tensor:
r = ops.quantized.mul_scalar(x, y)
# Note: this operation is not observed because the observation is not
# needed for the quantized op.
return r
r"""Operation equivalent to ``torch.ops.quantized.cat``"""
def cat(self, x: list[Tensor], dim: int = 0) -> Tensor:
r = ops.quantized.cat(x, scale=self.scale, zero_point=self.zero_point, dim=dim)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.ops.quantized.add_relu``"""
def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
r = ops.quantized.add_relu(x, y, scale=self.scale, zero_point=self.zero_point)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.ops.quantized.matmul(Tensor, Tensor)``"""
def matmul(self, x: Tensor, y: Tensor) -> Tensor:
r = ops.quantized.matmul(x, y, scale=self.scale, zero_point=self.zero_point)
# Note: this operation is not observed because the observation is not
# needed for the quantized op.
return r
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
assert type(mod) is FloatFunctional, (
"QFunctional.from_float expects an instance of FloatFunctional"
)
scale, zero_point = mod.activation_post_process.calculate_qparams() # type: ignore[operator]
new_mod = QFunctional()
new_mod.scale = float(scale)
new_mod.zero_point = int(zero_point)
return new_mod
| QFunctional |
python | getsentry__sentry | src/sentry/api/serializers/models/team.py | {
"start": 11988,
"end": 12063
} | class ____(TypedDict):
value: str
display: str
| SCIMTeamMemberListItem |
python | apache__airflow | providers/fab/src/airflow/providers/fab/www/extensions/init_views.py | {
"start": 2370,
"end": 2924
} | class ____(Resolver):
"""
OpenAPI endpoint resolver that loads lazily on first use.
This re-implements ``connexion.Resolver.resolve()`` to not eagerly resolve
the endpoint function (and thus avoid importing it in the process), but only
return a placeholder that will be actually resolved when the contained
function is accessed.
"""
def resolve(self, operation):
operation_id = self.resolve_operation_id(operation)
return _LazyResolution(self.resolve_function_from_operation_id, operation_id)
| _LazyResolver |
python | great-expectations__great_expectations | tests/expectations/test_conditions.py | {
"start": 12604,
"end": 19395
} | class ____:
"""Tests for deserialization (converting dicts back to Condition objects)."""
def test_deserialize_comparison_condition(self):
"""Test deserializing a ComparisonCondition from a dict."""
cond_dict = {
"type": "comparison",
"column": {"name": "age"},
"operator": "<",
"parameter": 18,
}
result = deserialize_row_condition(cond_dict)
expected = ComparisonCondition(
column=Column("age"), operator=Operator.LESS_THAN, parameter=18
)
assert result == expected
def test_deserialize_nullity_condition_is_null(self):
"""Test deserializing a NullityCondition with is_null=True from a dict."""
cond_dict = {
"type": "nullity",
"column": {"name": "email"},
"is_null": True,
}
result = deserialize_row_condition(cond_dict)
expected = NullityCondition(column=Column("email"), is_null=True)
assert result == expected
def test_deserialize_nullity_condition_is_not_null(self):
"""Test deserializing a NullityCondition with is_null=False from a dict."""
cond_dict = {
"type": "nullity",
"column": {"name": "email"},
"is_null": False,
}
result = deserialize_row_condition(cond_dict)
expected = NullityCondition(column=Column("email"), is_null=False)
assert result == expected
def test_deserialize_and_condition(self):
"""Test deserializing an AndCondition from a dict."""
cond_dict = {
"type": "and",
"conditions": [
{
"type": "comparison",
"column": {"name": "quantity"},
"operator": ">",
"parameter": 0,
},
{
"type": "comparison",
"column": {"name": "quantity"},
"operator": "<",
"parameter": 10,
},
],
}
result = deserialize_row_condition(cond_dict)
col = Column("quantity")
expected = AndCondition(
conditions=[
ComparisonCondition(column=col, operator=Operator.GREATER_THAN, parameter=0),
ComparisonCondition(column=col, operator=Operator.LESS_THAN, parameter=10),
]
)
assert result == expected
def test_deserialize_or_condition(self):
"""Test deserializing an OrCondition from a dict."""
cond_dict = {
"type": "or",
"conditions": [
{
"type": "comparison",
"column": {"name": "status"},
"operator": "==",
"parameter": "active",
},
{
"type": "comparison",
"column": {"name": "status"},
"operator": "==",
"parameter": "pending",
},
],
}
result = deserialize_row_condition(cond_dict)
col = Column("status")
expected = OrCondition(
conditions=[
ComparisonCondition(column=col, operator=Operator.EQUAL, parameter="active"),
ComparisonCondition(column=col, operator=Operator.EQUAL, parameter="pending"),
]
)
assert result == expected
def test_deserialize_nested_and_or_condition(self):
"""Test deserializing nested AND/OR conditions from a dict."""
cond_dict = {
"type": "or",
"conditions": [
{
"type": "and",
"conditions": [
{
"type": "comparison",
"column": {"name": "age"},
"operator": ">",
"parameter": 18,
},
{
"type": "comparison",
"column": {"name": "status"},
"operator": "==",
"parameter": "active",
},
],
},
{
"type": "comparison",
"column": {"name": "age"},
"operator": "<",
"parameter": 65,
},
],
}
result = deserialize_row_condition(cond_dict)
age = Column("age")
status = Column("status")
expected = OrCondition(
conditions=[
AndCondition(
conditions=[
ComparisonCondition(
column=age, operator=Operator.GREATER_THAN, parameter=18
),
ComparisonCondition(
column=status, operator=Operator.EQUAL, parameter="active"
),
]
),
ComparisonCondition(column=age, operator=Operator.LESS_THAN, parameter=65),
]
)
assert result == expected
def test_deserialize_string_returns_string(self):
"""Test that deserializing a string returns the string unchanged."""
result = deserialize_row_condition("some_condition")
assert result == "some_condition"
assert isinstance(result, str)
def test_deserialize_none_returns_none(self):
"""Test that deserializing None returns None."""
result = deserialize_row_condition(None)
assert result is None
def test_deserialize_invalid_type_raises_error(self):
"""Test that deserializing an invalid type raises InvalidConditionTypeError."""
with pytest.raises(InvalidConditionTypeError):
deserialize_row_condition(12345)
def test_deserialize_dict_without_type_raises_error(self):
"""Test that deserializing a dict without a type field raises ConditionParserError."""
cond_dict = {
"column": {"name": "age"},
"some_field": "value",
}
with pytest.raises(ConditionParserError):
deserialize_row_condition(cond_dict)
def test_deserialize_dict_with_unknown_type_raises_error(self):
"""Test that deserializing a dict with unknown type raises ConditionParserError."""
cond_dict = {
"type": "unknown_type",
"column": {"name": "age"},
}
with pytest.raises(ConditionParserError):
deserialize_row_condition(cond_dict)
| TestConditionDeserialization |
python | dask__dask | dask/tests/test_task_spec.py | {
"start": 17946,
"end": 18368
} | class ____:
def __getstate__(self):
return "Nope"
def __setstate__(self, state):
raise ValueError(state)
def __call__(self):
return 1
# This is duplicated from distributed/utils_test.py
def _get_gc_overhead():
class _CustomObject:
def __sizeof__(self):
return 0
return sys.getsizeof(_CustomObject())
_size_obj = _get_gc_overhead()
| RaiseOnDeSerialization |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 370577,
"end": 380931
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(1808365978)
# expected statistic and p-values generated using R at
# https://rdrr.io/cran/cultevo/, e.g.
# library(cultevo)
# data = rbind(c(72, 47, 73, 35, 47, 96, 30, 59, 41, 36, 56, 49, 81, 43,
# 70, 47, 28, 28, 62, 20, 61, 20, 80, 24, 50),
# c(68, 52, 60, 34, 44, 20, 65, 88, 21, 81, 48, 31, 31, 67,
# 69, 94, 30, 24, 40, 87, 70, 43, 50, 96, 43),
# c(81, 13, 85, 35, 79, 12, 92, 86, 21, 64, 16, 64, 68, 17,
# 16, 89, 71, 43, 43, 36, 54, 13, 66, 51, 55))
# result = page.test(data, verbose=FALSE)
# Most test cases generated to achieve common critical p-values so that
# results could be checked (to limited precision) against tables in
# scipy.stats.page_trend_test reference [1]
rng = np.random.default_rng(3113562111)
data_3_25 = rng.random((3, 25))
rng = np.random.default_rng(3113562111)
data_10_26 = rng.random((10, 26))
ts = [
(12949, 0.275539045444, False, 'asymptotic', data_3_25),
(47221, 0.5703651063709, False, 'asymptotic', data_10_26),
(12332, 0.7722477197436702, False, 'asymptotic',
[[72, 47, 73, 35, 47, 96, 30, 59, 41, 36, 56, 49, 81,
43, 70, 47, 28, 28, 62, 20, 61, 20, 80, 24, 50],
[68, 52, 60, 34, 44, 20, 65, 88, 21, 81, 48, 31, 31,
67, 69, 94, 30, 24, 40, 87, 70, 43, 50, 96, 43],
[81, 13, 85, 35, 79, 12, 92, 86, 21, 64, 16, 64, 68,
17, 16, 89, 71, 43, 43, 36, 54, 13, 66, 51, 55]]),
(266, 4.121656378600823e-05, False, 'exact',
[[1.5, 4., 8.3, 5, 19, 11],
[5, 4, 3.5, 10, 20, 21],
[8.4, 3.2, 10, 12, 14, 15]]),
(332, 0.9566400920502488, True, 'exact',
[[4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1],
[4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1],
[3, 4, 1, 2], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4]]),
(241, 0.9622210164861476, True, 'exact',
[[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
[3, 2, 1], [2, 1, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3],
[1, 2, 3], [1, 2, 3], [1, 2, 3]]),
(197, 0.9619432897162209, True, 'exact',
[[6, 5, 4, 3, 2, 1], [6, 5, 4, 3, 2, 1], [1, 3, 4, 5, 2, 6]]),
(423, 0.9590458306880073, True, 'exact',
[[5, 4, 3, 2, 1], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1],
[5, 4, 3, 2, 1], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1],
[4, 1, 3, 2, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5],
[1, 2, 3, 4, 5]]),
(217, 0.9693058575034678, True, 'exact',
[[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
[2, 1, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3],
[1, 2, 3]]),
(395, 0.991530289351305, True, 'exact',
[[7, 6, 5, 4, 3, 2, 1], [7, 6, 5, 4, 3, 2, 1],
[6, 5, 7, 4, 3, 2, 1], [1, 2, 3, 4, 5, 6, 7]]),
(117, 0.9997817843373017, True, 'exact',
[[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
[3, 2, 1], [3, 2, 1], [3, 2, 1], [2, 1, 3], [1, 2, 3]]),
]
@pytest.mark.parametrize("L, p, ranked, method, data", ts)
def test_accuracy(self, L, p, ranked, method, data):
res = stats.page_trend_test(data, ranked=ranked, method=method)
assert_equal(L, res.statistic)
assert_allclose(p, res.pvalue)
assert_equal(method, res.method)
ts2 = [
(542, 0.9481266260876332, True, 'exact',
[[10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
[1, 8, 4, 7, 6, 5, 9, 3, 2, 10]]),
(1322, 0.9993113928199309, True, 'exact',
[[10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [9, 2, 8, 7, 6, 5, 4, 3, 10, 1],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]),
(2286, 0.9908688345484833, True, 'exact',
[[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [1, 3, 5, 6, 4, 7, 2, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8]]),
]
# only the first of these appears slow because intermediate data are
# cached and used on the rest
@pytest.mark.parametrize("L, p, ranked, method, data", ts2)
@pytest.mark.slow()
def test_accuracy2(self, L, p, ranked, method, data):
res = stats.page_trend_test(data, ranked=ranked, method=method)
assert_equal(L, res.statistic)
assert_allclose(p, res.pvalue)
assert_equal(method, res.method)
def test_options(self):
rng = np.random.default_rng(183973867)
m, n = 10, 20
predicted_ranks = np.arange(1, n+1)
perm = rng.permutation(np.arange(n))
data = rng.random((m, n))
ranks = stats.rankdata(data, axis=1)
res1 = stats.page_trend_test(ranks)
res2 = stats.page_trend_test(ranks, ranked=True)
res3 = stats.page_trend_test(data, ranked=False)
res4 = stats.page_trend_test(ranks, predicted_ranks=predicted_ranks)
res5 = stats.page_trend_test(ranks[:, perm],
predicted_ranks=predicted_ranks[perm])
assert_equal(res1.statistic, res2.statistic)
assert_equal(res1.statistic, res3.statistic)
assert_equal(res1.statistic, res4.statistic)
assert_equal(res1.statistic, res5.statistic)
def test_Ames_assay(self):
# test from _page_trend_test.py [2] page 151; data on page 144
data = [[101, 117, 111], [91, 90, 107], [103, 133, 121],
[136, 140, 144], [190, 161, 201], [146, 120, 116]]
data = np.array(data).T
predicted_ranks = np.arange(1, 7)
res = stats.page_trend_test(data, ranked=False,
predicted_ranks=predicted_ranks,
method="asymptotic")
assert_equal(res.statistic, 257)
assert_almost_equal(res.pvalue, 0.0035, decimal=4)
res = stats.page_trend_test(data, ranked=False,
predicted_ranks=predicted_ranks,
method="exact")
assert_equal(res.statistic, 257)
assert_almost_equal(res.pvalue, 0.0023, decimal=4)
def test_input_validation(self):
# test data not a 2d array
with assert_raises(ValueError, match="`data` must be a 2d array."):
stats.page_trend_test(None)
with assert_raises(ValueError, match="`data` must be a 2d array."):
stats.page_trend_test([])
with assert_raises(ValueError, match="`data` must be a 2d array."):
stats.page_trend_test([1, 2])
with assert_raises(ValueError, match="`data` must be a 2d array."):
stats.page_trend_test([[[1]]])
# test invalid dimensions
rng = np.random.default_rng(2482566048)
with assert_raises(ValueError, match="Page's L is only appropriate"):
stats.page_trend_test(rng.random((1, 3)))
with assert_raises(ValueError, match="Page's L is only appropriate"):
stats.page_trend_test(rng.random((2, 2)))
# predicted ranks must include each integer [1, 2, 3] exactly once
message = "`predicted_ranks` must include each integer"
with assert_raises(ValueError, match=message):
stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
predicted_ranks=[0, 1, 2])
with assert_raises(ValueError, match=message):
stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
predicted_ranks=[1.1, 2, 3])
with assert_raises(ValueError, match=message):
stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
predicted_ranks=[1, 2, 3, 3])
with assert_raises(ValueError, match=message):
stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
predicted_ranks="invalid")
# test improperly ranked data
with assert_raises(ValueError, match="`data` is not properly ranked"):
stats.page_trend_test([[0, 2, 3], [1, 2, 3]], True)
with assert_raises(ValueError, match="`data` is not properly ranked"):
stats.page_trend_test([[1, 2, 3], [1, 2, 4]], True)
# various
with assert_raises(ValueError, match="`data` contains NaNs"):
stats.page_trend_test([[1, 2, 3], [1, 2, np.nan]],
ranked=False)
with assert_raises(ValueError, match="`method` must be in"):
stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
method="ekki")
with assert_raises(TypeError, match="`ranked` must be boolean."):
stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
ranked="ekki")
rng = np.random.default_rng(902340982)
x = rng.random(10)
y = rng.random(10)
@pytest.mark.parametrize("fun, args",
[(stats.wilcoxon, (x,)),
(stats.ks_1samp, (x, stats.norm.cdf)), # type: ignore[attr-defined] # noqa: E501
(stats.ks_2samp, (x, y)),
(stats.kstest, (x, y)),
])
def test_rename_mode_method(fun, args):
res = fun(*args, method='exact')
res2 = fun(*args, mode='exact')
assert_equal(res, res2)
err = rf"{fun.__name__}() got multiple values for argument"
with pytest.raises(TypeError, match=re.escape(err)):
fun(*args, method='exact', mode='exact')
| TestPageTrendTest |
python | ray-project__ray | python/ray/dashboard/modules/metrics/dashboards/common.py | {
"start": 11708,
"end": 11966
} | class ____(Enum):
GRAPH = GRAPH_PANEL_TEMPLATE
HEATMAP = HEATMAP_TEMPLATE
PIE_CHART = PIE_CHART_TEMPLATE
STAT = STAT_PANEL_TEMPLATE
GAUGE = GAUGE_PANEL_TEMPLATE
BAR_CHART = BAR_CHART_PANEL_TEMPLATE
@DeveloperAPI
@dataclass
| PanelTemplate |
python | google__flatbuffers | grpc/examples/python/greeter/models/HelloRequest.py | {
"start": 175,
"end": 1372
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = HelloRequest()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsHelloRequest(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# HelloRequest
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# HelloRequest
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def HelloRequestStart(builder):
builder.StartObject(1)
def Start(builder):
HelloRequestStart(builder)
def HelloRequestAddName(builder, name):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def AddName(builder, name):
HelloRequestAddName(builder, name)
def HelloRequestEnd(builder):
return builder.EndObject()
def End(builder):
return HelloRequestEnd(builder)
| HelloRequest |
python | huggingface__transformers | src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | {
"start": 27860,
"end": 31734
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
embed_dim = config.hidden_size
dropout = config.adaptor_dropout
self.kernel_size = config.adaptor_kernel_size
self.stride = config.adaptor_stride
# 1. residual convolution
self.residual_layer_norm = nn.LayerNorm(embed_dim)
self.residual_conv = nn.Conv1d(
embed_dim,
2 * embed_dim,
self.kernel_size,
stride=self.stride,
padding=self.stride // 2,
)
self.activation = nn.GLU(dim=1)
# Self-Attention
self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
self.self_attn_conv = nn.Conv1d(
embed_dim,
2 * embed_dim,
self.kernel_size,
stride=self.stride,
padding=self.stride // 2,
)
self.self_attn = SeamlessM4Tv2ConformerSelfAttention(config, use_position_embeddings=False)
self.self_attn_dropout = nn.Dropout(dropout)
# Feed-forward
self.ffn_layer_norm = nn.LayerNorm(embed_dim)
self.ffn = SeamlessM4Tv2ConformerFeedForward(config, act_fn="relu", dropout=dropout)
def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
pad = self.kernel_size // 2
seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1)
seq_lens = ((seq_lens + 2 * pad - self.kernel_size) / self.stride) + 1
return seq_lens.floor()
def forward(
self,
hidden_states,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
):
residual = self.residual_layer_norm(hidden_states)
# Apply pooling to the residual to match the sequence length of the
# multi-head attention output.
# (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len)
residual = residual.transpose(1, 2)
residual = self.residual_conv(residual)
residual = self.activation(residual)
# (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim)
residual = residual.transpose(1, 2)
hidden_states = self.self_attn_layer_norm(hidden_states)
# Apply pooling before feeding to the multihead-attention layer.
# (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.self_attn_conv(hidden_states)
hidden_states = self.activation(hidden_states)
# (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim)
hidden_states = hidden_states.transpose(1, 2)
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
hidden_states.device
)
attention_mask = _compute_new_attention_mask(hidden_states=hidden_states, seq_lens=sub_sampled_lengths)
attention_mask = _prepare_4d_attention_mask(
attention_mask,
hidden_states.dtype,
)
# The rest of the computation is identical to a vanilla Transformer
# encoder layer.
hidden_states, attn_weights = self.self_attn(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = self.self_attn_dropout(hidden_states)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.ffn_layer_norm(hidden_states)
hidden_states = self.ffn(hidden_states) + residual
return hidden_states
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerAdapter with SeamlessM4T->SeamlessM4Tv2
| SeamlessM4Tv2ConformerAdapterLayer |
python | walkccc__LeetCode | solutions/2042. Check if Numbers Are Ascending in a Sentence/2042.py | {
"start": 0,
"end": 241
} | class ____:
def areNumbersAscending(self, s: str) -> bool:
prev = 0
for token in s.split():
if token.isdigit():
num = int(token)
if num <= prev:
return False
prev = num
return True
| Solution |
python | pytorch__pytorch | test/distributed/test_composability.py | {
"start": 1155,
"end": 1896
} | class ____(torch.nn.Module):
def __init__(self, d_hid: int):
super().__init__()
self.net1 = torch.nn.Linear(d_hid, d_hid)
self.relu = torch.nn.ReLU()
self.net2 = torch.nn.Linear(d_hid, d_hid)
self.init_weights()
def init_weights(self):
# ensure a proper init otherwise gradient tests will be more likely to get zero grad values
torch.nn.init.kaiming_uniform_(
self.net1.weight, mode="fan_in", nonlinearity="relu"
)
torch.nn.init.kaiming_uniform_(
self.net2.weight, mode="fan_in", nonlinearity="relu"
)
def forward(self, x):
x = self.net1(x)
x = self.relu(x)
x = self.net2(x)
return x
| MLPModule |
python | sympy__sympy | sympy/physics/optics/gaussopt.py | {
"start": 6556,
"end": 6920
} | class ____(RayTransferMatrix):
"""
Ray Transfer Matrix for reflection.
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.optics import FlatMirror
>>> FlatMirror()
Matrix([
[1, 0],
[0, 1]])
"""
def __new__(cls):
return RayTransferMatrix.__new__(cls, 1, 0, 0, 1)
| FlatMirror |
python | pypa__pip | src/pip/_vendor/urllib3/util/url.py | {
"start": 3003,
"end": 14296
} | class ____(namedtuple("Url", url_attrs)):
"""
Data structure for representing an HTTP URL. Used as a return value for
:func:`parse_url`. Both the scheme and host are normalized as they are
both case-insensitive according to RFC 3986.
"""
__slots__ = ()
def __new__(
cls,
scheme=None,
auth=None,
host=None,
port=None,
path=None,
query=None,
fragment=None,
):
if path and not path.startswith("/"):
path = "/" + path
if scheme is not None:
scheme = scheme.lower()
return super(Url, cls).__new__(
cls, scheme, auth, host, port, path, query, fragment
)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or "/"
if self.query is not None:
uri += "?" + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return "%s:%d" % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = u""
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + u"://"
if auth is not None:
url += auth + u"@"
if host is not None:
url += host
if port is not None:
url += u":" + str(port)
if path is not None:
url += path
if query is not None:
url += u"?" + query
if fragment is not None:
url += u"#" + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
.. deprecated:: 1.25
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, "", None
return s[:min_idx], s[min_idx + 1 :], min_delim
def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"):
"""Percent-encodes a URI component without reapplying
onto an already percent-encoded component.
"""
if component is None:
return component
component = six.ensure_text(component)
# Normalize existing percent-encoded bytes.
# Try to see if the component we're encoding is already percent-encoded
# so we can skip all '%' characters but still encode all others.
component, percent_encodings = PERCENT_RE.subn(
lambda match: match.group(0).upper(), component
)
uri_bytes = component.encode("utf-8", "surrogatepass")
is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
encoded_component = bytearray()
for i in range(0, len(uri_bytes)):
# Will return a single character bytestring on both Python 2 & 3
byte = uri_bytes[i : i + 1]
byte_ord = ord(byte)
if (is_percent_encoded and byte == b"%") or (
byte_ord < 128 and byte.decode() in allowed_chars
):
encoded_component += byte
continue
encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
return encoded_component.decode(encoding)
def _remove_path_dot_segments(path):
# See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
segments = path.split("/") # Turn the path into a list of segments
output = [] # Initialize the variable to use to store output
for segment in segments:
# '.' is the current directory, so ignore it, it is superfluous
if segment == ".":
continue
# Anything other than '..', should be appended to the output
elif segment != "..":
output.append(segment)
# In this case segment == '..', if we can, we should pop the last
# element
elif output:
output.pop()
# If the path starts with '/' and the output is empty or the first string
# is non-empty
if path.startswith("/") and (not output or output[0]):
output.insert(0, "")
# If the path starts with '/.' or '/..' ensure we add one more empty
# string to add a trailing '/'
if path.endswith(("/.", "/..")):
output.append("")
return "/".join(output)
def _normalize_host(host, scheme):
if host:
if isinstance(host, six.binary_type):
host = six.ensure_str(host)
if scheme in NORMALIZABLE_SCHEMES:
is_ipv6 = IPV6_ADDRZ_RE.match(host)
if is_ipv6:
# IPv6 hosts of the form 'a::b%zone' are encoded in a URL as
# such per RFC 6874: 'a::b%25zone'. Unquote the ZoneID
# separator as necessary to return a valid RFC 4007 scoped IP.
match = ZONE_ID_RE.search(host)
if match:
start, end = match.span(1)
zone_id = host[start:end]
if zone_id.startswith("%25") and zone_id != "%25":
zone_id = zone_id[3:]
else:
zone_id = zone_id[1:]
zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS)
return host[:start].lower() + zone_id + host[end:]
else:
return host.lower()
elif not IPV4_RE.match(host):
return six.ensure_str(
b".".join([_idna_encode(label) for label in host.split(".")])
)
return host
def _idna_encode(name):
if name and any(ord(x) >= 128 for x in name):
try:
from pip._vendor import idna
except ImportError:
six.raise_from(
LocationParseError("Unable to parse URL without the 'idna' module"),
None,
)
try:
return idna.encode(name.lower(), strict=True, std3_rules=True)
except idna.IDNAError:
six.raise_from(
LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None
)
return name.lower().encode("ascii")
def _encode_target(target):
"""Percent-encodes a request target so that there are no invalid characters"""
path, query = TARGET_RE.match(target).groups()
target = _encode_invalid_chars(path, PATH_CHARS)
query = _encode_invalid_chars(query, QUERY_CHARS)
if query is not None:
target += "?" + query
return target
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
This parser is RFC 3986 and RFC 6874 compliant.
The parser logic and helper functions are based heavily on
work done in the ``rfc3986`` module.
:param str url: URL to parse into a :class:`.Url` namedtuple.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
if not url:
# Empty
return Url()
source_url = url
if not SCHEME_RE.search(url):
url = "//" + url
try:
scheme, authority, path, query, fragment = URI_RE.match(url).groups()
normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES
if scheme:
scheme = scheme.lower()
if authority:
auth, _, host_port = authority.rpartition("@")
auth = auth or None
host, port = _HOST_PORT_RE.match(host_port).groups()
if auth and normalize_uri:
auth = _encode_invalid_chars(auth, USERINFO_CHARS)
if port == "":
port = None
else:
auth, host, port = None, None, None
if port is not None:
port = int(port)
if not (0 <= port <= 65535):
raise LocationParseError(url)
host = _normalize_host(host, scheme)
if normalize_uri and path:
path = _remove_path_dot_segments(path)
path = _encode_invalid_chars(path, PATH_CHARS)
if normalize_uri and query:
query = _encode_invalid_chars(query, QUERY_CHARS)
if normalize_uri and fragment:
fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
except (ValueError, AttributeError):
return six.raise_from(LocationParseError(source_url), None)
# For the sake of backwards compatibility we put empty
# string values for path if there are any defined values
# beyond the path in the URL.
# TODO: Remove this when we break backwards compatibility.
if not path:
if query is not None or fragment is not None:
path = ""
else:
path = None
# Ensure that each part of the URL is a `str` for
# backwards compatibility.
if isinstance(url, six.text_type):
ensure_func = six.ensure_text
else:
ensure_func = six.ensure_str
def ensure_type(x):
return x if x is None else ensure_func(x)
return Url(
scheme=ensure_type(scheme),
auth=ensure_type(auth),
host=ensure_type(host),
port=port,
path=ensure_type(path),
query=ensure_type(query),
fragment=ensure_type(fragment),
)
def get_host(url):
"""
Deprecated. Use :func:`parse_url` instead.
"""
p = parse_url(url)
return p.scheme or "http", p.hostname, p.port
| Url |
python | arrow-py__arrow | tests/test_parser.py | {
"start": 58763,
"end": 61283
} | class ____:
def test_parse_search(self):
assert self.parser.parse(
"Today is 25 of September of 2003", "DD of MMMM of YYYY"
) == datetime(2003, 9, 25)
def test_parse_search_with_numbers(self):
assert self.parser.parse(
"2000 people met the 2012-01-01 12:05:10", "YYYY-MM-DD HH:mm:ss"
) == datetime(2012, 1, 1, 12, 5, 10)
assert self.parser.parse(
"Call 01-02-03 on 79-01-01 12:05:10", "YY-MM-DD HH:mm:ss"
) == datetime(1979, 1, 1, 12, 5, 10)
def test_parse_search_with_names(self):
assert self.parser.parse("June was born in May 1980", "MMMM YYYY") == datetime(
1980, 5, 1
)
def test_parse_search_locale_with_names(self):
p = parser.DateTimeParser("sv-se")
assert p.parse("Jan föddes den 31 Dec 1980", "DD MMM YYYY") == datetime(
1980, 12, 31
)
assert p.parse("Jag föddes den 25 Augusti 1975", "DD MMMM YYYY") == datetime(
1975, 8, 25
)
def test_parse_search_fails(self):
with pytest.raises(parser.ParserError):
self.parser.parse("Jag föddes den 25 Augusti 1975", "DD MMMM YYYY")
def test_escape(self):
format = "MMMM D, YYYY [at] h:mma"
assert self.parser.parse(
"Thursday, December 10, 2015 at 5:09pm", format
) == datetime(2015, 12, 10, 17, 9)
format = "[MMMM] M D, YYYY [at] h:mma"
assert self.parser.parse("MMMM 12 10, 2015 at 5:09pm", format) == datetime(
2015, 12, 10, 17, 9
)
format = "[It happened on] MMMM Do [in the year] YYYY [a long time ago]"
assert self.parser.parse(
"It happened on November 25th in the year 1990 a long time ago", format
) == datetime(1990, 11, 25)
format = "[It happened on] MMMM Do [in the][ year] YYYY [a long time ago]"
assert self.parser.parse(
"It happened on November 25th in the year 1990 a long time ago", format
) == datetime(1990, 11, 25)
format = "[I'm][ entirely][ escaped,][ weee!]"
assert self.parser.parse("I'm entirely escaped, weee!", format) == datetime(
1, 1, 1
)
# Special RegEx characters
format = "MMM DD, YYYY |^${}().*+?<>-& h:mm A"
assert self.parser.parse(
"Dec 31, 2017 |^${}().*+?<>-& 2:00 AM", format
) == datetime(2017, 12, 31, 2, 0)
@pytest.mark.usefixtures("dt_parser")
| TestDateTimeParserSearchDate |
python | langchain-ai__langchain | libs/langchain/langchain_classic/retrievers/ensemble.py | {
"start": 1413,
"end": 10863
} | class ____(BaseRetriever):
"""Retriever that ensembles the multiple retrievers.
It uses a rank fusion.
Args:
retrievers: A list of retrievers to ensemble.
weights: A list of weights corresponding to the retrievers. Defaults to equal
weighting for all retrievers.
c: A constant added to the rank, controlling the balance between the importance
of high-ranked items and the consideration given to lower-ranked items.
id_key: The key in the document's metadata used to determine unique documents.
If not specified, page_content is used.
"""
retrievers: list[RetrieverLike]
weights: list[float]
c: int = 60
id_key: str | None = None
@property
def config_specs(self) -> list[ConfigurableFieldSpec]:
"""List configurable fields for this runnable."""
return get_unique_config_specs(
spec for retriever in self.retrievers for spec in retriever.config_specs
)
@model_validator(mode="before")
@classmethod
def _set_weights(cls, values: dict[str, Any]) -> Any:
if not values.get("weights"):
n_retrievers = len(values["retrievers"])
values["weights"] = [1 / n_retrievers] * n_retrievers
return values
@override
def invoke(
self,
input: str,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> list[Document]:
from langchain_core.callbacks import CallbackManager
config = ensure_config(config)
callback_manager = CallbackManager.configure(
config.get("callbacks"),
None,
verbose=kwargs.get("verbose", False),
inheritable_tags=config.get("tags", []),
local_tags=self.tags,
inheritable_metadata=config.get("metadata", {}),
local_metadata=self.metadata,
)
run_manager = callback_manager.on_retriever_start(
None,
input,
name=config.get("run_name") or self.get_name(),
**kwargs,
)
try:
result = self.rank_fusion(input, run_manager=run_manager, config=config)
except Exception as e:
run_manager.on_retriever_error(e)
raise
else:
run_manager.on_retriever_end(
result,
**kwargs,
)
return result
@override
async def ainvoke(
self,
input: str,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> list[Document]:
from langchain_core.callbacks import AsyncCallbackManager
config = ensure_config(config)
callback_manager = AsyncCallbackManager.configure(
config.get("callbacks"),
None,
verbose=kwargs.get("verbose", False),
inheritable_tags=config.get("tags", []),
local_tags=self.tags,
inheritable_metadata=config.get("metadata", {}),
local_metadata=self.metadata,
)
run_manager = await callback_manager.on_retriever_start(
None,
input,
name=config.get("run_name") or self.get_name(),
**kwargs,
)
try:
result = await self.arank_fusion(
input,
run_manager=run_manager,
config=config,
)
except Exception as e:
await run_manager.on_retriever_error(e)
raise
else:
await run_manager.on_retriever_end(
result,
**kwargs,
)
return result
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""Get the relevant documents for a given query.
Args:
query: The query to search for.
run_manager: The callback handler to use.
Returns:
A list of reranked documents.
"""
# Get fused result of the retrievers.
return self.rank_fusion(query, run_manager)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
run_manager: The callback handler to use.
Returns:
A list of reranked documents.
"""
# Get fused result of the retrievers.
return await self.arank_fusion(query, run_manager)
def rank_fusion(
self,
query: str,
run_manager: CallbackManagerForRetrieverRun,
*,
config: RunnableConfig | None = None,
) -> list[Document]:
"""Rank fusion.
Retrieve the results of the retrievers and use rank_fusion_func to get
the final result.
Args:
query: The query to search for.
run_manager: The callback handler to use.
config: Optional configuration for the retrievers.
Returns:
A list of reranked documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.invoke(
query,
patch_config(
config,
callbacks=run_manager.get_child(tag=f"retriever_{i + 1}"),
),
)
for i, retriever in enumerate(self.retrievers)
]
# Enforce that retrieved docs are Documents for each list in retriever_docs
for i in range(len(retriever_docs)):
retriever_docs[i] = [
Document(page_content=cast("str", doc)) if isinstance(doc, str) else doc # type: ignore[unreachable]
for doc in retriever_docs[i]
]
# apply rank fusion
return self.weighted_reciprocal_rank(retriever_docs)
async def arank_fusion(
self,
query: str,
run_manager: AsyncCallbackManagerForRetrieverRun,
*,
config: RunnableConfig | None = None,
) -> list[Document]:
"""Rank fusion.
Asynchronously retrieve the results of the retrievers
and use rank_fusion_func to get the final result.
Args:
query: The query to search for.
run_manager: The callback handler to use.
config: Optional configuration for the retrievers.
Returns:
A list of reranked documents.
"""
# Get the results of all retrievers.
retriever_docs = await asyncio.gather(
*[
retriever.ainvoke(
query,
patch_config(
config,
callbacks=run_manager.get_child(tag=f"retriever_{i + 1}"),
),
)
for i, retriever in enumerate(self.retrievers)
],
)
# Enforce that retrieved docs are Documents for each list in retriever_docs
for i in range(len(retriever_docs)):
retriever_docs[i] = [
Document(page_content=doc) if not isinstance(doc, Document) else doc
for doc in retriever_docs[i]
]
# apply rank fusion
return self.weighted_reciprocal_rank(retriever_docs)
def weighted_reciprocal_rank(
    self,
    doc_lists: list[list[Document]],
) -> list[Document]:
    """Perform weighted Reciprocal Rank Fusion on multiple rank lists.

    You can find more details about RRF here:
    https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf.

    Args:
        doc_lists: A list of rank lists, where each rank list contains unique items.

    Returns:
        The final aggregated list of items sorted by their weighted RRF
        scores in descending order.
    """
    if len(doc_lists) != len(self.weights):
        msg = "Number of rank lists must be equal to the number of weights."
        raise ValueError(msg)

    def fusion_key(doc: Document) -> str:
        # A document is identified by its content unless an id_key is
        # configured, in which case that metadata field is used instead.
        return doc.page_content if self.id_key is None else doc.metadata[self.id_key]

    # Accumulate each document's weighted RRF contribution. Identical keys
    # appearing across retrievers are collapsed and scored cumulatively.
    rrf_score: dict[str, float] = defaultdict(float)
    for weight, ranked_docs in zip(self.weights, doc_lists, strict=False):
        for position, doc in enumerate(ranked_docs, start=1):
            rrf_score[fusion_key(doc)] += weight / (position + self.c)

    # Deduplicate across all lists by key, then order by descending score.
    deduped_docs = unique_by_key(chain.from_iterable(doc_lists), fusion_key)
    return sorted(deduped_docs, reverse=True, key=lambda doc: rrf_score[fusion_key(doc)])
| EnsembleRetriever |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 35407,
"end": 36707
} | class ____(FromClause):
    """A :class:`.FromClause` that has a name.

    Examples include tables, subqueries, CTEs, aliased tables.

    .. versionadded:: 2.0

    """

    # A named from-clause exposes a usable name, so its columns may be
    # referenced as ``name.column`` in generated SQL.
    named_with_column = True

    # SQL-visible name of this from-clause; assigned by subclasses
    # (e.g. the table name or alias name).
    name: str

    @util.preload_module("sqlalchemy.sql.sqltypes")
    def table_valued(self) -> TableValuedColumn[Any]:
        """Return a :class:`_sql.TableValuedColumn` object for this
        :class:`_expression.FromClause`.

        A :class:`_sql.TableValuedColumn` is a :class:`_sql.ColumnElement` that
        represents a complete row in a table. Support for this construct is
        backend dependent, and is supported in various forms by backends
        such as PostgreSQL, Oracle Database and SQL Server.

        E.g.:

        .. sourcecode:: pycon+sql

            >>> from sqlalchemy import select, column, func, table
            >>> a = table("a", column("id"), column("x"), column("y"))
            >>> stmt = select(func.row_to_json(a.table_valued()))
            >>> print(stmt)
            {printsql}SELECT row_to_json(a) AS row_to_json_1
            FROM a

        .. versionadded:: 1.4.0b2

        .. seealso::

            :ref:`tutorial_functions` - in the :ref:`unified_tutorial`

        """
        # TABLEVALUE is the special type marking a whole-row expression.
        return TableValuedColumn(self, type_api.TABLEVALUE)
| NamedFromClause |
python | pytorch__pytorch | torch/_inductor/graph.py | {
"start": 9533,
"end": 107259
} | class ____(torch.fx.Interpreter):
graph_outputs: list[ir.IRNode]
def __init__(
self,
gm: torch.fx.GraphModule,
example_inputs: Optional[Sequence[object]] = None,
shape_env: Optional[ShapeEnv] = None,
graph_id: Optional[int] = None,
cpp_wrapper: bool = False,
aot_mode: bool = False,
layout_opt: Optional[bool] = None,
extern_node_serializer: Optional[
Callable[[list[ir.ExternKernelNode]], Any]
] = None,
is_inference: bool = False,
is_backward: bool = False,
is_const_graph: bool = False,
const_output_index: Optional[dict[str, int]] = None,
const_wrapper_code: Optional[str] = None,
const_kernel_code: Optional[str] = None,
const_module: Optional[GraphLowering] = None,
name: Optional[str] = None,
inputs_to_check: Optional[Sequence[int]] = None,
fx_wrapper: bool = False,
) -> None:
super().__init__(gm)
self.example_inputs = example_inputs
self.layout_opt = (
layout_opt
if layout_opt is not None
else self.decide_layout_opt(gm, is_inference=is_inference)
)
self.num_channels_last_conv = 0
self.is_inference = is_inference
self.is_backward = is_backward
self.is_const_graph = is_const_graph
self.const_wrapper_code = const_wrapper_code
self.const_kernel_code = const_kernel_code
self.const_module = const_module
self.inputs_to_check = inputs_to_check
self.extra_traceback = False # we do our own error wrapping
if shape_env is None:
shape_env = ShapeEnv()
self.reuse_shape_env = False
else:
self.reuse_shape_env = True
self._shape_env = shape_env
# We're going to mutate ras_by_symbol as we finish generating them
self.ras_by_symbol: dict[Optional[sympy.Symbol], list[RuntimeAssert]] = (
shape_env.deferred_runtime_asserts.copy()
)
self.bound_unbacked_symbols = OrderedSet[sympy.Symbol]()
self.sizevars = SizeVarAllocator(shape_env)
self.graph_input_names: list[str] = []
self.graph_inputs: dict[str, Union[TensorBox, TorchBindObject, sympy.Expr]] = {}
self.graph_inputs_original: dict[str, InputBuffer] = {}
self.partition_maps: Optional[list[GraphPartitionMap]] = None
self.zero_dim_cpu_tensor_list: OrderedSet[str] = OrderedSet()
self.device_types: OrderedSet[str] = (
const_module.device_types if const_module else OrderedSet()
)
self.device_idxs: OrderedSet[int] = (
const_module.device_idxs if const_module else OrderedSet()
)
self.device_type = "cpu"
self.additional_buffer_deps: dict[str, OrderedSet[str]] = defaultdict(
OrderedSet
)
# Inplace padding may require Inductor to allocate slightly larger
# tensor for padding.
self.buffer_to_padded_size: dict[str, list[int]] = {}
self.buffers: list[ir.Buffer] = []
self.operations: list[ir.Operation] = []
self.const_output_index: dict[str, int] = (
const_output_index if const_output_index else {}
)
self.folded_constants: OrderedSet[str] = (
OrderedSet(const_output_index.keys())
if const_output_index
else OrderedSet()
)
self.constants: dict[str, torch.Tensor] = (
const_module.constants if const_module else {}
)
self.named_buffers: dict[str, torch.Tensor] = (
const_module.named_buffers if const_module else {}
)
self.named_parameters: dict[str, torch.Tensor] = (
const_module.named_parameters if const_module else {}
)
self.torchbind_constants: dict[
str, Union[torch._C.ScriptObject, FakeScriptObject]
] = {}
self.seen_subgraphs: dict[str, ir.Subgraph] = {}
self.constant_reprs: dict[str, str] = {}
self.removed_operations: OrderedSet[str] = OrderedSet()
self.removed_buffers: OrderedSet[str] = OrderedSet()
self.removed_inplace_buffers: OrderedSet[str] = OrderedSet()
self.mutated_buffers: OrderedSet[str] = OrderedSet()
self.never_reuse_buffers: OrderedSet[str] = OrderedSet()
self.inplaced_to_remove: OrderedSet[str] = OrderedSet()
self.device_ops: DeviceOpOverrides = None # type: ignore[assignment]
self.wrapper_code: PythonWrapperCodegen = None # type: ignore[assignment]
from torch._inductor.extern_node_serializer import extern_node_json_serializer
self.extern_node_serializer: Callable[[list[ir.ExternKernelNode]], Any] = (
extern_node_serializer
if config.is_fbcode() and extern_node_serializer
else extern_node_json_serializer
)
self.current_node: torch.fx.Node = None # type: ignore[assignment]
self.lists: dict[str, list[str]] = {}
self.mutated_inputs: OrderedSet[str] = OrderedSet()
self.mutated_input_idxs: list[int] = []
self.name_to_buffer: dict[str, ir.Buffer] = {}
self.name_to_users: defaultdict[str, list[ir.IRNode]] = defaultdict(list)
self.name_to_op: dict[str, ir.Operation] = {}
self.creation_time = time.time()
self.name = name # type: ignore[assignment]
self.cpp_wrapper = cpp_wrapper
self.fx_wrapper = fx_wrapper
# record multi_kernel choice for cpp_wrapper so the second pass knows
# which sub-kernel is picked. Copy cpp_wrapper to another variable
# since cpp_wrapper flag is OrderedSet to false for the first pass of codegen.
self.record_multi_kernel_choice = cpp_wrapper
self.multi_kernel_to_choice: dict[str, str] = {}
self.aot_mode = aot_mode
self.graph_id = graph_id
self.post_grad_graph_id = next(_post_grad_graph_counter)
self.scheduler: torch._inductor.scheduler.Scheduler = None # type: ignore[assignment]
# record intermediate results for input of UsedDefinedTritonKernels
# This will be used if autotuning is done in one pass.
self.autotuning_inputs: Optional[list[torch.Tensor]] = None
self.autotuning_mapping: Optional[dict[str, dict[str, int]]] = None
self.autotuning_grids: Optional[dict[str, Any]] = None
# current_device is set only during codegen of a device-specific kernel
# a graph can have many devices
self.current_device: Optional[torch.device] = None
self.nodes_prefer_channels_last = (
self.find_nodes_prefer_channels_last() if self.layout_opt else OrderedSet()
)
self._warned_fallback = OrderedSet(["aten.convolution_backward"])
self.user_visible_output_strides = get_user_visible_output_strides(gm.graph)
mark_nodes_dislike_padding(gm.graph, self.user_visible_output_strides)
self.cache_key: str = "" # This is the cache key for the compiled artifact
self.cache_path: str = "" # This is the path in the filesystem where the compiled artifact is stored
self.cache_linemap: list[
tuple[int, str]
] = [] # This is the linemap used by the profiler to mark custom compiled kernels getting run
# Used if lowering encounters cases where cudagraphs are not supported
self.disable_cudagraphs_reason: Optional[str] = None
# only keeping one node per device for stack trace purposes
self.device_node_mapping: dict[torch.device, torch.fx.Node] = {}
self.orig_gm: torch.fx.GraphModule = gm.__copy__()
for k, v in self.orig_gm.named_buffers():
self.named_buffers[k] = v
for k, v in self.orig_gm.named_parameters():
self.named_parameters[k] = v
self.dynamo_flat_name_to_original_fqn = self.module.meta.get( # type: ignore[operator, union-attr]
"dynamo_flat_name_to_original_fqn", {}
)
self.allocated_constant_name: dict[str, str] = (
const_module.allocated_constant_name if const_module is not None else {}
)
init_backend_registration()
self.get_backend_features = functools.lru_cache(None)(get_backend_features)
self.effectful_ops: dict[_EffectType, ir.Buffer] = {}
# Track the buffers that we know is unaligned
# This can either be a graph input or the output of fallback
# kernels.
self.unaligned_buffers: OrderedSet[str] = OrderedSet()
self.no_fuse_buffer_names: OrderedSet[str] = OrderedSet()
self.low_precision_codegen_ops: OrderedSet[str] = OrderedSet()
# more aggressive prologue fusion
self.invoke_quant_ops: OrderedSet[str] = OrderedSet()
# Below field is related to printing debug intermediate tensor values info for debugging
self.all_codegen_kernel_names: OrderedSet[str] = OrderedSet()
# state used by for KernelArgs.workspace
self.workspace_id = itertools.count()
# track the current placeholder index that we are processing
self.placeholder_idx = -1
self.bw_donated_idxs = get_donated_idxs()
# Cache for dep size hints to avoid expensive recomputation
self.dep_size_hint_cache: dict[Dep, int] = {}
def freeze_runtime_asserts(self) -> None:
self._shape_env.freeze_runtime_asserts()
def symbolic_sizes_strides(
self, ex: torch.Tensor
) -> tuple[Sequence[Union[int, Expr]], Sequence[Union[int, Expr]]]:
"""
Support dynamic shapes and dynamic strides by assigning variables
to each dimension. We duck-shape tensors, so if two tensors
have the same size they get assigned the same symbolic variable.
"""
if self.reuse_shape_env:
return convert_shape_to_inductor(ex.size()), convert_shape_to_inductor(
ex.stride()
)
else:
from torch._dynamo.source import ConstantSource
# TODO: this should not be needed once #93059 lands
# https://github.com/pytorch/pytorch/pull/94031#discussion_r1096044816
# TODO: make a dedicated UnknownSource for this?
# NB: This is using the legacy default behavior from
# create_symbolic_sizes_strides_storage_offset but we hope we can
# just delete this entirely
source = ConstantSource(
f"__inductor_unknown_tensor_{len(self._shape_env.var_to_val)}"
)
(
size,
stride,
_,
) = self._shape_env.create_symbolic_sizes_strides_storage_offset(
ex,
source,
)
r_size = [i.node.expr if isinstance(i, torch.SymInt) else i for i in size]
r_stride = [i.node.expr if isinstance(i, torch.SymInt) else i for i in stride]
return r_size, r_stride
def static_sizes_strides(
self, ex: torch.Tensor
) -> tuple[list[sympy.Expr], list[sympy.Expr]]:
"""
Primarily used to weights
"""
size = [sympy.Integer(i) for i in ex.size()]
stride = [sympy.Integer(i) for i in ex.stride()]
return size, stride
def get_allocation_size(
self,
node: Union[
ir.TensorBox, ir.StorageBox, ir.Buffer, WorkspaceArg, ir.TorchBindObject
],
) -> Sequence[Expr]:
if isinstance(node, ir.TensorBox):
node = node.data # type: ignore[assignment]
if isinstance(node, ir.StorageBox):
node = node.data # type: ignore[assignment]
if (
isinstance(node, ir.ComputedBuffer)
and node.name in self.buffer_to_padded_size
):
# pyrefly: ignore [index-error]
return self.buffer_to_padded_size[node.name]
else:
return node.get_size()
def get_allocation_storage_size(
self, node: Union[ir.Buffer, WorkspaceArg, ir.TorchBindObject]
) -> Expr:
layout = node.get_layout()
size = self.get_allocation_size(node) # consider inplace padding
stride = layout.stride
offset = layout.offset
return compute_required_storage_length(size, stride, offset) # type: ignore[arg-type]
def has_feature(
self,
device: Union[torch._inductor.ir.IRNode, device, None],
feature: BackendFeature,
) -> bool:
assert isinstance(feature, BackendFeature), feature
return feature in self.get_backend_features(get_device_type(device))
def get_dep_size_hint(self, dep: Dep) -> int:
"""
Get the size hint for a dependency with caching to avoid expensive recomputation.
"""
if dep not in self.dep_size_hint_cache:
res = 0
try:
if not dep.has_unbacked_symbols():
res = dep.numbytes_hint()
except KeyError:
# In at least one test (test/inductor/test_torchbind.py) we
# create a StarDep that doesn't exist in the graph and calling
# `has_unbacked_symbols()` throws an error.
pass
self.dep_size_hint_cache[dep] = res
return self.dep_size_hint_cache[dep]
def get_current_device_or_throw(self) -> torch.device:
if device := self.current_device:
return device
else:
raise RuntimeError("No current device")
@contextlib.contextmanager
def set_current_device(self, device: torch.device) -> Iterator[None]:
prior = self.current_device
self.current_device = device
try:
yield
finally:
self.current_device = prior
def get_training_phase(self) -> str:
if self.is_inference:
return "inference"
if self.is_backward:
return "backward"
return "forward"
@staticmethod
def decide_layout_opt(gm: GraphModule, *, is_inference: bool) -> bool:
"""
Decide if we should enable layout optimization for this graph based on
heuristics.
"""
if not config.layout_optimization:
return False
if config.force_layout_optimization:
return True
conv_nodes = [
n for n in gm.graph.nodes if n.target is torch.ops.aten.convolution.default
]
nconv = len(conv_nodes)
if nconv == 0:
return False
# For cpu backend and mkldnn enabled, we always use channels_last for better performance.
if (
torch.backends.mkldnn.enabled
and torch.backends.mkldnn.is_available()
and all(
n.args[idx].meta["val"].device.type in SUPPORTED_MKLDNN_DEVICES
for n in conv_nodes
for idx in [0, 1]
)
):
return True
# Following models are skipped due to this:
# jx_nest_base
# volo_d1_224
if len(list(gm.graph.nodes)) >= 300 * nconv:
log.debug("Skipped layout opt because only a few conv")
return False
if any(
has_free_symbols(n.args[idx].meta["val"])
for n in conv_nodes
for idx in [0, 1]
):
log.debug(
"See perf regression with dynamic shape. Follow up in https://github.com/pytorch/pytorch/issues/102670"
)
return False
def is_grouped(n: Any) -> bool:
meta_val = n.args[1].meta["val"] # type: ignore[union-attr, operator]
assert isinstance(meta_val, torch.Tensor)
return n.args[-1] > 1 and meta_val.size(1) > 1 # type: ignore[union-attr, operator]
def is_in_out_channel(n: torch.fx.Node) -> bool:
return (
n.args[1].meta["val"].size(0) * 2 <= n.args[1].meta["val"].size(1) # type: ignore[union-attr, operator]
and n.args[1].meta["val"].size(2) > 1 # type: ignore[union-attr, operator]
)
def is_small_channel(n: torch.fx.Node) -> bool:
return (
n.args[1].meta["val"].size(0) <= 64 # type: ignore[union-attr, operator]
and n.args[1].meta["val"].size(1) <= 64 # type: ignore[union-attr, operator]
)
# only grouped convolutions benchmarked as slower in conv samples for inference only
if is_inference:
flop_counts: dict[str, float] = defaultdict(float)
for node in conv_nodes:
counted_flops = count_flops_fx(node)
if counted_flops is None:
continue
if is_grouped(node):
node_type = "grouped"
elif is_small_channel(node):
node_type = "small"
elif is_in_out_channel(node):
node_type = "in_out"
else:
node_type = "default"
flop_counts[node_type] += counted_flops
else:
log.debug("Conv inputs meta not found")
# average benchmarked channels last speedup / slowdown, < 1 is speedup.
# taken from the set of convolution inputs in benchmarks/dynamo/microbenchmarks/operator_inp_logs/torchbench_train/
# To regenerate these numbers follow https://gist.github.com/eellison/55d7a6ed6f39829d68ac56f95f4df5bb
GROUPED_MULTIPLIER = 1.358
DEFAULT_MULTIPLIER = 0.823
IN_OUT_MULTIPLIER = 0.725
SMALL_MULTIPLIER = 0.783
total_flops = sum(flop_counts.values())
# TODO - get different values per hardware
weighted_flops = (
flop_counts["grouped"] * GROUPED_MULTIPLIER
+ flop_counts["small"] * SMALL_MULTIPLIER
+ flop_counts["in_out"] * IN_OUT_MULTIPLIER
+ flop_counts["default"] * DEFAULT_MULTIPLIER
)
do_layout_opt = weighted_flops <= total_flops
if not do_layout_opt:
log.debug(
"Skipped layout opt in inference because weighted flops indicate slowdown, default: %d, channels last: %d",
total_flops,
weighted_flops,
)
return do_layout_opt
# Channels last layout can dramatically hurt grouped conv perf. E.g.
# Conv with arguments like
# {"input_shape": [32, 224, 112, 112], "weight_shape": [224, 112, 3, 3],
# "stride": [2, 2], "padding": [1, 1], "groups": 2}
# slows down 31x using channels last..
# But a lot of timm models use depthwise separable convolution which will
# result in grouped convolution with in-channel size == 1.
# For those grouped convolution, channels last still helps a lot.
# E.g.
# Conv with arguments
# {"input_shape": [128, 58, 56, 56], "weight_shape": [58, 1, 3, 3],
# "stride": [2, 2], "padding": [1, 1], "groups": 58}
# get 1.86x speedup with channels last layout.
#
# The following heuristics skip using channels-last if the model contains
# grouped convolution with in-channels > 1.
if any(map(is_grouped, conv_nodes)):
log.debug(
"Skip layout opt because found grouped convolution with >1 in_channels!"
)
return False
# For some models that contain convolution with larger in-channel than out-channel, applying
# channels last hurts performance.
# Following models are skipped due to this:
# - pytorch_unet
# - phlippe_densenet (slightly worse)
# - Background_Matting (1.22x -> 0.821x)
# - pytorch_CycleGAN_and_pix2pix (1.597x -> 1.294x)
if any(map(is_in_out_channel, conv_nodes)):
log.debug(
"Skip layout opt because some convolutions have smaller out_channel"
)
return False
# Following models are skipped due to this:
# - functorch_maml_omniglot
if all(map(is_small_channel, conv_nodes)):
log.debug("Skip layout opt because all convolution channels are too small")
return False
return True
def qualify_name(self, name: str) -> str:
"""Prepend the given name with the graph name if any."""
if self.name is not None:
return f"{self.name}_{name}"
return name
def make_subgraph(
self,
gm: torch.fx.GraphModule,
example_inputs: list[torch.Tensor],
subgraph_name: str,
) -> SubgraphLowering:
"""
Make a subgraph of the current graph with all inherited parts, except
the graph module (`gm`) and `example_inputs`. The subgraphs are lowered
separately and lifted into a separate function in the parent output
wrapper code. The subgraph name is qualified by the parent graph's
name. Note that the lifting of subgraph is supported for python wrapper
only. For cpp wrapper, we inline the subgraphs in the parent wrapper.
"""
return SubgraphLowering(
parent=self,
gm=gm,
example_inputs=example_inputs,
shape_env=self._shape_env,
cpp_wrapper=self.cpp_wrapper,
aot_mode=self.aot_mode,
extern_node_serializer=self.extern_node_serializer,
is_inference=self.is_inference,
is_backward=self.is_backward,
name=self.qualify_name(subgraph_name),
)
def find_nodes_prefer_channels_last(self) -> OrderedSet[Node]:
"""
The rule to decide if an node prefer channels last is simple.
1. if it's input/output of a convolution
2. if one of its user prefers channels last
We have rule 1 because cudnn runs a faster convolution kernel for channels last inputs;
Rule 2 is also important. It makes sure that indirect inputs to convolution also prefers
channels last.
Consider the scenario: conv -> batch-norm -> relu -> conv
Without rule 2, batch-norm output may use a contiguous layout. That will cause 2 extra copies:
1. the output of batch-norm should be channels last initially since its input is a conv's output.
Forcing the batch-norm's output to be contiguous results in the first copy
2. The second conv's input is initially contiguous. This layout is propagated from the batch-norm's output.
We need convert it to channels last layout which results in the second copy.
With rule 2, we makes sure all the tensors in the chain uses channels last layout. So both copies
can be saved.
"""
last_conv = None
nodes_cannot_propagate = [torch.ops.aten.bmm.default]
output_set = OrderedSet[Node]()
for n in reversed(self.module.graph.nodes): # type: ignore[arg-type, union-attr]
if n.target is torch.ops.aten.convolution.default:
output_set.add(n)
if last_conv is None:
last_conv = n
continue
if n.target in nodes_cannot_propagate:
continue
for user in n.users:
if user in output_set:
output_set.add(n)
break
# need a second pass to add downstream nodes of those channel last nodes to the sets.
# This pass is especially needed to avoid mix-layout kernel inputs in backward pass.
#
# Let's say a conv-batchnorm 's output is passed to relu whose output is in turn returned
# from the fwd graph. Without this second pass, we will force relu's output to be contiguous.
# Then in the kernel in backward pass, the contiguous output of relu may be mix with other channels last
# tensors and passed to a kernel.
#
# This pass improve yolov3 training speedup from 1.116x (worse than disabling layout optimization speedup 1.196x) to 1.457x.
# It also improves dla102 training speedup from 1.240x (worse than disabling layout optimization speedup 1.523x) to 1.835x .
# This also helps the following models:
# - res2net101_26w_4s
# - res2net50_14w_8s
# - sebotnet33ts_256
for n in self.module.graph.nodes: # type: ignore[union-attr]
# layout propagation ends at last conv node, which will benefit vison transformers.
if last_conv is not None and n == last_conv:
break
if n in output_set:
for user in n.users:
if user.target in nodes_cannot_propagate:
continue
output_set.add(user)
return output_set
def warn_fallback(self, name: str) -> None:
if name not in self._warned_fallback:
self._warned_fallback.add(name)
perf_hint_log.info("Using FallbackKernel: %s", name)
def add_device_info(self, device: torch.device) -> None:
self.device_types.add(device.type)
if device.index is not None:
self.device_idxs.add(device.index)
if V.graph.current_node and device not in self.device_node_mapping:
self.device_node_mapping[device] = V.graph.current_node
@property
def fake_mode(self) -> torch._subclasses.fake_tensor.FakeTensorMode:
return V.fake_mode
def try_get_buffer(
self, buffer_name: str
) -> Optional[Union[ir.TensorBox, ir.Buffer, ir.TorchBindObject]]:
if buffer_name in self.name_to_buffer:
return self.name_to_buffer[buffer_name]
if buffer_name in self.graph_inputs:
return self.graph_inputs[buffer_name]
if buffer_name in self.constants:
data = V.graph.constants[buffer_name]
return ir.ConstantBuffer(
name=buffer_name,
layout=ir.FixedLayout(
data.device, data.dtype, *V.graph.static_sizes_strides(data)
),
)
return None
def add_symbol_graph_input(self, symbol: sympy.Expr) -> None:
raise RuntimeError("Should not be called for the main graph")
def get_buffer(
self, buffer_name: str
) -> Union[ir.TensorBox, ir.Buffer, ir.TorchBindObject]:
buf = self.try_get_buffer(buffer_name)
if buf is not None:
return buf
raise RuntimeError(f"Failed to find buffer matching name {buffer_name}")
def get_dtype(self, buffer_name: str) -> torch.dtype:
if buffer_name in self.constants:
return self.constants[buffer_name].dtype
# For a mutation op we should return the dtype of the buffer being mutated
if (
hasattr(self.scheduler, "mutation_real_name")
and buffer_name in self.scheduler.mutation_real_name
):
mutated_buf = self.scheduler.mutation_real_name[buffer_name]
if mutated_buf in self.name_to_buffer:
return self.name_to_buffer[mutated_buf].get_dtype()
if mutated_buf in self.graph_inputs:
return self.graph_inputs[mutated_buf].get_dtype()
if buffer_name in self.name_to_buffer:
return self.name_to_buffer[buffer_name].get_dtype()
if buffer_name in self.graph_inputs:
return self.graph_inputs[buffer_name].get_dtype()
m = re.match(r"(as_strided|reinterpret_tensor)\(([a-zA-Z0-9_]+),", buffer_name)
if m:
return self.get_dtype(m.group(1))
raise KeyError(f"could not find {buffer_name}")
def get_numel(self, buffer_name: str) -> Union[int, Expr]:
if buffer_name in self.constants:
return self.constants[buffer_name].numel()
if buffer_name in self.name_to_buffer:
buf = self.name_to_buffer[buffer_name]
if not buf.has_tensor_output():
return 1
return buf.get_numel()
if buffer_name in self.graph_inputs:
return self.graph_inputs[buffer_name].get_numel()
raise KeyError(f"could not find {buffer_name}")
def run(self, *args: Any) -> Any: # type: ignore[override]
with dynamo_timed("GraphLowering.run"):
return super().run(*args)
def register_operation(self, op: ir.Operation) -> str:
assert op.operation_name is None, f"Operation registered twice: {op}"
assert isinstance(op, ir.Operation)
name = self.qualify_name(f"op{len(self.operations)}")
self.operations.append(op)
self.name_to_op[name] = op
op.operation_name = name
return name
def register_buffer(self, buffer: ir.Buffer, *, set_name: bool = False) -> str:
name = self.qualify_name(f"buf{len(self.buffers)}")
self.buffers.append(buffer)
self.name_to_buffer[name] = buffer
device = buffer.get_device()
if (
# Skip empty CPU tensor so that CUDA graphs can succeed, see https://github.com/pytorch/pytorch/pull/114144
device is not None
and not (
isinstance(buffer, ir.ComputedBuffer)
and buffer.is_zero_elements()
and device == torch.device("cpu")
)
):
self.add_device_info(device)
if set_name:
buffer.name = name
return name
def register_operation_list(self, operation_names: list[str]) -> str:
name = self.qualify_name("list_" + "_".join(operation_names))
self.lists[name] = operation_names
return name
def register_users_of(
self, node_output: Union[Iterable[ir.IRNode], ir.IRNode]
) -> None:
def register(value: Union[Iterable[ir.IRNode], ir.IRNode]) -> None:
if isinstance(value, (list, tuple)):
for x in value:
register(x)
if isinstance(value, ir.TensorBox):
for read_name in value.get_read_names():
self.name_to_users[read_name].append(value)
register(node_output)
def mark_buffer_mutated(self, name: str) -> None:
"""
When a buffer is mutated we need to make sure all the reads to
the old version are realized before the mutation happens.
"""
assert isinstance(name, str)
self.mutated_buffers.add(name)
if name not in self.name_to_users:
return
for user in self.name_to_users[name]:
user.realize()
def get_original_value_of_constant(self, name: str) -> torch.Tensor:
"""
In AOTI, module buffers may have been mutated during the tracing and compilation.
Thus we need to read from previously stored original buffers, to make sure the
generated model.so uses correct initial values.
"""
assert name in self.allocated_constant_name and name in self.constants, (
"Can not find the original value for " + name
)
orig_name = get_cloned_parameter_buffer_name(self.allocated_constant_name[name])
return (
self.module.meta[orig_name] # type: ignore[index]
if orig_name in self.module.meta # type: ignore[operator]
else self.constants[name]
)
def allocate_non_dup_const_name(
self, name: Optional[str], data: Union[Tensor]
) -> str:
if not config.aot_inductor.use_runtime_constant_folding:
for constant_name, value in self.constants.items():
if is_same_tensor(data, value):
return constant_name
if name is None:
name = f"constant{len(self.constants)}"
orig_name = name
if name[0].isdigit():
name = f"constant_{name}"
name = self.qualify_name(name)
# We may generate a var name for each constant in the codegen.
# Let's only keep sane characters.
prefix = normalize_name(name)
name = prefix
cnt = 0
while name in self.constants:
name = f"{prefix}_{cnt}"
cnt += 1
self.constants[name] = data
self.constant_reprs[name] = (
f"{data.device!r} {data.dtype!r} "
f"{tuple(data.size())!r} {tuple(data.stride())!r} "
f"{hash(data):x}"
)
self.allocated_constant_name[name] = orig_name # type: ignore[assignment]
return name
def add_tensor_constant(
self, data: Tensor, name: Optional[str] = None
) -> Union[TensorBox, ir.ShapeAsConstantBuffer]:
new_name = self.allocate_non_dup_const_name(name, data)
return TensorBox.create(
ir.ConstantBuffer(
name=new_name,
layout=FixedLayout(
data.device, data.dtype, *self.static_sizes_strides(data)
),
)
)
def constant_name(self, name: str, device_override: Optional[torch.device]) -> str:
"""
We AOT copy constants to the devices they are needed on.
If device_override doesn't match the constant's device, then
copy it and return a different name.
"""
if self.constants[name].device == device_override or device_override is None:
return name
with torch.utils._python_dispatch._disable_current_modes():
# caller might have OrderedSet fake tensor mode which will create a fake tensor
# when calling .to, so unset modes here
non_dup_const_name = self.allocate_non_dup_const_name(
f"{name}_{device_override.type}{device_override.index or 0}",
self.constants[name].to(device_override),
)
assert non_dup_const_name in self.constants, (
f"{non_dup_const_name} should be in V.graph.constants already"
)
# register device-copied buffers and parameters to graph as well
# to codegen correct torch::aot_inductor::ConstantType for them rather than `Unknown`
if any(
name == normalize_name(buffer_name)
for buffer_name in self.named_buffers
):
self.named_buffers[non_dup_const_name] = self.constants[
non_dup_const_name
]
if any(
name == normalize_name(param_name)
for param_name in self.named_parameters
):
self.named_parameters[non_dup_const_name] = self.constants[
non_dup_const_name
]
return non_dup_const_name
# pyrefly: ignore [bad-override]
def placeholder(
self,
target: str, # type: ignore[override]
args: tuple[object], # type: ignore[override]
kwargs: dict[str, object],
) -> Union[Expr, TensorBox, None]:
self.placeholder_idx += 1
example = super().placeholder(target, args, kwargs) # type: ignore[arg-type]
target = self.qualify_name(target)
if isinstance(example, SymTypes):
# TODO fix partitioning issue and re-enable for backward
# https://github.com/pytorch/pytorch/issues/155468.
if not V.graph.is_backward:
expr = _get_placeholder_expr(example.node)
else:
expr = example.node.expr
self.graph_inputs[target] = expr
self.graph_input_names.append(target)
return expr
elif isinstance(example, (int, bool, float)):
expr = sympy.sympify(example)
self.graph_inputs[target] = expr
self.graph_input_names.append(target)
return expr
elif isinstance(example, FakeScriptObject):
obj = TorchBindObject(name=target, value=example)
self.graph_inputs[target] = obj
self.graph_input_names.append(target)
return obj
elif example is None:
self.graph_input_names.append(target)
return None
if isinstance(example, BackwardState):
# Ignored arg, must be unused
# Alternately we could filter this out in AotAutograd
self.graph_input_names.append(target)
return None
# See note: Note: [Generator arguments in AOTDispatcher]
elif isinstance(example, torch.Generator):
assert len(V.graph.current_node.users) == 1 and next(
iter(V.graph.current_node.users)
).target in (
torch._prims.rng_prims.graphsafe_run_with_rng_state,
torch.ops.higher_order.invoke_subgraph,
)
gen = ir.GeneratorState(name=target, device=example.device)
self.graph_inputs[target] = gen # type: ignore[assignment]
self.graph_input_names.append(target)
return gen
assert isinstance(example, torch.Tensor), example
# todo(chilli): We can remove the last check once we turn buffers into
# static shape tensors. That's a hack to workaround Inductor believing
# the buffer should be static but us passing in a fake tensor with
# symbolic shapes.
if not example._has_symbolic_sizes_strides:
# the first N inputs are weights
sizes, strides = self.static_sizes_strides(example)
else:
sizes, strides = self.symbolic_sizes_strides(example) # type: ignore[assignment]
if (
self.is_backward
and self.bw_donated_idxs
and self.placeholder_idx in self.bw_donated_idxs
):
tensor = TensorBox.create(
DonatedBuffer(
name=target,
layout=FixedLayout(example.device, example.dtype, sizes, strides),
)
)
else:
# TODO(jansel): handle input aliasing
tensor = TensorBox.create(
InputBuffer(
name=target,
layout=FixedLayout(example.device, example.dtype, sizes, strides),
)
)
self.graph_inputs[target] = tensor
self.graph_input_names.append(target)
self.graph_inputs_original[target] = tensor.data.data # type: ignore[union-attr]
if self.current_node.users: # cudagraphs should work with an unused CPU input
self.add_device_info(example.device)
# Note: [Input Alignment handling in Inductor]
# Alignment matters for generating efficient code. Some operations,
# e.g. vectorized loads, can only be performed on aligned inputs.
#
# But if we codegen assuming aligned inputs and then get unaligned
# inputs at runtime, then we are forced to clone - which is bad for
# both perf and memory usage.
#
# One option would be to guard on storage_offset%ALIGNMENT, and then
# codegen based on this. But storage_offset guards turned out to be
# expensive and cause recompiles; Instead, we're generating code
# based on the alignment of the example input without guarding.
with maybe_get_suppress_shape_guards_ctx():
if not should_assume_input_aligned(example):
self.unaligned_buffers.add(target)
return tensor
def call_function(self, target: Callable, args: Any, kwargs: dict[str, Any]) -> Any:  # type: ignore[type-arg, override]
    """Lower a single FX ``call_function`` node to Inductor IR.

    Dispatch order:
      1. ``operator.getitem`` on containers goes straight to the base
         interpreter (it is pure Python indexing, not a kernel).
      2. Pattern-matcher passthrough lowerings are invoked directly.
      3. If ``target`` has no registered lowering, either register a fallback
         kernel (allow-listed ops, or any op when ``config.implicit_fallbacks``
         is set) or raise a ``MissingOperator*`` error that tells the user how
         to proceed.
      4. Otherwise apply any registered layout constraints to the arguments,
         call the lowering, and propagate mutations made on constrained
         copies back to the original inputs.

    Raises:
        LoweringException: wrapping any exception raised by the lowering
            itself (original traceback preserved, context suppressed).
    """
    if target is operator.getitem and isinstance(args[0], (list, tuple, dict)):
        return super().call_function(target, args, kwargs)

    # hasattr on OpOverloadPacket is slow, check isinstance first
    if not isinstance(target, torch._ops.OpOverloadPacket) and hasattr(
        target, "_inductor_lowering_function"
    ):
        # passthrough lowerings from .pattern_matcher
        return target(*args, **kwargs)

    if target not in lowerings:
        assert isinstance(target, torch._ops.OpOverload), (
            f"{target} is not an OpOverload"
        )
        base_name = target.name().split(".")[0]
        if base_name in FALLBACK_ALLOW_LIST:
            make_fallback(target, warn=False, override_decomp=True)
        elif config.implicit_fallbacks:
            # Choose the error type only for its message formatting below;
            # no exception is raised on this path.
            error = (
                MissingOperatorWithDecomp
                if get_decompositions([target])
                else MissingOperatorWithoutDecomp
            )
            log.info(
                "Creating implicit fallback for:\n%s",
                error.operator_str(target, args, kwargs),
            )
            tag: Optional[torch._C.Tag] = get_layout_constraint_tag(
                target, with_default=False
            )
            if (
                tag is None
                and torch._library.utils.is_builtin(target)
                and self.is_backward
            ):
                # for implicit fallback ATen ops during backward, if there
                # is no layout constraint tag, we conservatively require contiguous
                # input since some eager kernels do not
                # support non-contiguous inputs. Otherwise they may silently cause
                # accuracy problems. Check https://github.com/pytorch/pytorch/issues/140452
                # We only do this For ATen ops and for backward.
                #
                # TODO: should really switch to "needs_fixed_stride" constraint on these
                # and identify them one by one.
                decided_constraint = require_contiguous  # type: ignore[assignment]
            else:
                default_tag: torch._C.Tag = get_layout_constraint_tag(
                    target, with_default=True
                )
                decided_constraint = tag_to_layout_constraint(default_tag)

            make_fallback(target, layout_constraint=decided_constraint)

        elif get_decompositions([target]):
            # There isn't a good way to dynamically patch this in
            # since AOT Autograd already ran. The error message tells
            # the user how to fix it.
            raise MissingOperatorWithDecomp(target, args, kwargs)
        else:
            raise MissingOperatorWithoutDecomp(target, args, kwargs)

    try:
        log.debug(" via %s", lowerings[target])  # type: ignore[index]
        n = self.current_node
        layout_constraints = maybe_layout_constraints(target)
        if layout_constraints:
            old_args, old_kwargs = args, kwargs
            if layout_constraints is constrain_to_fake_tensors:
                # only constrain_to_fake_tensor if this exists.
                # otherwise, no constraints at all: the implication is
                # that this operator was inserted by a custom pass
                # so we'll give them the freedom.
                if "eager_input_vals" in n.meta:
                    fake_args, fake_kwargs = n.meta["eager_input_vals"]
                    # (fake_args, fake_kwargs) might not align with (args, kwargs).
                    # we need to normalize them based on the schema
                    assert isinstance(target, torch._ops.OpOverload)

                    def normalize(args: Any, kwargs: Any) -> tuple[Any, Any]:
                        # Resolve positional-vs-keyword mismatches via the op schema.
                        result = torch.fx.operator_schemas.normalize_function(
                            target, args, kwargs
                        )
                        assert result is not None
                        return result[0], result[1]

                    fake_args, fake_kwargs = normalize(fake_args, fake_kwargs)
                    args, kwargs = normalize(args, kwargs)
                    old_args, old_kwargs = normalize(old_args, old_kwargs)
                    args, kwargs = constrain_to_fake_tensors(
                        args, kwargs, fake_args, fake_kwargs
                    )
            else:
                args, kwargs = layout_constraints(n, *args, **kwargs)

        if "should_fallback" in n.meta:
            out = fallback_handler(target, add_to_fallback_set=False)(
                *args, **kwargs
            )
        else:
            out = lowerings[target](*args, **kwargs)  # type: ignore[index]

        if layout_constraints:
            # layout_constraints are allowed to make new copies of the inputs.
            # if they do, and if the target is mutable, then we need to
            # write the new values back into the original inputs.
            self.propagate_mutation(n, old_args, old_kwargs, args, kwargs)  # type: ignore[possibly-undefined]

        return out
    except Exception as e:
        raise LoweringException(e, target, args, kwargs).with_traceback(
            e.__traceback__
        ) from None
@staticmethod
def can_inline_constant(t: torch.Tensor) -> bool:
"""
True if this is a small constant attr that will be inlined.
"""
return len(t.shape) == 1 and t.shape[0] <= 8
# pyrefly: ignore [bad-override]
def get_attr(
    self,
    target: str,  # type: ignore[override]
    args: tuple[()],  # type: ignore[override]
    kwargs: dict[str, object],
) -> Union[
    Constant, TensorBox, ShapeAsConstantBuffer, ir.Subgraph, TorchBindObject
]:
    """Lower an FX ``get_attr`` node (a constant attribute of the module).

    Returns, depending on the attribute's runtime type:
      * ``ir.Subgraph`` for nested ``GraphModule``s (cached in
        ``self.seen_subgraphs`` so each subgraph is lowered once);
      * ``TorchBindObject`` for (fake or real) torchbind script objects;
      * a scalar ``Constant`` for 0-d tensors;
      * an inlined tensor literal for small 1-d tensors
        (see ``can_inline_constant``);
      * otherwise a registered tensor constant buffer.
    """
    # this is a constant
    value = getattr_recursive(self.module, target)  # type: ignore[arg-type]

    if isinstance(value, torch.fx.GraphModule):
        # Reuse the existing subgraph if we have seen it before already.
        if target in self.seen_subgraphs:
            return self.seen_subgraphs[target]
        out = ir.Subgraph(name=target, graph_module=value)
        self.seen_subgraphs[target] = out
        return out

    if isinstance(value, torch._C.ScriptObject):
        self.torchbind_constants[target] = value
        self.constant_reprs[target] = ""
        return TorchBindObject(name=target, value=value)
    elif isinstance(value, FakeScriptObject):
        self.torchbind_constants[target] = value
        self.constant_reprs[target] = ""
        return TorchBindObject(name=target, value=value)

    assert isinstance(value, torch.Tensor)
    # These configs / tensor properties force keeping the tensor as a real
    # constant buffer instead of inlining it into the generated code.
    if (
        config.aot_inductor.use_runtime_constant_folding
        or config.always_keep_tensor_constants
        or unsupported_output_tensor(value)
    ):
        return self.add_tensor_constant(value, target)

    with no_dispatch():
        if value.shape == ():
            # 0-d tensor: represent as a scalar constant.
            return Constant(
                value=value.item(), dtype=value.dtype, device=value.device
            )
        if self.can_inline_constant(value):
            log.debug("Inlining constant: %s ", str(target))
            # tensor lowering has constant inlining logic
            from .lowering import tensor

            return tensor(value.tolist(), dtype=value.dtype, device=value.device)

    return self.add_tensor_constant(value, target)
def call_module(self, target: Any, args: Any, kwargs: Any) -> NoReturn:
    """FX ``call_module`` nodes must not reach Inductor lowering.

    Raises:
        AssertionError: always; the message names the offending target so
            failures are diagnosable (the original bare ``raise
            AssertionError`` carried no context).
    """
    raise AssertionError(f"call_module is not supported; got target={target!r}")
def call_method(self, target: Any, args: Any, kwargs: Any) -> NoReturn:
    """FX ``call_method`` nodes must not reach Inductor lowering.

    Raises:
        AssertionError: always; the message names the offending target so
            failures are diagnosable (the original bare ``raise
            AssertionError`` carried no context).
    """
    raise AssertionError(f"call_method is not supported; got target={target!r}")
# pyrefly: ignore [bad-override]
def output(
    self,
    target: str,  # type: ignore[override]
    args: tuple[object],  # type: ignore[override]
    kwargs: dict[str, object],
) -> None:
    """Finalize the graph's outputs.

    Realizes every output, matches insignificant strides against the FX
    ``val`` metadata so AOT Autograd does not see spurious stride
    divergence, converts mutated graph inputs into explicit copies, stores
    the result in ``self.graph_outputs``, and finalizes buffer layouts.
    """
    result = super().output(target, args, kwargs)  # type: ignore[arg-type]
    if not isinstance(result, (tuple, list)):
        # nested subgraphs can have singleton outputs
        result = (result,)
    assert isinstance(result, (tuple, list)), type(result)
    assert all(
        isinstance(
            x,
            (
                TensorBox,
                ir.Constant,
                type(None),
                ir.ConstantBuffer,
                sympy.Expr,
                sympy.logic.boolalg.Boolean,
                int,
                ir.EffectfulKernel,
                ir.ShapeAsConstantBuffer,
            ),
        )
        for x in result
    ), result

    fx_node_args = V.graph.current_node.args[0]  # type: ignore[arg-type]
    if not isinstance(fx_node_args, (tuple, list)):
        # nested subgraphs can have singleton outputs
        fx_node_args = (fx_node_args,)
    result = [ir.ExternKernel.realize_input(x) for x in result]
    result_correct_strides = []

    assert len(fx_node_args) == len(result)
    for r, fx_node in zip(result, fx_node_args):
        if not isinstance(r, (ir.TensorBox, ir.BaseView)):
            result_correct_strides.append(r)
        elif isinstance(r.get_output_spec(), ir.CommBufferLayout):
            # Active references to persistent comm buffers are not allowed
            # outside of graphs
            result_correct_strides.append(ir.ExternKernel.copy_input(r))
        else:
            # AOT Autograd tries to detect stride divergence of inductor from output metadata.
            # Here, we try to avoid spurious divergence by matching insignificant strides such as
            # should have already been realized
            assert torch._inductor.ir.is_storage_and_layout(r)
            meta_strides = [
                s.node.expr if isinstance(s, torch.SymInt) else s
                for s in fx_node.meta["val"].stride()
            ]
            result_correct_strides.append(
                ir.try_match_insignificant_strides(r, meta_strides)
            )

    self.graph_outputs = result_correct_strides
    value: ir.IRNode
    for name, value in self.graph_inputs.items():
        if isinstance(value, TorchBindObject):
            continue
        assert isinstance(
            value, (TensorBox, sympy.Expr, torch._inductor.ir.GeneratorState)
        ), f"Unsupported inductor graph input type: {type(value)}"
        if not isinstance(value, TensorBox):
            continue
        value.realize()
        assert isinstance(value, TensorBox)
        # Unwrap TensorBox -> StorageBox -> underlying buffer to check
        # whether the input is still the original InputBuffer.
        value = value.data
        assert isinstance(value, ir.StorageBox)
        value_storage_box = value
        value = value.data
        if not isinstance(value, InputBuffer) or value.get_name() != name:
            # one of our inputs was mutated, need to turn that into a copy
            ir.MutationLayoutSHOULDREMOVE.realize_into(
                value, self.graph_inputs_original[name]
            )
            # replace output with mutated input
            try:
                ind = self.graph_outputs.index(value_storage_box)
                self.graph_outputs[ind] = self.graph_inputs_original[name]
            except ValueError:
                pass

    self.finalize()
    log.debug(
        "Force channels last inputs for %d conv for the current graph with id %d",
        self.num_channels_last_conv,
        self.graph_id if self.graph_id is not None else -1,
    )
def finalize(self) -> None:
    """Ask every registered buffer to pin down its final layout."""
    for buffer in self.buffers:
        buffer.decide_layout()
@contextmanager
def set_current_node(self, node: torch.fx.Node):  # type: ignore[no-untyped-def]
    """Temporarily point ``self.current_node`` at ``node``.

    The previous node is restored on exit, even if the body raises.
    """
    saved = self.current_node
    self.current_node = node
    try:
        yield
    finally:
        self.current_node = saved
@contextmanager
def set_current_wrapper_code(self) -> Iterator[None]:
    """Snapshot ``self.wrapper_code`` and restore it on exit.

    The body is free to swap in a different wrapper; whatever it does,
    the original object is put back afterwards.
    """
    saved = self.wrapper_code
    try:
        yield
    finally:
        self.wrapper_code = saved
def propagate_mutation(
    self,
    fx_node: torch.fx.Node,
    old_args: tuple[Any],
    old_kwargs: dict[str, Any],
    new_args: tuple[Any],
    new_kwargs: dict[str, Any],
) -> None:
    """Propagate mutations on new_args/new_kwargs back to old_args/old_kwargs.

    Assumes we may have cloned old_args/old_kwargs into new_args/new_kwargs
    and then called fx_node(*new_args, **new_kwargs).

    If fx_node mutates any of new_args/new_kwargs, and they are different from
    old_args/old_kwargs, then we need to update the original tensor.
    """
    assert len(old_args) == len(new_args)
    assert len(old_kwargs) == len(new_kwargs)

    # User-defined Triton kernels carry their mutation info separately;
    # ask the HOP machinery which tensor kwargs were mutated.
    if fx_node.target is torch.ops.higher_order.triton_kernel_wrapper_mutation:
        kwargs = fx_node.kwargs["kwargs"]
        assert isinstance(kwargs, dict)
        mutated = torch._higher_order_ops.triton_kernel_wrap.get_mutated_tensors(
            old_kwargs["kernel_idx"],
            old_kwargs["constant_args_idx"],
            {
                k: v.meta["val"] if isinstance(v, torch.fx.Node) else v
                for k, v in kwargs.items()
            },
            old_kwargs["tma_descriptor_metadata"],
        )
        for name in mutated:
            old_arg = old_kwargs["kwargs"][name]
            new_arg = new_kwargs["kwargs"][name]
            if old_arg is new_arg:
                continue

            # Emit a copy_ to write mutated values back into the original.
            self.call_function(torch.ops.aten.copy_.default, (old_arg, new_arg), {})
        return

    assert isinstance(fx_node.target, torch._ops.OpOverload)

    def maybe_propagate(
        schema_arg: torch._C.Argument, old_arg: ir.IRNode, new_arg: ir.IRNode
    ) -> None:
        # Only arguments the op schema marks as written-to need copy-back.
        if old_arg is new_arg:
            return
        if schema_arg.alias_info is not None and schema_arg.alias_info.is_write:
            # The lowering for copy_ is smart enough to "replace" old_arg with
            # new_arg in all future uses so a copy_ kernel never gets emitted.
            # old_arg, new_arg may be immutable_list
            if isinstance(old_arg, ir.IRNode):
                old_arg = (old_arg,)  # type: ignore[assignment]
                new_arg = (new_arg,)  # type: ignore[assignment]

            for old_arg_item, new_arg_item in zip(old_arg, new_arg):  # type: ignore[call-overload]
                if old_arg_item is new_arg_item:
                    continue
                self.call_function(
                    torch.ops.aten.copy_.default, (old_arg_item, new_arg_item), {}
                )

    schema = fx_node.target._schema
    for idx, (old_arg, new_arg) in enumerate(zip(old_args, new_args)):
        schema_arg = schema.arguments[idx]
        maybe_propagate(schema_arg, old_arg, new_arg)

    schema_kwargs = {arg.name: arg for arg in schema.arguments}
    for key in old_kwargs:
        old_arg = old_kwargs[key]
        new_arg = new_kwargs[key]
        schema_arg = schema_kwargs[key]
        maybe_propagate(schema_arg, old_arg, new_arg)
def run_node(self, n: torch.fx.Node) -> object:
    """Lower one FX node and post-process the resulting IR.

    Beyond delegating to the per-op handlers (placeholder / call_function /
    get_attr / output), this method:
      * routes certain nodes to fallback kernels (unsupported dtypes,
        compiler-bisector disables, lite mode, magic methods on SymInts);
      * enforces stride contracts on outputs and on inputs of as_strided-like
        ops so Inductor matches eager strides;
      * applies layout preferences (e.g. channels-last for conv-adjacent
        nodes) and decides when to realize IR nodes to avoid excessive
        inlining;
      * validates that every unbacked symbol the FX node binds is actually
        defined by the newly created buffers/operations, then emits deferred
        runtime asserts for them.
    """
    def debug(msg: str) -> None:
        log.debug("lowering %s %s", LazyString(n.format_node), msg)  # type: ignore[arg-type]

    from torch._inductor.compiler_bisector import CompilerBisector

    # Watermarks let us identify which buffers/operations this node created.
    buffer_watermark = len(self.buffers)
    operation_watermark = len(self.operations)

    # origins: OrderedSet[Union[Node, ir.IRNode]] = OrderedSet([n])
    origins: OrderedSet[Any] = OrderedSet([n])
    is_call_function = n.op == "call_function"
    if is_call_function:
        args, kwargs = self.fetch_args_kwargs_from_env(n)
        origins |= gather_origins(args, kwargs)
    with (
        ir.IRNode.current_origins(origins),
        self.set_current_node(n),
        V.set_current_node(n),
    ):
        if (
            n.op == "call_function"
            # this path only for built-in operators
            and n.target
            and isinstance(n.target, torch._ops.OpOverload)
            and torch._library.utils.is_builtin(n.target)
            and (
                fallback_node_due_to_unsupported_type(n)
                or CompilerBisector.disable_subsystem(
                    "inductor", "lowerings", lambda: repr(n)
                )
            )
        ):
            debug("fallback_handler")
            result = fallback_handler(n.target, add_to_fallback_set=False)(
                *args,  # type: ignore[possibly-undefined]
                **kwargs,  # type: ignore[possibly-undefined]
            )
        elif (
            n.op == "call_function"
            and isinstance(
                n.target, (torch._ops.OpOverload, torch._ops.HigherOrderOperator)
            )
            and should_fallback_by_default(n)
        ):
            # this path supports fallback due to inductor lite mode. It supports
            # both OpOverload and HOPs (e.g., triton_kernel_wrapper_functional).
            debug("fallback_handler")
            result = fallback_handler(n.target, add_to_fallback_set=False)(
                *args,  # type: ignore[possibly-undefined]
                **kwargs,  # type: ignore[possibly-undefined]
            )
        elif (
            n.op == "call_function"
            and n.target is torch.ops.higher_order.triton_kernel_wrapper_mutation
            and config.triton_kernel_default_layout_constraint != "flexible_layout"
        ):
            debug("user_defined_triton_kernel_layout_constraints")
            if (
                config.triton_kernel_default_layout_constraint
                == "needs_fixed_stride_order"
            ):
                old_args = args  # type: ignore[possibly-undefined]
                old_kwargs = kwargs  # type: ignore[possibly-undefined]

                if eager_input_vals := n.meta.get("eager_input_vals"):
                    inp_args = eager_input_vals[0]
                    inp_kwargs = eager_input_vals[1]
                    args, kwargs = constrain_to_fake_tensors(
                        # pyrefly: ignore [unbound-name]
                        args,
                        # pyrefly: ignore [unbound-name]
                        kwargs,
                        inp_args,
                        inp_kwargs,
                    )
                else:
                    args, kwargs = constrain_to_fx_strides(n, *args, **kwargs)  # type: ignore[index]
                result = self.call_function(n.target, args, kwargs)  # type: ignore[arg-type]
                # Constraining may have cloned inputs; write mutations back.
                self.propagate_mutation(n, old_args, old_kwargs, args, kwargs)  # type: ignore[possibly-undefined]
            else:
                raise RuntimeError(
                    f"Unknown triton_kernel_default_layout_constraint: {config.triton_kernel_default_layout_constraint}"
                )
        elif is_magic_method(n.target):
            # TODO: this is sus, it probably should be handled in the
            # lowerings themselves similarly to sym_size/sym-stride
            # https://github.com/pytorch/pytorch/issues/127789
            debug("is_magic_method")
            if isinstance(
                n.meta["val"], (torch.SymInt, torch.SymFloat, torch.SymBool)
            ):
                result = n.meta["val"].node.expr
            else:
                result = super().run_node(n)
        else:
            debug("")
            result = super().run_node(n)

        # require the same stride order for dense outputs,
        # 1. user-land view() will not throw because inductor
        # output different strides than eager
        # long term the solution is to make view() always succeed
        # with infallible strides.
        # 2: as_strided ops, we need make sure its input has same size/stride with
        # eager model to align with eager behavior.
        as_strided_ops = [
            torch.ops.aten.as_strided.default,
            torch.ops.aten.as_strided_.default,
            torch.ops.aten.as_strided_scatter.default,
            torch.ops.aten.resize.default,
            torch.ops.aten.resize_as.default,
        ]
        is_output = any(user.op == "output" for user in n.users)
        is_user_visible = n in self.user_visible_output_strides
        is_input_for_as_strided = any(
            user.target in as_strided_ops for user in n.users
        )

        if n.meta.get("inductor_realize_to_strides", False) and isinstance(
            result, TensorBox
        ):
            result.realize()
            strides = n.meta["val"].stride()
            sym_strides = torch._inductor.utils.any_is_symbolic(*strides)
            if result.maybe_get_stride() != strides and not sym_strides:
                stride_order = ir.get_stride_order(strides)
                result = ir.ExternKernel.require_stride_order(result, stride_order)
        if (
            is_output
            and isinstance(result, TensorBox)
            and isinstance(result.data, ir.BaseView)
        ):
            # Realize so that outputs are correctly aliased
            result.realize()

        if (is_output or is_input_for_as_strided) and isinstance(
            n.meta["val"], torch.Tensor
        ):
            if is_user_visible:
                strides = self.user_visible_output_strides.get(n)
            else:
                strides = n.meta["val"].stride()

            if strides is not None and len(strides) > 0:
                allow_padding = (
                    config.pad_outputs or not is_user_visible
                ) and not is_input_for_as_strided
                dense = torch._prims_common.is_non_overlapping_and_dense(
                    n.meta["val"]
                )
                unbacked_symbols_in_strides = (
                    len(free_unbacked_symbols(strides)) > 0
                )
                # Prefer channels-last strides for 4-d dense tensors flagged
                # by the layout-opt pass, unless the strides are user-visible.
                if (
                    not unbacked_symbols_in_strides
                    and dense
                    and len(result.get_size()) == 4
                    and n in self.nodes_prefer_channels_last
                    and not is_user_visible
                    and not is_input_for_as_strided
                ):
                    strides = ir.FlexibleLayout.stride_ordered_for_memory_format(
                        result.get_size(), torch.channels_last
                    )
                if not unbacked_symbols_in_strides and len(strides):
                    # To avoid converting possible view ops to a copy kernel, we use the previous
                    # require_exact_strides to handle views. But ultimately it's better to require
                    # the right strides at the tensor definition.
                    if n.meta["val"]._is_view() or isinstance(
                        # pyrefly: ignore [missing-attribute]
                        result.data,
                        ir.BaseView,
                    ):
                        result = ir.ExternKernel.require_stride_order(
                            result,
                            ir.get_stride_order(strides),
                            allow_padding=allow_padding,
                        )
                    else:
                        # Fix for 0-d tensors: if result size is empty,
                        # strides should also be empty
                        if len(result.get_size()) == 0 and len(strides) > 0:
                            strides = []
                        else:
                            strides = [
                                s.node.expr if isinstance(s, torch.SymInt) else s
                                for s in strides
                            ]
                        result = ir.ExternKernel.require_exact_strides(
                            result, strides, allow_padding=allow_padding
                        )

        # Realize if (1) any user need inputs realized, or (2) there is
        # already too many reads and rematerializing can be bad.
        num_users = len(OrderedSet(n.users))
        if num_users > 1 and isinstance(result, TensorBox):
            for user in n.users:
                if user.target in needs_realized_inputs:
                    result.realize_hint()
                    # This inclusion is somewhat controversial (from
                    # discussion between Horace, Natalia, and Elias).
                    # Currently, it's not very clear why this is helpful.
                    # The general idea here is that even though a node may
                    # have FlexibleLayout, we still often *treat* it as if
                    # it was contiguous. This appears to sometimes result in
                    # suboptimal behavior.
                    #
                    # When we do a better job selecting layout, we should
                    # revisit this.
                    need_fixed_layout = [
                        torch.ops.aten.convolution_backward.default,
                        torch.ops.aten.mm.default,
                        torch.ops.aten._int_mm.default,
                    ]
                    need_fixed_channels_last_layout = []
                    if not self.layout_opt:
                        need_fixed_layout.append(torch.ops.aten.convolution.default)
                    if torch._C._has_mkldnn:
                        need_fixed_layout += [
                            torch.ops.mkldnn._linear_pointwise.default,
                            torch.ops.mkldnn._linear_pointwise.binary,
                            torch.ops.aten.mkldnn_rnn_layer.default,
                            torch.ops.onednn.qlinear_pointwise.default,
                            torch.ops.onednn.qlinear_pointwise.tensor,
                            torch.ops.onednn.qlinear_pointwise.binary,
                            torch.ops.onednn.qlinear_pointwise.binary_tensor,
                        ]
                        need_fixed_channels_last_layout += [
                            torch.ops.mkldnn._convolution_pointwise.default,
                            torch.ops.mkldnn._convolution_pointwise.binary,
                            torch.ops.mkldnn._convolution_pointwise_.binary,
                            torch.ops.mkldnn._convolution_transpose_pointwise.default,
                            torch.ops.onednn.qconv_pointwise.default,
                            torch.ops.onednn.qconv2d_pointwise.binary,
                        ]
                    if torch._C.has_mkl:
                        need_fixed_layout += [torch.ops.mkl._mkl_linear.default]
                    if user.target in need_fixed_layout:
                        result = ir.ExternKernel.require_stride_order(
                            result,
                            ir.get_stride_order(n.meta["val"].stride()),
                            allow_padding=True,
                        )
                    if (
                        user.target in need_fixed_channels_last_layout
                        and n is user.args[0]
                    ):
                        result = ir.ExternKernel.require_stride_order(
                            result,
                            ir.get_stride_order(
                                make_channels_last_strides_for(n.meta["val"].shape)
                            ),
                        )
                if user.op == "output":
                    # pyrefly: ignore [missing-attribute]
                    if isinstance(result.data.data, (Pointwise, Reduction)):
                        result.realize()

            # TODO(jansel): introduce a store vs inline choice
            result.mark_reuse(len(n.users))

        # Realize if the IRNode already has accumulated lots of reads
        if isinstance(result, TensorBox) and result.has_exceeded_max_reads():
            # Prevent excessive accumulation in a computed buffer, when
            # there are multiple branches each with small number of memory
            # reads, but they converge to a user.
            result.realize_hint()

        # Realize if a Pointwise has too much stuff to be inlined.
        # As this may cause RecursionError during Inductor's evaluation.
        if isinstance(result, TensorBox) and isinstance(result.data, StorageBox):
            curr = result.data.data
            if isinstance(curr, Pointwise):
                # Use inner fn as a rough proxy. Good enough.
                if curr.has_large_inner_fn(threshold=100):
                    result.realize()

    assign_origin_node(result, n)

    self.register_users_of(result)

    # Collect unbacked symbols defined by buffers/operations this node added.
    new_unbacked_defs = OrderedSet[sympy.Symbol]()
    for buf in self.buffers[buffer_watermark:]:
        new_unbacked_defs |= buf.get_unbacked_symbol_defs()
    for op in self.operations[operation_watermark:]:
        new_unbacked_defs |= op.get_unbacked_symbol_defs()

    shape_env = V.graph.sizevars.shape_env

    # An input can be unbacked symint i.e.: when mark_unbacked is used.
    # in that case add it to new_unbacked_defs.
    if (
        n.op == "placeholder"
        and isinstance(result, sympy.Symbol)
        and shape_env.is_unbacked_symint(result)
    ):
        new_unbacked_defs.add(result)

    def format_new_defs() -> str:
        # Human-readable dump of the fresh definitions for the assert below.
        r = [
            f"unbacked_symbol_defs={buf.get_unbacked_symbol_defs()} in:\n{buf}\n"
            for buf in self.buffers[buffer_watermark:]
        ]
        r.extend(
            f"unbacked_symbol_defs={op.get_unbacked_symbol_defs()} in:\n{op}\n"
            for op in self.operations[operation_watermark:]
        )
        return "***\n".join(r)

    # We do not skip unbacked symints that are input for backward see the note below.
    if V.graph.is_backward and n.op == "placeholder":
        return result

    # Note [Backwards runtime asserts]
    # Backwards poses an interesting problem for deferred runtime
    # asserts. In the easy case, we may solely close over data
    # dependent sized tensors, and there are no binding sites for
    # unbacked SymInts. In this case, we can just drop all the
    # runtime asserts on the floor: no non-placeholder bindings, no
    # problem.
    #
    # However, it is *possible* for a fresh runtime assert to show up
    # between forwards and backwards. Right now, the freezing process
    # that happens when we lower forwards means that we will freeze
    # runtime asserts, and then the moment the backwards lowering
    # process attempts to add a new deferred runtime assert, we will
    # fail. Let's say you remove that assert. Now when we get here,
    # we need to make sure we actually emit these asserts (because we
    # can't emit them in forwards, we already compiled it). So we
    # have to do something here. But we don't want to reemit ALL
    # deferred runtime asserts, we only want to emit the NEW ones.
    # Therefore needing some sort of stratification in the ShapeEnv.
    # This is all doable, it just hasn't been done yet.
    unbacked_bindings = resolve_unbacked_bindings(
        V.graph.sizevars.shape_env, n.meta.get("unbacked_bindings", {})
    )
    assert unbacked_bindings is not None
    # When we do lowering, it is possible we reallocate unbacked SymInts.
    # So we need to line up the unbacked SymInts when performing the test
    # here
    #
    # In principle, we could permit lowering to introduce MORE unbacked
    # SymInts: as long as all the old unbacked ones are accounted for,
    # it's fine for inductor to introduce extra calls to item()/unbacked()
    # whatever. This actually happens in practice when an unbacked SymInt
    # gets memoized away; naively, when Inductor reprocesses a kernel, it
    # doesn't know that the memo still applies, and ends up allocating a
    # new symbol. However, this is generally a bad thing: we may still
    # end up needing to test equalities on the symbols, and a fresh
    # symbol is likely to hit lots of GuardOnDataDependent errors that
    # we already know facts for.
    renamed_unbacked_bindings = OrderedSet(
        V.fake_mode.shape_env.unbacked_renamings.get(s, s)
        for s in unbacked_bindings
    )
    assert new_unbacked_defs >= renamed_unbacked_bindings, (
        f"failed {new_unbacked_defs} >= {renamed_unbacked_bindings} (inductor >= fx)\n"
        f"fx node is: {n.format_node()}\n"
        f"new operations are:\n\n{format_new_defs()}"
    )

    self.create_deferred_runtime_asserts(n, new_unbacked_defs)

    return result
def create_deferred_runtime_asserts(
    self, n: torch.fx.Node, new_unbacked_defs: OrderedSet[sympy.Symbol]
) -> None:
    """Emit AssertScalar ops for runtime checks tied to this node.

    Covers (a) explicit ``_assert_scalar`` nodes in full-AOTI-assert mode,
    (b) value-range bounds on freshly defined unbacked symbols, and
    (c) deferred runtime asserts whose free symbols are now all bound.
    Asserts whose symbols are still missing are re-queued on a missing
    symbol in ``self.ras_by_symbol``.
    """
    # [NOTE] Codegen runtime asserts in Inductor
    #
    # We need to generate runtime asserts directly in Inductor instead
    # of just reusing the asserts from input graphs because we reuse the
    # same ShapeEnv as before. In particular, on subsequent graph passes,
    # we would immediately turn all of these assertions into noops,
    # because when we evaluated their expressions, we would see that
    # because we had a deferred runtime assert in the ShapeEnv, we
    # know "oh, of course this expression is True" already.
    # One example is below:
    #
    # class Model(torch.nn.Module):
    #     def forward(self, a, b, c):
    #         nz = torch.nonzero(a)
    #         ones = a.new_ones([nz.size(0), b.size(0)])
    #         torch._check(ones.size(0) >= 1)
    #         equals = torch.add(ones, c)
    #         return equals
    # torch._dynamo.mark_dynamic(c, 0)
    # When we reuse the ShapeEnv in Inductor lowering, the check that checks
    # a and nonzero have the same shape would be evaluated to True after we resolve
    # unbacked bindings using the ShapeEnv.
    # See test_unbacked_equals_input_size_runtime_assertion in test_aot_inductor.
    #
    #
    # In addition to the Inductor generated runtime asserts, we also
    # need the runtime asserts from the input graph, because some derived
    # runtime asserts on backed symints are not generated in Inductor. One example is
    # this: `y = x.reshape(100, -1).clone()`. x.shape[0] needs to be a multiple of 100.
    # See test_aoti_runtime_asserts_backed_symint in test_aot_inductor.
    def make_assert(expr: SympyBoolean, msg: str) -> None:
        # Register an AssertScalar as both a buffer and an operation so it
        # participates in scheduling.
        assert_op = ir.AssertScalar(expr, msg)
        self.register_buffer(assert_op, set_name=True)
        self.register_operation(assert_op)

    if (
        full_aoti_runtime_assert()
        and n.target is torch.ops.aten._assert_scalar.default
        and self.aot_mode
    ):
        node_args, _ = self.fetch_args_kwargs_from_env(n)
        if node_args[0] != True:  # noqa: E712
            make_assert(node_args[0], f"{node_args[0]} to be True")
    else:
        # bound_unbacked_symbols tracks the symbols that are created so far,
        # we use it to make sure that runtime assertions are added after all
        # symbols used in them are defined.
        self.bound_unbacked_symbols |= new_unbacked_defs

        shape_env = V.graph.sizevars.shape_env

        # Emit code for runtime asserts that can be inserted at this point.
        for i0 in new_unbacked_defs:
            ras = self.ras_by_symbol.pop(i0, [])
            # NB: size-like not needed, we won't retrace
            vr = shape_env.var_to_range[i0]
            if not shape_env._default_unspecified_value_range().issubset(vr):
                # The symbol has a narrower range than the default; assert it.

                def is_convertible(s: Expr) -> bool:
                    # Only bounds representable as plain ints can be asserted.
                    if s in (int_oo, -int_oo):
                        return False
                    try:
                        int(s)
                        return True
                    except TypeError:
                        return False

                if is_convertible(vr.lower):
                    make_assert(i0 >= vr.lower, f"{i0} >= {vr.lower}")
                if is_convertible(vr.upper):
                    make_assert(i0 <= vr.upper, f"{i0} <= {vr.upper}")

            for ra in ras:
                fvs = free_unbacked_symbols(ra.expr)
                missing = fvs - self.bound_unbacked_symbols
                if missing:
                    # Some symbols still undefined: re-queue on one of them.
                    i1 = min(missing, key=str)
                    self.ras_by_symbol.setdefault(i1, []).append(ra)
                else:
                    make_assert(ra.expr, f"{ra.expr}")
def validate_can_generate_cpp_wrapper(self) -> None:
    """Raise ``CppWrapperCodegenError`` when cpp-wrapper codegen cannot run.

    Checks the global disable flag and that the host platform is one of the
    supported ones (Linux, macOS, Windows).
    """
    if config.disable_cpp_codegen:
        raise CppWrapperCodegenError("C++ codegen is disabled")

    supported_platforms = ("linux", "darwin", "win32")
    if sys.platform not in supported_platforms:
        raise CppWrapperCodegenError(f"Unsupported platform {sys.platform}")
def init_wrapper_code(
    self,
    is_subgraph: bool = False,
    subgraph_name: Optional[str] = None,
    parent_wrapper_code: Optional[PythonWrapperCodegen] = None,
    partition_signatures: Optional[GraphPartitionSignature] = None,
) -> None:
    """Instantiate ``self.wrapper_code`` for this graph.

    Determines the single non-CPU device type used by the graph (mixing
    multiple non-CPU devices is not supported), validates cpp-wrapper
    feasibility when enabled, and creates the device-appropriate wrapper
    codegen object.

    Args:
        is_subgraph: whether this wrapper is for a nested subgraph.
        subgraph_name: name of the subgraph, when ``is_subgraph``.
        parent_wrapper_code: the enclosing graph's wrapper, for subgraphs.
        partition_signatures: graph-partition signatures, if partitioning.
    """
    device_types = self.device_types.copy()
    device_types.discard("cpu")
    device_types.discard("meta")
    # TODO(Eikan): Only support mixing cpu and other device now.
    assert len(device_types) <= 1, "Does not support mixing {}".format(
        "+".join(device_types)
    )
    only_cpu = len(device_types) == 0
    self.device_type = "cpu" if only_cpu else device_types.pop()

    if self.cpp_wrapper:
        self.validate_can_generate_cpp_wrapper()

    self.device_ops = get_device_op_overrides(self.device_type)
    wrapper_code_gen_cls = get_wrapper_codegen_for_device(
        self.device_type, self.cpp_wrapper, self.fx_wrapper
    )
    assert wrapper_code_gen_cls is not None, (
        f"Device {self.device_type} not supported"
    )
    self.wrapper_code = wrapper_code_gen_cls.create(
        is_subgraph,
        subgraph_name,
        parent_wrapper_code,
        partition_signatures,
    )

    if self.const_module:
        # Share the name counter with the constant-folding module's wrapper
        # so generated names stay unique across both.
        self.wrapper_code._names_iter = self.const_module.wrapper_code._names_iter
def extract_autotune_inputs(
    self, example_inputs: list[Union[int, float, torch.Tensor]]
) -> None:
    """Capture concrete tensor/grid values for autotuning user Triton kernels.

    Deep-copies the original GraphModule, rewires it so that every
    ``triton_kernel_wrapper_mutation`` node's kwargs (cloning those that the
    kernel mutates) and any FX-node-valued grid entries become graph outputs,
    runs it with ``example_inputs``, and stores the resulting values in
    ``self.autotuning_inputs`` / ``self.autotuning_mapping`` /
    ``self.autotuning_grids`` for later use by the autotuner.
    """
    import copy

    # Work on copies so neither the original graph nor the example inputs
    # are mutated by this dry run.
    cloned_gm = copy.deepcopy(self.orig_gm)
    example_inputs = copy.deepcopy(example_inputs)
    triton_nodes = []
    for node in cloned_gm.graph.nodes:
        if (
            node.op == "call_function"
            and node.target is torch.ops.higher_order.triton_kernel_wrapper_mutation
        ):
            triton_nodes.append(node)

    # Store grid related nodes
    grid_inputs: list[torch.fx.Node] = []
    visited_grids: dict[torch.fx.Node, int] = {}
    # Store kwargs related nodes
    triton_inputs: dict[str, Any] = {}
    kwargs_inputs: list[torch.fx.Node] = []
    visited_kwargs: dict[Any, int] = {}
    for node in triton_nodes:
        # first check whether we have fx node in grid settings.
        for grid in node.kwargs["grid"]:
            for val in grid:
                if val in visited_grids:
                    continue
                if isinstance(val, torch.fx.Node):
                    visited_grids[val] = len(grid_inputs)
                    grid_inputs.append(val)

        kwargs = node.kwargs["kwargs"]
        # identify which args might be mutated, those should be cloned.
        mutated = torch._higher_order_ops.triton_kernel_wrap.get_mutated_tensors(
            node.kwargs["kernel_idx"],
            node.kwargs["constant_args_idx"],
            {
                k: v.meta["val"] if isinstance(v, torch.fx.Node) else v
                for k, v in kwargs.items()
            },
            node.kwargs["tma_descriptor_metadata"],
        )
        new_kwargs: dict[str, int] = {}
        with cloned_gm.graph.inserting_before(node):
            for k, v in kwargs.items():
                if k in mutated:
                    # Clone mutated kwargs so the dry run sees pre-mutation
                    # values; record the clone as a new output.
                    new_node = cloned_gm.graph.call_function(torch.clone, args=(v,))
                    new_kwargs[k] = len(kwargs_inputs)
                    kwargs_inputs.append(new_node)
                    continue
                if v in visited_kwargs:
                    new_kwargs[k] = visited_kwargs[v]
                    continue
                visited_kwargs[v] = len(kwargs_inputs)
                kwargs_inputs.append(v)
                new_kwargs[k] = visited_kwargs[v]
        triton_inputs[node.name] = new_kwargs

    # Replace the graph's output with the collected kwargs + grid values.
    new_outputs = kwargs_inputs + grid_inputs
    for node in cloned_gm.graph.nodes:
        if node.op == "output":
            node.args = (tuple(new_outputs),)
            break
    cloned_gm.recompile()
    runner = torch.fx.Interpreter(cloned_gm)
    returned_outputs = runner.run(example_inputs)

    # Extract and store the grid for autotuning
    if len(grid_inputs) > 0:
        grid_outputs = returned_outputs[len(kwargs_inputs) :]
        self.autotuning_grids = {}
        for node in triton_nodes:
            dynamic_grid = False
            new_grids: list[tuple[Any]] = []
            for grid in node.kwargs["grid"]:
                new_grid = []
                for val in grid:
                    if not isinstance(val, torch.fx.Node):
                        new_grid.append(val)
                        continue
                    dynamic_grid = True
                    new_grid.append(grid_outputs[visited_grids[val]])
                # pyrefly: ignore [bad-argument-type]
                new_grids.append(tuple(new_grid))
            if dynamic_grid:
                self.autotuning_grids[node.name] = new_grids

    # Store the kwargs input for autotuning
    self.autotuning_inputs = returned_outputs[: len(kwargs_inputs)]
    self.autotuning_mapping = triton_inputs
def codegen_with_cpp_wrapper(
    self,
) -> tuple[ValueWithLineMap, ValueWithLineMap]:
    """
    For GPU, Triton kernels are autotuned and stored as cubin files

    CPU graphs are generated in a single pass. For cuda/xpu graphs, either
    (a) codegen in one pass with compile-time autotuning enabled (optionally
    capturing sample inputs for user-defined Triton kernels first), or
    (b) run a two-pass flow: compile and execute a Python-wrapper version
    with real inputs to autotune, then regenerate with the cpp wrapper.
    """
    if any(device in self.device_types for device in ["cuda", "xpu"]):

        def extract_real_inputs() -> list[Union[int, float, torch.Tensor]]:
            # Materialize concrete inputs (hints for SymInts, real tensors
            # for fakes) so the first-pass compiled module can actually run.
            def materialize(
                x: Union[torch.SymInt, torch.SymFloat, torch.Tensor],
            ) -> Union[int, float, torch.Tensor]:
                if x is None:
                    # pyrefly: ignore [bad-return]
                    return None
                elif isinstance(x, (torch.SymInt, torch.SymFloat)):
                    # Need concrete value to run dynamic shapes and tune the result
                    return x.node.hint
                elif isinstance(x, FakeTensor):
                    return defake(x)
                else:
                    assert isinstance(x, torch.Tensor), (
                        "Unknown type when creating real inputs" + str(type(x))
                    )
                    return x

            tracing_context = torch._guards.TracingContext.try_get()
            if tracing_context is not None and not isinstance(
                V.real_inputs, NullHandler
            ):
                if tracing_context.output_strides:
                    tracing_context.output_strides.clear()

                params_flat = [
                    param
                    for param in tracing_context.params_flat  # type: ignore[union-attr]
                    if param is not None
                ]
                real_inputs = [
                    materialize(x)
                    for x in itertools.chain(params_flat, V.real_inputs)
                ]
            else:
                # In the backward pass, V.real_inputs is not OrderedSet.
                # Generating random inputs based on self.example_inputs sometimes can be problematic,
                # e.g. illegal memory access. A comprehensive fix is to autotune in a separate process.
                real_inputs = [
                    materialize(x)  # type:ignore[arg-type]
                    for x in (
                        self.example_inputs  # type:ignore[union-attr]
                        if isinstance(V.real_inputs, NullHandler)
                        else V.real_inputs
                    )
                ]

            if self.mutated_inputs:
                from .compile_fx import clone_preserve_strides

                mutated_input_idxs = [
                    idx
                    for idx, name in enumerate(self.graph_inputs)
                    if name in self.mutated_inputs
                    and isinstance(real_inputs[idx], torch.Tensor)
                ]
                for idx in mutated_input_idxs:
                    # clone mutated Tensor inputs to avoid mutating them in
                    # the first pass of the CPP wrapper-based compilation, as
                    # this will lead to a side effect on the example inputs:
                    # e.g. if torch.compile(f)(x) if called on input-mutating
                    # f, the inputs x will be mutated twice in the process:
                    # once here, and again when running the compiled model;
                    # this will also lead to a numerically incorrect output
                    mutated_inp = real_inputs[idx]
                    assert isinstance(mutated_inp, torch.Tensor)
                    real_inputs[idx] = clone_preserve_strides(mutated_inp)
                    del mutated_inp
            return real_inputs

        if config.triton.autotune_at_compile_time:
            # If autotune_at_compile_time is True, we can do the codegen in one-pass
            # We will construct the autotuning values if user defined kernel exists.
            if config.triton.autotune_with_sample_inputs:
                user_defined_kernels = False
                for op in self.operations:
                    if isinstance(op, ir.UserDefinedTritonKernel):
                        user_defined_kernels = True
                        break
                if user_defined_kernels:
                    real_inputs = extract_real_inputs()
                    self.extract_autotune_inputs(real_inputs)
            return self.codegen()
        else:
            # first pass
            self.cpp_wrapper = False
            compiled = self.compile_to_module().call
            real_inputs = extract_real_inputs()
            with torch.utils._python_dispatch._disable_current_modes():
                compiled(real_inputs)
            del real_inputs

            # second pass
            self.cpp_wrapper = True
            # Reset state accumulated by the first pass before regenerating.
            self.removed_buffers.clear()
            self.removed_operations.clear()
            self.inplaced_to_remove.clear()
            V.graph.sizevars.precomputed_replacements.clear()
            V.graph.sizevars.inv_precomputed_replacements.clear()
            metrics.reset()
            with config.patch({"triton.autotune_at_compile_time": False}):
                return self.codegen()
    else:
        # cpu
        return self.codegen()
def _update_scheduler(self) -> None:
"""
(Re)initializes the scheduler member. When initializing the scheduler, no CUBIN
files should be generated (to avoid biasing any benchmarks and pessimizing
fusion decisions).
"""
from .scheduler import Scheduler
with config.patch("triton.store_cubin", False):
self.scheduler = Scheduler(self.operations)
def codegen(self) -> tuple[ValueWithLineMap, ValueWithLineMap]:
with dynamo_timed("GraphLowering.codegen", log_pt2_compile_event=True):
self.init_wrapper_code()
self._update_scheduler()
V.debug.draw_orig_fx_graph(self.orig_gm, self.scheduler.nodes)
self.wrapper_code.push_codegened_graph(self)
self.scheduler.codegen()
log.debug(
"Finished codegen for all nodes. The list of kernel names available: %s",
V.graph.all_codegen_kernel_names,
)
result = self.wrapper_code.generate(self.is_inference)
self.wrapper_code.pop_codegened_graph()
return result
def codegen_subgraph(self, parent_graph: GraphLowering) -> None:
"""
This is a more compact version of the `codegen()` above
where we codegen this graph as a subgraph of some parent
graph. The parent graph is passed as an argument: the
intention is to inline codegening of the subgraph in
the parent graph's wrapper code (including the generated
kernels). The wrapper code is not finalized (via `.generate()`
call), as this will be done in the parent graph's `codegen()`.
"""
with dynamo_timed("GraphLowering.codegen_subgraph", log_pt2_compile_event=True):
self.wrapper_code = parent_graph.wrapper_code
self.device_ops = parent_graph.device_ops
self.cpp_wrapper = parent_graph.cpp_wrapper
self._update_scheduler()
self.scheduler.codegen()
def count_bytes(
self,
) -> tuple[
int, list[tuple[BaseSchedulerNode, int]], list[tuple[BaseSchedulerNode, float]]
]:
total_bytes = 0
node_counts = []
node_runtimes = []
for node in self.scheduler.nodes:
num_bytes = node.get_read_write_buffers_sizes()
total_bytes += num_bytes
node_counts.append((node, num_bytes // 4))
node_runtimes.append((node, node.get_estimated_runtime()))
return total_bytes, node_counts, node_runtimes
# No-op to be patched for unit tests
save_output_code: Optional[Callable[[str], None]] = None
def compile_to_module(self) -> CompiledModule:
with dynamo_timed(
"GraphLowering.compile_to_module",
phase_name="code_gen",
log_pt2_compile_event=True,
dynamo_compile_column_us="inductor_code_gen_cumulative_compile_time_us",
):
return self._compile_to_module()
def _compile_to_module(self) -> CompiledModule:
# If we're here, we don't have to worry about the kernel code, which is only
# returned separately in AOTInductor mode.
wrapper_code, _ = (
self.codegen_with_cpp_wrapper() if self.cpp_wrapper else self.codegen()
)
if isinstance(wrapper_code, ValueWithLineMap):
mod = self._compile_to_module_lines(wrapper_code)
elif isinstance(wrapper_code, FileBackedGraphModule):
mod = wrapper_code
else:
raise NotImplementedError(
f"Unrecognized wrapper code type: {type(wrapper_code)}"
)
# Logged twice as per https://github.com/pytorch/pytorch/pull/99038#discussion_r1167826029
# TODO. Revisit this once the logging API is more mature
assert mod.__file__ is not None
log_module_code(mod.__file__)
log.debug("Output code written to: %s", mod.__file__)
output_code_log.info("Output code written to: %s", mod.__file__)
if config.benchmark_kernel:
print(f"Compiled module path: {mod.__file__}", file=sys.stderr)
if isinstance(wrapper_code, FileBackedGraphModule):
V.debug.output_code(mod.__file__)
V.debug.copy(os.path.splitext(mod.__file__)[0] + ".debug")
return mod
def _compile_to_module_lines(
self, wrapper_code: ValueWithLineMap
) -> CompiledModule:
from .codecache import PyCodeCache
if config.triton.autotune_at_compile_time:
# sanitize docstrings in kernel defs (#155006)
kernel_autotune_defs = self.wrapper_code.kernel_autotune_defs.getvalue()
kernel_autotune_defs = kernel_autotune_defs.replace('"""', '\\"\\"\\"')
tuning_code = (
'"""\n'
+ "Compile-time auto-tuning block: \n"
+ kernel_autotune_defs
+ self.wrapper_code.kernel_autotune_calls.getvalue()
+ '"""\n'
)
wrapper_code.value = tuning_code + wrapper_code.value
if GraphLowering.save_output_code is not None:
GraphLowering.save_output_code(wrapper_code.value)
output_code_log.debug("Output code: \n%s", wrapper_code.value)
inductor_meta = autotune_cache.inductor_meta_from_config()
AutotuneCacheBundler.begin_compile(inductor_meta, code=wrapper_code.value)
try:
linemap = [
(line_no, node.stack_trace) # type: ignore[attr-defined]
for line_no, node in wrapper_code.line_map
]
key, path = PyCodeCache.write(wrapper_code.value)
output_code_log.debug("Output code written to: %s", path)
V.debug.output_code(path)
V.debug.copy(os.path.splitext(path)[0] + ".debug")
except Exception:
trace_structured(
"inductor_output_code",
# Just omit the filename, I still want the code though!
payload_fn=lambda: wrapper_code.value,
)
raise
else:
trace_structured(
"inductor_output_code",
lambda: {
"filename": path,
"file_path": os.path.abspath(path),
},
payload_fn=lambda: wrapper_code.value,
)
with dynamo_timed("PyCodeCache.load_by_key_path", log_pt2_compile_event=True):
mod = PyCodeCache.load_by_key_path(
key,
path,
linemap=linemap, # type: ignore[arg-type]
attrs={**self.constants, **self.torchbind_constants},
)
self.cache_key = key
self.cache_path = path
self.cache_linemap = linemap # type: ignore[assignment]
if config.benchmark_harness and config.profile_bandwidth_output:
# run the inputs code gen to get the bandwidth info
mod.benchmark_compiled_module(times=1, repeat=1)
return mod
def _get_output_names(self, graph_outputs: list[ir.IRNode]) -> list[str]:
names = []
shape_counter = itertools.count(0)
none_counter = itertools.count(0)
for node in graph_outputs:
if isinstance(node, ir.NoneAsConstantBuffer):
names.append(f"{self.name}_none{next(none_counter)}")
elif isinstance(node, ir.ShapeAsConstantBuffer):
names.append(f"{self.name}_shape{next(shape_counter)}")
else:
names.append(node.get_name())
return names
def get_output_names(self) -> list[str]:
return self._get_output_names(self.graph_outputs)
def is_unspec_arg(self, name: str) -> bool:
# dynamo wraps unspec variable as 0d CPU tensor,
# need to convert to scalar during codegen (triton only)
return (
name in self.graph_inputs
and self.graph_inputs[name].get_numel() == 1
and len(self.graph_inputs[name].get_size()) == 0
and get_device_type(self.graph_inputs[name]) == "cpu"
) or name in self.zero_dim_cpu_tensor_list
| GraphLowering |
python | pytest-dev__pytest | src/_pytest/mark/expression.py | {
"start": 8049,
"end": 9041
} | class ____(Protocol):
"""A callable which, given an identifier and optional kwargs, should return
whether it matches in an :class:`Expression` evaluation.
Should be prepared to handle arbitrary strings as input.
If no kwargs are provided, the expression of the form `foo`.
If kwargs are provided, the expression is of the form `foo(1, b=True, "s")`.
If the expression is not supported (e.g. don't want to accept the kwargs
syntax variant), should raise :class:`~pytest.UsageError`.
Example::
def matcher(name: str, /, **kwargs: str | int | bool | None) -> bool:
# Match `cat`.
if name == "cat" and not kwargs:
return True
# Match `dog(barks=True)`.
if name == "dog" and kwargs == {"barks": False}:
return True
return False
"""
def __call__(self, name: str, /, **kwargs: str | int | bool | None) -> bool: ...
@dataclasses.dataclass
| ExpressionMatcher |
python | walkccc__LeetCode | solutions/1409. Queries on a Permutation With Key/1409.py | {
"start": 421,
"end": 1039
} | class ____:
def processQueries(self, queries: list[int], m: int) -> list[int]:
ans = []
# Map [-m, m] to [0, 2 * m].
tree = FenwickTree(2 * m + 1)
numToIndex = {num: num + m for num in range(1, m + 1)}
for num in range(1, m + 1):
tree.add(num + m, 1)
nextEmptyIndex = m # Map 0 to m.
for query in queries:
index = numToIndex[query]
ans.append(tree.get(index - 1))
# Move `query` from `index` to `nextEmptyIndex`.
tree.add(index, -1)
tree.add(nextEmptyIndex, 1)
numToIndex[query] = nextEmptyIndex
nextEmptyIndex -= 1
return ans
| Solution |
python | ray-project__ray | release/benchmark-worker-startup/benchmark_worker_startup.py | {
"start": 7434,
"end": 13114
} | class ____:
num_jobs: int
num_runs_per_job: int
num_tasks_or_actors_per_run: int
with_gpu: bool
with_tasks: bool
with_runtime_env: bool
import_to_try: str
num_cpus_in_cluster: int
num_gpus_in_cluster: int
num_nodes_in_cluster: int
def __repr__(self):
with_gpu_str = "with_gpu" if self.with_gpu else "without_gpu"
executable_unit = "tasks" if self.with_tasks else "actors"
cold_or_warm_start = "cold" if self.num_jobs > 1 else "warm"
with_runtime_env_str = (
"with_runtime_env" if self.with_runtime_env else "without_runtime_env"
)
single_node_or_multi_node = (
"single_node" if self.num_nodes_in_cluster == 1 else "multi_node"
)
import_torch_or_none = (
"import_torch" if self.import_to_try == "torch" else "no_import"
)
return "-".join(
[
f"seconds_to_{cold_or_warm_start}_start_"
f"{self.num_tasks_or_actors_per_run}_{executable_unit}",
import_torch_or_none,
with_gpu_str,
single_node_or_multi_node,
with_runtime_env_str,
f"{self.num_cpus_in_cluster}_CPU_{self.num_gpus_in_cluster}"
"_GPU_cluster",
]
)
async def run_and_stream_logs(
metrics_actor_name, metrics_actor_namespace, test: TestConfiguration
):
"""
Run a particular test configuration by invoking ./test_single_configuration.py.
"""
client = JobSubmissionClient("http://127.0.0.1:8265")
entrypoint = generate_entrypoint(metrics_actor_name, metrics_actor_namespace, test)
for _ in range(test.num_jobs):
print(f"Running {entrypoint}")
if not test.with_runtime_env:
# On non-workspaces, this will run as a job but without a runtime env.
subprocess.check_call(entrypoint, shell=True)
else:
job_id = client.submit_job(
entrypoint=entrypoint,
runtime_env={"working_dir": "./"},
)
try:
async for lines in client.tail_job_logs(job_id):
print(lines, end="")
except KeyboardInterrupt:
print(f"Stopping job {job_id}")
client.stop_job(job_id)
raise
job_status = client.get_job_status(job_id)
if job_status != JobStatus.SUCCEEDED:
raise ValueError(
f"Job {job_id} was not successful; status is {job_status}"
)
def generate_entrypoint(
metrics_actor_name: str, metrics_actor_namespace: str, test: TestConfiguration
):
task_or_actor_arg = "--with_tasks" if test.with_tasks else "--with_actors"
with_gpu_arg = "--with_gpu" if test.with_gpu else "--without_gpu"
with_runtime_env_arg = (
"--with_runtime_env" if test.with_runtime_env else "--without_runtime_env"
)
return " ".join(
[
"python ./test_single_configuration.py",
f"--metrics_actor_name {metrics_actor_name}",
f"--metrics_actor_namespace {metrics_actor_namespace}",
f"--test_name {test}",
f"--num_runs {test.num_runs_per_job} ",
f"--num_tasks_or_actors_per_run {test.num_tasks_or_actors_per_run}",
f"--num_cpus_in_cluster {test.num_cpus_in_cluster}",
f"--num_gpus_in_cluster {test.num_gpus_in_cluster}",
task_or_actor_arg,
with_gpu_arg,
with_runtime_env_arg,
f"--library_to_import {test.import_to_try}",
]
)
def parse_args():
parser = argparse.ArgumentParser(
description="This release test measures Ray worker startup time. "
"Specifically, it measures the time to start N different tasks or"
" actors, where each task or actor imports a large library ("
"currently PyTorch). N is configurable.\nThe test runs under a "
"few different configurations: {task, actor} x {runtime env, "
"no runtime env} x {GPU, no GPU} x {cold start, warm start} x "
"{import torch, no imports}.",
epilog="This script uses test_single_configuration.py to run the "
"actual measurements.",
)
parser.add_argument(
"--num_gpus_in_cluster",
type=int,
required=True,
help="The number of GPUs in the cluster. This determines how many "
"GPU resources each actor/task requests.",
)
parser.add_argument(
"--num_cpus_in_cluster",
type=int,
required=True,
help="The number of CPUs in the cluster. This determines how many "
"CPU resources each actor/task requests.",
)
parser.add_argument(
"--num_tasks_or_actors_per_run",
type=int,
required=True,
help="The number of tasks or actors per 'run'. A run starts this "
"many tasks/actors and consitutes a single measurement. Several "
"runs can be composed within a single job for measure warm start, "
"or spread across different jobs to measure cold start.",
)
parser.add_argument(
"--num_measurements_per_configuration",
type=int,
required=True,
help="The number of measurements to record per configuration.",
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
sys.exit(
main(
args.num_cpus_in_cluster,
args.num_gpus_in_cluster,
args.num_tasks_or_actors_per_run,
args.num_measurements_per_configuration,
)
)
| TestConfiguration |
python | doocs__leetcode | solution/1600-1699/1634.Add Two Polynomials Represented as Linked Lists/Solution.py | {
"start": 198,
"end": 950
} | class ____:
def addPoly(self, poly1: "PolyNode", poly2: "PolyNode") -> "PolyNode":
dummy = curr = PolyNode()
while poly1 and poly2:
if poly1.power > poly2.power:
curr.next = poly1
poly1 = poly1.next
curr = curr.next
elif poly1.power < poly2.power:
curr.next = poly2
poly2 = poly2.next
curr = curr.next
else:
if c := poly1.coefficient + poly2.coefficient:
curr.next = PolyNode(c, poly1.power)
curr = curr.next
poly1 = poly1.next
poly2 = poly2.next
curr.next = poly1 or poly2
return dummy.next
| Solution |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/backfills.py | {
"start": 1840,
"end": 2009
} | class ____(BaseModel):
"""Backfill Collection serializer for responses."""
backfills: Iterable[BackfillResponse]
total_entries: int
| BackfillCollectionResponse |
python | django-guardian__django-guardian | guardian/exceptions.py | {
"start": 123,
"end": 226
} | class ____(Exception):
"""Base class for all guardian-specific exceptions."""
pass
| GuardianError |
python | realpython__materials | python-protocol/shapes_v1.py | {
"start": 452,
"end": 884
} | class ____(Shape):
def __init__(self, side) -> None:
self.side = side
def get_area(self) -> float:
return self.side**2
def get_perimeter(self) -> float:
return 4 * self.side
def print_shape_info(shape: Shape):
print(f"Area: {shape.get_area()}")
print(f"Perimeter: {shape.get_perimeter()}")
circle = Circle(10)
square = Square(5)
print_shape_info(circle)
print_shape_info(square)
| Square |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_tool_selection.py | {
"start": 16555,
"end": 20531
} | class ____:
"""Test handling of duplicate and invalid tool selections."""
def test_duplicate_tool_selection_deduplicated(self) -> None:
"""Test that duplicate tool selections are deduplicated."""
model_requests = []
@wrap_model_call
def trace_model_requests(request, handler):
model_requests.append(request)
return handler(request)
# Selector returns duplicates
tool_selection_model = FakeModel(
messages=cycle(
[
AIMessage(
content="",
tool_calls=[
{
"name": "ToolSelectionResponse",
"id": "1",
"args": {
"tools": [
"get_weather",
"get_weather",
"search_web",
"search_web",
]
},
}
],
),
]
)
)
model = FakeModel(messages=iter([AIMessage(content="Done")]))
tool_selector = LLMToolSelectorMiddleware(max_tools=5, model=tool_selection_model)
agent = create_agent(
model=model,
tools=[get_weather, search_web, calculate],
middleware=[tool_selector, trace_model_requests],
)
agent.invoke({"messages": [HumanMessage("test")]})
# Duplicates should be removed
assert len(model_requests) > 0
for request in model_requests:
tool_names = [tool.name for tool in request.tools]
assert tool_names == ["get_weather", "search_web"]
assert len(tool_names) == 2
def test_max_tools_with_duplicates(self) -> None:
"""Test that max_tools works correctly with duplicate selections."""
model_requests = []
@wrap_model_call
def trace_model_requests(request, handler):
model_requests.append(request)
return handler(request)
# Selector returns duplicates but max_tools=2
tool_selection_model = FakeModel(
messages=cycle(
[
AIMessage(
content="",
tool_calls=[
{
"name": "ToolSelectionResponse",
"id": "1",
"args": {
"tools": [
"get_weather",
"get_weather",
"search_web",
"search_web",
"calculate",
]
},
}
],
),
]
)
)
model = FakeModel(messages=iter([AIMessage(content="Done")]))
tool_selector = LLMToolSelectorMiddleware(max_tools=2, model=tool_selection_model)
agent = create_agent(
model=model,
tools=[get_weather, search_web, calculate],
middleware=[tool_selector, trace_model_requests],
)
agent.invoke({"messages": [HumanMessage("test")]})
# Should deduplicate and respect max_tools
assert len(model_requests) > 0
for request in model_requests:
tool_names = [tool.name for tool in request.tools]
assert len(tool_names) == 2
assert "get_weather" in tool_names
assert "search_web" in tool_names
| TestDuplicateAndInvalidTools |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.