language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tensorflow__tensorflow | tensorflow/python/eager/memory_tests/remote_memory_test.py | {
"start": 1132,
"end": 2174
} | class ____(test.TestCase):
def __init__(self, method):
super(RemoteWorkerMemoryTest, self).__init__(method)
# used for remote worker tests
self._cached_server = server_lib.Server.create_local_server()
self._cached_server_target = self._cached_server.target[len("grpc://"):]
def testMemoryLeakInLocalCopy(self):
if not memory_test_util.memory_profiler_is_available():
self.skipTest("memory_profiler required to run this test")
remote.connect_to_remote_host(self._cached_server_target)
# Run a function locally with the input on a remote worker and ensure we
# do not leak a reference to the remote tensor.
@def_function.function
def local_func(i):
return i
def func():
with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
x = array_ops.zeros([1000, 1000], dtypes.int32)
local_func(x)
memory_test_util.assert_no_leak(
func, num_iters=100, increase_threshold_absolute_mb=50)
if __name__ == "__main__":
test.main()
| RemoteWorkerMemoryTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_gap05.py | {
"start": 315,
"end": 1365
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_gap05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [45938176, 59715584]
chart.axis2_ids = [70848512, 54519680]
data = [[1, 2, 3, 4, 5], [6, 8, 6, 4, 2]]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
chart.add_series({"values": "=Sheet1!$A$1:$A$5", "gap": 51, "overlap": 12})
chart.add_series(
{"values": "=Sheet1!$B$1:$B$5", "y2_axis": 1, "gap": 251, "overlap": -27}
)
chart.set_x2_axis({"label_position": "next_to"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | networkx__networkx | networkx/algorithms/tree/coding.py | {
"start": 694,
"end": 13445
} | class ____(nx.NetworkXException):
"""Raised when a function expects a tree (that is, a connected
undirected graph with no cycles) but gets a non-tree graph as input
instead.
"""
@not_implemented_for("directed")
@nx._dispatchable(graphs="T")
def to_nested_tuple(T, root, canonical_form=False):
"""Returns a nested tuple representation of the given tree.
The nested tuple representation of a tree is defined
recursively. The tree with one node and no edges is represented by
the empty tuple, ``()``. A tree with ``k`` subtrees is represented
by a tuple of length ``k`` in which each element is the nested tuple
representation of a subtree.
Parameters
----------
T : NetworkX graph
An undirected graph object representing a tree.
root : node
The node in ``T`` to interpret as the root of the tree.
canonical_form : bool
If ``True``, each tuple is sorted so that the function returns
a canonical form for rooted trees. This means "lighter" subtrees
will appear as nested tuples before "heavier" subtrees. In this
way, each isomorphic rooted tree has the same nested tuple
representation.
Returns
-------
tuple
A nested tuple representation of the tree.
Notes
-----
This function is *not* the inverse of :func:`from_nested_tuple`; the
only guarantee is that the rooted trees are isomorphic.
See also
--------
from_nested_tuple
to_prufer_sequence
Examples
--------
The tree need not be a balanced binary tree::
>>> T = nx.Graph()
>>> T.add_edges_from([(0, 1), (0, 2), (0, 3)])
>>> T.add_edges_from([(1, 4), (1, 5)])
>>> T.add_edges_from([(3, 6), (3, 7)])
>>> root = 0
>>> nx.to_nested_tuple(T, root)
(((), ()), (), ((), ()))
Continuing the above example, if ``canonical_form`` is ``True``, the
nested tuples will be sorted::
>>> nx.to_nested_tuple(T, root, canonical_form=True)
((), ((), ()), ((), ()))
Even the path graph can be interpreted as a tree::
>>> T = nx.path_graph(4)
>>> root = 0
>>> nx.to_nested_tuple(T, root)
((((),),),)
"""
def _make_tuple(T, root, _parent):
"""Recursively compute the nested tuple representation of the
given rooted tree.
``_parent`` is the parent node of ``root`` in the supertree in
which ``T`` is a subtree, or ``None`` if ``root`` is the root of
the supertree. This argument is used to determine which
neighbors of ``root`` are children and which is the parent.
"""
# Get the neighbors of `root` that are not the parent node. We
# are guaranteed that `root` is always in `T` by construction.
children = set(T[root]) - {_parent}
if len(children) == 0:
return ()
nested = (_make_tuple(T, v, root) for v in children)
if canonical_form:
nested = sorted(nested)
return tuple(nested)
# Do some sanity checks on the input.
if not nx.is_tree(T):
raise nx.NotATree("provided graph is not a tree")
if root not in T:
raise nx.NodeNotFound(f"Graph {T} contains no node {root}")
return _make_tuple(T, root, None)
@nx._dispatchable(graphs=None, returns_graph=True)
def from_nested_tuple(sequence, sensible_relabeling=False):
"""Returns the rooted tree corresponding to the given nested tuple.
The nested tuple representation of a tree is defined
recursively. The tree with one node and no edges is represented by
the empty tuple, ``()``. A tree with ``k`` subtrees is represented
by a tuple of length ``k`` in which each element is the nested tuple
representation of a subtree.
Parameters
----------
sequence : tuple
A nested tuple representing a rooted tree.
sensible_relabeling : bool
Whether to relabel the nodes of the tree so that nodes are
labeled in increasing order according to their breadth-first
search order from the root node.
Returns
-------
NetworkX graph
The tree corresponding to the given nested tuple, whose root
node is node 0. If ``sensible_labeling`` is ``True``, nodes will
be labeled in breadth-first search order starting from the root
node.
Notes
-----
This function is *not* the inverse of :func:`to_nested_tuple`; the
only guarantee is that the rooted trees are isomorphic.
See also
--------
to_nested_tuple
from_prufer_sequence
Examples
--------
Sensible relabeling ensures that the nodes are labeled from the root
starting at 0::
>>> balanced = (((), ()), ((), ()))
>>> T = nx.from_nested_tuple(balanced, sensible_relabeling=True)
>>> edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]
>>> all((u, v) in T.edges() or (v, u) in T.edges() for (u, v) in edges)
True
"""
def _make_tree(sequence):
"""Recursively creates a tree from the given sequence of nested
tuples.
This function employs the :func:`~networkx.tree.join` function
to recursively join subtrees into a larger tree.
"""
# The empty sequence represents the empty tree, which is the
# (unique) graph with a single node. We mark the single node
# with an attribute that indicates that it is the root of the
# graph.
if len(sequence) == 0:
return nx.empty_graph(1)
# For a nonempty sequence, get the subtrees for each child
# sequence and join all the subtrees at their roots. After
# joining the subtrees, the root is node 0.
return nx.tree.join_trees([(_make_tree(child), 0) for child in sequence])
# Make the tree and remove the `is_root` node attribute added by the
# helper function.
T = _make_tree(sequence)
if sensible_relabeling:
# Relabel the nodes according to their breadth-first search
# order, starting from the root node (that is, the node 0).
bfs_nodes = chain([0], (v for u, v in nx.bfs_edges(T, 0)))
labels = {v: i for i, v in enumerate(bfs_nodes)}
# We would like to use `copy=False`, but `relabel_nodes` doesn't
# allow a relabel mapping that can't be topologically sorted.
T = nx.relabel_nodes(T, labels)
return T
@not_implemented_for("directed")
@nx._dispatchable(graphs="T")
def to_prufer_sequence(T):
r"""Returns the Prüfer sequence of the given tree.
A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and
*n* - 1, inclusive. The tree corresponding to a given Prüfer
sequence can be recovered by repeatedly joining a node in the
sequence with a node with the smallest potential degree according to
the sequence.
Parameters
----------
T : NetworkX graph
An undirected graph object representing a tree.
Returns
-------
list
The Prüfer sequence of the given tree.
Raises
------
NetworkXPointlessConcept
If the number of nodes in `T` is less than two.
NotATree
If `T` is not a tree.
KeyError
If the set of nodes in `T` is not {0, …, *n* - 1}.
Notes
-----
There is a bijection from labeled trees to Prüfer sequences. This
function is the inverse of the :func:`from_prufer_sequence`
function.
Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead
of from 0 to *n* - 1. This function requires nodes to be labeled in
the latter form. You can use :func:`~networkx.relabel_nodes` to
relabel the nodes of your tree to the appropriate format.
This implementation is from [1]_ and has a running time of
$O(n)$.
See also
--------
to_nested_tuple
from_prufer_sequence
References
----------
.. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu.
"An optimal algorithm for Prufer codes."
*Journal of Software Engineering and Applications* 2.02 (2009): 111.
<https://doi.org/10.4236/jsea.2009.22016>
Examples
--------
There is a bijection between Prüfer sequences and labeled trees, so
this function is the inverse of the :func:`from_prufer_sequence`
function:
>>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]
>>> tree = nx.Graph(edges)
>>> sequence = nx.to_prufer_sequence(tree)
>>> sequence
[3, 3, 3, 4]
>>> tree2 = nx.from_prufer_sequence(sequence)
>>> list(tree2.edges()) == edges
True
"""
# Perform some sanity checks on the input.
n = len(T)
if n < 2:
msg = "Prüfer sequence undefined for trees with fewer than two nodes"
raise nx.NetworkXPointlessConcept(msg)
if not nx.is_tree(T):
raise nx.NotATree("provided graph is not a tree")
if set(T) != set(range(n)):
raise KeyError("tree must have node labels {0, ..., n - 1}")
degree = dict(T.degree())
def parents(u):
return next(v for v in T[u] if degree[v] > 1)
index = u = next(k for k in range(n) if degree[k] == 1)
result = []
for i in range(n - 2):
v = parents(u)
result.append(v)
degree[v] -= 1
if v < index and degree[v] == 1:
u = v
else:
index = u = next(k for k in range(index + 1, n) if degree[k] == 1)
return result
@nx._dispatchable(graphs=None, returns_graph=True)
def from_prufer_sequence(sequence):
r"""Returns the tree corresponding to the given Prüfer sequence.
A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and
*n* - 1, inclusive. The tree corresponding to a given Prüfer
sequence can be recovered by repeatedly joining a node in the
sequence with a node with the smallest potential degree according to
the sequence.
Parameters
----------
sequence : list
A Prüfer sequence, which is a list of *n* - 2 integers between
zero and *n* - 1, inclusive.
Returns
-------
NetworkX graph
The tree corresponding to the given Prüfer sequence.
Raises
------
NetworkXError
If the Prüfer sequence is not valid.
Notes
-----
There is a bijection from labeled trees to Prüfer sequences. This
function is the inverse of the :func:`from_prufer_sequence` function.
Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead
of from 0 to *n* - 1. This function requires nodes to be labeled in
the latter form. You can use :func:`networkx.relabel_nodes` to
relabel the nodes of your tree to the appropriate format.
This implementation is from [1]_ and has a running time of
$O(n)$.
References
----------
.. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu.
"An optimal algorithm for Prufer codes."
*Journal of Software Engineering and Applications* 2.02 (2009): 111.
<https://doi.org/10.4236/jsea.2009.22016>
See also
--------
from_nested_tuple
to_prufer_sequence
Examples
--------
There is a bijection between Prüfer sequences and labeled trees, so
this function is the inverse of the :func:`to_prufer_sequence`
function:
>>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]
>>> tree = nx.Graph(edges)
>>> sequence = nx.to_prufer_sequence(tree)
>>> sequence
[3, 3, 3, 4]
>>> tree2 = nx.from_prufer_sequence(sequence)
>>> list(tree2.edges()) == edges
True
"""
n = len(sequence) + 2
# `degree` stores the remaining degree (plus one) for each node. The
# degree of a node in the decoded tree is one more than the number
# of times it appears in the code.
degree = Counter(chain(sequence, range(n)))
T = nx.empty_graph(n)
# `not_orphaned` is the set of nodes that have a parent in the
# tree. After the loop, there should be exactly two nodes that are
# not in this set.
not_orphaned = set()
index = u = next(k for k in range(n) if degree[k] == 1)
for v in sequence:
# check the validity of the prufer sequence
if v < 0 or v > n - 1:
raise nx.NetworkXError(
f"Invalid Prufer sequence: Values must be between 0 and {n - 1}, got {v}"
)
T.add_edge(u, v)
not_orphaned.add(u)
degree[v] -= 1
if v < index and degree[v] == 1:
u = v
else:
index = u = next(k for k in range(index + 1, n) if degree[k] == 1)
# At this point, there must be exactly two orphaned nodes; join them.
orphans = set(T) - not_orphaned
u, v = orphans
T.add_edge(u, v)
return T
| NotATree |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 256718,
"end": 257057
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "project")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
project = sgqlc.types.Field("Project", graphql_name="project")
| CreateProjectPayload |
python | scrapy__scrapy | scrapy/spiders/crawl.py | {
"start": 3161,
"end": 7850
} | class ____(Spider):
rules: Sequence[Rule] = ()
_rules: list[Rule]
_follow_links: bool
def __init__(self, *a: Any, **kw: Any):
super().__init__(*a, **kw)
self._compile_rules()
if method_is_overridden(self.__class__, CrawlSpider, "_parse_response"):
warnings.warn(
f"The CrawlSpider._parse_response method, which the "
f"{global_object_name(self.__class__)} class overrides, is "
f"deprecated: it will be removed in future Scrapy releases. "
f"Please override the CrawlSpider.parse_with_rules method "
f"instead."
)
def _parse(self, response: Response, **kwargs: Any) -> Any:
return self.parse_with_rules(
response=response,
callback=self.parse_start_url,
cb_kwargs=kwargs,
follow=True,
)
def parse_start_url(self, response: Response, **kwargs: Any) -> Any:
return []
def process_results(
self, response: Response, results: Iterable[Any]
) -> Iterable[Any]:
return results
def _build_request(self, rule_index: int, link: Link) -> Request:
return Request(
url=link.url,
callback=self._callback,
errback=self._errback,
meta={"rule": rule_index, "link_text": link.text},
)
def _requests_to_follow(self, response: Response) -> Iterable[Request | None]:
if not isinstance(response, HtmlResponse):
return
seen: set[Link] = set()
for rule_index, rule in enumerate(self._rules):
links: list[Link] = [
lnk
for lnk in rule.link_extractor.extract_links(response)
if lnk not in seen
]
for link in cast("ProcessLinksT", rule.process_links)(links):
seen.add(link)
request = self._build_request(rule_index, link)
yield cast("ProcessRequestT", rule.process_request)(request, response)
def _callback(self, response: Response, **cb_kwargs: Any) -> Any:
rule = self._rules[cast("int", response.meta["rule"])]
return self.parse_with_rules(
response,
cast("CallbackT", rule.callback),
{**rule.cb_kwargs, **cb_kwargs},
rule.follow,
)
def _errback(self, failure: Failure) -> Iterable[Any]:
rule = self._rules[cast("int", failure.request.meta["rule"])] # type: ignore[attr-defined]
return self._handle_failure(
failure, cast("Callable[[Failure], Any]", rule.errback)
)
async def parse_with_rules(
self,
response: Response,
callback: CallbackT | None,
cb_kwargs: dict[str, Any],
follow: bool = True,
) -> AsyncIterator[Any]:
if callback:
cb_res = callback(response, **cb_kwargs) or ()
if isinstance(cb_res, AsyncIterator):
cb_res = await collect_asyncgen(cb_res)
elif isinstance(cb_res, Awaitable):
cb_res = await cb_res
cb_res = self.process_results(response, cb_res)
for request_or_item in iterate_spider_output(cb_res):
yield request_or_item
if follow and self._follow_links:
for request_or_item in self._requests_to_follow(response):
yield request_or_item
def _parse_response(
self,
response: Response,
callback: CallbackT | None,
cb_kwargs: dict[str, Any],
follow: bool = True,
) -> AsyncIterator[Any]:
warnings.warn(
"The CrawlSpider._parse_response method is deprecated: "
"it will be removed in future Scrapy releases. "
"Please use the CrawlSpider.parse_with_rules method instead.",
stacklevel=2,
)
return self.parse_with_rules(response, callback, cb_kwargs, follow)
def _handle_failure(
self, failure: Failure, errback: Callable[[Failure], Any] | None
) -> Iterable[Any]:
if errback:
results = errback(failure) or ()
yield from iterate_spider_output(results)
def _compile_rules(self) -> None:
self._rules = []
for rule in self.rules:
self._rules.append(copy.copy(rule))
self._rules[-1]._compile(self)
@classmethod
def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any) -> Self:
spider = super().from_crawler(crawler, *args, **kwargs)
spider._follow_links = crawler.settings.getbool("CRAWLSPIDER_FOLLOW_LINKS")
return spider
| CrawlSpider |
python | encode__django-rest-framework | tests/test_views.py | {
"start": 1931,
"end": 2389
} | class ____(TestCase):
def setUp(self):
self.view = BasicView.as_view()
def test_400_parse_error(self):
request = factory.post('/', 'f00bar', content_type='application/json')
response = self.view(request)
expected = {
'detail': JSON_ERROR
}
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert sanitise_json_error(response.data) == expected
| ClassBasedViewIntegrationTests |
python | django__django | tests/schema/models.py | {
"start": 2498,
"end": 2763
} | class ____(models.Model):
author = models.OneToOneField(Author, models.CASCADE)
title = models.CharField(max_length=100, db_index=True)
pub_date = models.DateTimeField()
class Meta:
apps = new_apps
db_table = "schema_book"
| BookWithO2O |
python | django__django | tests/multiple_database/tests.py | {
"start": 97724,
"end": 98424
} | class ____(SimpleTestCase):
"""allow_relation() is called with unsaved model instances."""
databases = {"default", "other"}
router_prevents_msg = "the current database router prevents this relation"
def test_foreign_key_relation(self):
person = Person(name="Someone")
pet = Pet()
with self.assertRaisesMessage(ValueError, self.router_prevents_msg):
pet.owner = person
def test_reverse_one_to_one_relation(self):
user = User(username="Someone", password="fake_hash")
profile = UserProfile()
with self.assertRaisesMessage(ValueError, self.router_prevents_msg):
user.userprofile = profile
| RelationAssignmentTests |
python | pandas-dev__pandas | pandas/tests/test_common.py | {
"start": 337,
"end": 1048
} | class ____:
def fn(self, x):
return x
partial1 = partial(fn)
partial2 = partial(partial1)
lambda_ = lambda x: x
class SomeCall:
def __call__(self):
# This shouldn't actually get called below; SomeCall.__init__
# should.
raise NotImplementedError
@pytest.mark.parametrize(
"func, expected",
[
(fn, "fn"),
(partial1, "fn"),
(partial2, "fn"),
(lambda_, "<lambda>"),
(SomeCall(), "SomeCall"),
(1, None),
],
)
def test_get_callable_name(self, func, expected):
assert com.get_callable_name(func) == expected
| TestGetCallableName |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-braintree/source_braintree/schemas/dispute.py | {
"start": 373,
"end": 465
} | class ____(CatalogModel):
message: str
send_at: datetime
sender: str
| PaypalMessage |
python | Textualize__textual | docs/examples/guide/layout/grid_layout5_col_span.py | {
"start": 80,
"end": 560
} | class ____(App):
CSS_PATH = "grid_layout5_col_span.tcss"
def compose(self) -> ComposeResult:
yield Static("One", classes="box")
yield Static("Two [b](column-span: 2)", classes="box", id="two")
yield Static("Three", classes="box")
yield Static("Four", classes="box")
yield Static("Five", classes="box")
yield Static("Six", classes="box")
if __name__ == "__main__":
app = GridLayoutExample()
app.run()
| GridLayoutExample |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/result.py | {
"start": 2508,
"end": 6031
} | class ____:
"""Base for metadata about result rows."""
__slots__ = ()
_tuplefilter: Optional[_TupleGetterType] = None
_translated_indexes: Optional[Sequence[int]] = None
_unique_filters: Optional[Sequence[Callable[[Any], Any]]] = None
_keymap: _KeyMapType
_keys: Sequence[str]
_processors: Optional[_ProcessorsType]
_key_to_index: Dict[_KeyType, int]
@property
def keys(self) -> RMKeyView:
return RMKeyView(self)
def _has_key(self, key: object) -> bool:
raise NotImplementedError()
def _for_freeze(self) -> ResultMetaData:
raise NotImplementedError()
@overload
def _key_fallback(
self, key: Any, err: Optional[Exception], raiseerr: Literal[True] = ...
) -> NoReturn: ...
@overload
def _key_fallback(
self,
key: Any,
err: Optional[Exception],
raiseerr: Literal[False] = ...,
) -> None: ...
@overload
def _key_fallback(
self, key: Any, err: Optional[Exception], raiseerr: bool = ...
) -> Optional[NoReturn]: ...
def _key_fallback(
self, key: Any, err: Optional[Exception], raiseerr: bool = True
) -> Optional[NoReturn]:
assert raiseerr
raise KeyError(key) from err
def _raise_for_ambiguous_column_name(
self, rec: _KeyMapRecType
) -> NoReturn:
raise NotImplementedError(
"ambiguous column name logic is implemented for "
"CursorResultMetaData"
)
def _index_for_key(
self, key: _KeyIndexType, raiseerr: bool
) -> Optional[int]:
raise NotImplementedError()
def _indexes_for_keys(
self, keys: Sequence[_KeyIndexType]
) -> Sequence[int]:
raise NotImplementedError()
def _metadata_for_keys(
self, keys: Sequence[_KeyIndexType]
) -> Iterator[_KeyMapRecType]:
raise NotImplementedError()
def _reduce(self, keys: Sequence[_KeyIndexType]) -> ResultMetaData:
raise NotImplementedError()
def _getter(
self, key: Any, raiseerr: bool = True
) -> Optional[Callable[[Row[Unpack[TupleAny]]], Any]]:
index = self._index_for_key(key, raiseerr)
if index is not None:
return operator.itemgetter(index)
else:
return None
def _row_as_tuple_getter(
self, keys: Sequence[_KeyIndexType]
) -> _TupleGetterType:
indexes = self._indexes_for_keys(keys)
return tuplegetter(*indexes)
def _make_key_to_index(
self, keymap: Mapping[_KeyType, Sequence[Any]], index: int
) -> Dict[_KeyType, int]:
return {
key: rec[index]
for key, rec in keymap.items()
if rec[index] is not None
}
def _key_not_found(self, key: Any, attr_error: bool) -> NoReturn:
if key in self._keymap:
# the index must be none in this case
self._raise_for_ambiguous_column_name(self._keymap[key])
else:
# unknown key
if attr_error:
try:
self._key_fallback(key, None)
except KeyError as ke:
raise AttributeError(ke.args[0]) from ke
else:
self._key_fallback(key, None)
@property
def _effective_processors(self) -> Optional[_ProcessorsType]:
if not self._processors or NONE_SET.issuperset(self._processors):
return None
else:
return self._processors
| ResultMetaData |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_pgf.py | {
"start": 13573,
"end": 29488
} | class ____(RendererBase):
def __init__(self, figure, fh):
"""
Create a new PGF renderer that translates any drawing instruction
into text commands to be interpreted in a latex pgfpicture environment.
Attributes
----------
figure : `~matplotlib.figure.Figure`
Matplotlib figure to initialize height, width and dpi from.
fh : file-like
File handle for the output of the drawing commands.
"""
super().__init__()
self.dpi = figure.dpi
self.fh = fh
self.figure = figure
self.image_counter = 0
def draw_markers(self, gc, marker_path, marker_trans, path, trans,
rgbFace=None):
# docstring inherited
_writeln(self.fh, r"\begin{pgfscope}")
# convert from display units to in
f = 1. / self.dpi
# set style and clip
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
# build marker definition
bl, tr = marker_path.get_extents(marker_trans).get_points()
coords = bl[0] * f, bl[1] * f, tr[0] * f, tr[1] * f
_writeln(self.fh,
r"\pgfsys@defobject{currentmarker}"
r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{" % coords)
self._print_pgf_path(None, marker_path, marker_trans)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
_writeln(self.fh, r"}")
maxcoord = 16383 / 72.27 * self.dpi # Max dimensions in LaTeX.
clip = (-maxcoord, -maxcoord, maxcoord, maxcoord)
# draw marker for each vertex
for point, code in path.iter_segments(trans, simplify=False,
clip=clip):
x, y = point[0] * f, point[1] * f
_writeln(self.fh, r"\begin{pgfscope}")
_writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (x, y))
_writeln(self.fh, r"\pgfsys@useobject{currentmarker}{}")
_writeln(self.fh, r"\end{pgfscope}")
_writeln(self.fh, r"\end{pgfscope}")
def draw_path(self, gc, path, transform, rgbFace=None):
# docstring inherited
_writeln(self.fh, r"\begin{pgfscope}")
# draw the path
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
self._print_pgf_path(gc, path, transform, rgbFace)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
_writeln(self.fh, r"\end{pgfscope}")
# if present, draw pattern on top
if gc.get_hatch():
_writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_path_styles(gc, rgbFace)
# combine clip and path for clipping
self._print_pgf_clip(gc)
self._print_pgf_path(gc, path, transform, rgbFace)
_writeln(self.fh, r"\pgfusepath{clip}")
# build pattern definition
_writeln(self.fh,
r"\pgfsys@defobject{currentpattern}"
r"{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}{")
_writeln(self.fh, r"\begin{pgfscope}")
_writeln(self.fh,
r"\pgfpathrectangle"
r"{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}")
_writeln(self.fh, r"\pgfusepath{clip}")
scale = mpl.transforms.Affine2D().scale(self.dpi)
self._print_pgf_path(None, gc.get_hatch_path(), scale)
self._pgf_path_draw(stroke=True)
_writeln(self.fh, r"\end{pgfscope}")
_writeln(self.fh, r"}")
# repeat pattern, filling the bounding rect of the path
f = 1. / self.dpi
(xmin, ymin), (xmax, ymax) = \
path.get_extents(transform).get_points()
xmin, xmax = f * xmin, f * xmax
ymin, ymax = f * ymin, f * ymax
repx, repy = math.ceil(xmax - xmin), math.ceil(ymax - ymin)
_writeln(self.fh,
r"\pgfsys@transformshift{%fin}{%fin}" % (xmin, ymin))
for iy in range(repy):
for ix in range(repx):
_writeln(self.fh, r"\pgfsys@useobject{currentpattern}{}")
_writeln(self.fh, r"\pgfsys@transformshift{1in}{0in}")
_writeln(self.fh, r"\pgfsys@transformshift{-%din}{0in}" % repx)
_writeln(self.fh, r"\pgfsys@transformshift{0in}{1in}")
_writeln(self.fh, r"\end{pgfscope}")
def _print_pgf_clip(self, gc):
f = 1. / self.dpi
# check for clip box
bbox = gc.get_clip_rectangle()
if bbox:
p1, p2 = bbox.get_points()
w, h = p2 - p1
coords = p1[0] * f, p1[1] * f, w * f, h * f
_writeln(self.fh,
r"\pgfpathrectangle"
r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}"
% coords)
_writeln(self.fh, r"\pgfusepath{clip}")
# check for clip path
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
self._print_pgf_path(gc, clippath, clippath_trans)
_writeln(self.fh, r"\pgfusepath{clip}")
def _print_pgf_path_styles(self, gc, rgbFace):
# cap style
capstyles = {"butt": r"\pgfsetbuttcap",
"round": r"\pgfsetroundcap",
"projecting": r"\pgfsetrectcap"}
_writeln(self.fh, capstyles[gc.get_capstyle()])
# join style
joinstyles = {"miter": r"\pgfsetmiterjoin",
"round": r"\pgfsetroundjoin",
"bevel": r"\pgfsetbeveljoin"}
_writeln(self.fh, joinstyles[gc.get_joinstyle()])
# filling
has_fill = rgbFace is not None
if gc.get_forced_alpha():
fillopacity = strokeopacity = gc.get_alpha()
else:
strokeopacity = gc.get_rgb()[3]
fillopacity = rgbFace[3] if has_fill and len(rgbFace) > 3 else 1.0
if has_fill:
_writeln(self.fh,
r"\definecolor{currentfill}{rgb}{%f,%f,%f}"
% tuple(rgbFace[:3]))
_writeln(self.fh, r"\pgfsetfillcolor{currentfill}")
if has_fill and fillopacity != 1.0:
_writeln(self.fh, r"\pgfsetfillopacity{%f}" % fillopacity)
# linewidth and color
lw = gc.get_linewidth() * mpl_pt_to_in * latex_in_to_pt
stroke_rgba = gc.get_rgb()
_writeln(self.fh, r"\pgfsetlinewidth{%fpt}" % lw)
_writeln(self.fh,
r"\definecolor{currentstroke}{rgb}{%f,%f,%f}"
% stroke_rgba[:3])
_writeln(self.fh, r"\pgfsetstrokecolor{currentstroke}")
if strokeopacity != 1.0:
_writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % strokeopacity)
# line style
dash_offset, dash_list = gc.get_dashes()
if dash_list is None:
_writeln(self.fh, r"\pgfsetdash{}{0pt}")
else:
_writeln(self.fh,
r"\pgfsetdash{%s}{%fpt}"
% ("".join(r"{%fpt}" % dash for dash in dash_list),
dash_offset))
def _print_pgf_path(self, gc, path, transform, rgbFace=None):
f = 1. / self.dpi
# check for clip box / ignore clip for filled paths
bbox = gc.get_clip_rectangle() if gc else None
maxcoord = 16383 / 72.27 * self.dpi # Max dimensions in LaTeX.
if bbox and (rgbFace is None):
p1, p2 = bbox.get_points()
clip = (max(p1[0], -maxcoord), max(p1[1], -maxcoord),
min(p2[0], maxcoord), min(p2[1], maxcoord))
else:
clip = (-maxcoord, -maxcoord, maxcoord, maxcoord)
# build path
for points, code in path.iter_segments(transform, clip=clip):
if code == Path.MOVETO:
x, y = tuple(points)
_writeln(self.fh,
r"\pgfpathmoveto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CLOSEPOLY:
_writeln(self.fh, r"\pgfpathclose")
elif code == Path.LINETO:
x, y = tuple(points)
_writeln(self.fh,
r"\pgfpathlineto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CURVE3:
cx, cy, px, py = tuple(points)
coords = cx * f, cy * f, px * f, py * f
_writeln(self.fh,
r"\pgfpathquadraticcurveto"
r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}"
% coords)
elif code == Path.CURVE4:
c1x, c1y, c2x, c2y, px, py = tuple(points)
coords = c1x * f, c1y * f, c2x * f, c2y * f, px * f, py * f
_writeln(self.fh,
r"\pgfpathcurveto"
r"{\pgfqpoint{%fin}{%fin}}"
r"{\pgfqpoint{%fin}{%fin}}"
r"{\pgfqpoint{%fin}{%fin}}"
% coords)
# apply pgf decorators
sketch_params = gc.get_sketch_params() if gc else None
if sketch_params is not None:
# Only "length" directly maps to "segment length" in PGF's API.
# PGF uses "amplitude" to pass the combined deviation in both x-
# and y-direction, while matplotlib only varies the length of the
# wiggle along the line ("randomness" and "length" parameters)
# and has a separate "scale" argument for the amplitude.
# -> Use "randomness" as PRNG seed to allow the user to force the
# same shape on multiple sketched lines
scale, length, randomness = sketch_params
if scale is not None:
# make matplotlib and PGF rendering visually similar
length *= 0.5
scale *= 2
# PGF guarantees that repeated loading is a no-op
_writeln(self.fh, r"\usepgfmodule{decorations}")
_writeln(self.fh, r"\usepgflibrary{decorations.pathmorphing}")
_writeln(self.fh, r"\pgfkeys{/pgf/decoration/.cd, "
f"segment length = {(length * f):f}in, "
f"amplitude = {(scale * f):f}in}}")
_writeln(self.fh, f"\\pgfmathsetseed{{{int(randomness)}}}")
_writeln(self.fh, r"\pgfdecoratecurrentpath{random steps}")
def _pgf_path_draw(self, stroke=True, fill=False):
actions = []
if stroke:
actions.append("stroke")
if fill:
actions.append("fill")
_writeln(self.fh, r"\pgfusepath{%s}" % ",".join(actions))
def option_scale_image(self):
# docstring inherited
return True
def option_image_nocomposite(self):
# docstring inherited
return not mpl.rcParams['image.composite_image']
def draw_image(self, gc, x, y, im, transform=None):
# docstring inherited
h, w = im.shape[:2]
if w == 0 or h == 0:
return
if not os.path.exists(getattr(self.fh, "name", "")):
raise ValueError(
"streamed pgf-code does not support raster graphics, consider "
"using the pgf-to-pdf option")
# save the images to png files
path = pathlib.Path(self.fh.name)
fname_img = "%s-img%d.png" % (path.stem, self.image_counter)
Image.fromarray(im[::-1]).save(path.parent / fname_img)
self.image_counter += 1
# reference the image in the pgf picture
_writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_clip(gc)
f = 1. / self.dpi # from display coords to inch
if transform is None:
_writeln(self.fh,
r"\pgfsys@transformshift{%fin}{%fin}" % (x * f, y * f))
w, h = w * f, h * f
else:
tr1, tr2, tr3, tr4, tr5, tr6 = transform.frozen().to_values()
_writeln(self.fh,
r"\pgfsys@transformcm{%f}{%f}{%f}{%f}{%fin}{%fin}" %
(tr1 * f, tr2 * f, tr3 * f, tr4 * f,
(tr5 + x) * f, (tr6 + y) * f))
w = h = 1 # scale is already included in the transform
interp = str(transform is None).lower() # interpolation in PDF reader
_writeln(self.fh,
r"\pgftext[left,bottom]"
r"{%s[interpolate=%s,width=%fin,height=%fin]{%s}}" %
(_get_image_inclusion_command(),
interp, w, h, fname_img))
_writeln(self.fh, r"\end{pgfscope}")
def draw_tex(self, gc, x, y, s, prop, angle, *, mtext=None):
# docstring inherited
self.draw_text(gc, x, y, s, prop, angle, ismath="TeX", mtext=mtext)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# docstring inherited
# prepare string for tex
s = _escape_and_apply_props(s, prop)
_writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_clip(gc)
alpha = gc.get_alpha()
if alpha != 1.0:
_writeln(self.fh, r"\pgfsetfillopacity{%f}" % alpha)
_writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % alpha)
rgb = tuple(gc.get_rgb())[:3]
_writeln(self.fh, r"\definecolor{textcolor}{rgb}{%f,%f,%f}" % rgb)
_writeln(self.fh, r"\pgfsetstrokecolor{textcolor}")
_writeln(self.fh, r"\pgfsetfillcolor{textcolor}")
s = r"\color{textcolor}" + s
dpi = self.figure.dpi
text_args = []
if mtext and (
(angle == 0 or
mtext.get_rotation_mode() == "anchor") and
mtext.get_verticalalignment() != "center_baseline"):
# if text anchoring can be supported, get the original coordinates
# and add alignment information
pos = mtext.get_unitless_position()
x, y = mtext.get_transform().transform(pos)
halign = {"left": "left", "right": "right", "center": ""}
valign = {"top": "top", "bottom": "bottom",
"baseline": "base", "center": ""}
text_args.extend([
f"x={x/dpi:f}in",
f"y={y/dpi:f}in",
halign[mtext.get_horizontalalignment()],
valign[mtext.get_verticalalignment()],
])
else:
# if not, use the text layout provided by Matplotlib.
text_args.append(f"x={x/dpi:f}in, y={y/dpi:f}in, left, base")
if angle != 0:
text_args.append("rotate=%f" % angle)
_writeln(self.fh, r"\pgftext[%s]{%s}" % (",".join(text_args), s))
_writeln(self.fh, r"\end{pgfscope}")
def get_text_width_height_descent(self, s, prop, ismath):
# docstring inherited
# get text metrics in units of latex pt, convert to display units
w, h, d = (LatexManager._get_cached_or_new()
.get_width_height_descent(s, prop))
# TODO: this should be latex_pt_to_in instead of mpl_pt_to_in
# but having a little bit more space around the text looks better,
# plus the bounding box reported by LaTeX is VERY narrow
f = mpl_pt_to_in * self.dpi
return w * f, h * f, d * f
def flipy(self):
# docstring inherited
return False
def get_canvas_width_height(self):
# docstring inherited
return (self.figure.get_figwidth() * self.dpi,
self.figure.get_figheight() * self.dpi)
def points_to_pixels(self, points):
# docstring inherited
return points * mpl_pt_to_in * self.dpi
| RendererPgf |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/static_analysis/type_inference.py | {
"start": 1620,
"end": 4936
} | class ____(object):
"""Resolver objects handle the process of looking up actual names and types.
Unless noted otherwise, all resolve_* methods:
* have a first namespace argument, mapping string to actual values
* have a second types_namespace argument, mapping string to actual inferred
types
* specify names as QN objects
* specify types as a Set of inferred types
Unless noted otherwise, all resolve_* methods must return either:
* a set of `type` objects
* None
"""
def res_name(self, ns, types_ns, name):
"""Resolves the type/value an external (e.g. closure, global) variable.
Args:
ns: namespace
types_ns: types namespace
name: symbol name
Returns:
Tuple (type, static_value). The first element is the type to use for
inference. The second is the static value to use. Return None to treat it
as unknown.
"""
raise NotImplementedError('subclasses must implement')
def res_value(self, ns, value):
"""Resolves the type a literal or static value."""
raise NotImplementedError('subclasses must implement')
def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
"""Resolves the type of a (possibly annotated) function argument.
Args:
ns: namespace
types_ns: types namespace
f_name: str, the function name
name: str, the argument name
type_anno: the type annotating the argument, if any
f_is_local: bool, whether the function is a local function
Returns:
Set of the argument types.
"""
raise NotImplementedError('subclasses must implement')
def res_call(self, ns, types_ns, node, f_type, args, keywords):
"""Resolves the return type an external function or method call.
Args:
ns: namespace
types_ns: types namespace
node: str, the function name
f_type: types of the actual function being called, if known
args: types of each respective argument in node.args
keywords: types of each respective argument in node.keywords
Returns:
Tuple (return_type, side_effect_types). The first element is just the
return types of the function. The second element is a map from
argument names to sets of types, and allow modelling side effects of
functions (for example via global or nonlocal).
"""
raise NotImplementedError('subclasses must implement')
# TODO(mdan): Clean this up.
def res_slice(self, ns, types_ns, node_or_slice, value, slice_):
"""Resolves the return type of slice operation."""
raise NotImplementedError('subclasses must implement')
def res_compare(self, ns, types_ns, node, left, right):
"""Resolves the return type of a unary operation."""
raise NotImplementedError('subclasses must implement')
def res_unop(self, ns, types_ns, node, opnd):
"""Resolves the return type of a unary operation."""
raise NotImplementedError('subclasses must implement')
def res_binop(self, ns, types_ns, node, left, right):
"""Resolves the return type of a binary operation."""
raise NotImplementedError('subclasses must implement')
def res_list_literal(self, ns, elt_types):
"""Resolves the type of a list literal from its elements."""
raise NotImplementedError('subclasses must implement')
| Resolver |
python | sqlalchemy__sqlalchemy | test/sql/test_resultset.py | {
"start": 123815,
"end": 127585
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", INT, primary_key=True, autoincrement=False),
Column("user_name", VARCHAR(20)),
Column("x", Integer),
Column("y", Integer),
test_needs_acid=True,
)
Table(
"users_autoinc",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
def test_fetchall(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
{"user_id": 7, "user_name": "jack", "x": 1, "y": 2},
{"user_id": 8, "user_name": "ed", "x": 2, "y": 3},
{"user_id": 9, "user_name": "fred", "x": 15, "y": 20},
],
)
result = connection.execute(select(users).order_by(users.c.user_id))
eq_(
result.all(),
[(7, "jack", 1, 2), (8, "ed", 2, 3), (9, "fred", 15, 20)],
)
@testing.combinations(
((1, 0), [("jack", 7), ("ed", 8), ("fred", 9)]),
((3,), [(2,), (3,), (20,)]),
((-2, -1), [(1, 2), (2, 3), (15, 20)]),
argnames="columns, expected",
)
def test_columns(self, connection, columns, expected):
users = self.tables.users
connection.execute(
users.insert(),
[
{"user_id": 7, "user_name": "jack", "x": 1, "y": 2},
{"user_id": 8, "user_name": "ed", "x": 2, "y": 3},
{"user_id": 9, "user_name": "fred", "x": 15, "y": 20},
],
)
result = connection.execute(select(users).order_by(users.c.user_id))
all_ = result.columns(*columns).all()
eq_(all_, expected)
assert type(all_[0]) is Row
def test_columns_twice(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[{"user_id": 7, "user_name": "jack", "x": 1, "y": 2}],
)
result = connection.execute(select(users).order_by(users.c.user_id))
all_ = (
result.columns("x", "y", "user_name", "user_id")
.columns("user_name", "x")
.all()
)
eq_(all_, [("jack", 1)])
assert type(all_[0]) is Row
def test_columns_plus_getter(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[{"user_id": 7, "user_name": "jack", "x": 1, "y": 2}],
)
result = connection.execute(select(users).order_by(users.c.user_id))
result = result.columns("x", "y", "user_name")
getter = result._metadata._getter("y")
eq_(getter(result.first()), 2)
def test_partitions(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
{
"user_id": i,
"user_name": "user %s" % i,
"x": i * 5,
"y": i * 20,
}
for i in range(500)
],
)
result = connection.execute(select(users).order_by(users.c.user_id))
start = 0
for partition in result.columns(0, 1).partitions(20):
eq_(
partition,
[(i, "user %s" % i) for i in range(start, start + 20)],
)
start += 20
assert result._soft_closed
| GenerativeResultTest |
python | scipy__scipy | scipy/stats/tests/test_multivariate.py | {
"start": 120471,
"end": 122715
} | class ____:
@pytest.mark.parametrize("dim", [1, 3])
@pytest.mark.parametrize("size", [None, 1, 5, (5, 4)])
def test_samples(self, dim, size):
# test that samples have correct shape and norm 1
rng = np.random.default_rng(2777937887058094419)
uniform_direction_dist = uniform_direction(dim, seed=rng)
samples = uniform_direction_dist.rvs(size)
mean, cov = np.zeros(dim), np.eye(dim)
expected_shape = rng.multivariate_normal(mean, cov, size=size).shape
assert samples.shape == expected_shape
norms = np.linalg.norm(samples, axis=-1)
assert_allclose(norms, 1.)
@pytest.mark.parametrize("dim", [None, 0, (2, 2), 2.5])
def test_invalid_dim(self, dim):
message = ("Dimension of vector must be specified, "
"and must be an integer greater than 0.")
with pytest.raises(ValueError, match=message):
uniform_direction.rvs(dim)
def test_frozen_distribution(self):
dim = 5
frozen = uniform_direction(dim)
frozen_seed = uniform_direction(dim, seed=514)
rvs1 = frozen.rvs(random_state=514)
rvs2 = uniform_direction.rvs(dim, random_state=514)
rvs3 = frozen_seed.rvs()
assert_equal(rvs1, rvs2)
assert_equal(rvs1, rvs3)
@pytest.mark.parametrize("dim", [2, 5, 8])
def test_uniform(self, dim):
rng = np.random.default_rng(1036978481269651776)
spherical_dist = uniform_direction(dim, seed=rng)
# generate random, orthogonal vectors
v1, v2 = spherical_dist.rvs(size=2)
v2 -= v1 @ v2 * v1
v2 /= np.linalg.norm(v2)
assert_allclose(v1 @ v2, 0, atol=1e-14) # orthogonal
# generate data and project onto orthogonal vectors
samples = spherical_dist.rvs(size=10000)
s1 = samples @ v1
s2 = samples @ v2
angles = np.arctan2(s1, s2)
# test that angles follow a uniform distribution
# normalize angles to range [0, 1]
angles += np.pi
angles /= 2*np.pi
# perform KS test
uniform_dist = uniform()
kstest_result = kstest(angles, uniform_dist.cdf)
assert kstest_result.pvalue > 0.05
| TestUniformDirection |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 1479,
"end": 1874
} | class ____(BaseModel):
type: Literal["BearerAuthenticator"]
api_token: str = Field(
...,
description="Token to inject as request header for authenticating with the API.",
examples=["{{ config['api_key'] }}", "{{ config['token'] }}"],
title="Bearer Token",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| BearerAuthenticator |
python | keras-team__keras | keras/src/trainers/trainer_test.py | {
"start": 4282,
"end": 4549
} | class ____(Trainer, layers.Layer):
def __init__(self, **kwargs):
layers.Layer.__init__(self, **kwargs)
Trainer.__init__(self)
def call(self, x, training=False):
if training:
return x
return x * 0
| TrainingTestingLayer |
python | sqlalchemy__sqlalchemy | test/orm/test_dataclasses.py | {
"start": 15714,
"end": 21159
} | class ____(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
declarative = cls.DeclarativeBasic.registry.mapped
@dataclasses.dataclass
class WidgetDC:
__sa_dataclass_metadata_key__ = "sa"
widget_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
# fk on mixin
account_id: int = dataclasses.field(
init=False,
metadata={
"sa": lambda: Column(
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
)
},
)
has_a_default: str = dataclasses.field(
default="some default",
metadata={"sa": lambda: Column(String(50))},
)
@declarative
@dataclasses.dataclass
class Widget(WidgetDC):
__tablename__ = "widgets"
__sa_dataclass_metadata_key__ = "sa"
type = Column(String(30), nullable=False)
name: Optional[str] = dataclasses.field(
default=None,
metadata={"sa": Column(String(30), nullable=False)},
)
__mapper_args__ = dict(
polymorphic_on="type",
polymorphic_identity="normal",
)
@declarative
@dataclasses.dataclass
class SpecialWidget(Widget):
__tablename__ = "special_widgets"
__sa_dataclass_metadata_key__ = "sa"
special_widget_id: int = dataclasses.field(
init=False,
metadata={
"sa": Column(
ForeignKey("widgets.widget_id"), primary_key=True
)
},
)
magic: bool = dataclasses.field(
default=False, metadata={"sa": Column(Boolean)}
)
__mapper_args__ = dict(
polymorphic_identity="special",
)
@dataclasses.dataclass
class AccountDC:
__sa_dataclass_metadata_key__ = "sa"
# relationship on mixin
widgets: List[Widget] = dataclasses.field(
default_factory=list,
metadata={"sa": lambda: relationship("Widget")},
)
account_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
widget_count: int = dataclasses.field(
init=False,
metadata={
"sa": Column("widget_count", Integer, nullable=False)
},
)
@declarative
class Account(AccountDC):
__tablename__ = "accounts"
__sa_dataclass_metadata_key__ = "sa"
def __post_init__(self):
self.widget_count = len(self.widgets)
def add_widget(self, widget: Widget):
self.widgets.append(widget)
self.widget_count += 1
@declarative
@dataclasses.dataclass
class User:
__tablename__ = "user"
__sa_dataclass_metadata_key__ = "sa"
user_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
# fk w declared attr on mapped class
account_id: int = dataclasses.field(
init=False,
metadata={
"sa": lambda: Column(
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
)
},
)
cls.classes["Account"] = Account
cls.classes["Widget"] = Widget
cls.classes["User"] = User
cls.classes["SpecialWidget"] = SpecialWidget
def test_setup(self):
Account, Widget, User, SpecialWidget = self.classes(
"Account", "Widget", "User", "SpecialWidget"
)
assert "account_id" in Widget.__table__.c
assert list(Widget.__table__.c.account_id.foreign_keys)[0].references(
Account.__table__
)
assert inspect(Account).relationships.widgets.mapper is inspect(Widget)
assert "account_id" not in SpecialWidget.__table__.c
assert "has_a_default" in Widget.__table__.c
assert "has_a_default" not in SpecialWidget.__table__.c
assert "account_id" in User.__table__.c
assert list(User.__table__.c.account_id.foreign_keys)[0].references(
Account.__table__
)
def test_asdict_and_astuple_special_widget(self):
SpecialWidget = self.classes.SpecialWidget
widget = SpecialWidget(magic=True)
eq_(
dataclasses.asdict(widget),
{
"widget_id": None,
"account_id": None,
"has_a_default": "some default",
"name": None,
"special_widget_id": None,
"magic": True,
},
)
eq_(
dataclasses.astuple(widget),
(None, None, "some default", None, None, True),
)
| FieldEmbeddedMixinWLambdaTest |
python | mlflow__mlflow | mlflow/spacy/__init__.py | {
"start": 10826,
"end": 13749
} | class ____:
def __init__(self, spacy_model):
self.spacy_model = spacy_model
def get_raw_model(self):
"""
Returns the underlying model.
"""
return self.spacy_model
def predict(
self,
dataframe,
params: dict[str, Any] | None = None,
):
"""Only works for predicting using text categorizer.
Not suitable for other pipeline components (e.g: parser)
Args:
dataframe: pandas dataframe containing texts to be categorized
expected shape is (n_rows,1 column)
params: Additional parameters to pass to the model for inference.
Returns:
dataframe with predictions
"""
if len(dataframe.columns) != 1:
raise MlflowException("Shape of input dataframe must be (n_rows, 1column)")
return pd.DataFrame(
{"predictions": dataframe.iloc[:, 0].apply(lambda text: self.spacy_model(text).cats)}
)
def _load_pyfunc(path):
"""Load PyFunc implementation. Called by ``pyfunc.load_model``.
Args:
path: Local filesystem path to the MLflow Model with the ``spacy`` flavor.
"""
return _SpacyModelWrapper(_load_model(path))
def load_model(model_uri, dst_path=None):
"""Load a spaCy model from a local file (if ``run_id`` is ``None``) or a run.
Args:
model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
dst_path: The local filesystem path to which to download the model artifact.
This directory must already exist. If unspecified, a local output
path will be created.
Returns:
A spaCy loaded model
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
_add_code_from_conf_to_system_path(local_model_path, flavor_conf)
# Flavor configurations for models saved in MLflow version <= 0.8.0 may not contain a
# `data` key; in this case, we assume the model artifact path to be `model.spacy`
spacy_model_file_path = os.path.join(local_model_path, flavor_conf.get("data", "model.spacy"))
return _load_model(path=spacy_model_file_path)
| _SpacyModelWrapper |
python | fluentpython__example-code-2e | 09-closure-deco/clock/clockdeco_cls.py | {
"start": 366,
"end": 1037
} | class ____: # <1>
def __init__(self, fmt=DEFAULT_FMT): # <2>
self.fmt = fmt
def __call__(self, func): # <3>
def clocked(*_args):
t0 = time.perf_counter()
_result = func(*_args) # <4>
elapsed = time.perf_counter() - t0
name = func.__name__
args = ', '.join(repr(arg) for arg in _args)
result = repr(_result)
print(self.fmt.format(**locals()))
return _result
return clocked
# end::CLOCKDECO_CLS[]
if __name__ == '__main__':
@clock()
def snooze(seconds):
time.sleep(seconds)
for i in range(3):
snooze(.123)
| clock |
python | PrefectHQ__prefect | src/integrations/prefect-github/tests/test_graphql.py | {
"start": 912,
"end": 1645
} | class ____:
def __init__(self, error_key=None):
self.result = (
{error_key: "Errors encountered:"} if error_key else {"data": "success"}
)
def get_endpoint(self):
return lambda op, vars: self.result
def get_client(self):
return lambda op, vars: self.result
@pytest.mark.parametrize("error_key", ["errors", False])
def test_execute_graphql(error_key):
mock_credentials = MockCredentials(error_key=error_key)
@flow
def test_flow():
return execute_graphql("op", mock_credentials)
if error_key:
with pytest.raises(RuntimeError, match="Errors encountered:"):
test_flow()
else:
assert test_flow() == "success"
| MockCredentials |
python | huggingface__transformers | src/transformers/models/x_clip/modeling_x_clip.py | {
"start": 43192,
"end": 44758
} | class ____(nn.Module):
"""
This corresponds to the `MultiframeIntegrationTransformer` class in the original implementation.
"""
def __init__(self, config: XCLIPVisionConfig):
super().__init__()
self.position_embedding = nn.Parameter(torch.empty(1, config.num_frames, config.hidden_size))
self.encoder = XCLIPEncoder(config)
def forward(
self,
hidden_states,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
residual = hidden_states
# add position embeddings
hidden_states = hidden_states + self.position_embedding
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
last_hidden_state = last_hidden_state.type(hidden_states.dtype) + residual
pooled_output = last_hidden_state.mean(dim=1, keepdim=False)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
| XCLIPMultiframeIntegrationTransformer |
python | getsentry__sentry | src/sentry/identity/github/provider.py | {
"start": 881,
"end": 1963
} | class ____(OAuth2Provider):
key = IntegrationProviderSlug.GITHUB.value
name = "GitHub"
oauth_access_token_url = "https://github.com/login/oauth/access_token"
oauth_authorize_url = "https://github.com/login/oauth/authorize"
oauth_scopes = ()
def get_oauth_client_id(self):
return options.get("github-app.client-id")
def get_oauth_client_secret(self):
return options.get("github-app.client-secret")
def build_identity(self, data):
data = data["data"]
access_token = data.get("access_token")
if not access_token:
raise PermissionDenied()
user = get_user_info(access_token)
return {
"type": "github",
"id": user["id"],
"email": user["email"],
"email_verified": bool(user["email"]),
"login": user["login"],
"name": user["name"],
"company": user["company"],
"scopes": [], # GitHub apps do not have user scopes
"data": self.get_oauth_data(data),
}
| GitHubIdentityProvider |
python | Textualize__textual | tests/command_palette/test_declare_sources.py | {
"start": 712,
"end": 1080
} | class ____(AppWithActiveCommandPalette):
pass
async def test_no_app_command_sources() -> None:
"""An app with no sources declared should work fine."""
async with AppWithNoSources().run_test() as pilot:
assert isinstance(pilot.app.screen, CommandPalette)
assert pilot.app.screen._provider_classes == {SystemCommandsProvider}
| AppWithNoSources |
python | pytorch__pytorch | torchgen/api/autograd.py | {
"start": 9746,
"end": 38959
} | class ____:
func: NativeFunction
info: dict[str, DifferentiabilityInfo] | None
fw_derivatives: dict[str, Sequence[ForwardDerivative]] | None
# TODO: Update comment below since it is out of date.
def dispatch_strategy(fn: NativeFunctionWithDifferentiabilityInfo) -> str:
"""How are we going to call the underlying implementation of a
declaration? There are two strategies:
- use_derived: we want to call the implementation on CPUDoubleType
(or a similar, derived Type instance). Because these derived
instances deal in Tensors, not Variables (it's a completely different
object, so it doesn't dispatch back to VariableType), code on
this dispatch path needs to wrap/unwrap tensors. If the
derived implementation takes and returns tensors, the
implementation is usually differentiable (although we also use
the derived dispatch path for non-differentiable functions
that we still want to dispatch on the derived Type instance;
e.g., size())
- use_type: we want to call the implementation on Type, because
it is implemented concretely, and the functions it invokes will
get dispatched back to VariableType (which will ensure that they
are differentiable.)
"""
# fn is derived as long as any of its per-key differentiability infos
# has_derivatives. dispatch_strategy() is used to guard generation of fns in VariableType
# and ADInplaceOrViewType. We want to generate these functions as long as a
# derivative is defined for ANY dispatch key.
if fn.func.is_abstract or (
fn.info is not None and any(info.has_derivatives for info in fn.info.values())
):
# If the function is abstract (not implemented on at::Type), we must
# call the implementation on the derived type with unpacked tensors.
# If the function has a derivative specified and is concrete, we could
# call either implementation. We prefer the calling the derived
# type's implementation with unpacked tensors because it is more
# performant in some cases: any internal calls to other ATen functions
# won't have the history tracked.
# If the function has a type dispatched argument (i.e. is a factory),
# we prefer calling the derived type's implementation both because it is
# more performant and to ensure factory functions return tensors with _version
# of 0 (probably not strictly necessary, but nice to have to keeps versions simple
# to understand.
return "use_derived"
else:
# If the function is concrete (we don't have to override it) and we
# didn't declare it in derivatives.yaml, we'll assume that it is
# actually implemented out of differentiable functions. (This
# assumption might not hold, but then you'll see gradcheck fail.)
return "use_type"
def is_foreach_func(f: NativeFunction) -> bool:
return f.func.name.name.base.startswith("_foreach_")
# note(crcrpar): Most foreach functions can reference an out-place `torch` function whose schema kind
# is functional for their backward derivatives (and forward derivatives in the future), i.e.,
# they would find such one in `functional_info_by_signature`. There however are some exceptions:
_foreach_with_inplace_ref = {"_foreach_zero_"}
_foreach_with_tensor_overload = {
"_foreach_add.Tensor",
"_foreach_mul.Tensor",
"_foreach_div.Tensor",
}
# The following do not support the alpha kwarg, which the nonforeach versions support.
_skip_argument_len_check = {
"_foreach_add.Scalar",
"_foreach_add_.Scalar",
"_foreach_add.ScalarList",
"_foreach_add_.ScalarList",
"_foreach_sub.Scalar",
"_foreach_sub_.Scalar",
"_foreach_sub.ScalarList",
"_foreach_sub_.ScalarList",
}
# Checks if `function_schema` is a native, non-foreach function which `f`, a foreach function
# reference to generate derivatives.
def is_reference_for_foreach(
f: NativeFunction,
function_schema: FunctionSchema,
) -> bool:
return (
f.func.name.name.base.split("_foreach_")[-1] == function_schema.name.name.base
and (
not function_schema.name.name.inplace
or str(f.func.name) in _foreach_with_inplace_ref
)
and (
str(f.func.name) in _skip_argument_len_check
or len(f.func.arguments.flat_non_out)
== len(function_schema.arguments.flat_non_out)
)
and all(
ref_arg.type in (arg.type, getattr(arg.type, "elem", None))
for arg, ref_arg in zip(
f.func.arguments.flat_non_out,
function_schema.arguments.flat_non_out,
)
)
)
# TODO(crcrpar): Avoid hard coding "Default" ideally.
def gen_foreach_derivativeinfo(
foreach_function: NativeFunction,
functional_info_by_signature: dict[
FunctionSchema, dict[str, DifferentiabilityInfo]
],
non_functional_info_by_signature: dict[
FunctionSchema, dict[str, DifferentiabilityInfo]
],
dispatch_key: str = "Default",
) -> tuple[DifferentiabilityInfo | None, bool]:
"""Generate DifferentiabilityInfo for out-place foreach function, return the existing one for in-place.
The second return value indicates whether the info is generated in this function.
"""
ref_diff_info: DifferentiabilityInfo | None = None
for function_schema, diff_info in functional_info_by_signature.items():
if not is_reference_for_foreach(foreach_function, function_schema):
continue
ref_diff_info = diff_info[dispatch_key]
if ref_diff_info is not None:
break
# note(crcrpar): It seems like `zero`'s info isn't available in functional_info_by_signature
# while the info of `zero_` is in non_functional_info_by_signature
if (
ref_diff_info is None
and foreach_function.func.kind() == SchemaKind.inplace
and str(foreach_function.func.name) in _foreach_with_inplace_ref
):
for function_schema, diff_info in non_functional_info_by_signature.items():
if not is_reference_for_foreach(foreach_function, function_schema):
continue
ref_diff_info = diff_info[dispatch_key]
if ref_diff_info is not None:
break
if ref_diff_info is None:
return None, False
# non out-place uses the existing Derivative.
if foreach_function.func.kind() == SchemaKind.inplace:
return ref_diff_info, False
map_refarg2foreacharg, map_name2arg = {}, {}
for i, (arg, ref_arg) in enumerate(
zip(
foreach_function.func.arguments.flat_non_out,
function_schema.arguments.flat_non_out,
)
):
map_refarg2foreacharg[ref_arg.name] = arg.name
map_name2arg[arg.name] = arg
all_saved_inputs, all_saved_outputs, all_var_names = [], [], []
modified_derivative_formulas = []
for i, derivative in enumerate(ref_diff_info.derivatives):
modified_formula = derivative.formula.replace("grad", "grads[i]").replace(
"result", "result[i]"
)
saved_inputs, saved_outputs = [], []
# note(crcrpar): This context seems necessary to call `cpp.argument_type`
with local.parametrize(
use_const_ref_for_mutable_tensors=foreach_function.use_const_ref_for_mutable_tensors,
use_ilistref_for_tensor_lists=foreach_function.part_of_structured_group,
):
for ref_input in derivative.saved_inputs:
ref_input_jit_name = ref_input.expr.split(".")[0]
mapped_name = map_refarg2foreacharg[ref_input_jit_name]
if isinstance(map_name2arg[mapped_name].type, ListType):
mapped_expr = mapped_name + "[i]"
else:
mapped_expr = mapped_name
new_expr = ref_input.expr.replace(ref_input_jit_name, mapped_expr)
modified_formula = modified_formula.replace(
cast(str, ref_input.nctype.name), new_expr
)
nctype = cpp.argument_type(map_name2arg[mapped_name], binds=mapped_name)
canonical_nctype = NamedCType(
nctype.name, nctype.type.remove_const_ref()
)
saved_inputs.append(
SavedAttribute(nctype=canonical_nctype, expr=mapped_name)
)
for ref_output in derivative.saved_outputs:
if ref_output.nctype.name == "result":
saved_outputs.append(
SavedAttribute(
nctype=NamedCType(
name="result", type=BaseCType(tensorListT)
),
expr="result",
)
)
else:
raise RuntimeError("")
var_names = [map_refarg2foreacharg[var] for var in derivative.var_names]
all_var_names.extend(var_names)
all_saved_inputs.extend(saved_inputs)
all_saved_outputs.extend(saved_outputs)
modified_derivative = Derivative(
formula=modified_formula,
original_formula=derivative.formula,
var_names=tuple(var_names),
saved_inputs=tuple(saved_inputs),
saved_outputs=tuple(saved_outputs),
named_gradients=set(),
)
modified_derivative_formulas.append(modified_derivative)
with local.parametrize(
use_const_ref_for_mutable_tensors=foreach_function.use_const_ref_for_mutable_tensors,
use_ilistref_for_tensor_lists=foreach_function.part_of_structured_group,
):
args_with_derivatives = [
Binding(
name=arg.name,
nctype=cpp.argument_type(arg, binds=arg.name),
argument=arg,
default=None,
)
for arg in foreach_function.func.arguments.flat_non_out
if arg.name in all_var_names
]
forward_derivatives: list[ForwardDerivative] = []
fw_derivative: ForwardDerivative
for fw_derivative in ref_diff_info.forward_derivatives:
var_names: list[str] = list(fw_derivative.var_names) # type: ignore[no-redef]
var_types: list[Type] = list(fw_derivative.var_types)
required_inputs_fw_grad: list[str] = []
required_inputs_primal: list[str] = []
if fw_derivative.required_inputs_fw_grad is not None:
required_inputs_fw_grad = list(fw_derivative.required_inputs_fw_grad)
if fw_derivative.required_inputs_primal:
required_inputs_primal = list(fw_derivative.required_inputs_primal)
modified_formula = fw_derivative.formula
# Foreach's result is TensorList
if "result" in modified_formula:
modified_formula = fw_derivative.formula.replace("result", "result[i]")
for foreach_arg, ref_arg in zip(
foreach_function.func.arguments.flat_non_out,
ref_diff_info.func.func.arguments.flat_non_out,
):
# Modify reference forward formula
if (
isinstance(foreach_arg.type, ListType)
and not foreach_arg.type.is_tensor_like()
):
# Assuming ScalarList
modified_formula = modified_formula.replace(
ref_arg.name, foreach_arg.name + "[i]"
)
elif foreach_arg.type.is_tensor_like():
# Assuming TensorList / Tensor
# assert isinstance(foreach_arg.type, ListType), f"{foreach_function.func.name}, {foreach_arg.type}"
assert isinstance(foreach_arg.type, ListType) or (
foreach_arg.type == BaseType(BaseTy.Tensor)
and str(foreach_function.func.name) in _foreach_with_tensor_overload
), f"{foreach_function.func.name}, {foreach_arg.type}"
for suffix in ("_p", "_t"):
curr_expr = ref_arg.name + suffix
if curr_expr in modified_formula:
new_expr = foreach_arg.name + suffix
modified_formula = modified_formula.replace(curr_expr, new_expr)
else:
# Assuming Scalar
if foreach_arg.name != ref_arg.name:
modified_formula = modified_formula.replace(
ref_arg.name, foreach_arg.name
)
# note(crcrpar): there should exist a cooler way...
for i, name in enumerate(var_names):
if name == ref_arg.name:
var_names[i] = foreach_arg.name
var_types[i] = foreach_arg.type
for i, name in enumerate(required_inputs_fw_grad):
if name == ref_arg.name:
required_inputs_fw_grad[i] = foreach_arg.name
for i, name in enumerate(required_inputs_primal):
if name == ref_arg.name:
required_inputs_primal[i] = foreach_arg.name
forward_derivatives.append(
ForwardDerivative(
formula=modified_formula,
var_names=tuple(var_names),
var_types=tuple(var_types),
required_inputs_fw_grad=tuple(required_inputs_fw_grad),
required_inputs_primal=tuple(required_inputs_primal),
required_original_self_value=fw_derivative.required_original_self_value,
is_reusing_outplace_formula=fw_derivative.is_reusing_outplace_formula,
)
)
return (
DifferentiabilityInfo(
name=foreach_function.func.name.name.base,
func=foreach_function,
op=f"Foreach{ref_diff_info.op}{foreach_function.func.name.overload_name}",
derivatives=modified_derivative_formulas,
forward_derivatives=forward_derivatives,
all_saved_inputs=tuple(set(all_saved_inputs)),
all_saved_outputs=tuple(set(all_saved_outputs)),
available_named_gradients=(),
used_named_gradients=set(),
args_with_derivatives=args_with_derivatives,
non_differentiable_arg_names=[],
output_differentiability=None,
output_differentiability_conditions=None,
),
True,
)
def match_differentiability_info(
    native_functions: list[NativeFunction],
    differentiability_infos: dict[FunctionSchema, dict[str, DifferentiabilityInfo]],
) -> list[NativeFunctionWithDifferentiabilityInfo]:
    """Pair each native function with its matching autograd derivative info.

    Sets the "derivative" key on declarations to matching autograd function
    In-place functions will use the out-of-place derivative definition if there
    is no in-place specific derivative.

    Args:
        native_functions: parsed native functions to annotate, in order.
        differentiability_infos: derivative infos (from ``derivatives.yaml``),
            keyed by function schema, then by a string key (e.g. "Default").

    Returns:
        One ``NativeFunctionWithDifferentiabilityInfo`` per entry of
        ``native_functions``, in the same order.

    NOTE(review): for foreach functions whose info is generated on the fly,
    ``differentiability_infos`` (and the signature index below) is mutated
    in place — see step (4) in ``find_info``.
    """
    # Index infos by their signature with defaults stripped, split by schema
    # kind, so in-place/out variants can be matched to the functional formula
    # (and generated functional variants to a mutable formula).
    functional_info_by_signature = {
        schema.signature(strip_default=True): info_dict
        for schema, info_dict in differentiability_infos.items()
        if schema.kind() == SchemaKind.functional
    }
    non_functional_info_by_signature = {
        schema.signature(strip_default=True): info_dict
        for schema, info_dict in differentiability_infos.items()
        if schema.kind() != SchemaKind.functional
    }
    def find_info(
        f: NativeFunction,
    ) -> tuple[dict[str, DifferentiabilityInfo] | None, bool]:
        """Return ``(info_dict, is_exact_match)`` for ``f``.

        ``info_dict`` is ``None`` when no derivative info applies. The bool is
        True for an exact schema match (case (1)) or a freshly generated
        foreach info (case (4)); False when a sibling variant's info is reused.
        """
        # Don't bother matching info to generated out= variants
        if "generated" in f.tags and f.func.kind() == SchemaKind.out:
            return None, False
        # (1) Check for an exact match
        if f.func in differentiability_infos:
            return differentiability_infos[f.func], True
        # (2) If no exact match, check if the out-of-place variant
        # of this operator has a match.
        # i.e mul() for mul_() or mul_out()
        # note(crcrpar): Check foreach or not because in-place foreach functions use backward defined for the existing
        # native functions instead of the out-place counterparts.
        f_sig = f.func.signature(strip_default=True)
        if f_sig in functional_info_by_signature and not is_foreach_func(f):
            return functional_info_by_signature[f_sig], False
        # (3) Some operators have a derivative explicitly defined for the mutable
        # variant, but get a code-generated out-of-place variant which does *not*
        # come with a derivative formula.
        # For the generated out-of-place variant, use the mutable variant's formula
        # if it exists.
        if "generated" in f.tags and f_sig in non_functional_info_by_signature:
            info_dict = non_functional_info_by_signature[f_sig]
            # See https://github.com/pytorch/pytorch/pull/76320/files#r874816389
            # A mutable formula that saves "self" cannot be reused as-is, since
            # the functional variant has no pre-mutation "self" to save.
            assert not any(
                any("self" in str(input.nctype.name) for input in info.all_saved_inputs)
                for info in info_dict.values()
            ), f"""\
Attempted to convert a derivative formula for a mutable operator
 to be used by automatically by its functional variant ("{str(f.func)}").
 this is not currently supported (we'd need to fix up the formula in the codegen)."""
            return info_dict, False
        # (4) Generate derivative information of foreach functions if none is defined in `derivatives.yaml`
        if is_foreach_func(f):
            assert f.func not in differentiability_infos
            diff_info, is_generated = gen_foreach_derivativeinfo(
                f,
                functional_info_by_signature,
                non_functional_info_by_signature,
            )
            if diff_info is None:
                return None, False
            # TODO(crcrpar): Avoid hard coding "Default" ideally.
            diff_info_dict = {"Default": diff_info}
            if is_generated:
                # Memoize the generated info so later lookups (including the
                # exact match in step (1)) can find it directly.
                differentiability_infos[f.func] = diff_info_dict
                functional_info_by_signature[f.func] = diff_info_dict
            return diff_info_dict, is_generated
        return None, False
    result: list[NativeFunctionWithDifferentiabilityInfo] = []
    for f in native_functions:
        info_dict, is_exact_match = find_info(f)
        # Currently, the '.strides()' to 'strides_or_error' replacement does not support
        # 'self' derivatives of an inplace function, so we must check for this case.
        if f.func.kind() == SchemaKind.inplace and (info_dict is not None):
            for info in info_dict.values():
                for derivative in info.derivatives:
                    if "self" in derivative.var_names:
                        for saved_input in derivative.saved_inputs:
                            assert "strides_or_error" not in saved_input.expr, (
                                "Calling '.strides()' in the 'self' derivative formula of an "
                                f"in-place function is not supported: {f.func}"
                            )
        if not info_dict:
            # No matching derivative info: record the function without any.
            result.append(
                NativeFunctionWithDifferentiabilityInfo(
                    func=f, info=None, fw_derivatives=None
                )
            )
            continue
        # Build, per info key, the (possibly rewritten) forward derivatives.
        fw_derivative_dict: dict[str, Sequence[ForwardDerivative]] = {}
        for key, info in info_dict.items():
            if not info.forward_derivatives:
                fw_derivative_dict[key] = []
                continue
            forward_derivatives = info.forward_derivatives
            # For functions that have a single def for out-of-place and inplace (like abs())
            if f.func.kind() == SchemaKind.inplace:
                # For inplace functions there is a little bit of work to do:
                #  1) Validate the formula and make sure the input that is modified in not used:
                #    - If there is a formula for the inplace variant of the function (is_exact_match == True) then
                #      we make sure that the original value of the input that is being modified inplace (self_p) is
                #      not used in the formula. Note that the formula can use "original_self_p" here and that would
                #      trigger a clone of the original input.
                #    - If we are reusing the out of place formula (is_exact_match == False) then we replace every
                #      occurrence of self_p and self_t by original_self_p and original_self_t. These will be
                #      populated by cloned version of the original input (either the clone done by the backward AD
                #      logic if self is also used in a backward formula or a special clone that we add).
                #  2) At this point, there cannot be a self_p in the formula.
                #  3) Change "result" into "self_p" as by design, in the inplace function codegen, the result is
                #     simply called self (as it is modified inplace).
                #  4) Update the required primals data in case it used to contain "result" but should now contain
                #     "self"
                #  5) If it is not an exact match, the user formula is not modifying the existing forward grad
                #     inplace as it should. So add some code that makes sure that we do so if the forward grad
                #     already exists.
                assert (
                    len(info.forward_derivatives) == 1
                )  # Only single output inplace should exist
                fw_info = info.forward_derivatives[0]
                formula = fw_info.formula
                def replace_self_with_original_self(formula: str, postfix: str) -> str:
                    # Rewrite every identifier "self<postfix>" in the formula
                    # into "original_self<postfix>" (step 1, reuse case).
                    def repl(m: re.Match[str]) -> str:
                        return f"{m.group(1)}original_self{postfix}{m.group(2)}"
                    return re.sub(IDENT_REGEX.format(f"self{postfix}"), repl, formula)
                if re.search(IDENT_REGEX.format("self_p"), formula):
                    if is_exact_match:
                        # For manually defined formulas, don't allow the original value to be used
                        raise RuntimeError(
                            f'The formula for "{f.func.name}" is using the original value of self '
                            "that is being modified inplace. This would lead to wrong forward gradients. "
                            'Please use "result" in the formula only.'
                        )
                    else:
                        # When the original formula is out of place, we save a clone of the primal
                        # value to be able to access this value if needed
                        # replace "self_p"/"self_t" from the formula by "original_self_p"/"original_self_t"
                        formula = replace_self_with_original_self(formula, "_p")
                        formula = replace_self_with_original_self(formula, "_t")
                # replace "result" from the formula by "self_p"
                def repl(m: re.Match[str]) -> str:
                    return f"{m.group(1)}self_p{m.group(2)}"
                formula = re.sub(IDENT_REGEX.format("result"), repl, formula)
                # Step 4: "result" may have become "self_p" above, so "self"
                # must now be listed among the required primals.
                required_primals = fw_info.required_inputs_primal
                if re.search(IDENT_REGEX.format("self_p"), formula):
                    required_primals = (
                        required_primals + ("self",) if required_primals else ("self",)
                    )
                if not is_exact_match:
                    # NOTE [In-place forward AD formula Optimization]
                    #
                    # This optimization transforms the formula to directly do inplace, i.e.
                    # instead of self_t.copy_(self_t.op()) we do self_t.op_() when the following are met:
                    #
                    # 1) the formula satisfies the pattern: "self_t.op(*args)"
                    # 2) "op" in (1) needs to be the same as the op the derivative is for
                    #
                    # (2) may seem too strict, but currently the only ops that satisfy (1) also satisfy (2)
                    # If there is a need, we can relax (2) to allow any op that has an in-place variant
                    is_single_method_on_self_t = False
                    directly_do_inplace = False
                    op_name: str | None = None
                    between_parens: str | None = None
                    match = re.fullmatch(r"self_t.([\w]*)\((.*)\)", formula)
                    if match:
                        op_name, between_parens = match.group(1), match.group(2)
                        # We want to...
                        #   Match: self_t.op1(other_p.op2(arg))
                        #   Avoid: self_t.op1(args) + self_t.op2(args)
                        #   Avoid: self_t.op1(other_p.op2(arg)) + self_t.op2(args)
                        def check_parens_nest_level_gt_zero(s: str) -> bool:
                            # True iff the parens in `s` never close the outer
                            # call, i.e. `s` is a single argument list.
                            level = 1
                            for ch in s:
                                if ch == ")":
                                    level -= 1
                                    if level == 0:
                                        return False
                                if ch == "(":
                                    level += 1
                            return True
                        is_single_method_on_self_t = check_parens_nest_level_gt_zero(
                            between_parens
                        )
                        directly_do_inplace = (
                            is_single_method_on_self_t and op_name == info.name
                        )
                    if directly_do_inplace:
                        assert op_name is not None
                        assert between_parens is not None
                        formula = f"self_t_raw.defined() ? self_t_raw.{op_name}_({between_parens}) : {formula}"
                    else:
                        # Make sure that the forward grad is modified inplace when the original formula
                        # is out of place
                        formula = f"self_t_raw.defined() ? self_t_raw.copy_({formula}) : {formula}"
                required_original_self_value = bool(
                    re.search(IDENT_REGEX.format("original_self_p"), formula)
                ) or bool(re.search(IDENT_REGEX.format("original_self_t"), formula))
                forward_derivatives = [
                    ForwardDerivative(
                        formula=formula,
                        var_names=("self",),
                        var_types=fw_info.var_types,
                        required_inputs_fw_grad=fw_info.required_inputs_fw_grad,
                        required_inputs_primal=required_primals,
                        required_original_self_value=required_original_self_value,
                        is_reusing_outplace_formula=not is_exact_match,
                    ),
                ]
            fw_derivative_dict[key] = forward_derivatives
        result.append(
            NativeFunctionWithDifferentiabilityInfo(
                func=f, info=info_dict, fw_derivatives=fw_derivative_dict
            )
        )
    return result
def is_differentiable(
    name: str, type: Type, info: DifferentiabilityInfo | None
) -> bool:
    """Whether the value called ``name`` of the given ``type`` is differentiable.

    A value is differentiable when its type is tensor-like and, if derivative
    info is available, it is not listed among the non-differentiable args.
    """
    tensor_like = type.is_tensor_like()
    if not tensor_like:
        return tensor_like
    if info is None:
        return True
    return name not in info.non_differentiable_arg_names
def gen_differentiable_outputs(
    fn: NativeFunctionWithDifferentiabilityInfo, key: str = "Default"
) -> list[DifferentiableOutput]:
    """Return the outputs of ``fn`` that participate in autograd for ``key``.

    If the derivative info carries an explicit ``output_differentiability``
    mask, the mask selects the outputs (after validating its length and that
    no in-place op marks an output non-differentiable). Otherwise, outputs
    are filtered by ``is_differentiable``, truncated to the first one when
    the backward consumes a single grad.

    Raises:
        RuntimeError: on a mask length mismatch, or a ``False`` mask entry
            for an in-place operation.
    """
    func = fn.func
    info = fn.info[key] if fn.info else None
    # One DifferentiableOutput record per declared return.
    all_outputs: list[DifferentiableOutput] = []
    for out_name, ret in zip(cpp.return_names(func), func.func.returns):
        all_outputs.append(
            DifferentiableOutput(
                name=out_name,
                type=ret.type,
                cpp_type=cpp.return_type(ret, symint=True).cpp_type(),
            )
        )
    explicit_mask = info.output_differentiability if info else None
    if explicit_mask is not None:
        if len(explicit_mask) != len(all_outputs):
            raise RuntimeError(
                f"The length of output_differentiability ({len(explicit_mask)}), "
                f"does not match the number of outputs ({len(all_outputs)})."
            )
        if False in explicit_mask and func.func.kind() == SchemaKind.inplace:
            raise RuntimeError(
                "output_differentiability=False for inplace operation (version_counter won't get updated)"
            )
        return [out for flag, out in zip(explicit_mask, all_outputs) if flag]
    candidates = [
        out for out in all_outputs if is_differentiable(out.name, out.type, info)
    ]
    # A single-grad backward only ever differentiates the first output.
    return candidates[:1] if uses_single_grad(info) else candidates
| NativeFunctionWithDifferentiabilityInfo |
python | joke2k__faker | tests/providers/test_job.py | {
"start": 3589,
"end": 3840
} | class ____:
"""Test ka_GE job provider"""
def test_job(self, faker, num_samples):
for _ in range(num_samples):
job = faker.job()
assert isinstance(job, str)
assert job in KaGeJobProvider.jobs
| TestKaGe |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor20.py | {
"start": 266,
"end": 929
} | class ____(Generic[T]):
def add(self, a: T, b: T) -> T:
return a + b
int_adder: Adder[int] = Adder()
int_adder.add(1, 2)
# This should be an error because "adder"
# should be of type Adder[int].
int_adder.add("1", 2)
def requires_str_adder(str_adder: Adder[str]):
return str_adder
a = requires_str_adder(Adder())
print(a.add("1", "2"))
# This should be an error because the result
# of the call should be an Adder[str]
print(a.add(1, "2"))
generic_adder = Adder()
generic_adder.add(1, 2)
generic_adder.add("a", "b")
# Since the type has an Unknown type argument,
# the following should not generate an error.
generic_adder.add(1, "b")
| Adder |
python | google__jax | examples/ffi/tests/cpu_examples_test.py | {
"start": 771,
"end": 2166
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.test_device_matches(["cpu"]):
self.skipTest("Unsupported platform")
def test_array_attr(self):
self.assertEqual(cpu_examples.array_attr(5), jnp.arange(5).sum().astype(jnp.int32))
self.assertEqual(cpu_examples.array_attr(3), jnp.arange(3).sum().astype(jnp.int32))
def test_array_attr_jit_cache(self):
jit_array_attr = jax.jit(cpu_examples.array_attr, static_argnums=(0,))
with jtu.count_jit_and_pmap_lowerings() as count:
jit_array_attr(5)
self.assertEqual(count(), 1) # compiles once the first time
with jtu.count_jit_and_pmap_lowerings() as count:
jit_array_attr(5)
self.assertEqual(count(), 0) # cache hit
def test_array_attr_no_jit(self):
with jax.disable_jit():
cpu_examples.array_attr(5) # doesn't crash
def test_dictionary_attr(self):
secret, count = cpu_examples.dictionary_attr(secret=5)
self.assertEqual(secret, 5)
self.assertEqual(count, 1)
secret, count = cpu_examples.dictionary_attr(secret=3, a_string="hello")
self.assertEqual(secret, 3)
self.assertEqual(count, 2)
with self.assertRaisesRegex(Exception, "Unexpected attribute"):
cpu_examples.dictionary_attr()
with self.assertRaisesRegex(Exception, "Wrong attribute type"):
cpu_examples.dictionary_attr(secret="invalid")
| AttrsTests |
python | joke2k__faker | faker/providers/phone_number/ro_RO/__init__.py | {
"start": 49,
"end": 2484
} | class ____(PhoneNumberProvider):
formats = (
"021 ### ####",
"0231 ### ###",
"0232 ### ###",
"0233 ### ###",
"0234 ### ###",
"0235 ### ###",
"0236 ### ###",
"0237 ### ###",
"0238 ### ###",
"0239 ### ###",
"0240 ### ###",
"0241 ### ###",
"0242 ### ###",
"0243 ### ###",
"0244 ### ###",
"0245 ### ###",
"0246 ### ###",
"0247 ### ###",
"0248 ### ###",
"0249 ### ###",
"0250 ### ###",
"0251 ### ###",
"0252 ### ###",
"0253 ### ###",
"0254 ### ###",
"0255 ### ###",
"0256 ### ###",
"0257 ### ###",
"0258 ### ###",
"0259 ### ###",
"0260 ### ###",
"0261 ### ###",
"0262 ### ###",
"0263 ### ###",
"0264 ### ###",
"0265 ### ###",
"0266 ### ###",
"0267 ### ###",
"0268 ### ###",
"0269 ### ###",
"0786 ### ###",
"0760 ### ###",
"0761 ### ###",
"0762 ### ###",
"0763 ### ###",
"0764 ### ###",
"0765 ### ###",
"0766 ### ###",
"0767 ### ###",
"0785 ### ###",
"0768 ### ###",
"0769 ### ###",
"0784 ### ###",
"0770 ### ###",
"0772 ### ###",
"0771 ### ###",
"0749 ### ###",
"0750 ### ###",
"0751 ### ###",
"0752 ### ###",
"0753 ### ###",
"0754 ### ###",
"0755 ### ###",
"0756 ### ###",
"0757 ### ###",
"0758 ### ###",
"0759 ### ###",
"0748 ### ###",
"0747 ### ###",
"0746 ### ###",
"0740 ### ###",
"0741 ### ###",
"0742 ### ###",
"0743 ### ###",
"0744 ### ###",
"0745 ### ###",
"0711 ### ###",
"0727 ### ###",
"0725 ### ###",
"0724 ### ###",
"0726 ### ###",
"0723 ### ###",
"0722 ### ###",
"0721 ### ###",
"0720 ### ###",
"0728 ### ###",
"0729 ### ###",
"0730 ### ###",
"0739 ### ###",
"0738 ### ###",
"0737 ### ###",
"0736 ### ###",
"0735 ### ###",
"0734 ### ###",
"0733 ### ###",
"0732 ### ###",
"0731 ### ###",
"0780 ### ###",
"0788 ### ###",
)
| Provider |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_quote_name01.py | {
"start": 315,
"end": 1920
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("quote_name01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
# Test quoted/non-quoted sheet names.
worksheet = workbook.add_worksheet("Sheet 1")
chart = workbook.add_chart({"type": "pie"})
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": ["'Sheet 1'", 0, 0, 4, 0]})
worksheet.insert_chart("E6", chart, {"x_offset": 26, "y_offset": 17})
sheetnames = (
"Sheet 2",
"Sheet!3",
'Sheet"4',
"Sheet#5",
"Sheet$6",
"Sheet%7",
"Sheet&8",
)
for sheetname in sheetnames:
worksheet = workbook.add_worksheet(sheetname)
chart = workbook.add_chart({"type": "pie"})
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": [sheetname, 0, 0, 4, 0]})
worksheet.insert_chart("E6", chart, {"x_offset": 26, "y_offset": 17})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_W.py | {
"start": 3118,
"end": 4229
} | class ____(Benchmark):
r"""
Wayburn and Seader 1 objective function.
This class defines the Wayburn and Seader 1 [1]_ global optimization
problem. This is a unimodal minimization problem defined as follows:
.. math::
f_{\text{WayburnSeader01}}(x) = (x_1^6 + x_2^4 - 17)^2
+ (2x_1 + x_2 - 4)^2
with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 2]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
self.custom_bounds = ([-2, 2], [-2, 2])
self.global_optimum = [[1.0, 2.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 6 + x[1] ** 4 - 17) ** 2 + (2 * x[0] + x[1] - 4) ** 2
| WayburnSeader01 |
python | pytorch__pytorch | torch/_dynamo/variables/lists.py | {
"start": 29780,
"end": 36232
} | class ____(CommonListMethodsVariable):
def python_type(self) -> type:
return list
def __repr__(self) -> str:
return f"{self.__class__.__name__}(length={len(self.items)})"
def debug_repr(self) -> str:
return self.debug_repr_helper("[", "]")
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.foreach(self.items)
codegen.append_output(create_instruction("BUILD_LIST", arg=len(self.items)))
def call_method(
self,
tx: "InstructionTranslator",
name: str,
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
from .tensor import SymNodeVariable
if name == "__setitem__" and self.is_mutable():
if kwargs or len(args) != 2:
raise_args_mismatch(
tx,
name,
"2 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
key, value = args
if not key.is_python_constant():
# probably will graph-break
super().call_method(tx, name, args, kwargs)
tx.output.side_effects.mutation(self)
if isinstance(key, SliceVariable):
if not value.has_force_unpack_var_sequence(tx):
msg = ConstantVariable.create("can only assign an iterable")
raise_observed_exception(TypeError, tx, args=[msg])
key_as_const = key.as_python_constant()
if key_as_const.step == 0:
msg = ConstantVariable.create("slice step cannot be zero")
raise_observed_exception(ValueError, tx, args=[msg])
value_unpack = value.force_unpack_var_sequence(tx)
try:
self.items[key_as_const] = value_unpack
except Exception as exc:
raise_observed_exception(
type(exc),
tx,
args=list(map(ConstantVariable.create, exc.args)),
)
else:
if isinstance(key, SymNodeVariable):
key = key.evaluate_expr()
else:
key = key.as_python_constant()
try:
self.items[key] = value
except (IndexError, TypeError) as e:
raise_observed_exception(
type(e), tx, args=list(map(ConstantVariable.create, e.args))
)
return ConstantVariable.create(None)
if name == "sort" and self.is_mutable():
if len(args) != 0:
raise_args_mismatch(tx, name, "0 args", f"{len(args)} args")
key_fn_var = kwargs.pop("key", ConstantVariable.create(None))
reverse = kwargs.pop(
"reverse", ConstantVariable.create(False)
).as_python_constant()
if len(kwargs) != 0:
raise_args_mismatch(tx, name, "0 kwargs", f"{len(kwargs)} kwargs")
if (
key_fn_var.is_python_constant()
and key_fn_var.as_python_constant() is None
):
keys = self.items.copy()
else:
keys = [key_fn_var.call_function(tx, [x], {}) for x in self.items]
if not all(k.is_python_constant() for k in keys):
first_non_constant_key = None
for k in keys:
if not k.is_python_constant():
first_non_constant_key = k
assert first_non_constant_key is not None
try:
python_type = str(first_non_constant_key.python_type())
except NotImplementedError:
python_type = "unknown"
unimplemented(
gb_type="sort with non-constant keys",
context=str(first_non_constant_key),
explanation=(
f"Cannot perform sort with non-constant key. "
f"First non-constant key type: {python_type}. "
f"Most notably, we cannot sort with Tensor or SymInt keys, but we can "
f"sort ints."
),
hints=["Use something else as the key."],
)
tx.output.side_effects.mutation(self)
sorted_items_with_keys = sorted(
(
(
x,
k.as_python_constant(),
-i if reverse else i, # extra key to ensure stable sort
)
for i, (k, x) in enumerate(zip(keys, self.items))
),
key=operator.itemgetter(1, 2),
reverse=reverse,
)
self.items[:] = [x for x, *_ in sorted_items_with_keys]
return ConstantVariable.create(None)
if name == "__init__" and self.is_mutable():
if kwargs:
raise_args_mismatch(tx, name, "0 kwargs", f"{len(kwargs)} kwargs")
if len(args) == 0:
return ConstantVariable.create(None)
elif len(args) == 1 and args[0].has_force_unpack_var_sequence(tx):
(arg,) = args
tx.output.side_effects.mutation(self)
self.items[:] = arg.force_unpack_var_sequence(tx)
return ConstantVariable.create(None)
return super().call_method(tx, name, args, kwargs)
def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker:
if name == "__class__":
source = AttrSource(self.source, name) if self.source else None
class_type = self.python_type()
if class_type is list:
return variables.BuiltinVariable(class_type, source=source)
else:
return variables.UserDefinedClassVariable(class_type, source=source)
return super().var_getattr(tx, name)
def call_obj_hasattr(
self, tx: "InstructionTranslator", name: str
) -> ConstantVariable:
if self.python_type() is not list:
return super().call_obj_hasattr(tx, name)
return variables.ConstantVariable.create(hasattr([], name))
| ListVariable |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_shape_base.py | {
"start": 15959,
"end": 20268
} | class ____(TestCase):
@skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
def test_stack(self):
# non-iterable input
assert_raises(TypeError, stack, 1)
# 0d input
for input_ in [
(1, 2, 3),
[np.int32(1), np.int32(2), np.int32(3)],
[np.array(1), np.array(2), np.array(3)],
]:
assert_array_equal(stack(input_), [1, 2, 3])
# 1d input examples
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
r1 = array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(np.stack((a, b)), r1)
assert_array_equal(np.stack((a, b), axis=1), r1.T)
# all input types
assert_array_equal(np.stack([a, b]), r1)
assert_array_equal(np.stack(array([a, b])), r1)
# all shapes for 1d input
arrays = [np.random.randn(3) for _ in range(10)]
axes = [0, 1, -1, -2]
expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
assert_raises(AxisError, stack, arrays, axis=2)
assert_raises(AxisError, stack, arrays, axis=-3)
# all shapes for 2d input
arrays = [np.random.randn(3, 4) for _ in range(10)]
axes = [0, 1, 2, -1, -2, -3]
expected_shapes = [
(10, 3, 4),
(3, 10, 4),
(3, 4, 10),
(3, 4, 10),
(3, 10, 4),
(10, 3, 4),
]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
# empty arrays
assert stack([[], [], []]).shape == (3, 0)
assert stack([[], [], []], axis=1).shape == (0, 3)
# out
out = np.zeros_like(r1)
np.stack((a, b), out=out)
assert_array_equal(out, r1)
# edge cases
assert_raises(ValueError, stack, [])
assert_raises(ValueError, stack, [])
assert_raises((RuntimeError, ValueError), stack, [1, np.arange(3)])
assert_raises((RuntimeError, ValueError), stack, [np.arange(3), 1])
assert_raises((RuntimeError, ValueError), stack, [np.arange(3), 1], axis=1)
assert_raises(
(RuntimeError, ValueError), stack, [np.zeros((3, 3)), np.zeros(3)], axis=1
)
assert_raises((RuntimeError, ValueError), stack, [np.arange(2), np.arange(3)])
# generator is deprecated: numpy 1.24 emits a warning but we don't
# with assert_warns(FutureWarning):
result = stack(x for x in range(3))
assert_array_equal(result, np.array([0, 1, 2]))
# casting and dtype test
a = np.array([1, 2, 3])
b = np.array([2.5, 3.5, 4.5])
res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64)
expected_res = np.array([[1, 2], [2, 3], [3, 4]])
assert_array_equal(res, expected_res)
# casting and dtype with TypeError
with assert_raises(TypeError):
stack((a, b), dtype=np.int64, axis=1, casting="safe")
@skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
@parametrize("axis", [0])
@parametrize("out_dtype", ["c8", "f4", "f8", "i8"]) # torch does not have ">f8",
@parametrize("casting", ["no", "equiv", "safe", "same_kind", "unsafe"])
def test_stack_out_and_dtype(self, axis, out_dtype, casting):
to_concat = (array([1, 2]), array([3, 4]))
res = array([[1, 2], [3, 4]])
out = np.zeros_like(res)
if not np.can_cast(to_concat[0], out_dtype, casting=casting):
with assert_raises(TypeError):
stack(to_concat, dtype=out_dtype, axis=axis, casting=casting)
else:
res_out = stack(to_concat, out=out, axis=axis, casting=casting)
res_dtype = stack(to_concat, dtype=out_dtype, axis=axis, casting=casting)
assert res_out is out
assert_array_equal(out, res_dtype)
assert res_dtype.dtype == out_dtype
with assert_raises(TypeError):
stack(to_concat, out=out, dtype=out_dtype, axis=axis)
@xfail # (reason="TODO: implement block(...)")
@instantiate_parametrized_tests
| TestStackMisc |
python | ray-project__ray | rllib/utils/tests/test_tf_utils.py | {
"start": 1707,
"end": 2422
} | class ____:
def __init__(self):
# Uses a separate graph for each network.
with tf.Graph().as_default():
# Create the network.
loss, init, _, _ = make_linear_network()
sess = tf.Session()
# Additional code for setting and getting the weights.
variables = tf_utils.TensorFlowVariables(loss, sess)
# Return all of the data needed to use the network.
self.values = [variables, init, sess]
sess.run(init)
def set_and_get_weights(self, weights):
self.values[0].set_weights(weights)
return self.values[0].get_weights()
def get_weights(self):
return self.values[0].get_weights()
| NetActor |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 194220,
"end": 197055
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 3, 3]"):
l_x_ = L_x_
_saved_tensors_hooks_disable = torch._C._autograd._saved_tensors_hooks_disable("torch.func.{grad, vjp, jacrev, hessian} don't yet support saved tensor hooks. Please open an issue with your use case."); _saved_tensors_hooks_disable = None
_grad_increment_nesting = torch._C._functorch._grad_increment_nesting(); _grad_increment_nesting = None
diff_args: "f32[3, 3, 3]" = torch._C._functorch._wrap_for_grad(l_x_, 1); l_x_ = None
set_inplace_requires_grad_allowed = torch._C._functorch.set_inplace_requires_grad_allowed(True); set_inplace_requires_grad_allowed = None
_set_tensor_requires_grad: "f32[3, 3, 3]" = torch._functorch.eager_transforms._set_tensor_requires_grad(diff_args); _set_tensor_requires_grad = None
set_inplace_requires_grad_allowed_1 = torch._C._functorch.set_inplace_requires_grad_allowed(False); set_inplace_requires_grad_allowed_1 = None
sin: "f32[3, 3, 3]" = diff_args.sin()
sum_1: "f32[]" = sin.sum(); sin = None
output: "f32[]" = sum_1 + 3.0; sum_1 = None
_autograd_grad = torch._functorch.eager_transforms._autograd_grad((output,), [diff_args], create_graph = True); diff_args = None
grad_input: "f32[3, 3, 3]" = _autograd_grad[0]; _autograd_grad = None
grad_input_1: "f32[3, 3, 3]" = torch._C._functorch._unwrap_for_grad(grad_input, 1); grad_input = None
output_1: "f32[]" = torch._C._functorch._unwrap_for_grad(output, 1); output = output_1 = None
_grad_decrement_nesting = torch._C._functorch._grad_decrement_nesting(); _grad_decrement_nesting = None
_saved_tensors_hooks_enable = torch._C._autograd._saved_tensors_hooks_enable(); _saved_tensors_hooks_enable = None
return (grad_input_1,)
""",
)
def test_grad_fn_with_kwargs(self):
def fn(x, y):
return (x + y).sum()
def wrapper_fn(x, y):
return torch.func.grad(fn)(x, y=y)
x = torch.randn(3, 3)
y = torch.randn(3, 3)
actual = wrapper_fn(x, y)
expected = torch.compile(wrapper_fn, backend="aot_eager", fullgraph=False)(x, y)
self.assertEqual(len(counters["graph_break"]), 0)
self.assertEqual(actual, expected)
def test_jacfwd(self):
counters.clear()
def wrapper_fn(x):
return torch.func.jacfwd(torch.sin)(x)
x = torch.randn(4, 3)
wrapped_gm = self._compile_check(wrapper_fn, (x,))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | pypa__warehouse | tests/common/db/oidc.py | {
"start": 3371,
"end": 3852
} | class ____(WarehouseFactory):
class Meta:
model = PendingActiveStatePublisher
id = factory.Faker("uuid4", cast_to=None)
project_name = factory.Faker("pystr", max_chars=12)
organization = factory.Faker("pystr", max_chars=12)
activestate_project_name = factory.Faker("pystr", max_chars=12)
actor = factory.Faker("pystr", max_chars=12)
actor_id = factory.Faker("uuid4")
added_by = factory.SubFactory(UserFactory)
| PendingActiveStatePublisherFactory |
python | celery__celery | celery/backends/cassandra.py | {
"start": 1572,
"end": 9014
} | class ____(BaseBackend):
"""Cassandra/AstraDB backend utilizing DataStax driver.
Raises:
celery.exceptions.ImproperlyConfigured:
if module :pypi:`cassandra-driver` is not available,
or not-exactly-one of the :setting:`cassandra_servers` and
the :setting:`cassandra_secure_bundle_path` settings is set.
"""
#: List of Cassandra servers with format: ``hostname``.
servers = None
#: Location of the secure connect bundle zipfile (absolute path).
bundle_path = None
supports_autoexpire = True # autoexpire supported via entry_ttl
def __init__(self, servers=None, keyspace=None, table=None, entry_ttl=None,
port=None, bundle_path=None, **kwargs):
super().__init__(**kwargs)
if not cassandra:
raise ImproperlyConfigured(E_NO_CASSANDRA)
conf = self.app.conf
self.servers = servers or conf.get('cassandra_servers', None)
self.bundle_path = bundle_path or conf.get(
'cassandra_secure_bundle_path', None)
self.port = port or conf.get('cassandra_port', None) or 9042
self.keyspace = keyspace or conf.get('cassandra_keyspace', None)
self.table = table or conf.get('cassandra_table', None)
self.cassandra_options = conf.get('cassandra_options', {})
# either servers or bundle path must be provided...
db_directions = self.servers or self.bundle_path
if not db_directions or not self.keyspace or not self.table:
raise ImproperlyConfigured(E_CASSANDRA_NOT_CONFIGURED)
# ...but not both:
if self.servers and self.bundle_path:
raise ImproperlyConfigured(E_CASSANDRA_MISCONFIGURED)
expires = entry_ttl or conf.get('cassandra_entry_ttl', None)
self.cqlexpires = (
Q_EXPIRES.format(expires) if expires is not None else '')
read_cons = conf.get('cassandra_read_consistency') or 'LOCAL_QUORUM'
write_cons = conf.get('cassandra_write_consistency') or 'LOCAL_QUORUM'
self.read_consistency = getattr(
cassandra.ConsistencyLevel, read_cons,
cassandra.ConsistencyLevel.LOCAL_QUORUM)
self.write_consistency = getattr(
cassandra.ConsistencyLevel, write_cons,
cassandra.ConsistencyLevel.LOCAL_QUORUM)
self.auth_provider = None
auth_provider = conf.get('cassandra_auth_provider', None)
auth_kwargs = conf.get('cassandra_auth_kwargs', None)
if auth_provider and auth_kwargs:
auth_provider_class = getattr(cassandra.auth, auth_provider, None)
if not auth_provider_class:
raise ImproperlyConfigured(E_NO_SUCH_CASSANDRA_AUTH_PROVIDER)
self.auth_provider = auth_provider_class(**auth_kwargs)
self._cluster = None
self._session = None
self._write_stmt = None
self._read_stmt = None
self._lock = threading.RLock()
def _get_connection(self, write=False):
"""Prepare the connection for action.
Arguments:
write (bool): are we a writer?
"""
if self._session is not None:
return
self._lock.acquire()
try:
if self._session is not None:
return
# using either 'servers' or 'bundle_path' here:
if self.servers:
self._cluster = cassandra.cluster.Cluster(
self.servers, port=self.port,
auth_provider=self.auth_provider,
**self.cassandra_options)
else:
# 'bundle_path' is guaranteed to be set
self._cluster = cassandra.cluster.Cluster(
cloud={
'secure_connect_bundle': self.bundle_path,
},
auth_provider=self.auth_provider,
**self.cassandra_options)
self._session = self._cluster.connect(self.keyspace)
# We're forced to do concatenation below, as formatting would
# blow up on superficial %s that'll be processed by Cassandra
self._write_stmt = cassandra.query.SimpleStatement(
Q_INSERT_RESULT.format(
table=self.table, expires=self.cqlexpires),
)
self._write_stmt.consistency_level = self.write_consistency
self._read_stmt = cassandra.query.SimpleStatement(
Q_SELECT_RESULT.format(table=self.table),
)
self._read_stmt.consistency_level = self.read_consistency
if write:
# Only possible writers "workers" are allowed to issue
# CREATE TABLE. This is to prevent conflicting situations
# where both task-creator and task-executor would issue it
# at the same time.
# Anyway; if you're doing anything critical, you should
# have created this table in advance, in which case
# this query will be a no-op (AlreadyExists)
make_stmt = cassandra.query.SimpleStatement(
Q_CREATE_RESULT_TABLE.format(table=self.table),
)
make_stmt.consistency_level = self.write_consistency
try:
self._session.execute(make_stmt)
except cassandra.AlreadyExists:
pass
except cassandra.OperationTimedOut:
# a heavily loaded or gone Cassandra cluster failed to respond.
# leave this class in a consistent state
if self._cluster is not None:
self._cluster.shutdown() # also shuts down _session
self._cluster = None
self._session = None
raise # we did fail after all - reraise
finally:
self._lock.release()
def _store_result(self, task_id, result, state,
traceback=None, request=None, **kwargs):
"""Store return value and state of an executed task."""
self._get_connection(write=True)
self._session.execute(self._write_stmt, (
task_id,
state,
buf_t(self.encode(result)),
self.app.now(),
buf_t(self.encode(traceback)),
buf_t(self.encode(self.current_task_children(request)))
))
def as_uri(self, include_password=True):
return 'cassandra://'
def _get_task_meta_for(self, task_id):
"""Get task meta-data for a task by id."""
self._get_connection()
res = self._session.execute(self._read_stmt, (task_id, )).one()
if not res:
return {'status': states.PENDING, 'result': None}
status, result, date_done, traceback, children = res
return self.meta_from_decoded({
'task_id': task_id,
'status': status,
'result': self.decode(result),
'date_done': date_done,
'traceback': self.decode(traceback),
'children': self.decode(children),
})
def __reduce__(self, args=(), kwargs=None):
kwargs = {} if not kwargs else kwargs
kwargs.update(
{'servers': self.servers,
'keyspace': self.keyspace,
'table': self.table})
return super().__reduce__(args, kwargs)
| CassandraBackend |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/map_defun_op_test.py | {
"start": 1799,
"end": 14116
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(_test_combinations())
def testNoIntraOpLimit(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(
simple_fn, [elems], [dtypes.int32], [(2,)],
max_intra_op_parallelism=0)[0]
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
@combinations.generate(_test_combinations())
def testMapDefunSimple(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(2,)])[0]
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
@combinations.generate(_test_combinations())
def testMapDefunMismatchedTypes(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
return math_ops.cast(x, dtypes.float64)
nums = [1, 2, 3, 4, 5, 6]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0]
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(r)
@combinations.generate(_test_combinations())
def testMapDefunReduceDim(self):
# Tests where the output has a different rank from the input
@def_function.function(
input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return array_ops.gather(x, 0)
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0]
expected = constant_op.constant([1, 3, 5])
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
@combinations.generate(_test_combinations())
def testMapDefunMultipleOutputs(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return (x, math_ops.cast(x * 2 + 3, dtypes.float64))
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32, dtypes.float64], [(2,),
(2,)])
expected = [elems, elems * 2 + 3]
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
@combinations.generate(_test_combinations())
def testMapDefunShapeInference(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return x
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)])[0]
self.assertEqual(result.get_shape(), (3, 2))
@combinations.generate(_test_combinations())
def testMapDefunPartialShapeInference(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return x
elems = array_ops.placeholder(dtypes.int64, (None, 2))
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)])
self.assertEqual(result[0].get_shape().as_list(), [None, 2])
@combinations.generate(_test_combinations())
def testMapDefunRaisesErrorOnRuntimeShapeMismatch(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.int32),
tensor_spec.TensorSpec(None, dtypes.int32)
])
def fn(x, y):
return x, y
elems1 = array_ops.placeholder(dtypes.int32)
elems2 = array_ops.placeholder(dtypes.int32)
result = map_defun.map_defun(fn, [elems1, elems2],
[dtypes.int32, dtypes.int32], [(), ()])
with self.cached_session() as sess:
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"All inputs must have the same dimension 0."):
sess.run(result, feed_dict={elems1: [1, 2, 3, 4, 5], elems2: [1, 2, 3]})
@combinations.generate(_test_combinations())
def testMapDefunRaisesDefunError(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, 0)]):
return array_ops.identity(x)
elems = constant_op.constant([0, 0, 0, 37, 0])
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(result)
@combinations.generate(_test_combinations())
def testMapDefunCancelledCorrectly(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([5], dtypes.int64)])
def defun(x):
# x has leading dimension 5, this will raise an error
return array_ops.gather(x, 10)
c = array_ops.tile(
array_ops.expand_dims(
constant_op.constant([1, 2, 3, 4, 5], dtype=dtypes.int64), 0),
[100, 1])
map_defun_op = map_defun.map_defun(defun, [c], [dtypes.int64], [()])[0]
with self.assertRaisesRegex(errors.InvalidArgumentError,
r"indices = 10 is not in \[0, 5\)"):
self.evaluate(map_defun_op)
@combinations.generate(_test_combinations())
def testMapDefunWithUnspecifiedOutputShape(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
res = x * 2 + 3
return (res, res + 1, res + 2)
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems],
[dtypes.int32, dtypes.int32, dtypes.int32],
[None, (None,), (2,)])
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r[0]), self.evaluate(expected))
self.assertAllEqual(self.evaluate(r[1]), self.evaluate(expected + 1))
self.assertAllEqual(self.evaluate(r[2]), self.evaluate(expected + 2))
@combinations.generate(_test_combinations())
def testMapDefunWithDifferentOutputShapeEachRun(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
elems = array_ops.placeholder(dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [None])[0]
with session.Session() as sess:
self.assertAllEqual(sess.run(r, feed_dict={elems: [0]}), [3])
self.assertAllEqual(
sess.run(r, feed_dict={elems: [[0], [1]]}), [[3], [5]])
@combinations.generate(_test_combinations())
def testMapDefunWithWrongOutputShape(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(1,)])[0]
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(r)
@combinations.generate(_test_combinations())
def testMapDefunWithInvalidInput(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def simple_fn(x):
return x * 2
c = constant_op.constant(2)
with self.assertRaises(ValueError):
# Fails at graph construction time for inputs with known shapes.
r = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [None])[0]
p = array_ops.placeholder(dtypes.int32)
r = map_defun.map_defun(simple_fn, [p], [dtypes.int32], [None])[0]
with session.Session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(r, feed_dict={p: 0})
@combinations.generate(_test_combinations())
def testMapDefunWithParentCancellation(self):
# Checks that a cancellation of the parent graph is threaded through to
# MapDefunOp correctly.
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def simple_fn(x):
del x
queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())
# Blocking
return queue.dequeue_many(5)
c = constant_op.constant([1, 2, 3, 4, 5])
map_defun_op = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [()])[0]
with self.cached_session() as sess:
thread = self.checkedThread(
self.assert_op_cancelled, args=(map_defun_op,))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
@combinations.generate(_test_combinations())
def testMapDefunWithCapturedInputs(self):
c = constant_op.constant(2)
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
return x + c
x = constant_op.constant([1, 2, 3, 4])
map_defun_op = map_defun.map_defun(fn, [x], [dtypes.int32], [()])[0]
expected = x + c
self.assertAllEqual(self.evaluate(expected), self.evaluate(map_defun_op))
@combinations.generate(_test_combinations())
def testMapDefunWithVariantTensor(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.variant)])
def fn(x):
return x
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant)
serialized = array_ops_stack.stack([serialized, serialized])
map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.variant],
[None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertValuesEqual(expected, actual)
@combinations.generate(_test_combinations())
def testMapDefunWithVariantTensorAsCaptured(self):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant)
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
del x
return serialized
x = constant_op.constant([0, 0])
map_defun_op = map_defun.map_defun(fn, [x], [dtypes.variant], [None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertValuesEqual(expected, actual)
@combinations.generate(_test_combinations())
def testMapDefunWithStrTensor(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def fn(x):
return x
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.string)
serialized = array_ops_stack.stack([serialized, serialized])
map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.string],
[None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertValuesEqual(expected, actual)
if __name__ == "__main__":
test.main()
| MapDefunTest |
python | fluentpython__example-code-2e | 08-def-type-hints/coordinates/coordinates_named.py | {
"start": 270,
"end": 817
} | class ____(NamedTuple):
lat: float
lon: float
def geohash(lat_lon: Coordinate) -> str:
return gh.encode(*lat_lon, PRECISION)
# end::GEOHASH[]
# tag::DISPLAY[]
def display(lat_lon: tuple[float, float]) -> str:
lat, lon = lat_lon
ns = 'N' if lat >= 0 else 'S'
ew = 'E' if lon >= 0 else 'W'
return f'{abs(lat):0.1f}°{ns}, {abs(lon):0.1f}°{ew}'
# end::DISPLAY[]
def demo():
shanghai = 31.2304, 121.4737
print(display(shanghai))
s = geohash(shanghai)
print(s)
if __name__ == '__main__':
demo()
| Coordinate |
python | weaviate__weaviate-python-client | weaviate/collections/queries/fetch_objects/query/executor.py | {
"start": 769,
"end": 7933
} | class ____(
Generic[ConnectionType, Properties, References], _BaseExecutor[ConnectionType]
):
@overload
def fetch_objects(
self,
*,
limit: Optional[int] = None,
offset: Optional[int] = None,
after: Optional[UUID] = None,
filters: Optional[_Filters] = None,
sort: Optional[Sorting] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Literal[None] = None,
) -> executor.Result[QueryReturn[Properties, References]]: ...
@overload
def fetch_objects(
self,
*,
limit: Optional[int] = None,
offset: Optional[int] = None,
after: Optional[UUID] = None,
filters: Optional[_Filters] = None,
sort: Optional[Sorting] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: REFERENCES,
) -> executor.Result[QueryReturn[Properties, CrossReferences]]: ...
@overload
def fetch_objects(
self,
*,
limit: Optional[int] = None,
offset: Optional[int] = None,
after: Optional[UUID] = None,
filters: Optional[_Filters] = None,
sort: Optional[Sorting] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Type[TReferences],
) -> executor.Result[QueryReturn[Properties, TReferences]]: ...
@overload
def fetch_objects(
self,
*,
limit: Optional[int] = None,
offset: Optional[int] = None,
after: Optional[UUID] = None,
filters: Optional[_Filters] = None,
sort: Optional[Sorting] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Literal[None] = None,
) -> executor.Result[QueryReturn[TProperties, References]]: ...
@overload
def fetch_objects(
self,
*,
limit: Optional[int] = None,
offset: Optional[int] = None,
after: Optional[UUID] = None,
filters: Optional[_Filters] = None,
sort: Optional[Sorting] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: REFERENCES,
) -> executor.Result[QueryReturn[TProperties, CrossReferences]]: ...
@overload
def fetch_objects(
self,
*,
limit: Optional[int] = None,
offset: Optional[int] = None,
after: Optional[UUID] = None,
filters: Optional[_Filters] = None,
sort: Optional[Sorting] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Type[TReferences],
) -> executor.Result[QueryReturn[TProperties, TReferences]]: ...
@overload
def fetch_objects(
self,
*,
limit: Optional[int] = None,
offset: Optional[int] = None,
after: Optional[UUID] = None,
filters: Optional[_Filters] = None,
sort: Optional[Sorting] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[QueryReturnType[Properties, References, TProperties, TReferences]]: ...
def fetch_objects(
self,
*,
limit: Optional[int] = None,
offset: Optional[int] = None,
after: Optional[UUID] = None,
filters: Optional[_Filters] = None,
sort: Optional[Sorting] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[QueryReturnType[Properties, References, TProperties, TReferences]]:
"""Retrieve the objects in this collection without any search.
Args:
limit: The maximum number of results to return. If not specified, the default limit specified by the server is returned.
offset: The offset to start from. If not specified, the retrieval begins from the first object in the server.
after: The UUID of the object to start from. If not specified, the retrieval begins from the first object in the server.
filters: The filters to apply to the retrieval.
sort: The sorting to apply to the retrieval.
include_vector: Whether to include the vector in the results. If not specified, this is set to False.
return_metadata: The metadata to return for each object, defaults to `None`.
return_properties: The properties to return for each object.
return_references: The references to return for each object.
NOTE:
- If `return_properties` is not provided then all properties are returned except for blob properties.
- If `return_metadata` is not provided then no metadata is provided. Use MetadataQuery.full() to retrieve all metadata.
- If `return_references` is not provided then no references are provided.
Returns:
A `QueryReturn` object that includes the searched objects.
Raises:
weaviate.exceptions.WeaviateGRPCQueryError: If the network connection to Weaviate fails.
"""
def resp(
res: search_get_pb2.SearchReply,
) -> QueryReturnType[Properties, References, TProperties, TReferences]:
return cast(
Any,
self._result_to_query_return(
res,
_QueryOptions.from_input(
return_metadata,
return_properties,
include_vector,
self._references,
return_references,
),
),
)
request = self._query.get(
limit=limit,
offset=offset,
after=after,
filters=filters,
sort=sort,
return_metadata=self._parse_return_metadata(return_metadata, include_vector),
return_properties=self._parse_return_properties(return_properties),
return_references=self._parse_return_references(cast(Any, return_references)),
)
return executor.execute(
response_callback=resp,
method=self._connection.grpc_search,
request=request,
)
| _FetchObjectsQueryExecutor |
python | pytorch__pytorch | torch/_inductor/codegen/cuda/cuda_kernel.py | {
"start": 1619,
"end": 1850
} | class ____:
node: IRNode
symbol: ValidLayoutSymbols
attr: ValidLayoutAttrs
dim: int
def matches(self, node, attr, dim) -> bool:
return self.node == node and self.attr == attr and self.dim == dim
| LayoutArg |
python | huggingface__transformers | tests/models/qwen3_next/test_modeling_qwen3_next.py | {
"start": 1768,
"end": 8821
} | class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = Qwen3NextModelTester
def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
"Qwen3-Next has a special Cache as it alternates with gated deltanet layers"
self.assertIsInstance(past_key_values, Qwen3NextDynamicCache)
# (batch, kv heads, seq_length, head_dim)
num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
expected_shape = (batch_size, num_heads, seq_length, head_dim)
attention_layer_indices = past_key_values.transformer_layers
self.assertListEqual(
[past_key_values.key_cache[idx].shape for idx in attention_layer_indices],
[expected_shape] * len(attention_layer_indices),
)
self.assertListEqual(
[past_key_values.value_cache[idx].shape for idx in attention_layer_indices],
[expected_shape] * len(attention_layer_indices),
)
def _check_caches_are_equal(self, cache1: Cache, cache2: Cache):
"Qwen3-Next has a special Cache as it alternates with gated deltanet layers"
if not len(cache1) == len(cache2):
raise ValueError("Both caches do not have the same number of layers.")
num_layers = len(cache1)
for idx in range(num_layers):
if cache1.key_cache[idx] is not None:
torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx])
torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx])
def test_attention_outputs(self):
"Needs to be overwritten as Qwen3-Next alternates between attention layers and gated deltanet layers."
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# force eager attention to support output attentions
config._attn_implementation = "eager"
seq_len = getattr(self.model_tester, "seq_length", None)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))
self.assertListEqual(list(attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self_attentions = outputs.attentions
self.assertEqual(out_len + 1, len(outputs))
self.assertEqual(len(self_attentions), sum(layer == "full_attention" for layer in config.layer_types))
self.assertListEqual(list(self_attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])
@parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
def test_eager_matches_sdpa_inference(
self,
name,
dtype,
padding_side,
use_attention_mask,
output_attentions,
enable_kernels,
):
"""
We need to overwrite this without the fp16 part of the dtype, because the slow path `torch_chunk_gated_delta_rule`
is not robust enough (flaky test) in fp16 due to upscaling in fp32 and then downscaling to fp16 at the end
"""
if dtype == "fp16":
self.skipTest("Not robust in fp16")
_test_eager_matches_sdpa_inference(
self,
name,
dtype,
padding_side,
use_attention_mask,
output_attentions,
enable_kernels,
)
@unittest.skip("The specific cache format cannot be instantiated from dp/ddp data.")
def test_multi_gpu_data_parallel_forward(self):
pass
@require_torch_multi_gpu
def test_can_use_device_map(self):
"""
Test that this model can be dispatched on multiple gpus. It's not obvious as the Cache is not standard,
ant each layer need to use the correct device on which it reside (i.e. it needs to be lazy initialized).
"""
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
inputs_dict = {k: v.to(0) if isinstance(v, torch.Tensor) else v for k, v in inputs_dict.items()}
# We want the linear attention layer to reside on device 1 with the device map (i.e. not the first/default device),
# to check if cache initialization is on the correct device
config.layer_types = ["full_attention", "linear_attention"]
model = model_class(config).eval()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
del model
model = model_class.from_pretrained(
tmpdirname,
device_map={
"lm_head": 0,
"model.embed_tokens": 0,
"model.norm": 0,
"model.layers.0": 0,
"model.layers.1": 1,
},
)
# Check that we indeed use 2 different devices for each layer
self.assertTrue({param.device for param in model.model.layers[0].parameters()} == {torch.device(0)})
self.assertTrue({param.device for param in model.model.layers[1].parameters()} == {torch.device(1)})
# This should not crash
_ = model.generate(**inputs_dict, max_new_tokens=5, min_new_tokens=5)
@slow
| Qwen3NextModelTest |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/v2025_09_23/test_task_instances.py | {
"start": 1201,
"end": 3237
} | class ____:
"""Test that API version 2025-09-23 does NOT include triggering_user_name field."""
def setup_method(self):
clear_db_runs()
def teardown_method(self):
clear_db_runs()
def test_ti_run_excludes_triggering_user_name(
self,
ver_client,
session,
create_task_instance,
time_machine,
):
"""
Test that the triggering_user_name field is NOT present in API version 2025-09-23.
This field was added in version 2025-10-10, so older API clients should not
receive it in the response.
"""
instant_str = "2024-09-30T12:00:00Z"
instant = timezone.parse(instant_str)
time_machine.move_to(instant, tick=False)
ti = create_task_instance(
task_id="test_triggering_user_exclusion",
state=State.QUEUED,
dagrun_state=DagRunState.RUNNING,
session=session,
start_date=instant,
)
session.commit()
response = ver_client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"hostname": "test-hostname",
"unixname": "test-user",
"pid": 12345,
"start_date": instant_str,
},
)
assert response.status_code == 200
json_response = response.json()
# Verify the dag_run is present
assert "dag_run" in json_response
dag_run = json_response["dag_run"]
# The triggering_user_name field should NOT be present in this API version
assert "triggering_user_name" not in dag_run, (
"triggering_user_name should not be present in API version 2025-09-23"
)
# Verify other expected fields are still present
assert dag_run["dag_id"] == ti.dag_id
assert dag_run["run_id"] == "test"
assert dag_run["state"] == "running"
assert dag_run["conf"] == {}
| TestTIRunStateV20250923 |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/visitor.py | {
"start": 258,
"end": 3941
} | class ____(object):
__slots__ = 'in_array', 'index', 'keys', 'edits', 'prev'
def __init__(self, in_array, index, keys, edits, prev):
self.in_array = in_array
self.index = index
self.keys = keys
self.edits = edits
self.prev = prev
def visit(root, visitor, key_map=None):
visitor_keys = key_map or QUERY_DOCUMENT_KEYS
stack = None
in_array = isinstance(root, list)
keys = [root]
index = -1
edits = []
parent = None
path = []
ancestors = []
new_root = root
leave = visitor.leave
enter = visitor.enter
path_pop = path.pop
ancestors_pop = ancestors.pop
path_append = path.append
ancestors_append = ancestors.append
while True:
index += 1
is_leaving = index == len(keys)
is_edited = is_leaving and edits
if is_leaving:
key = path_pop() if ancestors else None
node = parent
parent = ancestors_pop() if ancestors else None
if is_edited:
if in_array:
node = list(node)
else:
node = copy(node)
edit_offset = 0
for edit_key, edit_value in edits:
if in_array:
edit_key -= edit_offset
if in_array and edit_value is REMOVE:
node.pop(edit_key)
edit_offset += 1
else:
if isinstance(node, list):
node[edit_key] = edit_value
else:
setattr(node, edit_key, edit_value)
index = stack.index
keys = stack.keys
edits = stack.edits
in_array = stack.in_array
stack = stack.prev
else:
if parent:
key = index if in_array else keys[index]
if isinstance(parent, list):
node = parent[key]
else:
node = getattr(parent, key, None)
else:
key = None
node = new_root
if node is REMOVE or node is None:
continue
if parent:
path_append(key)
result = None
if not isinstance(node, list):
assert isinstance(node, ast.Node), 'Invalid AST Node: ' + repr(node)
if is_leaving:
result = leave(node, key, parent, path, ancestors)
else:
result = enter(node, key, parent, path, ancestors)
if result is BREAK:
break
if result is False:
if not is_leaving:
path_pop()
continue
elif result is not None:
edits.append((key, result))
if not is_leaving:
if isinstance(result, ast.Node):
node = result
else:
path_pop()
continue
if result is None and is_edited:
edits.append((key, node))
if not is_leaving:
stack = Stack(in_array, index, keys, edits, stack)
in_array = isinstance(node, list)
keys = node if in_array else visitor_keys.get(type(node), None) or []
index = -1
edits = []
if parent:
ancestors_append(parent)
parent = node
if not stack:
break
if edits:
new_root = edits[-1][1]
return new_root
| Stack |
python | huggingface__transformers | src/transformers/pipelines/base.py | {
"start": 14974,
"end": 19002
class ____:
    """
    Base class for all the pipeline supported data format both for reading and writing. Supported data formats
    currently includes:
    - JSON
    - CSV
    - stdin/stdout (pipe)

    `PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets columns to
    pipelines keyword arguments through the `dataset_kwarg_1=dataset_column_1` format.

    Args:
        output_path (`str`): Where to save the outgoing data.
        input_path (`str`): Where to look for the input data.
        column (`str`): The column to read.
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the `output_path`.
    """

    # Format names accepted by `from_str`.
    SUPPORTED_FORMATS = ["json", "csv", "pipe"]

    def __init__(
        self,
        output_path: str | None,
        input_path: str | None,
        column: str | None,
        overwrite: bool = False,
    ):
        self.output_path = output_path
        self.input_path = input_path
        # Comma-separated column spec; `None` collapses to a single empty column name.
        self.column = column.split(",") if column is not None else [""]
        self.is_multi_columns = len(self.column) > 1
        if self.is_multi_columns:
            # Each entry may be "kwarg=dataset_column"; a bare name maps onto itself.
            self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]
        if output_path is not None and not overwrite:
            # Refuse to clobber an existing output file unless explicitly allowed.
            if exists(abspath(self.output_path)):
                raise OSError(f"{self.output_path} already exists on disk")
        if input_path is not None:
            if not exists(abspath(self.input_path)):
                raise OSError(f"{self.input_path} doesn't exist on disk")

    @abstractmethod
    def __iter__(self):
        raise NotImplementedError()

    @abstractmethod
    def save(self, data: dict | list[dict]):
        """
        Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`].

        Args:
            data (`dict` or list of `dict`): The data to store.
        """
        raise NotImplementedError()

    def save_binary(self, data: dict | list[dict]) -> str:
        """
        Save the provided data object as a pickle-formatted binary data on the disk.

        Args:
            data (`dict` or list of `dict`): The data to store.

        Returns:
            `str`: Path where the data has been saved.
        """
        # Reuse the output path's stem, swapping its extension for ".pickle".
        path, _ = os.path.splitext(self.output_path)
        binary_path = os.path.extsep.join((path, "pickle"))
        with open(binary_path, "wb+") as f_output:
            pickle.dump(data, f_output)
        return binary_path

    @staticmethod
    def from_str(
        format: str,
        output_path: str | None,
        input_path: str | None,
        column: str | None,
        overwrite=False,
    ) -> "PipelineDataFormat":
        """
        Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`.

        Args:
            format (`str`):
                The format of the desired pipeline. Acceptable values are `"json"`, `"csv"` or `"pipe"`.
            output_path (`str`, *optional*):
                Where to save the outgoing data.
            input_path (`str`, *optional*):
                Where to look for the input data.
            column (`str`, *optional*):
                The column to read.
            overwrite (`bool`, *optional*, defaults to `False`):
                Whether or not to overwrite the `output_path`.

        Returns:
            [`~pipelines.PipelineDataFormat`]: The proper data format.
        """
        if format == "json":
            return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "csv":
            return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "pipe":
            return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        else:
            raise KeyError(f"Unknown reader {format} (Available reader are json/csv/pipe)")
| PipelineDataFormat |
python | doocs__leetcode | solution/0000-0099/0075.Sort Colors/Solution.py | {
"start": 0,
"end": 397
} | class ____:
def sortColors(self, nums: List[int]) -> None:
i, j, k = -1, len(nums), 0
while k < j:
if nums[k] == 0:
i += 1
nums[i], nums[k] = nums[k], nums[i]
k += 1
elif nums[k] == 2:
j -= 1
nums[j], nums[k] = nums[k], nums[j]
else:
k += 1
| Solution |
python | optuna__optuna | optuna/storages/_heartbeat.py | {
"start": 2711,
"end": 2891
class ____(BaseHeartbeatThread):
    """Null-object heartbeat thread: satisfies the interface while doing nothing."""

    def __init__(self) -> None:
        # Intentionally holds no state.
        return

    def start(self) -> None:
        # No background thread to launch.
        return

    def join(self) -> None:
        # Nothing to wait on.
        return
python | davidhalter__parso | parso/python/parser.py | {
"start": 213,
"end": 8108
class ____(BaseParser):
    """
    This class is used to parse a Python file, it then divides them into a
    class structure of different scopes.

    :param pgen_grammar: The grammar object of pgen2. Loaded by load_grammar.
    """

    # Maps grammar nonterminal names to the dedicated tree node classes that
    # `convert_node` should instantiate; anything not listed falls back to
    # `default_node`.
    node_map = {
        'expr_stmt': tree.ExprStmt,
        'classdef': tree.Class,
        'funcdef': tree.Function,
        'file_input': tree.Module,
        'import_name': tree.ImportName,
        'import_from': tree.ImportFrom,
        'break_stmt': tree.KeywordStatement,
        'continue_stmt': tree.KeywordStatement,
        'return_stmt': tree.ReturnStmt,
        'raise_stmt': tree.KeywordStatement,
        'yield_expr': tree.YieldExpr,
        'del_stmt': tree.KeywordStatement,
        'pass_stmt': tree.KeywordStatement,
        'global_stmt': tree.GlobalStmt,
        'nonlocal_stmt': tree.KeywordStatement,
        'print_stmt': tree.KeywordStatement,
        'assert_stmt': tree.AssertStmt,
        'if_stmt': tree.IfStmt,
        'with_stmt': tree.WithStmt,
        'for_stmt': tree.ForStmt,
        'while_stmt': tree.WhileStmt,
        'try_stmt': tree.TryStmt,
        'sync_comp_for': tree.SyncCompFor,
        # Not sure if this is the best idea, but IMO it's the easiest way to
        # avoid extreme amounts of work around the subtle difference of 2/3
        # grammar in list comprehensions.
        'decorator': tree.Decorator,
        'lambdef': tree.Lambda,
        'lambdef_nocond': tree.Lambda,
        'namedexpr_test': tree.NamedExpr,
    }
    default_node = tree.PythonNode

    # Names/Keywords are handled separately
    _leaf_map = {
        PythonTokenTypes.STRING: tree.String,
        PythonTokenTypes.NUMBER: tree.Number,
        PythonTokenTypes.NEWLINE: tree.Newline,
        PythonTokenTypes.ENDMARKER: tree.EndMarker,
        PythonTokenTypes.FSTRING_STRING: tree.FStringString,
        PythonTokenTypes.FSTRING_START: tree.FStringStart,
        PythonTokenTypes.FSTRING_END: tree.FStringEnd,
    }

    def __init__(self, pgen_grammar, error_recovery=True, start_nonterminal='file_input'):
        super().__init__(pgen_grammar, start_nonterminal,
                         error_recovery=error_recovery)
        self.syntax_errors = []
        # Indent bookkeeping used by `_recovery_tokenize` to drop the DEDENT
        # that matches a previously discarded INDENT.
        self._omit_dedent_list = []
        self._indent_counter = 0

    def parse(self, tokens):
        """Parse the token stream, wrapping it for error recovery if enabled."""
        if self._error_recovery:
            if self._start_nonterminal != 'file_input':
                raise NotImplementedError
            tokens = self._recovery_tokenize(tokens)
        return super().parse(tokens)

    def convert_node(self, nonterminal, children):
        """
        Convert raw node information to a PythonBaseNode instance.

        This is passed to the parser driver which calls it whenever a reduction of a
        grammar rule produces a new complete node, so that the tree is build
        strictly bottom-up.
        """
        try:
            node = self.node_map[nonterminal](children)
        except KeyError:
            if nonterminal == 'suite':
                # We don't want the INDENT/DEDENT in our parser tree. Those
                # leaves are just cancer. They are virtual leaves and not real
                # ones and therefore have pseudo start/end positions and no
                # prefixes. Just ignore them.
                children = [children[0]] + children[2:-1]
            node = self.default_node(nonterminal, children)
        return node

    def convert_leaf(self, type, value, prefix, start_pos):
        # print('leaf', repr(value), token.tok_name[type])
        if type == NAME:
            # A NAME that is reserved in the grammar (e.g. `if`, `def`) becomes
            # a Keyword leaf; everything else is an ordinary Name.
            if value in self._pgen_grammar.reserved_syntax_strings:
                return tree.Keyword(value, start_pos, prefix)
            else:
                return tree.Name(value, start_pos, prefix)
        return self._leaf_map.get(type, tree.Operator)(value, start_pos, prefix)

    def error_recovery(self, token):
        """Handle a token the grammar cannot accept, keeping the parse alive.

        Either completes a trailing simple_stmt that is missing its final
        newline, or discards enclosing stack frames down to the nearest
        suite/file_input and records the offending input as error nodes.
        """
        tos_nodes = self.stack[-1].nodes
        if tos_nodes:
            last_leaf = tos_nodes[-1].get_last_leaf()
        else:
            last_leaf = None
        if self._start_nonterminal == 'file_input' and \
                (token.type == PythonTokenTypes.ENDMARKER
                 or token.type == DEDENT and not last_leaf.value.endswith('\n')
                 and not last_leaf.value.endswith('\r')):
            # In Python statements need to end with a newline. But since it's
            # possible (and valid in Python) that there's no newline at the
            # end of a file, we have to recover even if the user doesn't want
            # error recovery.
            if self.stack[-1].dfa.from_rule == 'simple_stmt':
                try:
                    plan = self.stack[-1].dfa.transitions[PythonTokenTypes.NEWLINE]
                except KeyError:
                    pass
                else:
                    if plan.next_dfa.is_final and not plan.dfa_pushes:
                        # We are ignoring here that the newline would be
                        # required for a simple_stmt.
                        self.stack[-1].dfa = plan.next_dfa
                        self._add_token(token)
                        return
        if not self._error_recovery:
            return super().error_recovery(token)

        def current_suite(stack):
            # For now just discard everything that is not a suite or
            # file_input, if we detect an error.
            for until_index, stack_node in reversed(list(enumerate(stack))):
                # `suite` can sometimes be only simple_stmt, not stmt.
                if stack_node.nonterminal == 'file_input':
                    break
                elif stack_node.nonterminal == 'suite':
                    # In the case where we just have a newline we don't want to
                    # do error recovery here. In all other cases, we want to do
                    # error recovery.
                    if len(stack_node.nodes) != 1:
                        break
            return until_index

        until_index = current_suite(self.stack)
        if self._stack_removal(until_index + 1):
            self._add_token(token)
        else:
            typ, value, start_pos, prefix = token
            if typ == INDENT:
                # For every deleted INDENT we have to delete a DEDENT as well.
                # Otherwise the parser will get into trouble and DEDENT too early.
                self._omit_dedent_list.append(self._indent_counter)
            error_leaf = tree.PythonErrorLeaf(typ.name, value, start_pos, prefix)
            self.stack[-1].nodes.append(error_leaf)
        tos = self.stack[-1]
        if tos.nonterminal == 'suite':
            # Need at least one statement in the suite. This happened with the
            # error recovery above.
            try:
                tos.dfa = tos.dfa.arcs['stmt']
            except KeyError:
                # We're already in a final state.
                pass

    def _stack_removal(self, start_index):
        # Collapse every node sitting above `start_index` into a single
        # PythonErrorNode attached to the surviving frame below.
        all_nodes = [node for stack_node in self.stack[start_index:] for node in stack_node.nodes]
        if all_nodes:
            node = tree.PythonErrorNode(all_nodes)
            self.stack[start_index - 1].nodes.append(node)
        self.stack[start_index:] = []
        return bool(all_nodes)

    def _recovery_tokenize(self, tokens):
        # Wraps the token stream so that DEDENTs matching previously discarded
        # INDENTs are dropped, keeping indentation levels balanced.
        for token in tokens:
            typ = token[0]
            if typ == DEDENT:
                # We need to count indents, because if we just omit any DEDENT,
                # we might omit them in the wrong place.
                o = self._omit_dedent_list
                if o and o[-1] == self._indent_counter:
                    o.pop()
                    self._indent_counter -= 1
                    continue
                self._indent_counter -= 1
            elif typ == INDENT:
                self._indent_counter += 1
            yield token
| Parser |
python | huggingface__transformers | src/transformers/models/bamba/modeling_bamba.py | {
"start": 22902,
"end": 46141
class ____(nn.Module):
    """
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
    and is why Mamba is called **selective** state spaces)

    There are a few differences between this and Mamba2Mixer:
    - The variable use_precomputed_states is slightly different due to the hybrid cache structure
    - There's a few non-obvious bugs fixed with batching in the slow path that exist in main
    - Some extra variables that our layer doesn't need have been removed
    - We ported most of the refactors in https://github.com/huggingface/transformers/pull/35154, which is (as of Dec 18, 2024) unmerged
    """

    def __init__(self, config: BambaConfig, layer_idx: int):
        super().__init__()
        self.num_heads = config.mamba_n_heads
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.mamba_d_state
        self.conv_kernel_size = config.mamba_d_conv
        self.intermediate_size = int(config.mamba_expand * self.hidden_size)
        self.layer_idx = layer_idx
        self.use_conv_bias = config.mamba_conv_bias
        self.activation = config.hidden_act
        self.act = ACT2FN[config.hidden_act]
        self.use_bias = config.mamba_proj_bias
        self.layer_norm_epsilon = config.rms_norm_eps
        self.n_groups = config.mamba_n_groups
        self.head_dim = config.mamba_d_head
        self.chunk_size = config.mamba_chunk_size
        # FIXME: hard-coded dt limits/ranges; presumably these should come from
        # the config — TODO confirm before exposing them.
        self.time_step_limit = (0.0, float("inf"))
        self.time_step_min = 0.001
        self.time_step_max = 0.1
        # Channels entering the causal conv: the SSM input plus grouped B and C.
        self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
        self.conv1d = nn.Conv1d(
            in_channels=self.conv_dim,
            out_channels=self.conv_dim,
            bias=config.mamba_conv_bias,
            kernel_size=self.conv_kernel_size,
            groups=self.conv_dim,
            padding=self.conv_kernel_size - 1,
        )
        # projection of the input hidden states
        projection_size = self.intermediate_size + self.conv_dim + self.num_heads
        self.in_proj = nn.Linear(
            self.hidden_size,
            projection_size,
            bias=self.use_bias,
        )
        # selective projection used to make dt, B and C input dependent
        # time step projection (discretization)
        # instantiate once and copy inv_dt in init_weights of PretrainedModel
        self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
        # S4D real initialization. These are not discretized!
        # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
        A = torch.arange(1, self.num_heads + 1)
        self.A_log = nn.Parameter(torch.log(A))
        self.norm = BambaRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon)
        self.D = nn.Parameter(torch.ones(self.num_heads))
        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias)
        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
                " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
                " https://github.com/Dao-AILab/causal-conv1d"
            )
        else:
            logger.warning_once("The fast path for Bamba will be used when running the model on a GPU")

    def cuda_kernels_forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        seq_idx: Optional[torch.IntTensor] = None,
    ):
        """Fused-kernel forward path (requires mamba_ssm / causal_conv1d CUDA ops)."""
        # 1. Gated MLP's linear projection
        hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
        projected_states = self.in_proj(hidden_states)

        # Set up dimensions for reshapes later
        batch_size, seq_len, _ = hidden_states.shape
        groups_time_state_size = self.n_groups * self.ssm_state_size

        # Single-token decoding with a warm cache (note: also requires the cache
        # batch size to match the input batch size).
        use_precomputed_states = (
            cache_params is not None
            and cache_params.has_previous_state
            and seq_len == 1
            and cache_params.conv_states[self.layer_idx].shape[0]
            == cache_params.ssm_states[self.layer_idx].shape[0]
            == batch_size
            and cache_position is not None
            and cache_position[0] > 0
        )

        # getting projected states from cache if it exists
        if use_precomputed_states:
            gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
                [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
            )

            # 2. Convolution sequence transformation
            hidden_states_B_C = causal_conv1d_update(
                hidden_states_B_C,
                cache_params.conv_states[self.layer_idx],
                self.conv1d.weight.squeeze(1),
                self.conv1d.bias,
                self.activation,
            )

            hidden_states, B, C = torch.split(
                hidden_states_B_C,
                [self.intermediate_size, groups_time_state_size, groups_time_state_size],
                dim=-1,
            )

            # 3. SSM transformation
            A = -torch.exp(self.A_log.float())  # (nheads,)
            A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
            dt = dt[:, :, None].expand(-1, -1, self.head_dim)
            dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
            D = self.D[:, None, ...].expand(-1, self.head_dim)
            B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
            C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
            hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
            # Updates cache_params.ssm_states in place and returns the output.
            hidden_states = selective_state_update(
                cache_params.ssm_states[self.layer_idx],
                hidden_states_reshaped,
                dt,
                A,
                B,
                C,
                D,
                z=None,
                dt_bias=dt_bias,
                dt_softplus=True,
            )
            hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
            hidden_states = self.norm(hidden_states, gate)

            # 4. Final linear projection
            out = self.out_proj(hidden_states)[:, None, ...]
        # Fused calculations or step by step if no initialized cache is found
        else:
            A = -torch.exp(self.A_log.float())  # (num_heads) or (intermediate_size, state_size)
            dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}

            # 2-4. Fused kernel for conv1d, SSM, and the final projection
            if self.training and cache_params is None:
                out = mamba_split_conv1d_scan_combined(
                    projected_states,
                    self.conv1d.weight.squeeze(1),
                    self.conv1d.bias,
                    self.dt_bias,
                    A,
                    D=self.D,
                    chunk_size=self.chunk_size,
                    seq_idx=seq_idx,
                    activation=self.activation,
                    rmsnorm_weight=self.norm.weight,
                    rmsnorm_eps=self.norm.variance_epsilon,
                    outproj_weight=self.out_proj.weight,
                    outproj_bias=self.out_proj.bias,
                    headdim=self.head_dim,
                    ngroups=self.n_groups,
                    norm_before_gate=False,
                    return_final_states=False,
                    **dt_limit_kwargs,
                )
            else:
                gate, hidden_states_B_C, dt = projected_states.split(
                    [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
                )

                # 2. Convolution sequence transformation
                # Init cache
                if cache_params is not None:
                    # storing the states
                    # If we just take xBC[:, :, -self.d_conv :], it will error if seqlen < self.d_conv
                    # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise.
                    hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
                    conv_states = nn.functional.pad(
                        hidden_states_B_C_transposed,
                        (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0),
                    )
                    cache_params.conv_states[self.layer_idx].copy_(conv_states)

                if self.activation not in ["silu", "swish"]:
                    # causal_conv1d_fn only fuses silu/swish; other activations
                    # fall back to eager conv + activation.
                    hidden_states_B_C = self.act(
                        self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2)
                    )
                else:
                    hidden_states_B_C = causal_conv1d_fn(
                        x=hidden_states_B_C.transpose(1, 2),
                        weight=self.conv1d.weight.squeeze(1),
                        bias=self.conv1d.bias,
                        activation=self.activation,
                        seq_idx=seq_idx,
                    ).transpose(1, 2)

                hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
                hidden_states, B, C = torch.split(
                    hidden_states_B_C,
                    [self.intermediate_size, groups_time_state_size, groups_time_state_size],
                    dim=-1,
                )

                # 3. SSM transformation
                scan_output, ssm_state = mamba_chunk_scan_combined(
                    hidden_states.view(batch_size, seq_len, -1, self.head_dim),
                    dt,
                    A,
                    B.view(batch_size, seq_len, self.n_groups, -1),
                    C.view(batch_size, seq_len, self.n_groups, -1),
                    chunk_size=self.chunk_size,
                    D=self.D,
                    z=None,
                    seq_idx=seq_idx,
                    return_final_states=True,
                    dt_bias=self.dt_bias,
                    dt_softplus=True,
                    **dt_limit_kwargs,
                )

                # Init cache
                if ssm_state is not None and cache_params is not None:
                    cache_params.ssm_states[self.layer_idx].copy_(ssm_state)

                scan_output = scan_output.view(batch_size, seq_len, -1)

                # Multiply "gate" branch and apply extra normalization layer
                scan_output = self.norm(scan_output, gate)

                # 4. Final linear projection
                out = self.out_proj(scan_output)
        return out

    # fmt: off
    def torch_forward(
        self,
        input_states,
        cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        """Pure-PyTorch (slow path) forward, mirroring the fused kernels."""
        batch_size, seq_len, _ = input_states.shape
        dtype = input_states.dtype

        # 1. Gated MLP's linear projection
        input_states = apply_mask_to_padding_states(input_states, attention_mask)
        projected_states = self.in_proj(input_states)
        gate, hidden_states_B_C, dt = projected_states.split(
            [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
        )

        use_precomputed_states = (
            cache_params is not None
            and cache_params.has_previous_state
            and seq_len == 1
            and cache_params.conv_states[self.layer_idx].shape[0]
            == cache_params.ssm_states[self.layer_idx].shape[0]
            == batch_size
            and cache_position is not None
            and cache_position[0] > 0
        )

        # 2. Convolution sequence transformation
        if use_precomputed_states:
            # Shift the rolling conv window left by one and append the new token.
            cache_params.conv_states[self.layer_idx] = cache_params.conv_states[self.layer_idx].roll(shifts=-1, dims=-1)
            cache_params.conv_states[self.layer_idx][:, :, -1] = hidden_states_B_C[:, 0, :].to(cache_params.conv_states[self.layer_idx].device)

            # We need to guarantee that anything regarding the cache is on the same device
            conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)

            # Depthwise conv expressed as an elementwise multiply-and-sum over the window.
            hidden_states_B_C = torch.sum(
                conv_states * self.conv1d.weight.squeeze(1), dim=-1
            )
            if self.use_conv_bias:
                hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
            hidden_states_B_C = self.act(hidden_states_B_C)
        else:
            # Init cache
            if cache_params is not None:
                hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
                conv_states = nn.functional.pad(
                    hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0)
                )
                cache_params.conv_states[self.layer_idx].copy_(conv_states)

            hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))

        hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
        hidden_states, B, C = torch.split(
            hidden_states_B_C,
            [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size],
            dim=-1
        )

        # 3. SSM transformation
        A = -torch.exp(self.A_log.float())                            # [num_heads]
        if use_precomputed_states:
            # We need to guarantee that anything regarding the cache is on the same device
            cache_device = cache_params.ssm_states[self.layer_idx].device

            # Note: there is no need to pad parameter matrices here, as there is just one new token
            # for batched generation
            dt = dt[:, 0, :][:, None, ...]
            dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
            # [num_heads] -> [num_heads, head_dim]
            dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)

            dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
            dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
            A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
            # [bsz, num_heads, head_dim, state_size]
            dA = (torch.exp(dt[..., None] * A)).to(device=cache_device)

            # Discretize B
            # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
            # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
            B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
            B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
            B = B.reshape(batch_size, -1, B.shape[-1])
            # [bsz, num_heads, head_dim, state_size]
            dB = dt[..., None] * B[..., None, :]

            # Discretize x into dB
            # [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
            hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
            dBx = (dB * hidden_states[..., None]).to(device=cache_device)

            # State calculation
            cache_params.ssm_states[self.layer_idx].copy_(
                cache_params.ssm_states[self.layer_idx] * dA + dBx
            )

            # Subsequent output
            # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
            C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
            C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
            C = C.reshape(batch_size, -1, C.shape[-1])
            # [bsz, num_heads, head_dim]

            ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype)  # Shape: [b, h, d, n]
            # Reshape ssm_states to merge the first two dimensions
            ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size)  # Shape: [b*h, d, n]
            C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1)  # Shape: [b*h, n, 1]
            y = torch.bmm(ssm_states_reshaped, C_reshaped)
            y = y.view(batch_size, self.num_heads, self.head_dim)

            # D skip connection
            # [num_heads] -> [num_heads, head_dim]
            D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
            y = (y + hidden_states * D).to(y.dtype)

            # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
            y = y.reshape(batch_size, -1)[:, None, ...]
        else:
            # begin ssd naive implementation without einsums
            dt = nn.functional.softplus(dt + self.dt_bias)
            dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
            hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
            B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
            C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
            B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
            C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
            pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size

            D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)

            # Discretize x and A
            hidden_states = hidden_states * dt[..., None]
            A = A.to(hidden_states.dtype) * dt

            # Rearrange into blocks/chunks
            hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]

            # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
            A = A.permute(0, 3, 1, 2)
            A_cumsum = torch.cumsum(A, dim=-1)

            # 1. Compute the output for each intra-chunk (diagonal blocks)
            # This is the analog of a causal mask
            L = torch.exp(segment_sum(A))

            # Contraction of C and B to get G (attention-weights like)
            G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :]  # shape: (b, c, l, s, h, n)
            G = G_intermediate.sum(dim=-1)  # shape: (b, c, l, s, h)

            # Compute M, equivalent to applying attention mask to weights
            M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
            M = M_intermediate.sum(dim=-1)

            # Compute Y_diag (apply to values)
            Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3)

            # 2. Compute the state for each intra-chunk
            # (right term of low-rank factorization of off-diagonal blocks; B terms)
            decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum)
            B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
            states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)

            # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries
            # (middle term of factorization of off-diag blocks; A terms)
            if use_precomputed_states:
                previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device)
            else:
                previous_states = torch.zeros_like(states[:, :1])
            states = torch.cat([previous_states, states], dim=1)
            decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
            decay_chunk = decay_chunk.transpose(1, 3)
            new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1)
            states, ssm_state = new_states[:, :-1], new_states[:, -1]

            # 4. Compute state -> output conversion per chunk
            # (left term of low-rank factorization of off-diagonal blocks; C terms)
            state_decay_out = torch.exp(A_cumsum)
            C_times_states = (C[..., None, :] * states[:, :, None, ...])
            state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
            Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])

            # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
            y = Y_diag + Y_off
            # [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
            y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)

            y = y + D_residual
            # Cutting off padded chunks
            if pad_size > 0:
                y = y[:, :seq_len, :, :]
            y = y.reshape(batch_size, seq_len, -1)

            # Init cache
            if ssm_state is not None and cache_params is not None:
                cache_params.ssm_states[self.layer_idx].copy_(ssm_state)

        scan_output = self.norm(y, gate)
        # end ssd naive

        # 4. Final linear projection
        contextualized_states = self.out_proj(scan_output.to(dtype))  # [batch, seq_len, hidden_size]
        return contextualized_states
    # fmt: on

    def forward(
        self,
        hidden_states,
        cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        seq_idx: Optional[torch.IntTensor] = None,
        **kwargs,
    ):
        """Dispatch to the fused CUDA path when available, otherwise the eager path."""
        if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
            return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask, seq_idx)
        if seq_idx is not None:
            raise NotImplementedError(
                "`seq_idx` support requires fast path support. Please install `mamba_ssm` and `causal_conv1d`"
            )
        dtype = hidden_states.dtype
        if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
            # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
            hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)

        return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask)
| BambaMixer |
python | Delgan__loguru | loguru/_better_exceptions.py | {
"start": 523,
"end": 3797
class ____:
    """ANSI syntax highlighter for Python source, driven by `tokenize`."""

    # Default ANSI escape templates per token category. Stored as a frozenset
    # of (name, template) items so the class-level default is immutable; each
    # instance rebuilds it into a dict.
    _default_style = frozenset(
        {
            "comment": "\x1b[30m\x1b[1m{}\x1b[0m",
            "keyword": "\x1b[35m\x1b[1m{}\x1b[0m",
            "builtin": "\x1b[1m{}\x1b[0m",
            "string": "\x1b[36m{}\x1b[0m",
            "number": "\x1b[34m\x1b[1m{}\x1b[0m",
            "operator": "\x1b[35m\x1b[1m{}\x1b[0m",
            "punctuation": "\x1b[1m{}\x1b[0m",
            "constant": "\x1b[36m\x1b[1m{}\x1b[0m",
            "identifier": "\x1b[1m{}\x1b[0m",
            "other": "{}",
        }.items()
    )
    _builtins = frozenset(dir(builtins))
    _constants = frozenset({"True", "False", "None"})
    _punctuation = frozenset({"(", ")", "[", "]", "{", "}", ":", ",", ";"})
    # Python 3.12+ splits f-strings into dedicated START/MIDDLE/END tokens.
    if sys.version_info >= (3, 12):
        _strings = frozenset(
            {tokenize.STRING, tokenize.FSTRING_START, tokenize.FSTRING_MIDDLE, tokenize.FSTRING_END}
        )
        _fstring_middle = tokenize.FSTRING_MIDDLE
    else:
        _strings = frozenset({tokenize.STRING})
        _fstring_middle = None

    def __init__(self, style=None):
        # `style` maps the category names above to "{}" format templates.
        self._style = style or dict(self._default_style)

    def highlight(self, source):
        """Return `source` with ANSI color codes wrapped around each token."""
        style = self._style
        row, column = 0, 0
        output = ""
        for token in self.tokenize(source):
            type_, string, (start_row, start_column), (_, end_column), line = token
            if type_ == self._fstring_middle:
                # When an f-string contains "{{" or "}}", they appear as "{" or "}" in the "string"
                # attribute of the token. However, they do not count in the column position.
                end_column += string.count("{") + string.count("}")
            # Pick the style template for this token category.
            if type_ == tokenize.NAME:
                if string in self._constants:
                    color = style["constant"]
                elif keyword.iskeyword(string):
                    color = style["keyword"]
                elif string in self._builtins:
                    color = style["builtin"]
                else:
                    color = style["identifier"]
            elif type_ == tokenize.OP:
                if string in self._punctuation:
                    color = style["punctuation"]
                else:
                    color = style["operator"]
            elif type_ == tokenize.NUMBER:
                color = style["number"]
            elif type_ in self._strings:
                color = style["string"]
            elif type_ == tokenize.COMMENT:
                color = style["comment"]
            else:
                color = style["other"]
            if start_row != row:
                # New physical line: drop the consumed prefix of `source` and
                # restart column tracking.
                source = source[column:]
                row, column = start_row, 0
            if type_ != tokenize.ENCODING:
                # Copy any untouched text before the token, then the styled token.
                output += line[column:start_column]
                output += color.format(line[start_column:end_column])
                column = end_column
        # Append whatever trails the last token (e.g. after a TokenError).
        output += source[column:]
        return output

    @staticmethod
    def tokenize(source):
        """Yield `tokenize` tokens for `source`, stopping quietly on errors."""
        # Worth reading: https://www.asmeurer.com/brown-water-python/
        source = source.encode("utf-8")
        source = io.BytesIO(source)
        try:
            yield from tokenize.tokenize(source.readline)
        except tokenize.TokenError:
            # Unterminated constructs end tokenization early; `highlight`
            # appends the remaining source unstyled.
            return
| SyntaxHighlighter |
python | pytorch__pytorch | torch/testing/_internal/distributed/distributed_test.py | {
"start": 3024,
"end": 3357
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = nn.Linear(10, 10, bias=False)
self.b = nn.Linear(10, 1, bias=False)
self.register_buffer("buffer", torch.randn(1, 2))
def forward(self, x):
self.buffer.add_(1)
return self.b(self.a(x))
| NetWithBuffers |
python | PyCQA__pylint | doc/data/messages/s/super-without-brackets/bad.py | {
"start": 78,
"end": 234
class ____(Soup):
    """Negative example for pylint's `super-without-brackets` check.

    NOTE: the missing parentheses on `super` below are intentional -- this is
    the "bad" half of a checker documentation pair; do not "fix" it here.
    """

    @staticmethod
    def temp():
        # `super` without `()` is the defect the checker flags; the bracketed
        # comment is the expected-message marker used by pylint's doc tests.
        super.temp()  # [super-without-brackets]
        print("But tomato soup is even hotter!")
| TomatoSoup |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 839069,
"end": 839805
class ____(sgqlc.types.relay.Connection):
    """The connection type for ProjectCard."""

    # Auto-generated sgqlc binding for GitHub's GraphQL `ProjectCardConnection`
    # type; regenerate from the schema rather than editing field defs by hand.
    __schema__ = github_schema
    __field_names__ = ("edges", "nodes", "page_info", "total_count")
    edges = sgqlc.types.Field(sgqlc.types.list_of("ProjectCardEdge"), graphql_name="edges")
    """A list of edges."""

    nodes = sgqlc.types.Field(sgqlc.types.list_of("ProjectCard"), graphql_name="nodes")
    """A list of nodes."""

    page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
    """Information to aid in pagination."""

    total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
    """Identifies the total count of items in the connection."""
| ProjectCardConnection |
python | kamyu104__LeetCode-Solutions | Python/latest-time-you-can-obtain-after-replacing-characters.py | {
"start": 38,
"end": 524
} | class ____(object):
def findLatestTime(self, s):
"""
:type s: str
:rtype: str
"""
result = list(s)
if result[0] == '?':
result[0] = '1' if result[1] == '?' or result[1] <= '1' else '0'
if result[1] == '?':
result[1] = '1' if result[0] == '1' else '9'
if result[3] == '?':
result[3] = '5'
if result[4] == '?':
result[4] = '9'
return "".join(result)
| Solution |
python | mlflow__mlflow | tests/pyfunc/test_scoring_server.py | {
"start": 1846,
"end": 3092
class ____(PythonModel):
    """Test double for a chat LLM: returns an OpenAI-style chat-completion dict
    whose assistant reply is the space-joined contents of the input messages."""

    def predict(self, context, model_input, params=None):
        # If (and only-if) we define model signature, input is converted
        # to pandas DataFrame in _enforce_schema applied in Pyfunc.predict.
        # TODO: Confirm if this is ok, for me it sounds confusing.
        if isinstance(model_input, pd.DataFrame):
            model_input = model_input.to_dict(orient="records")[0]
        reply = " ".join(message["content"] for message in model_input["messages"])
        completion = {
            "id": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
            "object": "chat.completion",
            "created": 1698916461,
            "model": "llama-2-70b-chat-hf",
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": reply,
                    },
                    "finish_reason": "stop",
                }
            ],
            "usage": {"prompt_tokens": 47, "completion_tokens": 49, "total_tokens": 96},
            # Echo model input and params for testing purposes
            "model_input": model_input,
            "params": params,
        }
        return completion
| MyChatLLM |
python | PrefectHQ__prefect | tests/client/test_base_client.py | {
"start": 27748,
"end": 36600
} | class ____:
"""Test custom headers functionality in HTTP clients."""
async def test_default_no_custom_headers(self):
"""Test that no custom headers are added by default."""
async with PrefectHttpxAsyncClient(base_url="http://localhost:4200") as client:
# Should only have standard headers, no custom ones
headers = dict(client.headers)
# httpx normalizes header names to lowercase
assert "user-agent" in headers
# Verify no unexpected custom headers
custom_header_prefixes = ["x-", "authorization", "api-key"]
custom_headers = [
k
for k in headers.keys()
if any(
k.lower().startswith(prefix) for prefix in custom_header_prefixes
)
]
assert len(custom_headers) == 0
async def test_custom_headers_from_settings(self):
"""Test that custom headers are added from settings."""
custom_headers = {
"X-Test-Header": "test-value",
"X-Custom-Auth": "Bearer token123",
"Api-Version": "v1",
}
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: custom_headers}):
async with PrefectHttpxAsyncClient(
base_url="http://localhost:4200"
) as client:
for header_name, expected_value in custom_headers.items():
assert client.headers[header_name] == expected_value
async def test_custom_headers_json_env_var(self, monkeypatch: pytest.MonkeyPatch):
"""Test custom headers from JSON environment variable."""
json_value = (
'{"X-Json-Header": "json-value", "Authorization": "Bearer env-token"}'
)
monkeypatch.setenv("PREFECT_CLIENT_CUSTOM_HEADERS", json_value)
# Create a new settings instance to pick up the env var
from prefect.settings.models.root import Settings
settings = Settings()
expected_headers = {
"X-Json-Header": "json-value",
"Authorization": "Bearer env-token",
}
assert settings.client.custom_headers == expected_headers
# Test that it works with the client (using the setting directly)
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: expected_headers}):
async with PrefectHttpxAsyncClient(
base_url="http://localhost:4200"
) as client:
assert client.headers["X-Json-Header"] == "json-value"
assert client.headers["Authorization"] == "Bearer env-token"
async def test_protected_headers_not_overridden(self):
"""Test that critical headers cannot be overridden by custom headers."""
malicious_headers = {
"User-Agent": "malicious-agent",
"user-agent": "another-malicious-agent", # Test case insensitive
"Prefect-Csrf-Token": "fake-token",
"prefect-csrf-client": "fake-client",
"X-Safe-Header": "this-should-work",
}
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: malicious_headers}):
async with PrefectHttpxAsyncClient(
base_url="http://localhost:4200"
) as client:
# User-Agent should still be the Prefect one (httpx normalizes to lowercase)
assert "prefect/" in client.headers["user-agent"]
assert "malicious-agent" not in client.headers["user-agent"]
# CSRF headers should not be set (they're set later during requests)
assert "prefect-csrf-token" not in client.headers
assert "prefect-csrf-client" not in client.headers
# Safe header should be added
assert client.headers["X-Safe-Header"] == "this-should-work"
async def test_sync_client_custom_headers(self):
"""Test custom headers work with sync client."""
from prefect.client.base import PrefectHttpxSyncClient
custom_headers = {"X-Sync-Test": "sync-value", "Custom-Header": "sync-custom"}
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: custom_headers}):
with PrefectHttpxSyncClient(base_url="http://localhost:4200") as client:
for header_name, expected_value in custom_headers.items():
assert client.headers[header_name] == expected_value
async def test_custom_headers_case_preserved(self):
"""Test that custom header names preserve their case."""
custom_headers = {
"X-CamelCase-Header": "value1",
"lowercase-header": "value2",
"UPPERCASE-HEADER": "value3",
}
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: custom_headers}):
async with PrefectHttpxAsyncClient(
base_url="http://localhost:4200"
) as client:
# Headers should be accessible with original case
assert client.headers["X-CamelCase-Header"] == "value1"
assert client.headers["lowercase-header"] == "value2"
assert client.headers["UPPERCASE-HEADER"] == "value3"
async def test_empty_custom_headers(self):
"""Test that empty custom headers dict works correctly."""
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: {}}):
async with PrefectHttpxAsyncClient(
base_url="http://localhost:4200"
) as client:
# Should behave same as default (no custom headers)
assert "user-agent" in client.headers
# No unexpected headers should be added
expected_headers = {
"accept",
"accept-encoding",
"connection",
"user-agent",
}
actual_headers = {k.lower() for k in client.headers.keys()}
assert actual_headers == expected_headers
@pytest.mark.parametrize(
"protected_header",
[
"User-Agent",
"user-agent",
"USER-AGENT",
"Prefect-Csrf-Token",
"prefect-csrf-token",
"PREFECT-CSRF-TOKEN",
"Prefect-Csrf-Client",
"prefect-csrf-client",
],
)
async def test_protected_headers_case_insensitive(self, protected_header):
"""Test that protected headers are blocked regardless of case."""
custom_headers = {protected_header: "should-be-blocked"}
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: custom_headers}):
async with PrefectHttpxAsyncClient(
base_url="http://localhost:4200"
) as client:
if protected_header.lower() == "user-agent":
# User-Agent should still be the Prefect one (httpx normalizes to lowercase)
assert "prefect/" in client.headers["user-agent"]
assert "should-be-blocked" not in client.headers["user-agent"]
else:
# Other protected headers should not be in headers at all
# (they get added later in the request lifecycle)
assert protected_header.lower() not in client.headers
async def test_protected_headers_warning_logged(self, caplog):
"""Test that warning is logged when protected headers are attempted."""
import logging
malicious_headers = {
"User-Agent": "malicious-agent",
"Prefect-Csrf-Token": "fake-token",
}
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: malicious_headers}):
with caplog.at_level(logging.WARNING):
async with PrefectHttpxAsyncClient(base_url="http://localhost:4200"):
pass
# Should have logged warnings for both protected headers
warning_messages = [
record.message
for record in caplog.records
if record.levelname == "WARNING"
]
# Check that warnings were logged for protected headers
user_agent_warning = any(
"User-Agent" in msg and "ignored because it conflicts" in msg
for msg in warning_messages
)
csrf_warning = any(
"Prefect-Csrf-Token" in msg and "ignored because it conflicts" in msg
for msg in warning_messages
)
assert user_agent_warning, (
f"Expected User-Agent warning not found in: {warning_messages}"
)
assert csrf_warning, (
f"Expected Prefect-Csrf-Token warning not found in: {warning_messages}"
)
| TestCustomHeaders |
python | getsentry__sentry | src/sentry/replays/usecases/ingest/event_parser.py | {
"start": 1170,
"end": 1246
} | class ____:
timestamp: float
url: str | None
@dataclass
| HydrationError |
python | tensorflow__tensorflow | tensorflow/python/feature_column/serialization_test.py | {
"start": 929,
"end": 4073
} | class ____(test.TestCase):
"""Tests for serialization, deserialization helpers."""
def test_serialize_non_feature_column(self):
class NotAFeatureColumn(object):
pass
with self.assertRaisesRegex(ValueError, 'is not a FeatureColumn'):
serialization.serialize_feature_column(NotAFeatureColumn())
def test_deserialize_invalid_config(self):
with self.assertRaisesRegex(ValueError, 'Improper config format: {}'):
serialization.deserialize_feature_column({})
def test_deserialize_config_missing_key(self):
config_missing_key = {
'config': {
# Dtype is missing and should cause a failure.
# 'dtype': 'int32',
'default_value': None,
'key': 'a',
'normalizer_fn': None,
'shape': (2,)
},
'class_name': 'NumericColumn'
}
with self.assertRaisesRegex(ValueError,
'Invalid config:.*expected keys.*dtype'):
serialization.deserialize_feature_column(config_missing_key)
def test_deserialize_invalid_class(self):
with self.assertRaisesRegex(
ValueError, 'Unknown feature_column_v2: NotExistingFeatureColumnClass'):
serialization.deserialize_feature_column({
'class_name': 'NotExistingFeatureColumnClass',
'config': {}
})
def test_deserialization_deduping(self):
price = fc.numeric_column('price')
bucketized_price = fc.bucketized_column(price, boundaries=[0, 1])
configs = serialization.serialize_feature_columns([price, bucketized_price])
deserialized_feature_columns = serialization.deserialize_feature_columns(
configs)
self.assertLen(deserialized_feature_columns, 2)
new_price = deserialized_feature_columns[0]
new_bucketized_price = deserialized_feature_columns[1]
# Ensure these are not the original objects:
self.assertIsNot(price, new_price)
self.assertIsNot(bucketized_price, new_bucketized_price)
# But they are equivalent:
self.assertEqual(price, new_price)
self.assertEqual(bucketized_price, new_bucketized_price)
# Check that deduping worked:
self.assertIs(new_bucketized_price.source_column, new_price)
def deserialization_custom_objects(self):
# Note that custom_objects is also tested extensively above per class, this
# test ensures that the public wrappers also handle it correctly.
def _custom_fn(input_tensor):
return input_tensor + 42.
price = fc.numeric_column('price', normalizer_fn=_custom_fn)
configs = serialization.serialize_feature_columns([price])
deserialized_feature_columns = serialization.deserialize_feature_columns(
configs)
self.assertLen(deserialized_feature_columns, 1)
new_price = deserialized_feature_columns[0]
# Ensure these are not the original objects:
self.assertIsNot(price, new_price)
# But they are equivalent:
self.assertEqual(price, new_price)
# Check that normalizer_fn points to the correct function.
self.assertIs(new_price.normalizer_fn, _custom_fn)
if __name__ == '__main__':
test.main()
| FeatureColumnSerializationTest |
python | altair-viz__altair | tests/utils/test_core.py | {
"start": 1630,
"end": 11863
} | class ____(ValueChannel, schemapi.SchemaBase):
_schema = {json_schema_dict_str}
_encoding_name = "strokeWidth"
'''
@pytest.fixture(params=[False, True])
def pd_data(request) -> pd.DataFrame:
data = pd.DataFrame(
{
"x": [1, 2, 3, 4, 5],
"y": ["A", "B", "C", "D", "E"],
"z": pd.date_range("2018-01-01", periods=5, freq="D"),
"t": pd.date_range("2018-01-01", periods=5, freq="D").tz_localize("UTC"),
}
)
object_dtype = request.param
if object_dtype:
data = data.astype("object")
return data
@pytest.mark.parametrize(
("value", "expected_type"),
[
([1, 2, 3], "integer"),
([1.0, 2.0, 3.0], "floating"),
([1, 2.0, 3], "mixed-integer-float"),
(["a", "b", "c"], "string"),
(["a", "b", np.nan], "mixed"),
],
)
def test_infer_dtype(value, expected_type):
assert infer_dtype(value, skipna=False) == expected_type
# ruff: noqa: C408
@pytest.mark.parametrize(
("shorthand", "expected"),
[
("", {}),
# Fields alone
("foobar", dict(field="foobar")),
(r"blah\:(fd ", dict(field=r"blah\:(fd ")),
# Fields with type
("foobar:quantitative", dict(type="quantitative", field="foobar")),
("foobar:nominal", dict(type="nominal", field="foobar")),
("foobar:ordinal", dict(type="ordinal", field="foobar")),
("foobar:temporal", dict(type="temporal", field="foobar")),
("foobar:geojson", dict(type="geojson", field="foobar")),
("foobar:Q", dict(type="quantitative", field="foobar")),
("foobar:N", dict(type="nominal", field="foobar")),
("foobar:O", dict(type="ordinal", field="foobar")),
("foobar:T", dict(type="temporal", field="foobar")),
("foobar:G", dict(type="geojson", field="foobar")),
# Fields with aggregate and/or type
("average(foobar)", dict(field="foobar", aggregate="average")),
(
"min(foobar):temporal",
dict(type="temporal", field="foobar", aggregate="min"),
),
("sum(foobar):Q", dict(type="quantitative", field="foobar", aggregate="sum")),
# check that invalid arguments are not split-out
("invalid(blah)", dict(field="invalid(blah)")),
(r"blah\:invalid", dict(field=r"blah\:invalid")),
(r"invalid(blah)\:invalid", dict(field=r"invalid(blah)\:invalid")),
# check parsing in presence of strange characters
(
r"average(a b\:(c\nd):Q",
dict(aggregate="average", field=r"a b\:(c\nd", type="quantitative"),
),
# special case: count doesn't need an argument
("count()", dict(aggregate="count", type="quantitative")),
("count():O", dict(aggregate="count", type="ordinal")),
# time units:
("month(x)", dict(field="x", timeUnit="month", type="temporal")),
("year(foo):O", dict(field="foo", timeUnit="year", type="ordinal")),
(
"date(date):quantitative",
dict(field="date", timeUnit="date", type="quantitative"),
),
(
"yearmonthdate(field)",
dict(field="field", timeUnit="yearmonthdate", type="temporal"),
),
],
)
def test_parse_shorthand(shorthand: str, expected: dict[str, Any]) -> None:
assert parse_shorthand(shorthand) == expected
@pytest.mark.parametrize(
("shorthand", "expected"),
[
("x", dict(field="x", type="quantitative")),
("y", dict(field="y", type="nominal")),
("z", dict(field="z", type="temporal")),
("t", dict(field="t", type="temporal")),
("count(x)", dict(field="x", aggregate="count", type="quantitative")),
("count()", dict(aggregate="count", type="quantitative")),
("month(z)", dict(timeUnit="month", field="z", type="temporal")),
("month(t)", dict(timeUnit="month", field="t", type="temporal")),
],
)
def test_parse_shorthand_with_data(
pd_data, shorthand: str, expected: dict[str, Any]
) -> None:
assert parse_shorthand(shorthand, pd_data) == expected
@pytest.mark.skipif(Version("1.0.0") > PANDAS_VERSION, reason="dtype unavailable")
def test_parse_shorthand_with_data_pandas_v1(pd_data) -> None:
pd_data["b"] = pd.Series([True, False, True, False, None], dtype="boolean")
shorthand = "b"
expected = dict(field="b", type="nominal")
assert parse_shorthand(shorthand, pd_data) == expected
@skip_requires_pyarrow
def test_parse_shorthand_for_arrow_timestamp():
import pyarrow as pa
data = pd.DataFrame(
{
"z": pd.date_range("2018-01-01", periods=5, freq="D"),
"t": pd.date_range("2018-01-01", periods=5, freq="D").tz_localize("UTC"),
}
)
# Convert to arrow-packed dtypes
data = pa.Table.from_pandas(data).to_pandas(types_mapper=pd.ArrowDtype)
assert parse_shorthand("z", data) == {"field": "z", "type": "temporal"}
assert parse_shorthand("z", data) == {"field": "z", "type": "temporal"}
def test_parse_shorthand_all_aggregates():
aggregates = alt.Root._schema["definitions"]["AggregateOp"]["enum"]
for aggregate in aggregates:
shorthand = f"{aggregate}(field):Q"
assert parse_shorthand(shorthand) == {
"aggregate": aggregate,
"field": "field",
"type": "quantitative",
}
def test_parse_shorthand_all_timeunits():
timeUnits = []
for loc in ["Local", "Utc"]:
for typ in ["Single", "Multi"]:
defn = loc + typ + "TimeUnit"
timeUnits.extend(alt.Root._schema["definitions"][defn]["enum"])
for timeUnit in timeUnits:
shorthand = f"{timeUnit}(field):Q"
assert parse_shorthand(shorthand) == {
"timeUnit": timeUnit,
"field": "field",
"type": "quantitative",
}
def test_parse_shorthand_window_count():
shorthand = "count()"
dct = parse_shorthand(
shorthand,
parse_aggregates=False,
parse_window_ops=True,
parse_timeunits=False,
parse_types=False,
)
assert dct == {"op": "count"}
def test_parse_shorthand_all_window_ops():
window_ops = alt.Root._schema["definitions"]["WindowOnlyOp"]["enum"]
aggregates = alt.Root._schema["definitions"]["AggregateOp"]["enum"]
for op in window_ops + aggregates:
shorthand = f"{op}(field)"
dct = parse_shorthand(
shorthand,
parse_aggregates=False,
parse_window_ops=True,
parse_timeunits=False,
parse_types=False,
)
assert dct == {"field": "field", "op": op}
def test_update_nested():
original = {"x": {"b": {"foo": 2}, "c": 4}}
update = {"x": {"b": {"foo": 5}, "d": 6}, "y": 40}
output = update_nested(original, update, copy=True)
assert output is not original
assert output == {"x": {"b": {"foo": 5}, "c": 4, "d": 6}, "y": 40}
output2 = update_nested(original, update)
assert output2 is original
assert output == output2
@pytest.fixture
def channels() -> types.ModuleType:
channels = types.ModuleType("channels")
exec(FAKE_CHANNELS_MODULE, channels.__dict__)
return channels
@pytest.fixture
def channels_cached(channels) -> core._ChannelCache:
"""Previously ``_ChannelCache.from_channels``."""
cached = core._ChannelCache.__new__(core._ChannelCache)
cached.channel_to_name = {
c: c._encoding_name # pyright: ignore[reportAttributeAccessIssue]
for c in channels.__dict__.values()
if isinstance(c, type)
and issubclass(c, alt.SchemaBase)
and hasattr(c, "_encoding_name")
}
cached.name_to_channel = core._invert_group_channels(cached.channel_to_name)
return cached
def _getargs(*args, **kwargs):
return args, kwargs
def test_infer_encoding_types(
monkeypatch: pytest.MonkeyPatch, channels, channels_cached
):
# Indirectly initialize `_CHANNEL_CACHE`
infer_encoding_types((), {})
# Replace with contents of `FAKE_CHANNELS_MODULE`
# Scoped to only this test
monkeypatch.setattr(core, "_CHANNEL_CACHE", channels_cached)
expected = {
"x": channels.X("xval"),
"y": channels.YValue("yval"),
"strokeWidth": channels.StrokeWidthValue(value=4),
}
# All positional args
args, kwds = _getargs(
channels.X("xval"), channels.YValue("yval"), channels.StrokeWidthValue(4)
)
assert infer_encoding_types(args, kwds) == expected
# All keyword args
args, kwds = _getargs(x="xval", y=alt.value("yval"), strokeWidth=alt.value(4))
assert infer_encoding_types(args, kwds) == expected
# Mixed positional & keyword
args, kwds = _getargs(
channels.X("xval"), channels.YValue("yval"), strokeWidth=alt.value(4)
)
assert infer_encoding_types(args, kwds) == expected
def test_infer_encoding_types_with_condition():
args, kwds = _getargs(
size=alt.condition("pred1", alt.value(1), alt.value(2)),
color=alt.condition("pred2", alt.value("red"), "cfield:N"),
opacity=alt.condition("pred3", "ofield:N", alt.value(0.2)),
)
expected = {
"size": alt.SizeValue(
2,
condition=alt.ConditionalPredicateValueDefnumberExprRef(
value=1, test=alt.Predicate("pred1")
),
),
"color": alt.Color(
field=alt.FieldName("cfield"),
type=alt.StandardType("nominal"),
condition=alt.ConditionalPredicateValueDefGradientstringnullExprRef(
value="red",
test=alt.Predicate("pred2"),
),
),
"opacity": alt.OpacityValue(
0.2,
condition=alt.ConditionalPredicateMarkPropFieldOrDatumDef(
field=alt.FieldName("ofield"),
test=alt.Predicate("pred3"),
type=alt.StandardType("nominal"),
),
),
}
assert infer_encoding_types(args, kwds) == expected
def test_invalid_data_type():
with pytest.raises(
ValueError, match=r'"\(fd " is not one of the valid encoding data types'
):
parse_shorthand(r"blah:(fd ")
| StrokeWidthValue |
python | great-expectations__great_expectations | great_expectations/metrics/column_pair/column_pair.py | {
"start": 266,
"end": 622
} | class ____(Metric[_MetricResult], kw_only=True):
column_A: NonEmptyString
column_B: NonEmptyString
ignore_row_if: Literal["both_values_are_missing", "either_value_is_missing", "neither"] = (
"both_values_are_missing"
)
row_condition: Optional[StrictStr] = None
condition_parser: Optional[ConditionParser] = None
| ColumnPairMetric |
python | spack__spack | lib/spack/spack/repo.py | {
"start": 78576,
"end": 80367
} | class ____(UnknownEntityError):
"""Raised when we encounter a package spack doesn't have."""
def __init__(
self,
name,
repo: Optional[Union[Repo, RepoPath, str]] = None,
*,
get_close_matches=difflib.get_close_matches,
):
msg = "Attempting to retrieve anonymous package."
long_msg = None
if name:
msg = f"Package '{name}' not found"
if repo:
if isinstance(repo, Repo):
msg += f" in repository '{repo.root}'"
elif isinstance(repo, str):
msg += f" in repository '{repo}'"
# Special handling for specs that may have been intended as
# filenames: prompt the user to ask whether they intended to write
# './<name>'.
if name.endswith(".yaml"):
long_msg = "Did you mean to specify a filename with './{0}'?"
long_msg = long_msg.format(name)
else:
long_msg = "Use 'spack create' to create a new package."
if not repo:
repo = PATH.ensure_unwrapped()
# We need to compare the base package name
pkg_name = name.rsplit(".", 1)[-1]
similar = []
if isinstance(repo, RepoPath):
try:
similar = get_close_matches(pkg_name, repo.all_package_names())
except Exception:
pass
if 1 <= len(similar) <= 5:
long_msg += "\n\nDid you mean one of the following packages?\n "
long_msg += "\n ".join(similar)
super().__init__(msg, long_msg)
self.name = name
| UnknownPackageError |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/utils/waiter_with_logging.py | {
"start": 6008,
"end": 6782
} | class ____:
"""
Contains the info necessary to extract the status from a response; only computes the value when necessary.
Used to avoid computations if the logs are disabled at the given level.
"""
def __init__(self, jmespath_queries: list[str], response: dict[str, Any]):
self.jmespath_queries = jmespath_queries
self.response = response
def __str__(self):
"""Loop through the args list and generate a string containing values from the waiter response."""
values = []
for query in self.jmespath_queries:
value = jmespath.search(query, self.response)
if value is not None and value != "":
values.append(str(value))
return " - ".join(values)
| _LazyStatusFormatter |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_lib_test.py | {
"start": 2315,
"end": 5672
} | class ____(distribute_lib.StrategyExtendedV1):
def __init__(self, distribute):
super(_TestExtended, self).__init__(distribute)
worker_device_pairs = [("", ["/device:CPU:0"])]
self._input_workers = input_lib.InputWorkers(worker_device_pairs)
def _call_for_each_replica(self, fn, args, kwargs):
with _TestReplicaContext(
self._container_strategy(), replica_id_in_sync_group=0):
return fn(*args, **kwargs)
def _create_variable(self, next_creator, **kwargs):
return _get_test_variable(kwargs["name"], kwargs["synchronization"],
kwargs["aggregation"])
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
return input_lib_v1.InputFunctionIterator(input_fn, self._input_workers,
[distribute_lib.InputContext()],
self._container_strategy())
def _distribute_datasets_from_function(self, dataset_fn, options):
return dataset_fn(distribute_lib.InputContext())
def _local_results(self, value):
return (value,)
def _reduce_to(self, reduce_op, value, destinations, options):
del reduce_op, destinations, options
return value
def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
initial_loop_values=None):
# TODO(tomhennigan) This is missing many things (e.g. ctx.run_op).
ctx = input_lib.MultiStepContext()
for _ in range(iterations):
fn(ctx, iterator.get_next())
return ctx
def _update(self, var, fn, args, kwargs, group):
# The implementations of _update() and _update_non_slot() are identical
# except _update() passes `var` as the first argument to `fn()`.
return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._unwrap, result)
def _get_local_replica_id(self, replica_id_in_sync_group):
return replica_id_in_sync_group
def _assert_in_default_state(t):
t.assertIs(distribute_lib._get_default_replica_context(),
distribute_lib.get_replica_context())
t.assertIs(None, distribute_lib.get_cross_replica_context())
t.assertFalse(distribute_lib.in_cross_replica_context())
t.assertIs(
distribute_lib._get_default_strategy(), distribute_lib.get_strategy())
t.assertFalse(distribute_lib.has_strategy())
def _run_in_and_out_of_scope(unbound_test_method):
def wrapper(test_case):
dist = _TestStrategy()
# Running in the default (replica) scope should be supported.
_assert_in_default_state(test_case)
unbound_test_method(test_case, dist)
# As well as running in the strategy scope.
with dist.scope():
unbound_test_method(test_case, dist)
_assert_in_default_state(test_case)
# When run under a different strategy the test method should fail.
another_strategy = _TestStrategy()
msg = "Mixing different .*Strategy objects"
with test_case.assertRaisesRegex(RuntimeError, msg):
with another_strategy.scope():
unbound_test_method(test_case, dist)
return wrapper
| _TestExtended |
python | catalyst-team__catalyst | examples/detection/models/yolo_x.py | {
"start": 17542,
"end": 40711
} | class ____(nn.Module):
def __init__(
self,
num_classes,
width=1.0,
strides=[8, 16, 32],
in_channels=[256, 512, 1024],
act="silu",
depthwise=False,
):
"""
Args:
act (str): activation type of conv.
Default is `"silu"`.
depthwise (bool): wheather apply depthwise conv in conv branch.
Default is `False`.
"""
super().__init__()
self.n_anchors = 1
self.num_classes = num_classes
self.decode_in_inference = True # for deploy, set to False
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
self.cls_preds = nn.ModuleList()
self.reg_preds = nn.ModuleList()
self.obj_preds = nn.ModuleList()
self.stems = nn.ModuleList()
_conv_class = DWConv if depthwise else BaseConv
for i in range(len(in_channels)):
self.stems.append(
BaseConv(
in_channels=int(in_channels[i] * width),
out_channels=int(256 * width),
ksize=1,
stride=1,
act=act,
)
)
self.cls_convs.append(
nn.Sequential(
*[
_conv_class(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
_conv_class(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.reg_convs.append(
nn.Sequential(
*[
_conv_class(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
_conv_class(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.cls_preds.append(
nn.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * self.num_classes,
kernel_size=1,
stride=1,
padding=0,
)
)
self.reg_preds.append(
nn.Conv2d(
in_channels=int(256 * width),
out_channels=4,
kernel_size=1,
stride=1,
padding=0,
)
)
self.obj_preds.append(
nn.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * 1,
kernel_size=1,
stride=1,
padding=0,
)
)
self.use_l1 = False
self.l1_loss = nn.L1Loss(reduction="none")
self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction="none")
self.iou_loss = IOUloss(reduction="none")
self.strides = strides
# self.grids = nn.ParameterList(
# [nn.Parameter(torch.zeros(1), requires_grad=False) for _ in range(len(in_channels))]
# )
self.grids = [torch.zeros(1)] * len(in_channels)
# self.expanded_strides = [None] * len(in_channels)
def initialize_biases(self, prior_prob):
for conv in self.cls_preds:
b = conv.bias.view(self.n_anchors, -1)
b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
for conv in self.obj_preds:
b = conv.bias.view(self.n_anchors, -1)
b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def forward(self, xin, labels=None, imgs=None):
outputs = []
origin_preds = []
x_shifts = []
y_shifts = []
expanded_strides = []
for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(
zip(self.cls_convs, self.reg_convs, self.strides, xin)
):
x = self.stems[k](x)
cls_x = x
reg_x = x
cls_feat = cls_conv(cls_x)
cls_output = self.cls_preds[k](cls_feat)
reg_feat = reg_conv(reg_x)
reg_output = self.reg_preds[k](reg_feat)
obj_output = self.obj_preds[k](reg_feat)
if self.training:
output = torch.cat([reg_output, obj_output, cls_output], 1)
output, grid = self.get_output_and_grid(
output, k, stride_this_level, xin[0].type()
)
x_shifts.append(grid[:, :, 0])
y_shifts.append(grid[:, :, 1])
expanded_strides.append(
torch.zeros(1, grid.shape[1])
.fill_(stride_this_level)
.type_as(xin[0])
)
if self.use_l1:
batch_size = reg_output.shape[0]
hsize, wsize = reg_output.shape[-2:]
reg_output = reg_output.view(
batch_size, self.n_anchors, 4, hsize, wsize
)
reg_output = reg_output.permute(0, 1, 3, 4, 2).reshape(
batch_size, -1, 4
)
origin_preds.append(reg_output.clone())
else:
output = torch.cat(
[reg_output, obj_output.sigmoid(), cls_output.sigmoid()], 1
)
outputs.append(output)
if self.training:
return self.get_losses(
imgs,
x_shifts,
y_shifts,
expanded_strides,
labels,
torch.cat(outputs, 1),
origin_preds,
dtype=xin[0].dtype,
)
else:
self.hw = [x.shape[-2:] for x in outputs]
# [batch, n_anchors_all, 85]
outputs = torch.cat(
[x.flatten(start_dim=2) for x in outputs], dim=2
).permute(0, 2, 1)
if self.decode_in_inference:
return self.decode_outputs(outputs, dtype=xin[0].type())
else:
return outputs
def get_output_and_grid(self, output, k, stride, dtype):
grid = self.grids[k]
batch_size = output.shape[0]
n_ch = 5 + self.num_classes
hsize, wsize = output.shape[-2:]
if grid.shape[2:4] != output.shape[2:4]:
yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize, 2).type(dtype)
self.grids[k] = grid
output = output.view(batch_size, self.n_anchors, n_ch, hsize, wsize)
output = output.permute(0, 1, 3, 4, 2).reshape(
batch_size, self.n_anchors * hsize * wsize, -1
)
grid = grid.view(1, -1, 2)
output[..., :2] = (output[..., :2] + grid) * stride
output[..., 2:4] = torch.exp(output[..., 2:4]) * stride
return output, grid
def decode_outputs(self, outputs, dtype):
grids = []
strides = []
for (hsize, wsize), stride in zip(self.hw, self.strides):
yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
grid = torch.stack((xv, yv), 2).view(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
strides.append(torch.full((*shape, 1), stride))
grids = torch.cat(grids, dim=1).type(dtype)
strides = torch.cat(strides, dim=1).type(dtype)
outputs[..., :2] = (outputs[..., :2] + grids) * strides
outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides
return outputs
def get_losses(
self,
imgs,
x_shifts,
y_shifts,
expanded_strides,
labels,
outputs,
origin_preds,
dtype,
):
bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]
obj_preds = outputs[:, :, 4].unsqueeze(-1) # [batch, n_anchors_all, 1]
cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls]
# calculate targets
mixup = labels.shape[2] > 5
if mixup:
label_cut = labels[..., :5]
else:
label_cut = labels
nlabel = (label_cut.sum(dim=2) > 0).sum(dim=1) # number of objects
total_num_anchors = outputs.shape[1]
x_shifts = torch.cat(x_shifts, 1) # [1, n_anchors_all]
y_shifts = torch.cat(y_shifts, 1) # [1, n_anchors_all]
expanded_strides = torch.cat(expanded_strides, 1)
if self.use_l1:
origin_preds = torch.cat(origin_preds, 1)
cls_targets = []
reg_targets = []
l1_targets = []
obj_targets = []
fg_masks = []
num_fg = 0.0
num_gts = 0.0
for batch_idx in range(outputs.shape[0]):
num_gt = int(nlabel[batch_idx])
num_gts += num_gt
if num_gt == 0:
cls_target = outputs.new_zeros((0, self.num_classes))
reg_target = outputs.new_zeros((0, 4))
l1_target = outputs.new_zeros((0, 4))
obj_target = outputs.new_zeros((total_num_anchors, 1))
fg_mask = outputs.new_zeros(total_num_anchors).bool()
else:
gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]
gt_classes = labels[batch_idx, :num_gt, 0]
bboxes_preds_per_image = bbox_preds[batch_idx]
try:
(
gt_matched_classes,
fg_mask,
pred_ious_this_matching,
matched_gt_inds,
num_fg_img,
) = self.get_assignments( # noqa
batch_idx,
num_gt,
total_num_anchors,
gt_bboxes_per_image,
gt_classes,
bboxes_preds_per_image,
expanded_strides,
x_shifts,
y_shifts,
cls_preds,
bbox_preds,
obj_preds,
labels,
imgs,
)
except RuntimeError:
logger.error(
"OOM RuntimeError is raised due to the huge "
"memory cost during label assignment. "
"CPU mode is applied in this batch. "
"If you want to avoid this issue, "
"try to reduce the batch size or image size."
)
torch.cuda.empty_cache()
(
gt_matched_classes,
fg_mask,
pred_ious_this_matching,
matched_gt_inds,
num_fg_img,
) = self.get_assignments( # noqa
batch_idx,
num_gt,
total_num_anchors,
gt_bboxes_per_image,
gt_classes,
bboxes_preds_per_image,
expanded_strides,
x_shifts,
y_shifts,
cls_preds,
bbox_preds,
obj_preds,
labels,
imgs,
"cpu",
)
torch.cuda.empty_cache()
num_fg += num_fg_img
cls_target = F.one_hot(
gt_matched_classes.to(torch.int64), self.num_classes
) * pred_ious_this_matching.unsqueeze(-1)
obj_target = fg_mask.unsqueeze(-1)
reg_target = gt_bboxes_per_image[matched_gt_inds]
if self.use_l1:
l1_target = self.get_l1_target(
outputs.new_zeros((num_fg_img, 4)),
gt_bboxes_per_image[matched_gt_inds],
expanded_strides[0][fg_mask],
x_shifts=x_shifts[0][fg_mask],
y_shifts=y_shifts[0][fg_mask],
)
cls_targets.append(cls_target)
reg_targets.append(reg_target)
obj_targets.append(obj_target.to(dtype))
fg_masks.append(fg_mask)
if self.use_l1:
l1_targets.append(l1_target)
cls_targets = torch.cat(cls_targets, 0)
reg_targets = torch.cat(reg_targets, 0)
obj_targets = torch.cat(obj_targets, 0)
fg_masks = torch.cat(fg_masks, 0)
if self.use_l1:
l1_targets = torch.cat(l1_targets, 0)
num_fg = max(num_fg, 1)
loss_iou = (
self.iou_loss(bbox_preds.view(-1, 4)[fg_masks], reg_targets)
).sum() / num_fg
loss_obj = (
self.bcewithlog_loss(obj_preds.view(-1, 1), obj_targets)
).sum() / num_fg
loss_cls = (
self.bcewithlog_loss(
cls_preds.view(-1, self.num_classes)[fg_masks], cls_targets
)
).sum() / num_fg
if self.use_l1:
loss_l1 = (
self.l1_loss(origin_preds.view(-1, 4)[fg_masks], l1_targets)
).sum() / num_fg
else:
loss_l1 = 0.0
reg_weight = 5.0
loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1
return (
loss,
reg_weight * loss_iou,
loss_obj,
loss_cls,
loss_l1,
num_fg / max(num_gts, 1),
)
def get_l1_target(self, l1_target, gt, stride, x_shifts, y_shifts, eps=1e-8):
l1_target[:, 0] = gt[:, 0] / stride - x_shifts
l1_target[:, 1] = gt[:, 1] / stride - y_shifts
l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps)
l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps)
return l1_target
@torch.no_grad()
def get_assignments(
self,
batch_idx,
num_gt,
total_num_anchors,
gt_bboxes_per_image,
gt_classes,
bboxes_preds_per_image,
expanded_strides,
x_shifts,
y_shifts,
cls_preds,
bbox_preds,
obj_preds,
labels,
imgs,
mode="gpu",
):
if mode == "cpu":
print("------------CPU Mode for This Batch-------------")
gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()
bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()
gt_classes = gt_classes.cpu().float()
expanded_strides = expanded_strides.cpu().float()
x_shifts = x_shifts.cpu()
y_shifts = y_shifts.cpu()
fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
gt_bboxes_per_image,
expanded_strides,
x_shifts,
y_shifts,
total_num_anchors,
num_gt,
)
bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]
cls_preds_ = cls_preds[batch_idx][fg_mask]
obj_preds_ = obj_preds[batch_idx][fg_mask]
num_in_boxes_anchor = bboxes_preds_per_image.shape[0]
if mode == "cpu":
gt_bboxes_per_image = gt_bboxes_per_image.cpu()
bboxes_preds_per_image = bboxes_preds_per_image.cpu()
pair_wise_ious = bboxes_iou(gt_bboxes_per_image, bboxes_preds_per_image, False)
gt_cls_per_image = (
F.one_hot(gt_classes.to(torch.int64), self.num_classes)
.float()
.unsqueeze(1)
.repeat(1, num_in_boxes_anchor, 1)
)
pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
if mode == "cpu":
cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()
cls_preds_ = (
cls_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
* obj_preds_.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() # noqa: W503
)
pair_wise_cls_loss = F.binary_cross_entropy(
cls_preds_.sqrt_(), gt_cls_per_image, reduction="none"
).sum(-1)
del cls_preds_
cost = (
pair_wise_cls_loss
+ 3.0 * pair_wise_ious_loss
+ 100000.0 * (~is_in_boxes_and_center)
)
(
num_fg,
gt_matched_classes,
pred_ious_this_matching,
matched_gt_inds,
) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt, fg_mask)
del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
if mode == "cpu":
gt_matched_classes = gt_matched_classes.cuda()
fg_mask = fg_mask.cuda()
pred_ious_this_matching = pred_ious_this_matching.cuda()
matched_gt_inds = matched_gt_inds.cuda()
return (
gt_matched_classes,
fg_mask,
pred_ious_this_matching,
matched_gt_inds,
num_fg,
)
def get_in_boxes_info(
self,
gt_bboxes_per_image,
expanded_strides,
x_shifts,
y_shifts,
total_num_anchors,
num_gt,
):
expanded_strides_per_image = expanded_strides[0]
x_shifts_per_image = x_shifts[0] * expanded_strides_per_image
y_shifts_per_image = y_shifts[0] * expanded_strides_per_image
# fmt: off
x_centers_per_image = (
(x_shifts_per_image + 0.5 * expanded_strides_per_image)
.unsqueeze(0)
.repeat(num_gt, 1)
) # [n_anchor] -> [n_gt, n_anchor]
y_centers_per_image = (
(y_shifts_per_image + 0.5 * expanded_strides_per_image)
.unsqueeze(0)
.repeat(num_gt, 1)
)
# fmt: on
gt_bboxes_per_image_l = (
(gt_bboxes_per_image[:, 0] - 0.5 * gt_bboxes_per_image[:, 2])
.unsqueeze(1)
.repeat(1, total_num_anchors)
)
gt_bboxes_per_image_r = (
(gt_bboxes_per_image[:, 0] + 0.5 * gt_bboxes_per_image[:, 2])
.unsqueeze(1)
.repeat(1, total_num_anchors)
)
gt_bboxes_per_image_t = (
(gt_bboxes_per_image[:, 1] - 0.5 * gt_bboxes_per_image[:, 3])
.unsqueeze(1)
.repeat(1, total_num_anchors)
)
gt_bboxes_per_image_b = (
(gt_bboxes_per_image[:, 1] + 0.5 * gt_bboxes_per_image[:, 3])
.unsqueeze(1)
.repeat(1, total_num_anchors)
)
b_l = x_centers_per_image - gt_bboxes_per_image_l
b_r = gt_bboxes_per_image_r - x_centers_per_image
b_t = y_centers_per_image - gt_bboxes_per_image_t
b_b = gt_bboxes_per_image_b - y_centers_per_image
bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)
is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0
is_in_boxes_all = is_in_boxes.sum(dim=0) > 0
# in fixed center
center_radius = 2.5
gt_bboxes_per_image_l = (gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
1, total_num_anchors
) - center_radius * expanded_strides_per_image.unsqueeze(0)
gt_bboxes_per_image_r = (gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
1, total_num_anchors
) + center_radius * expanded_strides_per_image.unsqueeze(0)
gt_bboxes_per_image_t = (gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
1, total_num_anchors
) - center_radius * expanded_strides_per_image.unsqueeze(0)
gt_bboxes_per_image_b = (gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
1, total_num_anchors
) + center_radius * expanded_strides_per_image.unsqueeze(0)
c_l = x_centers_per_image - gt_bboxes_per_image_l
c_r = gt_bboxes_per_image_r - x_centers_per_image
c_t = y_centers_per_image - gt_bboxes_per_image_t
c_b = gt_bboxes_per_image_b - y_centers_per_image
center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)
is_in_centers = center_deltas.min(dim=-1).values > 0.0
is_in_centers_all = is_in_centers.sum(dim=0) > 0
# in boxes and in centers
is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
is_in_boxes_and_center = (
is_in_boxes[:, is_in_boxes_anchor] & is_in_centers[:, is_in_boxes_anchor]
)
return is_in_boxes_anchor, is_in_boxes_and_center
def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt, fg_mask):
# Dynamic K
# ---------------------------------------------------------------
matching_matrix = torch.zeros_like(cost)
ious_in_boxes_matrix = pair_wise_ious
n_candidate_k = 10
topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
for gt_idx in range(num_gt):
_, pos_idx = torch.topk(
cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
)
matching_matrix[gt_idx][pos_idx] = 1.0
del topk_ious, dynamic_ks, pos_idx
anchor_matching_gt = matching_matrix.sum(0)
if (anchor_matching_gt > 1).sum() > 0:
cost_min, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
matching_matrix[:, anchor_matching_gt > 1] *= 0.0
matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
fg_mask_inboxes = matching_matrix.sum(0) > 0.0
num_fg = fg_mask_inboxes.sum().item()
fg_mask[fg_mask.clone()] = fg_mask_inboxes
matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
gt_matched_classes = gt_classes[matched_gt_inds]
pred_ious_this_matching = (matching_matrix * pair_wise_ious).sum(0)[
fg_mask_inboxes
]
return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
| YOLOXHead |
python | langchain-ai__langchain | libs/core/tests/unit_tests/example_selectors/test_base.py | {
"start": 107,
"end": 816
} | class ____(BaseExampleSelector):
def __init__(self) -> None:
self.example: dict[str, str] | None = None
def add_example(self, example: dict[str, str]) -> None:
self.example = example
@override
def select_examples(self, input_variables: dict[str, str]) -> list[dict]:
return [input_variables]
async def test_aadd_example() -> None:
selector = DummyExampleSelector()
await selector.aadd_example({"foo": "bar"})
assert selector.example == {"foo": "bar"}
async def test_aselect_examples() -> None:
selector = DummyExampleSelector()
examples = await selector.aselect_examples({"foo": "bar"})
assert examples == [{"foo": "bar"}]
| DummyExampleSelector |
python | sqlalchemy__sqlalchemy | examples/sharding/separate_schema_translates.py | {
"start": 1684,
"end": 2124
} | class ____(DeclarativeBase):
pass
# table setup. we'll store a lead table of continents/cities, and a secondary
# table storing locations. a particular row will be placed in the database
# whose shard id corresponds to the 'continent'. in this setup, secondary rows
# in 'weather_reports' will be placed in the same DB as that of the parent, but
# this can be changed if you're willing to write more complex sharding
# functions.
| Base |
python | conda__conda | conda/plugins/types.py | {
"start": 15616,
"end": 16907
} | class ____(ABC):
"""
**EXPERIMENTAL**
Base class for all environment specifications.
Environment specs parse different types of environment definition files
(environment.yml, requirements.txt, pyproject.toml, etc.) into a common
Environment object model.
"""
# Determines if the EnvSpec plugin should be included in the set
# of available plugins checked during environment_spec plugin detection.
# If set to False, the only way to use the plugin will be through explicitly
# requesting it as a cli argument or setting in .condarc. By default,
# autodetection is enabled.
detection_supported: ClassVar[bool] = True
@abstractmethod
def can_handle(self) -> bool:
"""
Determines if the EnvSpec plugin can read and operate on the
environment described by the `filename`.
:returns bool: returns True, if the plugin can interpret the file.
"""
raise NotImplementedError()
@property
@abstractmethod
def env(self) -> Environment:
"""
Express the provided environment file as a conda environment object.
:returns Environment: the conda environment represented by the file.
"""
raise NotImplementedError()
@dataclass
| EnvironmentSpecBase |
python | huggingface__transformers | src/transformers/models/pix2struct/modeling_pix2struct.py | {
"start": 58633,
"end": 67175
} | class ____(Pix2StructPreTrainedModel, GenerationMixin):
config: Pix2StructConfig
main_input_name = "flattened_patches"
def __init__(self, config: Pix2StructConfig):
super().__init__(config)
self.encoder = Pix2StructVisionModel(config.vision_config)
self.decoder = Pix2StructTextModel(config.text_config)
self.is_vqa = config.is_vqa
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.decoder.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
self.decoder.set_input_embeddings(new_embeddings)
def get_output_embeddings(self) -> nn.Module:
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
self.decoder.set_output_embeddings(new_embeddings)
@auto_docstring
def forward(
self,
flattened_patches: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
labels: Optional[torch.LongTensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]:
r"""
flattened_patches (`torch.FloatTensor` of shape `(batch_size, seq_length, hidden_size)`):
Flattened pixel patches. the `hidden_size` is obtained by the following formula: `hidden_size` =
`num_channels` * `patch_size` * `patch_size`
The process of flattening the pixel patches is done by `Pix2StructProcessor`.
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss for the decoder.
Example:
Inference:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration
>>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
>>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base")
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> # autoregressive generation
>>> generated_ids = model.generate(**inputs, max_new_tokens=50)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_text)
A stop sign is on a street corner.
>>> # conditional generation
>>> text = "A picture of"
>>> inputs = processor(text=text, images=image, return_tensors="pt", add_special_tokens=False)
>>> generated_ids = model.generate(**inputs, max_new_tokens=50)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_text)
A picture of a stop sign with a red stop sign
```
Training:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration
>>> processor = AutoProcessor.from_pretrained("google/pix2struct-base")
>>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-base")
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "A stop sign is on the street corner."
>>> inputs = processor(images=image, return_tensors="pt")
>>> labels = processor(text=text, return_tensors="pt").input_ids
>>> # forward pass
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> print(f"{loss.item():.5f}")
5.94282
```"""
use_cache = use_cache if use_cache is not None else self.config.text_config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
flattened_patches=flattened_patches,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
decoder_attention_mask = (
decoder_attention_mask
if decoder_attention_mask is not None
else decoder_input_ids.ne(self.config.pad_token_id).float()
)
# Always attend to the first token
decoder_attention_mask[:, 0] = 1
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
labels=labels,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqLMOutput(
loss=decoder_outputs.loss,
logits=decoder_outputs.logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
__all__ = [
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
| Pix2StructForConditionalGeneration |
python | ray-project__ray | rllib/core/models/configs.py | {
"start": 1537,
"end": 3136
} | class ____(abc.ABC):
"""Base class for configuring a `Model` instance.
ModelConfigs are DL framework-agnostic.
A `Model` (as a sub-component of an `RLModule`) is built via calling the
respective ModelConfig's `build()` method.
RLModules build their sub-components this way after receiving one or more
`ModelConfig` instances from a Catalog object.
However, `ModelConfig` is not restricted to be used only with Catalog or RLModules.
Usage examples can be found in the individual Model classes', e.g.
see `ray.rllib.core.models.configs::MLPHeadConfig`.
Attributes:
input_dims: The input dimensions of the network
always_check_shapes: Whether to always check the inputs and outputs of the
model for the specifications. Input specifications are checked on failed
forward passes of the model regardless of this flag. If this flag is set
to `True`, inputs and outputs are checked on every call. This leads to
a slow-down and should only be used for debugging.
"""
input_dims: Union[List[int], Tuple[int, ...]] = None
always_check_shapes: bool = False
@abc.abstractmethod
def build(self, framework: str):
"""Builds the model.
Args:
framework: The framework to use for building the model.
"""
raise NotImplementedError
@property
def output_dims(self) -> Optional[Tuple[int, ...]]:
"""Read-only `output_dims` are inferred automatically from other settings."""
return None
@ExperimentalAPI
@dataclass
| ModelConfig |
python | run-llama__llama_index | llama-index-core/llama_index/core/graph_stores/types.py | {
"start": 3271,
"end": 6627
} | class ____(BaseModel):
"""In memory labelled property graph containing entities and relations."""
nodes: SerializeAsAny[Dict[str, LabelledNode]] = Field(default_factory=dict)
relations: SerializeAsAny[Dict[str, Relation]] = Field(default_factory=dict)
triplets: Set[Tuple[str, str, str]] = Field(
default_factory=set, description="List of triplets (subject, relation, object)."
)
def _get_relation_key(
self,
relation: Optional[Relation] = None,
subj_id: Optional[str] = None,
obj_id: Optional[str] = None,
rel_id: Optional[str] = None,
) -> str:
"""Get relation id."""
if relation:
return f"{relation.source_id}_{relation.label}_{relation.target_id}"
return f"{subj_id}_{rel_id}_{obj_id}"
def get_all_nodes(self) -> List[LabelledNode]:
"""Get all entities."""
return list(self.nodes.values())
def get_all_relations(self) -> List[Relation]:
"""Get all relations."""
return list(self.relations.values())
def get_triplets(self) -> List[Triplet]:
"""Get all triplets."""
return [
(
self.nodes[subj],
self.relations[
self._get_relation_key(obj_id=obj, subj_id=subj, rel_id=rel)
],
self.nodes[obj],
)
for subj, rel, obj in self.triplets
]
def add_triplet(self, triplet: Triplet) -> None:
"""Add a triplet."""
subj, rel, obj = triplet
if (subj.id, rel.id, obj.id) in self.triplets:
return
self.triplets.add((subj.id, rel.id, obj.id))
self.nodes[subj.id] = subj
self.nodes[obj.id] = obj
self.relations[self._get_relation_key(relation=rel)] = rel
def add_node(self, node: LabelledNode) -> None:
"""Add a node."""
self.nodes[node.id] = node
def add_relation(self, relation: Relation) -> None:
"""Add a relation."""
if relation.source_id not in self.nodes:
self.nodes[relation.source_id] = EntityNode(name=relation.source_id)
if relation.target_id not in self.nodes:
self.nodes[relation.target_id] = EntityNode(name=relation.target_id)
self.add_triplet(
(self.nodes[relation.source_id], relation, self.nodes[relation.target_id])
)
def delete_triplet(self, triplet: Triplet) -> None:
"""Delete a triplet."""
subj, rel, obj = triplet
if (subj.id, rel.id, obj.id) not in self.triplets:
return
self.triplets.remove((subj.id, rel.id, obj.id))
if subj.id in self.nodes:
del self.nodes[subj.id]
if obj.id in self.nodes:
del self.nodes[obj.id]
rel_key = self._get_relation_key(relation=rel)
if rel_key in self.relations:
del self.relations[rel_key]
def delete_node(self, node: LabelledNode) -> None:
"""Delete a node."""
if node.id in self.nodes:
del self.nodes[node.id]
def delete_relation(self, relation: Relation) -> None:
"""Delete a relation."""
rel_key = self._get_relation_key(relation=relation)
if rel_key in self.relations:
del self.relations[rel_key]
@runtime_checkable
| LabelledPropertyGraph |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ008.py | {
"start": 947,
"end": 1295
} | class ____(Model):
new_field = models.CharField(max_length=10)
class Meta:
verbose_name = "test model"
verbose_name_plural = "test models"
def __str__(self):
return self.new_field
@property
def my_brand_new_property(self):
return 1
def my_beautiful_method(self):
return 2
| TestModel4 |
python | doocs__leetcode | solution/3500-3599/3584.Maximum Product of First and Last Elements of a Subsequence/Solution.py | {
"start": 0,
"end": 335
} | class ____:
def maximumProduct(self, nums: List[int], m: int) -> int:
ans = mx = -inf
mi = inf
for i in range(m - 1, len(nums)):
x = nums[i]
y = nums[i - m + 1]
mi = min(mi, y)
mx = max(mx, y)
ans = max(ans, x * mi, x * mx)
return ans
| Solution |
python | sympy__sympy | sympy/functions/special/elliptic_integrals.py | {
"start": 9806,
"end": 14921
} | class ____(DefinedFunction):
r"""
Called with three arguments $n$, $z$ and $m$, evaluates the
Legendre incomplete elliptic integral of the third kind, defined by
.. math:: \Pi\left(n; z\middle| m\right) = \int_0^z \frac{dt}
{\left(1 - n \sin^2 t\right) \sqrt{1 - m \sin^2 t}}
Called with two arguments $n$ and $m$, evaluates the complete
elliptic integral of the third kind:
.. math:: \Pi\left(n\middle| m\right) =
\Pi\left(n; \tfrac{\pi}{2}\middle| m\right)
Explanation
===========
Note that our notation defines the incomplete elliptic integral
in terms of the parameter $m$ instead of the elliptic modulus
(eccentricity) $k$.
In this case, the parameter $m$ is defined as $m=k^2$.
Examples
========
>>> from sympy import elliptic_pi, I
>>> from sympy.abc import z, n, m
>>> elliptic_pi(n, z, m).series(z, n=4)
z + z**3*(m/6 + n/3) + O(z**4)
>>> elliptic_pi(0.5 + I, 1.0 - I, 1.2)
2.50232379629182 - 0.760939574180767*I
>>> elliptic_pi(0, 0)
pi/2
>>> elliptic_pi(1.0 - I/3, 2.0 + I)
3.29136443417283 + 0.32555634906645*I
References
==========
.. [1] https://en.wikipedia.org/wiki/Elliptic_integrals
.. [2] https://functions.wolfram.com/EllipticIntegrals/EllipticPi3
.. [3] https://functions.wolfram.com/EllipticIntegrals/EllipticPi
"""
@classmethod
def eval(cls, n, m, z=None):
if z is not None:
z, m = m, z
if n.is_zero:
return elliptic_f(z, m)
elif n is S.One:
return (elliptic_f(z, m) +
(sqrt(1 - m*sin(z)**2)*tan(z) -
elliptic_e(z, m))/(1 - m))
k = 2*z/pi
if k.is_integer:
return k*elliptic_pi(n, m)
elif m.is_zero:
return atanh(sqrt(n - 1)*tan(z))/sqrt(n - 1)
elif n == m:
return (elliptic_f(z, n) - elliptic_pi(1, z, n) +
tan(z)/sqrt(1 - n*sin(z)**2))
elif n in (S.Infinity, S.NegativeInfinity):
return S.Zero
elif m in (S.Infinity, S.NegativeInfinity):
return S.Zero
elif z.could_extract_minus_sign():
return -elliptic_pi(n, -z, m)
if n.is_zero:
return elliptic_f(z, m)
if m.is_extended_real and m.is_infinite or \
n.is_extended_real and n.is_infinite:
return S.Zero
else:
if n.is_zero:
return elliptic_k(m)
elif n is S.One:
return S.ComplexInfinity
elif m.is_zero:
return pi/(2*sqrt(1 - n))
elif m == S.One:
return S.NegativeInfinity/sign(n - 1)
elif n == m:
return elliptic_e(n)/(1 - n)
elif n in (S.Infinity, S.NegativeInfinity):
return S.Zero
elif m in (S.Infinity, S.NegativeInfinity):
return S.Zero
if n.is_zero:
return elliptic_k(m)
if m.is_extended_real and m.is_infinite or \
n.is_extended_real and n.is_infinite:
return S.Zero
def _eval_conjugate(self):
if len(self.args) == 3:
n, z, m = self.args
if (n.is_real and (n - 1).is_positive) is False and \
(m.is_real and (m - 1).is_positive) is False:
return self.func(n.conjugate(), z.conjugate(), m.conjugate())
else:
n, m = self.args
return self.func(n.conjugate(), m.conjugate())
def fdiff(self, argindex=1):
if len(self.args) == 3:
n, z, m = self.args
fm, fn = sqrt(1 - m*sin(z)**2), 1 - n*sin(z)**2
if argindex == 1:
return (elliptic_e(z, m) + (m - n)*elliptic_f(z, m)/n +
(n**2 - m)*elliptic_pi(n, z, m)/n -
n*fm*sin(2*z)/(2*fn))/(2*(m - n)*(n - 1))
elif argindex == 2:
return 1/(fm*fn)
elif argindex == 3:
return (elliptic_e(z, m)/(m - 1) +
elliptic_pi(n, z, m) -
m*sin(2*z)/(2*(m - 1)*fm))/(2*(n - m))
else:
n, m = self.args
if argindex == 1:
return (elliptic_e(m) + (m - n)*elliptic_k(m)/n +
(n**2 - m)*elliptic_pi(n, m)/n)/(2*(m - n)*(n - 1))
elif argindex == 2:
return (elliptic_e(m)/(m - 1) + elliptic_pi(n, m))/(2*(n - m))
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_Integral(self, *args, **kwargs):
from sympy.integrals.integrals import Integral
if len(self.args) == 2:
n, m, z = self.args[0], self.args[1], pi/2
else:
n, z, m = self.args
t = Dummy(uniquely_named_symbol('t', args).name)
return Integral(1/((1 - n*sin(t)**2)*sqrt(1 - m*sin(t)**2)), (t, 0, z))
| elliptic_pi |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias2.py | {
"start": 99,
"end": 197
} | class ____:
@staticmethod
def create(data: dict[str, Any]) -> "Mix":
return A()
| Base |
python | Textualize__textual | docs/examples/guide/screens/modes01.py | {
"start": 269,
"end": 409
} | class ____(Screen):
def compose(self) -> ComposeResult:
yield Placeholder("Settings Screen")
yield Footer()
| SettingsScreen |
python | huggingface__transformers | tests/models/roformer/test_modeling_roformer.py | {
"start": 18956,
"end": 19769
} | class ____(unittest.TestCase):
@slow
def test_inference_masked_lm(self):
model = RoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
with torch.no_grad():
output = model(input_ids)[0]
# TODO Replace vocab size
vocab_size = 50000
expected_shape = torch.Size((1, 6, vocab_size))
self.assertEqual(output.shape, expected_shape)
# TODO Replace values below with what was printed above.
expected_slice = torch.tensor(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
| RoFormerModelIntegrationTest |
python | getsentry__sentry | src/sentry/api/serializers/models/incidentactivity.py | {
"start": 757,
"end": 1887
} | class ____(Serializer):
def get_attrs(self, item_list, user, **kwargs):
prefetch_related_objects(item_list, "incident__organization")
serialized_users = user_service.serialize_many(
filter={"user_ids": [i.user_id for i in item_list if i.user_id]},
as_user=serialize_generic_user(user),
)
user_lookup = {user["id"]: user for user in serialized_users}
return {item: {"user": user_lookup.get(str(item.user_id))} for item in item_list}
def serialize(self, obj, attrs, user, **kwargs) -> IncidentActivitySerializerResponse:
# Mark that we're using legacy IncidentActivity models (which depend on Incident -> AlertRule)
report_used_legacy_models()
incident = obj.incident
return {
"id": str(obj.id),
"incidentIdentifier": str(incident.identifier),
"user": attrs["user"],
"type": obj.type,
"value": obj.value,
"previousValue": obj.previous_value,
"comment": obj.comment,
"dateCreated": obj.date_added,
}
| IncidentActivitySerializer |
python | kubernetes-client__python | kubernetes/client/models/v1_node_daemon_endpoints.py | {
"start": 383,
"end": 3618
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'kubelet_endpoint': 'V1DaemonEndpoint'
}
attribute_map = {
'kubelet_endpoint': 'kubeletEndpoint'
}
def __init__(self, kubelet_endpoint=None, local_vars_configuration=None): # noqa: E501
"""V1NodeDaemonEndpoints - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._kubelet_endpoint = None
self.discriminator = None
if kubelet_endpoint is not None:
self.kubelet_endpoint = kubelet_endpoint
@property
def kubelet_endpoint(self):
"""Gets the kubelet_endpoint of this V1NodeDaemonEndpoints. # noqa: E501
:return: The kubelet_endpoint of this V1NodeDaemonEndpoints. # noqa: E501
:rtype: V1DaemonEndpoint
"""
return self._kubelet_endpoint
@kubelet_endpoint.setter
def kubelet_endpoint(self, kubelet_endpoint):
"""Sets the kubelet_endpoint of this V1NodeDaemonEndpoints.
:param kubelet_endpoint: The kubelet_endpoint of this V1NodeDaemonEndpoints. # noqa: E501
:type: V1DaemonEndpoint
"""
self._kubelet_endpoint = kubelet_endpoint
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeDaemonEndpoints):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NodeDaemonEndpoints):
return True
return self.to_dict() != other.to_dict()
| V1NodeDaemonEndpoints |
python | catalyst-team__catalyst | examples/detection/models/yolo_x.py | {
"start": 40711,
"end": 44214
} | class ____(nn.Module):
"""
YOLOX model module.
The network returns loss values from three YOLO layers during training and detection results during test.
NOTE:
- model predicts bounding boxes in image size ranges
- bounding boxes format is [x_center, y_center, width, height]
- output format is [x_center, y_center, width, height, confidence, class_0_prob, ..., class_N_prob]
"""
def __init__(self, backbone=None, head=None):
super().__init__()
if backbone is None:
backbone = YOLOPAFPN()
if head is None:
head = YOLOXHead(80)
self.backbone = backbone
self.head = head
def forward(self, x, targets=None):
# fpn output content features of [dark3, dark4, dark5]
fpn_outs = self.backbone(x)
if self.training:
assert targets is not None
loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(
fpn_outs, targets, x
)
return loss
else:
outputs = self.head(fpn_outs)
return outputs
def _init_fn(M):
for m in M.modules():
if isinstance(m, nn.BatchNorm2d):
m.eps = 1e-3
m.momentum = 0.03
def _yolo_x(num_classes=80, depth=1, width=1):
in_channels = [256, 512, 1024]
backbone = YOLOPAFPN(depth, width, in_channels=in_channels)
head = YOLOXHead(num_classes, width, in_channels=in_channels)
model = YOLOX(backbone, head)
model.apply(_init_fn)
model.head.initialize_biases(1e-2)
return model
def yolo_x_tiny(num_classes=80, *args, **kwargs):
"""YOLO X tiny.
Model expects 416x416 images and for that size will return `3549` anchors.
Args:
num_classes (int): number of classes to use for detection.
Default value is `80`.
Returns:
YOLOX model.
"""
model = _yolo_x(num_classes, depth=0.33, width=0.375)
return model
def yolo_x_small(num_classes=80, *args, **kwargs):
"""YOLO X small.
Model expects 640x640 images and for that size will return `8400` anchors.
Args:
num_classes (int): number of classes to use for detection.
Default value is `80`.
Returns:
YOLOX model.
"""
model = _yolo_x(num_classes, depth=0.33, width=0.50)
return model
def yolo_x_medium(num_classes=80, *args, **kwargs):
"""YOLO X medium.
Model expects 640x640 images and for that size will return `8400` anchors.
Args:
num_classes (int): number of classes to use for detection.
Default value is `80`.
Returns:
YOLOX model.
"""
model = _yolo_x(num_classes, depth=0.67, width=0.75)
return model
def yolo_x_large(num_classes=80, *args, **kwargs):
"""YOLO X large.
Model expects 640x640 images and for that size will return `8400` anchors.
Args:
num_classes (int): number of classes to use for detection.
Default value is `80`.
Returns:
YOLOX model.
"""
model = _yolo_x(num_classes, depth=1.0, width=1.0)
return model
def yolo_x_big(num_classes=80, *args, **kwargs):
"""YOLO X.
Model expects 640x640 images and for that size will return `8400` anchors.
Args:
num_classes (int): number of classes to use for detection.
Default value is `80`.
Returns:
YOLOX model.
"""
model = _yolo_x(num_classes, depth=1.33, width=1.25)
return model
| YOLOX |
python | doocs__leetcode | solution/0700-0799/0768.Max Chunks To Make Sorted II/Solution.py | {
"start": 0,
"end": 358
} | class ____:
def maxChunksToSorted(self, arr: List[int]) -> int:
stk = []
for v in arr:
if not stk or v >= stk[-1]:
stk.append(v)
else:
mx = stk.pop()
while stk and stk[-1] > v:
stk.pop()
stk.append(mx)
return len(stk)
| Solution |
python | django__django | tests/utils_tests/test_module_loading.py | {
"start": 2852,
"end": 5391
} | class ____(unittest.TestCase):
def setUp(self):
self.egg_dir = "%s/eggs" % os.path.dirname(__file__)
def tearDown(self):
sys.path_importer_cache.clear()
sys.modules.pop("egg_module.sub1.sub2.bad_module", None)
sys.modules.pop("egg_module.sub1.sub2.good_module", None)
sys.modules.pop("egg_module.sub1.sub2", None)
sys.modules.pop("egg_module.sub1", None)
sys.modules.pop("egg_module.bad_module", None)
sys.modules.pop("egg_module.good_module", None)
sys.modules.pop("egg_module", None)
def test_shallow_loader(self):
"Module existence can be tested inside eggs"
egg_name = "%s/test_egg.egg" % self.egg_dir
with extend_sys_path(egg_name):
egg_module = import_module("egg_module")
# An importable child
self.assertTrue(module_has_submodule(egg_module, "good_module"))
mod = import_module("egg_module.good_module")
self.assertEqual(mod.content, "Good Module")
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, "bad_module"))
with self.assertRaises(ImportError):
import_module("egg_module.bad_module")
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, "no_such_module"))
with self.assertRaises(ImportError):
import_module("egg_module.no_such_module")
def test_deep_loader(self):
"Modules deep inside an egg can still be tested for existence"
egg_name = "%s/test_egg.egg" % self.egg_dir
with extend_sys_path(egg_name):
egg_module = import_module("egg_module.sub1.sub2")
# An importable child
self.assertTrue(module_has_submodule(egg_module, "good_module"))
mod = import_module("egg_module.sub1.sub2.good_module")
self.assertEqual(mod.content, "Deep Good Module")
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, "bad_module"))
with self.assertRaises(ImportError):
import_module("egg_module.sub1.sub2.bad_module")
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, "no_such_module"))
with self.assertRaises(ImportError):
import_module("egg_module.sub1.sub2.no_such_module")
| EggLoader |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/overrides.py | {
"start": 1092,
"end": 2426
} | class ____(C):
def __init__(self, arg):
super(C, self).__init__(arg)
def methodA(self, arg):
_test_sink(arg)
def methodB(self):
return _test_source()
def testBase(o: Base, cls: Type[Base]):
y = o.methodB()
o.methodA(y)
cls.classMethod(y)
def testStaticBase(o: Base):
x = o.methodB()
Base.classMethod(x)
def testMakeBase():
o = Base()
x = o.methodB()
def testA(o: A, cls: Type[A]):
y = o.methodB()
o.methodA(y)
cls.classMethod(y)
def testStaticA(o: A):
x = o.methodB()
A.classMethod(x)
def testMakeA():
o = A()
x = o.methodB()
def testB(o: B, cls: Type[B]):
y = o.methodB()
o.methodA(y)
cls.classMethod(y)
def testStaticB(o: B):
x = o.methodB()
B.classMethod(x)
def testMakeB():
o = B()
x = o.methodB()
def testC(o: C, cls: Type[C]):
y = o.methodB()
o.methodA(y)
cls.classMethod(y)
def testStaticC(o: C):
x = o.methodB()
C.classMethod(x)
def testMakeC():
o = C()
x = o.methodB()
def testD(o: D, cls: Type[D]):
y = o.methodB()
o.methodA(y)
cls.classMethod(y)
def testStaticD(o: D):
x = o.methodB()
D.classMethod(x)
def testMakeD():
o = D()
x = o.methodB()
def constructorTest(cls: Type[D]) -> D:
return cls(_test_source())
| D |
python | PyCQA__isort | isort/format.py | {
"start": 3007,
"end": 3631
} | class ____:
ERROR = "ERROR"
SUCCESS = "SUCCESS"
def __init__(self, error: str, success: str, output: TextIO | None = None):
self.output = output or sys.stdout
self.success_message = success
self.error_message = error
def success(self, message: str) -> None:
print(self.success_message.format(success=self.SUCCESS, message=message), file=self.output)
def error(self, message: str) -> None:
print(self.error_message.format(error=self.ERROR, message=message), file=sys.stderr)
def diff_line(self, line: str) -> None:
self.output.write(line)
| BasicPrinter |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1053592,
"end": 1053749
} | class ____(sgqlc.types.Union):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__types__ = (Commit, PullRequest)
| Closer |
python | ray-project__ray | python/ray/train/tests/test_gpu.py | {
"start": 862,
"end": 12142
} | class ____(LinearDataset):
"""Modifies the LinearDataset to also return non-tensor objects."""
def __getitem__(self, index):
return {"x": self.x[index, None], "y": 2}
def write_rank_data(tmp_path: Path, data: Union[int, List, Dict]):
rank = train.get_context().get_world_rank()
with open(tmp_path / f"{rank}.json", "w") as f:
json.dump(data, f)
def get_data_from_all_ranks(tmp_path: Path) -> Dict[int, Union[int, List, Dict]]:
rank_data = {}
for rank_file in tmp_path.glob("*.json"):
rank = int(rank_file.stem)
with open(rank_file, "r") as f:
data = json.load(f)
rank_data[rank] = data
return rank_data
@pytest.mark.parametrize("cuda_visible_devices", ["", "1,2"])
@pytest.mark.parametrize("num_gpus_per_worker", [0.5, 1, 2])
def test_torch_get_device(
shutdown_only, num_gpus_per_worker, cuda_visible_devices, monkeypatch, tmp_path
):
if cuda_visible_devices:
# Test if `get_device` is correct even with user specified env var.
monkeypatch.setenv("CUDA_VISIBLE_DEVICES", cuda_visible_devices)
ray.init(num_cpus=4, num_gpus=2)
def train_fn():
# Confirm that the TorchConfig Prologue is effective
assert torch.cuda.current_device() == train.torch.get_device().index
# Make sure environment variable is being set correctly.
if cuda_visible_devices:
visible_devices = os.environ["CUDA_VISIBLE_DEVICES"]
assert visible_devices == "1,2"
devices = sorted([device.index for device in train.torch.get_devices()])
write_rank_data(tmp_path, devices)
trainer = TorchTrainer(
train_fn,
scaling_config=ScalingConfig(
num_workers=int(2 / num_gpus_per_worker),
use_gpu=True,
resources_per_worker={"GPU": num_gpus_per_worker},
),
)
trainer.fit()
rank_data = get_data_from_all_ranks(tmp_path)
devices = list(rank_data.values())
if num_gpus_per_worker == 0.5:
assert sorted(devices) == [[0], [0], [1], [1]]
elif num_gpus_per_worker == 1:
assert sorted(devices) == [[0], [1]]
elif num_gpus_per_worker == 2:
assert sorted(devices[0]) == [0, 1]
else:
raise RuntimeError(
"New parameter for this test has been added without checking that the "
"correct devices have been returned."
)
@pytest.mark.parametrize("num_gpus_per_worker", [0.5, 1, 2])
def test_torch_get_device_dist(ray_2_node_2_gpu, num_gpus_per_worker, tmp_path):
@patch("torch.cuda.is_available", lambda: True)
def train_fn():
# Confirm that the TorchConfig Prologue is effective
assert torch.cuda.current_device() == train.torch.get_device().index
devices = sorted([device.index for device in train.torch.get_devices()])
write_rank_data(tmp_path, devices)
trainer = TorchTrainer(
train_fn,
# use gloo instead of nccl, since nccl is not supported
# on this virtual gpu ray environment
torch_config=TorchConfig(backend="gloo"),
scaling_config=ScalingConfig(
num_workers=int(4 / num_gpus_per_worker),
use_gpu=True,
resources_per_worker={"GPU": num_gpus_per_worker},
),
)
trainer.fit()
rank_data = get_data_from_all_ranks(tmp_path)
devices = list(rank_data.values())
# cluster setups: 2 nodes, 2 gpus per node
# `CUDA_VISIBLE_DEVICES` is set to "0,1" on node 1 and node 2
if num_gpus_per_worker == 0.5:
# worker gpu topology:
# 4 workers on node 1, 4 workers on node 2
# `ray.get_gpu_ids()` returns [0], [0], [1], [1] on node 1
# and [0], [0], [1], [1] on node 2
assert sorted(devices) == [[0], [0], [0], [0], [1], [1], [1], [1]]
elif num_gpus_per_worker == 1:
# worker gpu topology:
# 2 workers on node 1, 2 workers on node 2
# `ray.get_gpu_ids()` returns [0], [1] on node 1 and [0], [1] on node 2
assert sorted(devices) == [[0], [0], [1], [1]]
elif num_gpus_per_worker == 2:
# worker gpu topology:
# 1 workers on node 1, 1 workers on node 2
# `ray.get_gpu_ids()` returns {0, 1} on node 1 and {0, 1} on node 2
# and `device_id` returns the one index from each set.
# So total count of devices should be 2.
assert devices == [[0, 1], [0, 1]]
else:
raise RuntimeError(
"New parameter for this test has been added without checking that the "
"correct devices have been returned."
)
def test_torch_prepare_model(ray_start_4_cpus_2_gpus):
"""Tests if ``prepare_model`` correctly wraps in DDP."""
def train_fn():
model = torch.nn.Linear(1, 1)
# Wrap in DDP.
model = train.torch.prepare_model(model)
# Make sure model is wrapped in DDP.
assert isinstance(model, DistributedDataParallel)
# Make sure model is on cuda.
assert next(model.parameters()).is_cuda
trainer = TorchTrainer(
train_fn, scaling_config=ScalingConfig(num_workers=2, use_gpu=True)
)
trainer.fit()
def train_fn_manual_override():
model = torch.nn.Linear(1, 1)
# Wrap in DDP and manually specify CPU.
model = train.torch.prepare_model(model, device=torch.device("cpu"))
# Make sure model is wrapped in DDP.
assert isinstance(model, DistributedDataParallel)
# Make sure model is NOT on cuda since we manually specified CPU.
assert not next(model.parameters()).is_cuda
trainer = TorchTrainer(
train_fn, scaling_config=ScalingConfig(num_workers=2, use_gpu=True)
)
trainer.fit()
def test_torch_prepare_model_uses_device(ray_start_4_cpus_2_gpus):
"""Tests if `prepare_model` uses the train.torch.get_device even if it does not
match with the local rank."""
# The below test should pass without errors.
@patch.object(
ray.train.torch.train_loop_utils,
"get_device",
lambda: torch.device(f"cuda:{1 - train.get_context().get_local_rank()}"),
)
def train_func():
# These assert statements must hold for prepare_model to wrap with DDP.
assert torch.cuda.is_available()
assert train.get_context().get_world_size() > 1
model = torch.nn.Linear(1, 1)
data = torch.ones(1)
data = data.to(train.torch.get_device())
model = train.torch.prepare_model(model)
model(data)
trainer = TorchTrainer(
train_func, scaling_config=ScalingConfig(num_workers=2, use_gpu=True)
)
trainer.fit()
@pytest.mark.parametrize(
"dataset", (LinearDataset, LinearDatasetDict, NonTensorDataset)
)
def test_torch_prepare_dataloader(ray_start_4_cpus_2_gpus, dataset):
data_loader = DataLoader(dataset(a=1, b=2, size=10))
def train_fn():
wrapped_data_loader = train.torch.prepare_data_loader(data_loader)
# Check that DistributedSampler has been added to the data loader.
assert isinstance(wrapped_data_loader.sampler, DistributedSampler)
# Make sure you can properly iterate through the DataLoader.
# Case where the dataset returns a tuple or list from __getitem__.
if isinstance(dataset, LinearDataset):
for batch in wrapped_data_loader:
x = batch[0]
y = batch[1]
# Make sure the data is on the correct device.
assert x.is_cuda and y.is_cuda
# Case where the dataset returns a dict from __getitem__.
elif isinstance(dataset, LinearDatasetDict):
for batch in wrapped_data_loader:
for x, y in zip(batch["x"], batch["y"]):
# Make sure the data is on the correct device.
assert x.is_cuda and y.is_cuda
elif isinstance(dataset, NonTensorDataset):
for batch in wrapped_data_loader:
for x, y in zip(batch["x"], batch["y"]):
# Make sure the data is on the correct device.
assert x.is_cuda and y == 2
trainer = TorchTrainer(
train_fn, scaling_config=ScalingConfig(num_workers=2, use_gpu=True)
)
trainer.fit()
@pytest.mark.parametrize("data_loader_num_workers", (0, 2))
def test_enable_reproducibility(ray_start_4_cpus_2_gpus, data_loader_num_workers):
# NOTE: Reproducible results aren't guaranteed between seeded executions, even with
# identical hardware and software dependencies. This test should be okay given that
# it only runs for two epochs on a small dataset.
# NOTE: I've chosen to use a ResNet model over a more simple model, because
# `enable_reproducibility` disables CUDA convolution benchmarking, and a simpler
# model (e.g., linear) might not test this feature.
def train_func():
train.torch.enable_reproducibility()
model = torchvision.models.resnet18()
model = train.torch.prepare_model(model)
dataset_length = 128
dataset = torch.utils.data.TensorDataset(
torch.randn(dataset_length, 3, 32, 32),
torch.randint(low=0, high=1000, size=(dataset_length,)),
)
# num_workers > 0 tests for https://github.com/ray-project/ray/issues/30247
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=64, num_workers=data_loader_num_workers
)
dataloader = train.torch.prepare_data_loader(dataloader)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
model.train()
for epoch in range(2):
for images, targets in dataloader:
optimizer.zero_grad()
outputs = model(images)
loss = torch.nn.functional.cross_entropy(outputs, targets)
loss.backward()
optimizer.step()
train.report(dict(loss=loss.item()))
trainer = TorchTrainer(
train_func, scaling_config=ScalingConfig(num_workers=2, use_gpu=True)
)
result1 = trainer.fit()
trainer = TorchTrainer(
train_func, scaling_config=ScalingConfig(num_workers=2, use_gpu=True)
)
result2 = trainer.fit()
assert result1.metrics["loss"] == result2.metrics["loss"]
def test_torch_fail_on_nccl_timeout(ray_start_4_cpus_2_gpus):
"""Tests that TorchTrainer raises exception on NCCL timeouts."""
def train_fn():
model = torch.nn.Linear(1, 1)
model = train.torch.prepare_model(model)
# Rank 0 worker will never reach the collective operation.
# NCCL should timeout.
if train.get_context().get_world_rank() == 0:
while True:
time.sleep(100)
torch.distributed.barrier()
trainer = TorchTrainer(
train_fn,
scaling_config=ScalingConfig(num_workers=2, use_gpu=True),
torch_config=TorchConfig(timeout_s=5),
)
# Training should fail and not hang.
with pytest.raises(TrainingFailedError) as exc_info:
trainer.fit()
assert isinstance(exc_info.value.__cause__, RayTaskError)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", "-x", "-s", __file__]))
| NonTensorDataset |
python | pytorch__pytorch | torch/testing/_internal/common_nn.py | {
"start": 137863,
"end": 142826
} | class ____(TestCase):
# _forward is defined in classes inheriting from NNTestCase
@abstractmethod
def _forward(self, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def _get_parameters(self, module: nn.Module) -> tuple[list[nn.Parameter], list[nn.Parameter]]:
raise NotImplementedError
@abstractmethod
def _zero_grad_parameters(self, module: nn.Module) -> None:
raise NotImplementedError
@abstractmethod
def _backward(self, module: nn.Module,
input: _TensorOrTensors, output: torch.Tensor,
grad_output: Union[torch.Tensor, Sequence[torch.Tensor]],
create_graph: bool = False):
raise NotImplementedError
def _jacobian(self, input, num_out):
if isinstance(input, tuple):
return tuple(self._jacobian(elem, num_out) for elem in input)
elif isinstance(input, list):
return [self._jacobian(elem, num_out) for elem in input]
else:
return torch.zeros(input.nelement(), num_out)
def _flatten_tensors(self, x):
if isinstance(x, torch.Tensor):
if x.is_sparse:
return x.to_dense().view(-1)
else:
return x.view(-1)
else:
return tuple(self._flatten_tensors(a) for a in x)
def _zero_grad_input(self, input):
if isinstance(input, torch.Tensor):
if input.requires_grad and input.grad is not None:
input.grad.zero_()
input.grad.detach_()
else:
for i in input:
self._zero_grad_input(i)
def _analytical_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True, jacobian_parameters=True):
output = self._forward(module, input)
output_size = output.nelement()
if jacobian_input:
jacobian_inp = self._jacobian(input, output_size)
flat_jacobian_input = list(_iter_tensors(jacobian_inp))
if jacobian_parameters:
num_param = sum(p.numel() for p in self._get_parameters(module)[0])
jacobian_param = torch.zeros(num_param, output_size)
for i in range(output_size):
param, d_param = self._get_parameters(module)
# make non grad zeros
d_param = [torch.zeros_like(p) if d is None else d for (p, d) in zip(param, d_param, strict=True)]
d_out = torch.zeros_like(output)
flat_d_out = d_out.view(-1)
flat_d_out[i] = 1
if jacobian_parameters:
self._zero_grad_parameters(module)
# Tensors will accumulate gradient from multiple steps
if jacobian_input:
self._zero_grad_input(input)
d_input = self._backward(module, input, output, d_out)
if jacobian_input:
for jacobian_x, d_x in zip(flat_jacobian_input, _iter_tensors(d_input), strict=True):
jacobian_x[:, i] = d_x.contiguous().view(-1)
if jacobian_parameters:
jacobian_param[:, i] = torch.cat(self._flatten_tensors(d_param), 0)
res: tuple[torch.Tensor, ...] = ()
if jacobian_input:
res += jacobian_inp,
if jacobian_parameters:
res += jacobian_param,
return res
def _numerical_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True, jacobian_parameters=True):
def fw(*input):
return self._forward(module, input).detach()
res: tuple[torch.Tensor, ...] = ()
if jacobian_input:
res += _get_numerical_jacobian(fw, input, eps=1e-6),
if jacobian_parameters:
param, _ = self._get_parameters(module)
to_cat = []
for p in param:
jacobian = _get_numerical_jacobian(fw, input, target=p, eps=1e-6)
# get_numerical_jacobian returns a list of tuples but we require a tensor
to_cat.append(jacobian[0][0])
res += (torch.cat(to_cat, 0),)
return res
def check_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True):
jacobian_parameters = bool(self._get_parameters(module)[0])
analytical = self._analytical_jacobian(module, input, jacobian_input, jacobian_parameters)
numerical = self._numerical_jacobian(module, input, jacobian_input, jacobian_parameters)
analytical_t = list(_iter_tensors(analytical))
numerical_t = list(_iter_tensors(numerical))
differences = []
for a, n in zip(analytical_t, numerical_t, strict=True):
if a.numel() != 0:
differences.append(a.add(n, alpha=-1).abs().max())
# TODO: compare structure (ensure analytic jacobian has correct shape)
if len(differences) > 0:
self.assertLessEqual(max(differences), PRECISION) # type: ignore[type-var]
| NNTestCase |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_wxagg.py | {
"start": 1398,
"end": 1468
} | class ____(_BackendWx):
FigureCanvas = FigureCanvasWxAgg
| _BackendWxAgg |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/representer.py | {
"start": 8562,
"end": 14615
} | class ____(BaseRepresenter):
def ignore_aliases(self, data):
# type: (Any) -> bool
# https://docs.python.org/3/reference/expressions.html#parenthesized-forms :
# "i.e. two occurrences of the empty tuple may or may not yield the same object"
# so "data is ()" should not be used
if data is None or (isinstance(data, tuple) and data == ()):
return True
if isinstance(data, (bytes, str, bool, int, float)):
return True
return False
def represent_none(self, data):
# type: (Any) -> Any
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
def represent_str(self, data):
# type: (Any) -> Any
return self.represent_scalar('tag:yaml.org,2002:str', data)
def represent_binary(self, data):
# type: (Any) -> Any
if hasattr(base64, 'encodebytes'):
data = base64.encodebytes(data).decode('ascii')
else:
# check py2 only?
data = base64.encodestring(data).decode('ascii') # type: ignore
return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
def represent_bool(self, data, anchor=None):
# type: (Any, Optional[Any]) -> Any
try:
value = self.dumper.boolean_representation[bool(data)]
except AttributeError:
if data:
value = 'true'
else:
value = 'false'
return self.represent_scalar('tag:yaml.org,2002:bool', value, anchor=anchor)
def represent_int(self, data):
# type: (Any) -> Any
return self.represent_scalar('tag:yaml.org,2002:int', str(data))
inf_value = 1e300
while repr(inf_value) != repr(inf_value * inf_value):
inf_value *= inf_value
def represent_float(self, data):
# type: (Any) -> Any
if data != data or (data == 0.0 and data == 1.0):
value = '.nan'
elif data == self.inf_value:
value = '.inf'
elif data == -self.inf_value:
value = '-.inf'
else:
value = repr(data).lower()
if getattr(self.serializer, 'use_version', None) == (1, 1):
if '.' not in value and 'e' in value:
# Note that in some cases `repr(data)` represents a float number
# without the decimal parts. For instance:
# >>> repr(1e17)
# '1e17'
# Unfortunately, this is not a valid float representation according
# to the definition of the `!!float` tag in YAML 1.1. We fix
# this by adding '.0' before the 'e' symbol.
value = value.replace('e', '.0e', 1)
return self.represent_scalar('tag:yaml.org,2002:float', value)
def represent_list(self, data):
# type: (Any) -> Any
# pairs = (len(data) > 0 and isinstance(data, list))
# if pairs:
# for item in data:
# if not isinstance(item, tuple) or len(item) != 2:
# pairs = False
# break
# if not pairs:
return self.represent_sequence('tag:yaml.org,2002:seq', data)
# value = []
# for item_key, item_value in data:
# value.append(self.represent_mapping('tag:yaml.org,2002:map',
# [(item_key, item_value)]))
# return SequenceNode('tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
# type: (Any) -> Any
return self.represent_mapping('tag:yaml.org,2002:map', data)
def represent_ordereddict(self, data):
# type: (Any) -> Any
return self.represent_omap('tag:yaml.org,2002:omap', data)
def represent_set(self, data):
# type: (Any) -> Any
value = {} # type: Dict[Any, None]
for key in data:
value[key] = None
return self.represent_mapping('tag:yaml.org,2002:set', value)
def represent_date(self, data):
# type: (Any) -> Any
value = data.isoformat()
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
# type: (Any) -> Any
value = data.isoformat(' ')
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
# type: (Any, Any, Any, Any) -> Any
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
# type: (Any) -> None
raise RepresenterError(_F('cannot represent an object: {data!s}', data=data))
SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str, SafeRepresenter.represent_str)
SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary)
SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int, SafeRepresenter.represent_int)
SafeRepresenter.add_representer(float, SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list, SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set, SafeRepresenter.represent_set)
SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict)
if sys.version_info >= (2, 7):
import collections
SafeRepresenter.add_representer(
collections.OrderedDict, SafeRepresenter.represent_ordereddict
)
SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined)
| SafeRepresenter |
python | ray-project__ray | python/ray/serve/_private/common.py | {
"start": 7982,
"end": 23779
} | class ____:
name: str
status: DeploymentStatus
status_trigger: DeploymentStatusTrigger
message: str = ""
@property
def rank(self) -> int:
"""Get priority of state based on ranking_order().
The ranked order indicates what the status should be of a
hierarchically "higher" resource when derived from a group of
`DeploymentStatusInfo` sub-resources.
"""
if (self.status,) in DEPLOYMENT_STATUS_RANKING_ORDER:
return DEPLOYMENT_STATUS_RANKING_ORDER[(self.status,)]
elif (self.status, self.status_trigger) in DEPLOYMENT_STATUS_RANKING_ORDER:
return DEPLOYMENT_STATUS_RANKING_ORDER[(self.status, self.status_trigger)]
def debug_string(self):
return json.dumps(asdict(self), indent=4)
def _updated_copy(
self,
status: DeploymentStatus = None,
status_trigger: DeploymentStatusTrigger = None,
message: str = "",
):
"""Returns a copy of the current object with the passed in kwargs updated."""
return DeploymentStatusInfo(
name=self.name,
status=status if status else self.status,
status_trigger=status_trigger if status_trigger else self.status_trigger,
message=message,
)
def update_message(self, message: str):
return self._updated_copy(message=message)
def handle_transition(
self,
trigger: DeploymentStatusInternalTrigger,
message: str = "",
) -> "DeploymentStatusInfo":
"""Handles a transition from the current state to the next state.
Args:
trigger: An internal trigger that determines the state
transition. This is the new incoming trigger causing the
transition.
message: The message to set in status info.
Returns:
New instance of DeploymentStatusInfo representing the
next state to transition to.
"""
# If there was an unexpected internal error during reconciliation, set
# status to unhealthy immediately and return
if trigger == DeploymentStatusInternalTrigger.INTERNAL_ERROR:
return self._updated_copy(
status=DeploymentStatus.UNHEALTHY,
status_trigger=DeploymentStatusTrigger.INTERNAL_ERROR,
message=message,
)
# If deployment is being deleted, set status immediately and return
elif trigger == DeploymentStatusInternalTrigger.DELETE:
return self._updated_copy(
status=DeploymentStatus.UPDATING,
status_trigger=DeploymentStatusTrigger.DELETING,
message=message,
)
# Otherwise, go through normal state machine transitions
elif self.status == DeploymentStatus.UPDATING:
# Finished updating configuration and transition to healthy
if trigger == DeploymentStatusInternalTrigger.HEALTHY:
return self._updated_copy(
status=DeploymentStatus.HEALTHY,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_COMPLETED,
message=message,
)
# A new configuration has been deployed before deployment
# has finished updating
elif trigger == DeploymentStatusInternalTrigger.CONFIG_UPDATE:
return self._updated_copy(
status=DeploymentStatus.UPDATING,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
message=message,
)
# Autoscaling.
elif trigger == DeploymentStatusInternalTrigger.AUTOSCALE_UP:
return self._updated_copy(
status=DeploymentStatus.UPSCALING,
status_trigger=DeploymentStatusTrigger.AUTOSCALING,
message=message,
)
elif trigger == DeploymentStatusInternalTrigger.AUTOSCALE_DOWN:
return self._updated_copy(
status=DeploymentStatus.DOWNSCALING,
status_trigger=DeploymentStatusTrigger.AUTOSCALING,
message=message,
)
# Manually increasing or decreasing num replicas does not
# change the status while deployment is still updating.
elif trigger in {
DeploymentStatusInternalTrigger.MANUALLY_INCREASE_NUM_REPLICAS,
DeploymentStatusInternalTrigger.MANUALLY_DECREASE_NUM_REPLICAS,
}:
return self
# Failures occurred while a deployment was being updated
elif trigger == DeploymentStatusInternalTrigger.HEALTH_CHECK_FAILED:
return self._updated_copy(
status=DeploymentStatus.DEPLOY_FAILED,
status_trigger=DeploymentStatusTrigger.HEALTH_CHECK_FAILED,
message=message,
)
elif trigger == DeploymentStatusInternalTrigger.REPLICA_STARTUP_FAILED:
return self._updated_copy(
status=DeploymentStatus.DEPLOY_FAILED,
status_trigger=DeploymentStatusTrigger.REPLICA_STARTUP_FAILED,
message=message,
)
elif self.status in {DeploymentStatus.UPSCALING, DeploymentStatus.DOWNSCALING}:
# Failures occurred while upscaling/downscaling
if trigger == DeploymentStatusInternalTrigger.HEALTH_CHECK_FAILED:
return self._updated_copy(
status=DeploymentStatus.UNHEALTHY,
status_trigger=DeploymentStatusTrigger.HEALTH_CHECK_FAILED,
message=message,
)
elif trigger == DeploymentStatusInternalTrigger.REPLICA_STARTUP_FAILED:
return self._updated_copy(
status=DeploymentStatus.UNHEALTHY,
status_trigger=DeploymentStatusTrigger.REPLICA_STARTUP_FAILED,
message=message,
)
# Deployment transitions to healthy
elif trigger == DeploymentStatusInternalTrigger.HEALTHY:
return self._updated_copy(
status=DeploymentStatus.HEALTHY,
status_trigger=DeploymentStatusTrigger.UPSCALE_COMPLETED
if self.status == DeploymentStatus.UPSCALING
else DeploymentStatusTrigger.DOWNSCALE_COMPLETED,
message=message,
)
# Configuration is updated before scaling is finished
elif trigger == DeploymentStatusInternalTrigger.CONFIG_UPDATE:
return self._updated_copy(
status=DeploymentStatus.UPDATING,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
message=message,
)
elif self.status_trigger == DeploymentStatusTrigger.AUTOSCALING:
# Upscale replicas before previous autoscaling has finished
if trigger == DeploymentStatusInternalTrigger.AUTOSCALE_UP:
return self._updated_copy(
status=DeploymentStatus.UPSCALING,
message=message,
)
# Downscale replicas before previous autoscaling has finished
elif trigger == DeploymentStatusInternalTrigger.AUTOSCALE_DOWN:
return self._updated_copy(
status=DeploymentStatus.DOWNSCALING,
message=message,
)
# Manually upscale replicas with config update before previous autoscaling has finished
elif (
trigger
== DeploymentStatusInternalTrigger.MANUALLY_INCREASE_NUM_REPLICAS
):
return self._updated_copy(
status=DeploymentStatus.UPSCALING,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
message=message,
)
# Manually downscale replicas with config update before previous autoscaling has finished
elif (
trigger
== DeploymentStatusInternalTrigger.MANUALLY_DECREASE_NUM_REPLICAS
):
return self._updated_copy(
status=DeploymentStatus.DOWNSCALING,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
message=message,
)
elif self.status_trigger == DeploymentStatusTrigger.CONFIG_UPDATE_STARTED:
# Upscale replicas before previous config update has finished
if (
trigger
== DeploymentStatusInternalTrigger.MANUALLY_INCREASE_NUM_REPLICAS
):
return self._updated_copy(
status=DeploymentStatus.UPSCALING, message=message
)
# Downscale replicas before previous config update has finished
elif (
trigger
== DeploymentStatusInternalTrigger.MANUALLY_DECREASE_NUM_REPLICAS
):
return self._updated_copy(
status=DeploymentStatus.DOWNSCALING, message=message
)
elif self.status == DeploymentStatus.HEALTHY:
# Deployment remains healthy
if trigger == DeploymentStatusInternalTrigger.HEALTHY:
return self
# New configuration is deployed
elif trigger == DeploymentStatusInternalTrigger.CONFIG_UPDATE:
return self._updated_copy(
status=DeploymentStatus.UPDATING,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
message=message,
)
# Manually scaling / autoscaling num replicas
elif (
trigger
== DeploymentStatusInternalTrigger.MANUALLY_INCREASE_NUM_REPLICAS
):
return self._updated_copy(
status=DeploymentStatus.UPSCALING,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
message=message,
)
elif (
trigger
== DeploymentStatusInternalTrigger.MANUALLY_DECREASE_NUM_REPLICAS
):
return self._updated_copy(
status=DeploymentStatus.DOWNSCALING,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
message=message,
)
elif trigger == DeploymentStatusInternalTrigger.AUTOSCALE_UP:
return self._updated_copy(
status=DeploymentStatus.UPSCALING,
status_trigger=DeploymentStatusTrigger.AUTOSCALING,
message=message,
)
elif trigger == DeploymentStatusInternalTrigger.AUTOSCALE_DOWN:
return self._updated_copy(
status=DeploymentStatus.DOWNSCALING,
status_trigger=DeploymentStatusTrigger.AUTOSCALING,
message=message,
)
# Health check for one or more replicas has failed
elif trigger == DeploymentStatusInternalTrigger.HEALTH_CHECK_FAILED:
return self._updated_copy(
status=DeploymentStatus.UNHEALTHY,
status_trigger=DeploymentStatusTrigger.HEALTH_CHECK_FAILED,
message=message,
)
elif self.status == DeploymentStatus.UNHEALTHY:
# The deployment recovered
if trigger == DeploymentStatusInternalTrigger.HEALTHY:
return self._updated_copy(
status=DeploymentStatus.HEALTHY,
status_trigger=DeploymentStatusTrigger.UNSPECIFIED,
message=message,
)
# A new configuration is being deployed.
elif trigger == DeploymentStatusInternalTrigger.CONFIG_UPDATE:
return self._updated_copy(
status=DeploymentStatus.UPDATING,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
message=message,
)
# Old failures keep getting triggered, or new failures occurred.
elif trigger == DeploymentStatusInternalTrigger.HEALTH_CHECK_FAILED:
return self._updated_copy(
status=DeploymentStatus.UNHEALTHY,
status_trigger=DeploymentStatusTrigger.HEALTH_CHECK_FAILED,
message=message,
)
elif trigger == DeploymentStatusInternalTrigger.REPLICA_STARTUP_FAILED:
return self._updated_copy(
status=DeploymentStatus.UNHEALTHY,
status_trigger=DeploymentStatusTrigger.REPLICA_STARTUP_FAILED,
message=message,
)
elif self.status == DeploymentStatus.DEPLOY_FAILED:
# The deployment recovered
if trigger == DeploymentStatusInternalTrigger.HEALTHY:
return self._updated_copy(
status=DeploymentStatus.HEALTHY,
status_trigger=DeploymentStatusTrigger.UNSPECIFIED,
message=message,
)
# A new configuration is being deployed.
elif trigger == DeploymentStatusInternalTrigger.CONFIG_UPDATE:
return self._updated_copy(
status=DeploymentStatus.UPDATING,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
message=message,
)
# Old failures keep getting triggered, or new failures occurred.
elif trigger == DeploymentStatusInternalTrigger.HEALTH_CHECK_FAILED:
return self._updated_copy(
status=DeploymentStatus.DEPLOY_FAILED,
status_trigger=DeploymentStatusTrigger.HEALTH_CHECK_FAILED,
message=message,
)
elif trigger == DeploymentStatusInternalTrigger.REPLICA_STARTUP_FAILED:
return self._updated_copy(
status=DeploymentStatus.DEPLOY_FAILED,
status_trigger=DeploymentStatusTrigger.REPLICA_STARTUP_FAILED,
message=message,
)
# If it's any other transition, ignore it.
return self
def to_proto(self):
return DeploymentStatusInfoProto(
name=self.name,
status=f"DEPLOYMENT_STATUS_{self.status.name}",
status_trigger=f"DEPLOYMENT_STATUS_TRIGGER_{self.status_trigger.name}",
message=self.message,
)
@classmethod
def from_proto(cls, proto: DeploymentStatusInfoProto):
status = DeploymentStatusProto.Name(proto.status)[len("DEPLOYMENT_STATUS_") :]
status_trigger = DeploymentStatusTriggerProto.Name(proto.status_trigger)[
len("DEPLOYMENT_STATUS_TRIGGER_") :
]
return cls(
name=proto.name,
status=DeploymentStatus(status),
status_trigger=DeploymentStatusTrigger(status_trigger),
message=proto.message,
)
@dataclass(frozen=True)
| DeploymentStatusInfo |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 14420,
"end": 15272
} | class ____(Converter):
"""
Handles both fixed and variable-lengths arrays.
"""
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Converter.__init__(self, field, config, pos)
if config.get("verify", "ignore") == "exception":
self._splitter = self._splitter_pedantic
else:
self._splitter = self._splitter_lax
def parse_scalar(self, value, config=None, pos=0):
return self._base.parse_scalar(value, config, pos)
@staticmethod
def _splitter_pedantic(value, config=None, pos=None):
return pedantic_array_splitter.split(value)
@staticmethod
def _splitter_lax(value, config=None, pos=None):
if "," in value:
vo_warn(W01, (), config, pos)
return array_splitter.split(value)
| Array |
python | tensorflow__tensorflow | tensorflow/python/distribute/device_util.py | {
"start": 4265,
"end": 5612
} | class ____(object):
"""A fake Operation object to pass to device functions."""
def __init__(self):
self.device = ""
self.type = ""
self.name = ""
self.node_def = _FakeNodeDef()
def _set_device(self, device):
self.device = ops._device_string(device) # pylint: disable=protected-access
def _set_device_from_string(self, device_str):
self.device = device_str
def current():
"""Return a string (not canonicalized) for the current device."""
# TODO(josh11b): Work out how this function interacts with ops.colocate_with.
if ops.executing_eagerly_outside_functions():
d = context.context().device_name
else:
op = _FakeOperation()
ops.get_default_graph()._apply_device_functions(op) # pylint: disable=protected-access
d = op.device
return d
def get_host_for_device(device, device_index=0):
"""Returns the corresponding host device for the given device."""
spec = tf_device.DeviceSpec.from_string(device)
return tf_device.DeviceSpec(
job=spec.job,
replica=spec.replica,
task=spec.task,
device_type="CPU",
device_index=device_index,
).to_string()
def local_devices_from_num_gpus(num_gpus):
"""Returns device strings for local GPUs or CPU."""
return (tuple("/device:GPU:%d" % i for i in range(num_gpus)) or
("/device:CPU:0",))
| _FakeOperation |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_audio.py | {
"start": 47809,
"end": 49430
} | class ____(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
self.out_conv_dim = config.tdnn_dim[layer_id]
self.kernel_size = config.tdnn_kernel[layer_id]
self.dilation = config.tdnn_dilation[layer_id]
self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
self.activation = nn.ReLU()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
if is_peft_available():
from peft.tuners.lora import LoraLayer
if is_peft_available():
if isinstance(self.kernel, LoraLayer):
warnings.warn(
"Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. "
"You should exclude TDNNLayer from LoRA's target modules.",
)
# for backward compatibility, we keep nn.Linear but call F.conv1d for speed up
hidden_states = hidden_states.transpose(1, 2)
weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2)
hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.activation(hidden_states)
return hidden_states
@auto_docstring(
custom_intro="""
Data2VecAudio Model with an XVector feature extraction head on top for tasks like Speaker Verification.
"""
)
| TDNNLayer |
python | getsentry__sentry | tests/sentry/workflow_engine/service/test_action_service.py | {
"start": 545,
"end": 13377
} | class ____(TestCase):
def setUp(self) -> None:
self.organization = self.create_organization(owner=self.user)
self.organization_2 = self.create_organization(owner=self.user)
self.integration = self.create_integration(
organization=self.organization,
provider="slack",
name="Test Integration",
external_id="123",
)
self.integration_2 = self.create_integration(
organization=self.organization,
provider="github",
name="Test Integration 2",
external_id="456",
)
self.sentry_app = self.create_sentry_app(
organization=self.organization, name="Test Sentry App"
)
self.sentry_app_2 = self.create_sentry_app(
organization=self.organization, name="Test Sentry App 2"
)
def test_delete_actions_for_organization_integration_successful_deletion(self) -> None:
condition_group = self.create_data_condition_group(organization=self.organization)
action = self.create_action(
type=Action.Type.SLACK,
integration_id=self.integration.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "123",
"target_display": "Test Integration",
},
)
self.create_data_condition_group_action(condition_group=condition_group, action=action)
action_service.delete_actions_for_organization_integration(
organization_id=self.organization.id, integration_id=self.integration.id
)
with assume_test_silo_mode(SiloMode.REGION):
assert not Action.objects.filter(id=action.id).exists()
def test_delete_actions_for_organization_integration_multiple_actions(self) -> None:
condition_group = self.create_data_condition_group(organization=self.organization)
action_1 = self.create_action(
type=Action.Type.SLACK,
integration_id=self.integration.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "123",
"target_display": "Test Integration",
},
)
action_2 = self.create_action(
type=Action.Type.SLACK,
integration_id=self.integration.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "123",
"target_display": "Test Integration",
},
)
action_3 = self.create_action(
type=Action.Type.SLACK,
integration_id=self.integration_2.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "123",
"target_display": "Test Integration 2",
},
)
self.create_data_condition_group_action(condition_group=condition_group, action=action_1)
self.create_data_condition_group_action(condition_group=condition_group, action=action_2)
self.create_data_condition_group_action(condition_group=condition_group, action=action_3)
action_service.delete_actions_for_organization_integration(
organization_id=self.organization.id, integration_id=self.integration.id
)
with assume_test_silo_mode(SiloMode.REGION):
assert not Action.objects.filter(id=action_1.id).exists()
assert not Action.objects.filter(id=action_2.id).exists()
assert Action.objects.filter(id=action_3.id).exists()
def test_delete_actions_for_organization_integration_wrong_organization(self) -> None:
condition_group = self.create_data_condition_group(organization=self.organization_2)
action = self.create_action(
type=Action.Type.SLACK,
integration_id=self.integration.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "123",
"target_display": "Test Integration",
},
)
self.create_data_condition_group_action(condition_group=condition_group, action=action)
action_service.delete_actions_for_organization_integration(
organization_id=self.organization.id, integration_id=self.integration.id
)
with assume_test_silo_mode(SiloMode.REGION):
assert Action.objects.filter(id=action.id).exists()
def test_delete_actions_for_organization_integration_mixed_types(self) -> None:
condition_group = self.create_data_condition_group(organization=self.organization)
integration_action = self.create_action(
type=Action.Type.SLACK,
integration_id=self.integration.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "123",
"target_display": "Test Integration",
},
)
sentry_app_action = self.create_action(
type=Action.Type.SENTRY_APP,
config={
"target_identifier": str(self.sentry_app.id),
"sentry_app_identifier": SentryAppIdentifier.SENTRY_APP_ID,
"target_type": ActionTarget.SENTRY_APP,
},
)
self.create_data_condition_group_action(
condition_group=condition_group, action=integration_action
)
self.create_data_condition_group_action(
condition_group=condition_group, action=sentry_app_action
)
action_service.delete_actions_for_organization_integration(
organization_id=self.organization.id, integration_id=self.integration.id
)
with assume_test_silo_mode(SiloMode.REGION):
assert not Action.objects.filter(id=integration_action.id).exists()
assert Action.objects.filter(id=sentry_app_action.id).exists()
def test_disable_actions_for_organization_integration_mixed_types(self) -> None:
condition_group = self.create_data_condition_group(organization=self.organization)
integration_action = self.create_action(
type=Action.Type.SLACK,
integration_id=self.integration.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "123",
"target_display": "Test Integration",
},
)
sentry_app_action = self.create_action(
type=Action.Type.SENTRY_APP,
config={
"target_identifier": str(self.sentry_app.id),
"sentry_app_identifier": SentryAppIdentifier.SENTRY_APP_ID,
"target_type": ActionTarget.SENTRY_APP,
},
)
self.create_data_condition_group_action(
condition_group=condition_group, action=integration_action
)
self.create_data_condition_group_action(
condition_group=condition_group, action=sentry_app_action
)
action_service.update_action_status_for_organization_integration(
organization_id=self.organization.id,
integration_id=self.integration.id,
status=ObjectStatus.DISABLED,
)
with assume_test_silo_mode(SiloMode.REGION):
action = Action.objects.filter(id=integration_action.id).first()
assert action is not None
assert action.status == ObjectStatus.DISABLED
action = Action.objects.filter(id=sentry_app_action.id).first()
assert action is not None
assert action.status == ObjectStatus.ACTIVE
def test_enable_actions_for_organization_integration_mixed_types(self) -> None:
condition_group = self.create_data_condition_group(organization=self.organization)
integration_action = self.create_action(
type=Action.Type.SLACK,
integration_id=self.integration.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "123",
"target_display": "Test Integration",
},
status=ObjectStatus.DISABLED,
)
sentry_app_action = self.create_action(
type=Action.Type.SENTRY_APP,
config={
"target_identifier": str(self.sentry_app.id),
"sentry_app_identifier": SentryAppIdentifier.SENTRY_APP_ID,
"target_type": ActionTarget.SENTRY_APP,
},
status=ObjectStatus.DISABLED,
)
self.create_data_condition_group_action(
condition_group=condition_group, action=integration_action
)
self.create_data_condition_group_action(
condition_group=condition_group, action=sentry_app_action
)
action_service.update_action_status_for_organization_integration(
organization_id=self.organization.id,
integration_id=self.integration.id,
status=ObjectStatus.ACTIVE,
)
with assume_test_silo_mode(SiloMode.REGION):
action = Action.objects.filter(id=integration_action.id).first()
assert action is not None
assert action.status == ObjectStatus.ACTIVE
action = Action.objects.filter(id=sentry_app_action.id).first()
assert action is not None
assert action.status == ObjectStatus.DISABLED
def test_update_action_status_for_sentry_app__installation_uuid(self) -> None:
sentry_app_installation = self.create_sentry_app_installation(
slug=self.sentry_app.slug,
organization=self.organization,
)
action = self.create_action(
type=Action.Type.SENTRY_APP,
config={
"target_identifier": sentry_app_installation.uuid,
"sentry_app_identifier": SentryAppIdentifier.SENTRY_APP_INSTALLATION_UUID,
"target_type": ActionTarget.SENTRY_APP,
},
)
action_service.update_action_status_for_sentry_app_via_uuid(
organization_id=self.organization.id,
sentry_app_install_uuid=sentry_app_installation.uuid,
status=ObjectStatus.DISABLED,
)
with assume_test_silo_mode(SiloMode.REGION):
action.refresh_from_db()
assert action.status == ObjectStatus.DISABLED
def test_update_action_status_for_sentry_app__installation_uuid__region(self) -> None:
sentry_app_installation = self.create_sentry_app_installation(
slug=self.sentry_app.slug,
organization=self.organization,
)
action = self.create_action(
type=Action.Type.SENTRY_APP,
config={
"target_identifier": sentry_app_installation.uuid,
"sentry_app_identifier": SentryAppIdentifier.SENTRY_APP_INSTALLATION_UUID,
"target_type": ActionTarget.SENTRY_APP,
},
)
action_service.update_action_status_for_sentry_app_via_uuid__region(
region_name="us",
sentry_app_install_uuid=sentry_app_installation.uuid,
status=ObjectStatus.DISABLED,
)
with assume_test_silo_mode(SiloMode.REGION):
action.refresh_from_db()
assert action.status == ObjectStatus.DISABLED
def test_update_action_status_for_sentry_app__via_sentry_app_id(self) -> None:
action = self.create_action(
type=Action.Type.SENTRY_APP,
config={
"target_identifier": str(self.sentry_app.id),
"sentry_app_identifier": SentryAppIdentifier.SENTRY_APP_ID,
"target_type": ActionTarget.SENTRY_APP,
},
)
action_service.update_action_status_for_sentry_app_via_sentry_app_id(
region_name="us",
sentry_app_id=self.sentry_app.id,
status=ObjectStatus.DISABLED,
)
with assume_test_silo_mode(SiloMode.REGION):
action.refresh_from_db()
assert action.status == ObjectStatus.DISABLED
def test_update_action_status_for_webhook_via_sentry_app_slug(self) -> None:
action = self.create_action(
type=Action.Type.WEBHOOK,
config={
"target_identifier": self.sentry_app.slug,
},
)
action_service.update_action_status_for_webhook_via_sentry_app_slug(
region_name="us",
sentry_app_slug=self.sentry_app.slug,
status=ObjectStatus.DISABLED,
)
with assume_test_silo_mode(SiloMode.REGION):
action.refresh_from_db()
assert action.status == ObjectStatus.DISABLED
| TestActionService |
python | pytorch__pytorch | torch/fx/passes/infra/partitioner.py | {
"start": 1496,
"end": 2031
} | class ____:
def __init__(self, graph_module: GraphModule):
self.downstreams = collections.defaultdict(set)
for node in reversed(graph_module.graph.nodes):
for output_node in node.users:
# add output_node and output_node's downstream dependency
self.downstreams[node].add(output_node)
self.downstreams[node].update(self.downstreams[output_node])
def downstreams_of(self, node: Node) -> set[Node]:
return self.downstreams[node]
| _DependencyViewer |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/simple_inheritance/package.py | {
"start": 683,
"end": 1062
} | class ____(BaseWithDirectives):
"""Simple package which acts as a build dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/simple-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("openblas", when="+openblas")
provides("lapack", when="+openblas")
depends_on("c", type="build")
| SimpleInheritance |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 248582,
"end": 252001
} | class ____(rv_continuous):
r"""A Student's t continuous random variable.
For the noncentral t distribution, see `nct`.
%(before_notes)s
See Also
--------
nct
Notes
-----
The probability density function for `t` is:
.. math::
f(x, \nu) = \frac{\Gamma((\nu+1)/2)}
{\sqrt{\pi \nu} \Gamma(\nu/2)}
(1+x^2/\nu)^{-(\nu+1)/2}
where :math:`x` is a real number and the degrees of freedom parameter
:math:`\nu` (denoted ``df`` in the implementation) satisfies
:math:`\nu > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("df", False, (0, np.inf), (False, False))]
def _rvs(self, df, size=None, random_state=None):
return random_state.standard_t(df, size=size)
def _pdf(self, x, df):
return xpx.apply_where(
df == np.inf, (x, df),
lambda x, df: norm._pdf(x),
lambda x, df: np.exp(self._logpdf(x, df)))
def _logpdf(self, x, df):
def t_logpdf(x, df):
return (np.log(sc.poch(0.5 * df, 0.5))
- 0.5 * (np.log(df) + np.log(np.pi))
- (df + 1)/2*np.log1p(x * x/df))
def norm_logpdf(x, df):
return norm._logpdf(x)
return xpx.apply_where(df == np.inf, (x, df), norm_logpdf, t_logpdf)
def _cdf(self, x, df):
return sc.stdtr(df, x)
def _sf(self, x, df):
return sc.stdtr(df, -x)
def _ppf(self, q, df):
return sc.stdtrit(df, q)
def _isf(self, q, df):
return -sc.stdtrit(df, q)
def _stats(self, df):
# infinite df -> normal distribution (0.0, 1.0, 0.0, 0.0)
infinite_df = np.isposinf(df)
mu = np.where(df > 1, 0.0, np.inf)
condlist = ((df > 1) & (df <= 2),
(df > 2) & np.isfinite(df),
infinite_df)
choicelist = (lambda df: np.broadcast_to(np.inf, df.shape),
lambda df: df / (df-2.0),
lambda df: np.broadcast_to(1, df.shape))
mu2 = _lazyselect(condlist, choicelist, (df,), np.nan)
g1 = np.where(df > 3, 0.0, np.nan)
condlist = ((df > 2) & (df <= 4),
(df > 4) & np.isfinite(df),
infinite_df)
choicelist = (lambda df: np.broadcast_to(np.inf, df.shape),
lambda df: 6.0 / (df-4.0),
lambda df: np.broadcast_to(0, df.shape))
g2 = _lazyselect(condlist, choicelist, (df,), np.nan)
return mu, mu2, g1, g2
def _entropy(self, df):
if df == np.inf:
return norm._entropy()
def regular(df):
half = df/2
half1 = (df + 1)/2
return (half1*(sc.digamma(half1) - sc.digamma(half))
+ np.log(np.sqrt(df)*sc.beta(half, 0.5)))
def asymptotic(df):
# Formula from Wolfram Alpha:
# "asymptotic expansion (d+1)/2 * (digamma((d+1)/2) - digamma(d/2))
# + log(sqrt(d) * beta(d/2, 1/2))"
h = (norm._entropy() + 1/df + (df**-2.)/4 - (df**-3.)/6
- (df**-4.)/8 + 3/10*(df**-5.) + (df**-6.)/4)
return h
return xpx.apply_where(df >= 100, df, asymptotic, regular)
t = t_gen(name='t')
| t_gen |
python | kamyu104__LeetCode-Solutions | Python/minimum-incompatibility.py | {
"start": 12843,
"end": 14466
} | class ____(object):
def minimumIncompatibility(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def greedy(nums, k, is_reversed):
count = collections.Counter(nums)
if max(count.itervalues()) > k:
return -1
sorted_keys = sorted(count.keys(), reverse=is_reversed)
stks = [[] for _ in xrange(k)]
curr, remain = 0, len(nums)
while remain: # the while loop runs O(k) times, and the inner loops runs O(n) times
for x in sorted_keys: # fill the deterministic elements into the remaining subsets
if count[x] != len(stks)-curr:
continue
for i in xrange(curr, len(stks)):
stks[i].append(x)
remain -= count[x]
count[x] = 0
# greedily fill the contiguous ordered elements into the first vacant subset until it is full,
# otherwise, the result sum would get larger => in fact, this is wrong
for x in sorted_keys:
if not count[x]:
continue
stks[curr].append(x)
remain -= 1
count[x] -= 1
if len(stks[curr]) == len(nums)//k:
curr += 1
break
return sum([max(stk)-min(stk) for stk in stks])
return min(greedy(nums, k, False), greedy(nums, k, True)) # two possible minimas
| Solution_Wrong_Greedy |
python | py-pdf__pypdf | pypdf/annotations/_markup_annotations.py | {
"start": 2371,
"end": 4647
} | class ____(MarkupAnnotation):
"""A FreeText annotation"""
def __init__(
self,
*,
text: str,
rect: Union[RectangleObject, tuple[float, float, float, float]],
font: str = "Helvetica",
bold: bool = False,
italic: bool = False,
font_size: str = "14pt",
font_color: str = "000000",
border_color: Optional[str] = "000000",
background_color: Optional[str] = "ffffff",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self[NameObject("/Subtype")] = NameObject("/FreeText")
self[NameObject("/Rect")] = RectangleObject(rect)
# Table 225 of the 1.7 reference ("CSS2 style attributes used in rich text strings")
font_str = "font: "
if italic:
font_str = f"{font_str}italic "
else:
font_str = f"{font_str}normal "
if bold:
font_str = f"{font_str}bold "
else:
font_str = f"{font_str}normal "
font_str = f"{font_str}{font_size} {font}"
font_str = f"{font_str};text-align:left;color:#{font_color}"
default_appearance_string = ""
if border_color:
for st in hex_to_rgb(border_color):
default_appearance_string = f"{default_appearance_string}{st} "
default_appearance_string = f"{default_appearance_string}rg"
self.update(
{
NameObject("/Subtype"): NameObject("/FreeText"),
NameObject("/Rect"): RectangleObject(rect),
NameObject("/Contents"): TextStringObject(text),
# font size color
NameObject("/DS"): TextStringObject(font_str),
NameObject("/DA"): TextStringObject(default_appearance_string),
}
)
if border_color is None:
# Border Style
self[NameObject("/BS")] = DictionaryObject(
{
# width of 0 means no border
NameObject("/W"): NumberObject(0)
}
)
if background_color is not None:
self[NameObject("/C")] = ArrayObject(
[FloatObject(n) for n in hex_to_rgb(background_color)]
)
| FreeText |
python | nryoung__algorithms | tests/test_data_structures.py | {
"start": 16574,
"end": 17215
} | class ____(unittest.TestCase):
"""
Test Singly Linked List Implementation
"""
def test_singly_linked_list(self):
self.sl = singly_linked_list.SinglyLinkedList()
self.sl.add(10)
self.sl.add(5)
self.sl.add(30)
self.sl.remove(30)
self.assertEqual(self.sl.size, 2)
self.assertEqual(self.sl.search(30), False)
self.assertEqual(self.sl.search(5), True)
self.assertEqual(self.sl.search(10), True)
self.assertEqual(self.sl.remove(5), True)
self.assertEqual(self.sl.remove(10), True)
self.assertEqual(self.sl.size, 0)
| TestSinglyLinkedList |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.