language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ansible__ansible | test/integration/targets/plugin_loader/normal/action_plugins/self_referential.py | {
"start": 504,
"end": 918
} | class ____(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
result['changed'] = False
result['msg'] = 'self-referential action loaded and ran successfully'
return result
| ActionModule |
python | coleifer__peewee | tests/postgres.py | {
"start": 14507,
"end": 16440
} | class ____(ModelTestCase):
database = db
requires = [ArrayTSModel]
def dt(self, day, hour=0, minute=0, second=0):
return datetime.datetime(2018, 1, day, hour, minute, second)
def test_value_conversion(self):
data = {
'k1': [self.dt(1), self.dt(2), self.dt(3)],
'k2': [],
'k3': [self.dt(4, 5, 6, 7), self.dt(10, 11, 12, 13)],
}
for key in sorted(data):
ArrayTSModel.create(key=key, timestamps=data[key])
for key in sorted(data):
am = ArrayTSModel.get(ArrayTSModel.key == key)
self.assertEqual(am.timestamps, data[key])
# Perform lookup using timestamp values.
ts = ArrayTSModel.get(ArrayTSModel.timestamps.contains(self.dt(3)))
self.assertEqual(ts.key, 'k1')
ts = ArrayTSModel.get(
ArrayTSModel.timestamps.contains(self.dt(4, 5, 6, 7)))
self.assertEqual(ts.key, 'k3')
self.assertRaises(ArrayTSModel.DoesNotExist, ArrayTSModel.get,
ArrayTSModel.timestamps.contains(self.dt(4, 5, 6)))
def test_get_with_array_values(self):
a1 = ArrayTSModel.create(key='k1', timestamps=[self.dt(1)])
a2 = ArrayTSModel.create(key='k2', timestamps=[self.dt(2), self.dt(3)])
query = (ArrayTSModel
.select()
.where(ArrayTSModel.timestamps == [self.dt(1)]))
a1_db = query.get()
self.assertEqual(a1_db.id, a1.id)
query = (ArrayTSModel
.select()
.where(ArrayTSModel.timestamps == [self.dt(2), self.dt(3)]))
a2_db = query.get()
self.assertEqual(a2_db.id, a2.id)
a1_db = ArrayTSModel.get(timestamps=[self.dt(1)])
self.assertEqual(a1_db.id, a1.id)
a2_db = ArrayTSModel.get(timestamps=[self.dt(2), self.dt(3)])
self.assertEqual(a2_db.id, a2.id)
| TestArrayFieldConvertValues |
python | networkx__networkx | networkx/algorithms/shortest_paths/tests/test_weighted.py | {
"start": 15787,
"end": 17496
} | class ____:
"""Unit tests for the multi-source dialect of Dijkstra's shortest
path algorithms.
"""
def test_no_sources(self):
with pytest.raises(ValueError):
nx.multi_source_dijkstra(nx.Graph(), {})
def test_path_no_sources(self):
with pytest.raises(ValueError):
nx.multi_source_dijkstra_path(nx.Graph(), {})
def test_path_length_no_sources(self):
with pytest.raises(ValueError):
nx.multi_source_dijkstra_path_length(nx.Graph(), {})
@pytest.mark.parametrize(
"fn",
(
nx.multi_source_dijkstra_path,
nx.multi_source_dijkstra_path_length,
nx.multi_source_dijkstra,
),
)
def test_absent_source(self, fn):
G = nx.path_graph(2)
with pytest.raises(nx.NodeNotFound):
fn(G, [3], 0)
with pytest.raises(nx.NodeNotFound):
fn(G, [3], 3)
def test_two_sources(self):
edges = [(0, 1, 1), (1, 2, 1), (2, 3, 10), (3, 4, 1)]
G = nx.Graph()
G.add_weighted_edges_from(edges)
sources = {0, 4}
distances, paths = nx.multi_source_dijkstra(G, sources)
expected_distances = {0: 0, 1: 1, 2: 2, 3: 1, 4: 0}
expected_paths = {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [4, 3], 4: [4]}
assert distances == expected_distances
assert paths == expected_paths
def test_simple_paths(self):
G = nx.path_graph(4)
lengths = nx.multi_source_dijkstra_path_length(G, [0])
assert lengths == {n: n for n in G}
paths = nx.multi_source_dijkstra_path(G, [0])
assert paths == {n: list(range(n + 1)) for n in G}
| TestMultiSourceDijkstra |
python | pallets__click | src/click/exceptions.py | {
"start": 632,
"end": 1521
} | class ____(Exception):
"""An exception that Click can handle and show to the user."""
#: The exit code for this exception.
exit_code = 1
def __init__(self, message: str) -> None:
super().__init__(message)
# The context will be removed by the time we print the message, so cache
# the color settings here to be used later on (in `show`)
self.show_color: bool | None = resolve_color_default()
self.message = message
def format_message(self) -> str:
return self.message
def __str__(self) -> str:
return self.message
def show(self, file: t.IO[t.Any] | None = None) -> None:
if file is None:
file = get_text_stderr()
echo(
_("Error: {message}").format(message=self.format_message()),
file=file,
color=self.show_color,
)
| ClickException |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/operator1.py | {
"start": 1667,
"end": 1788
} | class ____:
__slots__ = ("__add__",)
def __init__(self):
self.__add__ = lambda x: x
e = E()
_ = e + e
| E |
python | falconry__falcon | tests/test_typing.py | {
"start": 184,
"end": 296
} | class ____:
userid: UUID | None = None
role: str = 'anonymous'
comment: str = 'no comment'
| RichContext |
python | ansible__ansible | lib/ansible/modules/group.py | {
"start": 18035,
"end": 19608
} | class ____(Group):
"""
This is a NetBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'NetBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('groupdel', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
if self.gid is not None:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
if self.gid_min is not None:
cmd.append('-K')
cmd.append('GID_MIN=' + str(self.gid_min))
if self.gid_max is not None:
cmd.append('-K')
cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('groupmod', True)]
info = self.group_info()
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
| NetBsdGroup |
python | eventlet__eventlet | eventlet/timeout.py | {
"start": 1549,
"end": 6644
} | class ____(BaseException):
"""Raises *exception* in the current greenthread after *timeout* seconds.
When *exception* is omitted or ``None``, the :class:`Timeout` instance
itself is raised. If *seconds* is None, the timer is not scheduled, and is
only useful if you're planning to raise it directly.
Timeout objects are context managers, and so can be used in with statements.
When used in a with statement, if *exception* is ``False``, the timeout is
still raised, but the context manager suppresses it, so the code outside the
with-block won't see it.
"""
def __init__(self, seconds=None, exception=None):
self.seconds = seconds
self.exception = exception
self.timer = None
self.start()
def start(self):
"""Schedule the timeout. This is called on construction, so
it should not be called explicitly, unless the timer has been
canceled."""
assert not self.pending, \
'%r is already started; to restart it, cancel it first' % self
if self.seconds is None: # "fake" timeout (never expires)
self.timer = None
elif self.exception is None or isinstance(self.exception, bool): # timeout that raises self
self.timer = get_hub().schedule_call_global(
self.seconds, greenlet.getcurrent().throw, self)
else: # regular timeout with user-provided exception
self.timer = get_hub().schedule_call_global(
self.seconds, greenlet.getcurrent().throw, self.exception)
return self
@property
def pending(self):
"""True if the timeout is scheduled to be raised."""
if self.timer is not None:
return self.timer.pending
else:
return False
def cancel(self):
"""If the timeout is pending, cancel it. If not using
Timeouts in ``with`` statements, always call cancel() in a
``finally`` after the block of code that is getting timed out.
If not canceled, the timeout will be raised later on, in some
unexpected section of the application."""
if self.timer is not None:
self.timer.cancel()
self.timer = None
def __repr__(self):
classname = self.__class__.__name__
if self.pending:
pending = ' pending'
else:
pending = ''
if self.exception is None:
exception = ''
else:
exception = ' exception=%r' % self.exception
return '<%s at %s seconds=%s%s%s>' % (
classname, hex(id(self)), self.seconds, exception, pending)
def __str__(self):
"""
>>> raise Timeout # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Timeout
"""
if self.seconds is None:
return ''
if self.seconds == 1:
suffix = ''
else:
suffix = 's'
if self.exception is None or self.exception is True:
return '%s second%s' % (self.seconds, suffix)
elif self.exception is False:
return '%s second%s (silent)' % (self.seconds, suffix)
else:
return '%s second%s (%s)' % (self.seconds, suffix, self.exception)
def __enter__(self):
if self.timer is None:
self.start()
return self
def __exit__(self, typ, value, tb):
self.cancel()
if value is self and self.exception is False:
return True
@property
def is_timeout(self):
return True
def with_timeout(seconds, function, *args, **kwds):
"""Wrap a call to some (yielding) function with a timeout; if the called
function fails to return before the timeout, cancel it and return a flag
value.
"""
timeout_value = kwds.pop("timeout_value", _MISSING)
timeout = Timeout(seconds)
try:
try:
return function(*args, **kwds)
except Timeout as ex:
if ex is timeout and timeout_value is not _MISSING:
return timeout_value
raise
finally:
timeout.cancel()
def wrap_is_timeout(base):
'''Adds `.is_timeout=True` attribute to objects returned by `base()`.
When `base` is class, attribute is added as read-only property. Returns `base`.
Otherwise, it returns a function that sets attribute on result of `base()` call.
Wrappers make best effort to be transparent.
'''
if inspect.isclass(base):
base.is_timeout = property(lambda _: True)
return base
@functools.wraps(base)
def fun(*args, **kwargs):
ex = base(*args, **kwargs)
ex.is_timeout = True
return ex
return fun
if isinstance(__builtins__, dict): # seen when running tests on py310, but HOW??
_timeout_err = __builtins__.get('TimeoutError', Timeout)
else:
_timeout_err = getattr(__builtins__, 'TimeoutError', Timeout)
def is_timeout(obj):
return bool(getattr(obj, 'is_timeout', False)) or isinstance(obj, _timeout_err)
| Timeout |
python | TheAlgorithms__Python | data_structures/linked_list/doubly_linked_list_two.py | {
"start": 815,
"end": 1165
} | class ____:
def __init__(self, head):
self.current = head
def __iter__(self):
return self
def __next__(self):
if not self.current:
raise StopIteration
else:
value = self.current.data
self.current = self.current.next
return value
@dataclass
| LinkedListIterator |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/property_graph/transformations/dynamic_llm.py | {
"start": 6510,
"end": 16934
} | class ____(TransformComponent):
"""
DynamicLLMPathExtractor is a component for extracting structured information from text
to build a knowledge graph. It uses an LLM to identify entities and their relationships,
with the ability to infer entity types and expand upon an initial ontology.
This extractor improves upon SimpleLLMPathExtractor by:
1. Detecting entity types instead of labeling them generically as "entity" and "chunk".
2. Accepting an initial ontology as input, specifying desired nodes and relationships.
3. Encouraging ontology expansion through its prompt design.
This extractor differs from SchemaLLMPathExtractor because:
1. It interprets the passed possible entities and relations as an initial ontology.
2. It encourages expansion of the initial ontology in the prompt.
3. It aims for flexibility in knowledge graph construction while still providing guidance.
Attributes:
llm (LLM): The language model used for extraction.
extract_prompt (PromptTemplate): The prompt template used to guide the LLM.
parse_fn (Callable): Function to parse the LLM output into triplets.
num_workers (int): Number of workers for parallel processing.
max_triplets_per_chunk (int): Maximum number of triplets to extract per text chunk.
allowed_entity_types (List[str]): List of initial entity types for the ontology.
allowed_entity_props (Optional[Union[List[str], List[Tuple[str, str]]]]):
List of initial entity properties for the ontology.
Can be either property names or tuples of (name, description).
allowed_relation_types (List[str]): List of initial relation types for the ontology.
allowed_relation_props (Optional[Union[List[str], List[Tuple[str, str]]]]):
List of initial relation properties for the ontology.
Can be either property names or tuples of (name, description).
"""
llm: LLM
extract_prompt: PromptTemplate
parse_fn: Callable
num_workers: int
max_triplets_per_chunk: int
allowed_entity_types: List[str]
allowed_entity_props: List[str]
allowed_relation_types: Optional[List[str]]
allowed_relation_props: Optional[List[str]]
def __init__(
self,
llm: Optional[LLM] = None,
extract_prompt: Optional[Union[str, PromptTemplate]] = None,
parse_fn: Optional[Callable] = None,
max_triplets_per_chunk: int = 10,
num_workers: int = 4,
allowed_entity_types: Optional[List[str]] = None,
allowed_entity_props: Optional[Union[List[str], List[Tuple[str, str]]]] = None,
allowed_relation_types: Optional[List[str]] = None,
allowed_relation_props: Optional[
Union[List[str], List[Tuple[str, str]]]
] = None,
) -> None:
"""
Initialize the DynamicLLMPathExtractor.
Args:
llm (Optional[LLM]): The language model to use. If None, uses the default from Settings.
extract_prompt (Optional[Union[str, PromptTemplate]]): The prompt template to use.
parse_fn (Callable): Function to parse LLM output into triplets.
max_triplets_per_chunk (int): Maximum number of triplets to extract per chunk.
num_workers (int): Number of workers for parallel processing.
allowed_entity_types (Optional[List[str]]): List of initial entity types for the ontology.
allowed_relation_types (Optional[List[str]]): List of initial relation types for the ontology.
"""
from llama_index.core import Settings
if isinstance(extract_prompt, str):
extract_prompt = PromptTemplate(extract_prompt)
if extract_prompt is None:
if allowed_entity_props is not None or allowed_relation_props is not None:
extract_prompt = DEFAULT_DYNAMIC_EXTRACT_PROPS_PROMPT
else:
extract_prompt = DEFAULT_DYNAMIC_EXTRACT_PROMPT
if parse_fn is None:
if allowed_entity_props is not None or allowed_relation_props is not None:
parse_fn = default_parse_dynamic_triplets_with_props
else:
parse_fn = default_parse_dynamic_triplets
# convert props to name -> description format if needed
if allowed_entity_props and isinstance(allowed_entity_props[0], tuple):
allowed_entity_props = [ # type: ignore
f"Property `{k}` with description ({v})"
for k, v in allowed_entity_props # type: ignore
]
if allowed_relation_props and isinstance(allowed_relation_props[0], tuple):
allowed_relation_props = [ # type: ignore
f"Property `{k}` with description ({v})"
for k, v in allowed_relation_props # type: ignore
]
super().__init__(
llm=llm or Settings.llm,
extract_prompt=extract_prompt,
parse_fn=parse_fn,
num_workers=num_workers,
max_triplets_per_chunk=max_triplets_per_chunk,
allowed_entity_types=allowed_entity_types or [],
allowed_entity_props=allowed_entity_props or [],
allowed_relation_types=allowed_relation_types or [],
allowed_relation_props=allowed_relation_props or [],
)
@classmethod
def class_name(cls) -> str:
"""Return the name of the class."""
return "DynamicLLMPathExtractor"
def __call__(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> List[BaseNode]:
"""
Extract triples from nodes.
Args:
nodes (List[BaseNode]): List of nodes to process.
show_progress (bool): Whether to show a progress bar.
**kwargs: Additional keyword arguments.
Returns:
List[BaseNode]: Processed nodes with extracted information.
"""
return asyncio.run(self.acall(nodes, show_progress=show_progress, **kwargs))
async def _apredict_without_props(self, text: str) -> str:
"""
Asynchronously predict triples from text without properties.
Args:
text (str): The text to process.
Returns:
str: The predicted triples.
"""
return await self.llm.apredict(
self.extract_prompt,
text=text,
max_knowledge_triplets=self.max_triplets_per_chunk,
allowed_entity_types=", ".join(self.allowed_entity_types)
if len(self.allowed_entity_types or []) > 0
else "No entity types provided, You are free to define them.",
allowed_relation_types=", ".join(self.allowed_relation_types or [])
if len(self.allowed_relation_types or []) > 0
else "No relation types provided, You are free to define them.",
)
async def _apredict_with_props(self, text: str) -> str:
"""
Asynchronously predict triples from text with properties.
Args:
text (str): The text to process.
Returns:
str: The predicted triples.
"""
return await self.llm.apredict(
self.extract_prompt,
text=text,
max_knowledge_triplets=self.max_triplets_per_chunk,
allowed_entity_types=", ".join(self.allowed_entity_types)
if len(self.allowed_entity_types or []) > 0
else "No entity types provided, You are free to define them.",
allowed_relation_types=", ".join(self.allowed_relation_types or [])
if len(self.allowed_relation_types or []) > 0
else "No relation types provided, You are free to define them.",
allowed_entity_properties=", ".join(self.allowed_entity_props)
if self.allowed_entity_props
else "No entity properties provided, You are free to define them.",
allowed_relation_properties=", ".join(self.allowed_relation_props)
if self.allowed_relation_props
else "No relation properties provided, You are free to define them.",
)
async def _aextract(self, node: BaseNode) -> BaseNode:
"""
Asynchronously extract triples from a single node.
Args:
node (BaseNode): The node to process.
Returns:
BaseNode: The processed node with extracted information.
"""
text = node.get_content(metadata_mode=MetadataMode.LLM)
try:
if (
self.allowed_entity_props is not None
and self.allowed_relation_props is not None
):
llm_response = await self._apredict_with_props(text)
else:
llm_response = await self._apredict_without_props(text)
triplets = self.parse_fn(llm_response)
except Exception as e:
print(f"Error during extraction: {e!s}")
triplets = []
existing_nodes = node.metadata.pop(KG_NODES_KEY, [])
existing_relations = node.metadata.pop(KG_RELATIONS_KEY, [])
metadata = node.metadata.copy()
for subj, rel, obj in triplets:
subj.properties.update(metadata)
obj.properties.update(metadata)
rel.properties.update(metadata)
existing_nodes.extend([subj, obj])
existing_relations.append(rel)
node.metadata[KG_NODES_KEY] = existing_nodes
node.metadata[KG_RELATIONS_KEY] = existing_relations
return node
async def acall(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> List[BaseNode]:
"""
Asynchronously extract triples from multiple nodes.
Args:
nodes (List[BaseNode]): List of nodes to process.
show_progress (bool): Whether to show a progress bar.
**kwargs: Additional keyword arguments.
Returns:
List[BaseNode]: Processed nodes with extracted information.
"""
jobs = []
for node in nodes:
jobs.append(self._aextract(node))
return await run_jobs(
jobs,
workers=self.num_workers,
show_progress=show_progress,
desc="Extracting and inferring knowledge graph from text",
)
| DynamicLLMPathExtractor |
python | pandas-dev__pandas | pandas/tests/frame/test_query_eval.py | {
"start": 38065,
"end": 46411
} | class ____:
def test_str_query_method(self, parser, engine):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 1)), columns=["b"])
df["strings"] = Series(list("aabbccddee"))
expect = df[df.strings == "a"]
if parser != "pandas":
col = "strings"
lst = '"a"'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = "==", "!="
ops = 2 * ([eq] + [ne])
msg = r"'(Not)?In' nodes are not implemented"
for lh, op_, rh in zip(lhs, ops, rhs):
ex = f"{lh} {op_} {rh}"
with pytest.raises(NotImplementedError, match=msg):
df.query(
ex,
engine=engine,
parser=parser,
local_dict={"strings": df.strings},
)
else:
res = df.query('"a" == strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('strings == "a"', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
tm.assert_frame_equal(res, df[df.strings.isin(["a"])])
expect = df[df.strings != "a"]
res = df.query('strings != "a"', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('"a" != strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
tm.assert_frame_equal(res, df[~df.strings.isin(["a"])])
def test_str_list_query_method(self, parser, engine):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 1)), columns=["b"])
df["strings"] = Series(list("aabbccddee"))
expect = df[df.strings.isin(["a", "b"])]
if parser != "pandas":
col = "strings"
lst = '["a", "b"]'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = "==", "!="
ops = 2 * ([eq] + [ne])
msg = r"'(Not)?In' nodes are not implemented"
for lh, ops_, rh in zip(lhs, ops, rhs):
ex = f"{lh} {ops_} {rh}"
with pytest.raises(NotImplementedError, match=msg):
df.query(ex, engine=engine, parser=parser)
else:
res = df.query('strings == ["a", "b"]', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('["a", "b"] == strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
expect = df[~df.strings.isin(["a", "b"])]
res = df.query('strings != ["a", "b"]', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('["a", "b"] != strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
def test_query_with_string_columns(self, parser, engine):
df = DataFrame(
{
"a": list("aaaabbbbcccc"),
"b": list("aabbccddeeff"),
"c": np.random.default_rng(2).integers(5, size=12),
"d": np.random.default_rng(2).integers(9, size=12),
}
)
if parser == "pandas":
res = df.query("a in b", parser=parser, engine=engine)
expec = df[df.a.isin(df.b)]
tm.assert_frame_equal(res, expec)
res = df.query("a in b and c < d", parser=parser, engine=engine)
expec = df[df.a.isin(df.b) & (df.c < df.d)]
tm.assert_frame_equal(res, expec)
else:
msg = r"'(Not)?In' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
df.query("a in b", parser=parser, engine=engine)
msg = r"'BoolOp' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
df.query("a in b and c < d", parser=parser, engine=engine)
def test_object_array_eq_ne(self, parser, engine):
df = DataFrame(
{
"a": list("aaaabbbbcccc"),
"b": list("aabbccddeeff"),
"c": np.random.default_rng(2).integers(5, size=12),
"d": np.random.default_rng(2).integers(9, size=12),
}
)
res = df.query("a == b", parser=parser, engine=engine)
exp = df[df.a == df.b]
tm.assert_frame_equal(res, exp)
res = df.query("a != b", parser=parser, engine=engine)
exp = df[df.a != df.b]
tm.assert_frame_equal(res, exp)
def test_query_with_nested_strings(self, parser, engine):
skip_if_no_pandas_parser(parser)
events = [
f"page {n} {act}" for n in range(1, 4) for act in ["load", "exit"]
] * 2
stamps1 = date_range("2014-01-01 0:00:01", freq="30s", periods=6)
stamps2 = date_range("2014-02-01 1:00:01", freq="30s", periods=6)
df = DataFrame(
{
"id": np.arange(1, 7).repeat(2),
"event": events,
"timestamp": stamps1.append(stamps2),
}
)
expected = df[df.event == '"page 1 load"']
res = df.query("""'"page 1 load"' in event""", parser=parser, engine=engine)
tm.assert_frame_equal(expected, res)
def test_query_with_nested_special_character(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = DataFrame({"a": ["a", "b", "test & test"], "b": [1, 2, 3]})
res = df.query('a == "test & test"', parser=parser, engine=engine)
expec = df[df.a == "test & test"]
tm.assert_frame_equal(res, expec)
@pytest.mark.parametrize(
"op, func",
[
["<", operator.lt],
[">", operator.gt],
["<=", operator.le],
[">=", operator.ge],
],
)
def test_query_lex_compare_strings(
self, parser, engine, op, func
):
a = Series(np.random.default_rng(2).choice(list("abcde"), 20))
b = Series(np.arange(a.size))
df = DataFrame({"X": a, "Y": b})
res = df.query(f'X {op} "d"', engine=engine, parser=parser)
expected = df[func(df.X, "d")]
tm.assert_frame_equal(res, expected)
def test_query_single_element_booleans(self, parser, engine):
columns = "bid", "bidsize", "ask", "asksize"
data = np.random.default_rng(2).integers(2, size=(1, len(columns))).astype(bool)
df = DataFrame(data, columns=columns)
res = df.query("bid & ask", engine=engine, parser=parser)
expected = df[df.bid & df.ask]
tm.assert_frame_equal(res, expected)
def test_query_string_scalar_variable(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = DataFrame(
{
"Symbol": ["BUD US", "BUD US", "IBM US", "IBM US"],
"Price": [109.70, 109.72, 183.30, 183.35],
}
)
e = df[df.Symbol == "BUD US"]
symb = "BUD US" # noqa: F841
r = df.query("Symbol == @symb", parser=parser, engine=engine)
tm.assert_frame_equal(e, r)
@pytest.mark.parametrize(
"in_list",
[
[None, "asdf", "ghjk"],
["asdf", None, "ghjk"],
["asdf", "ghjk", None],
[None, None, "asdf"],
["asdf", None, None],
[None, None, None],
],
)
def test_query_string_null_elements(self, in_list):
# GITHUB ISSUE #31516
parser = "pandas"
engine = "python"
expected = {i: value for i, value in enumerate(in_list) if value == "asdf"}
df_expected = DataFrame({"a": expected}, dtype="string")
df_expected.index = df_expected.index.astype("int64")
df = DataFrame({"a": in_list}, dtype="string")
df.index = Index(list(df.index), dtype=df.index.dtype)
res1 = df.query("a == 'asdf'", parser=parser, engine=engine)
res2 = df[df["a"] == "asdf"]
res3 = df.query("a <= 'asdf'", parser=parser, engine=engine)
tm.assert_frame_equal(res1, df_expected)
tm.assert_frame_equal(res1, res2)
tm.assert_frame_equal(res1, res3)
tm.assert_frame_equal(res2, res3)
| TestDataFrameQueryStrings |
python | pytorch__pytorch | torch/_inductor/exc.py | {
"start": 2610,
"end": 3314
} | class ____(RuntimeError):
def __init__(self, cmd: list[str], output: str) -> None:
if isinstance(output, bytes):
output = output.decode("utf-8")
self.cmd = cmd
self.output = output
super().__init__(
textwrap.dedent(
"""
C++ compile error
Command:
{cmd}
Output:
{output}
"""
)
.strip()
.format(cmd=" ".join(cmd), output=output)
)
def __reduce__(self) -> tuple[type, tuple[list[str], str]]:
return (self.__class__, (self.cmd, self.output))
| CppCompileError |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/api.py | {
"start": 987,
"end": 1081
} | class ____(RendezvousError):
"""Raised when a rendezvous is closed."""
| RendezvousClosedError |
python | django__django | tests/admin_checks/tests.py | {
"start": 1245,
"end": 1308
} | class ____(MessageMiddleware):
pass
| MessageMiddlewareSubclass |
python | plotly__plotly.py | plotly/graph_objs/bar/unselected/_marker.py | {
"start": 233,
"end": 3305
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "bar.unselected"
_path_str = "bar.unselected.marker"
_valid_props = {"color", "opacity"}
@property
def color(self):
"""
Sets the marker color of unselected points, applied only when a
selection exists.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def opacity(self):
"""
Sets the marker opacity of unselected points, applied only when
a selection exists.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
"""
def __init__(self, arg=None, color=None, opacity=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.bar.unselected.Marker`
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.bar.unselected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.unselected.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("opacity", arg, opacity)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/sql/typed_results.py | {
"start": 1447,
"end": 14021
} | class ____(Base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
value: Mapped[Optional[str]]
t_user = Table(
"user",
MetaData(),
Column("id", Integer, primary_key=True),
Column("name", String),
)
e = create_engine("sqlite://")
ae = create_async_engine("sqlite+aiosqlite://")
connection = e.connect()
session = Session(connection)
async def async_connect() -> AsyncConnection:
return await ae.connect()
# the thing with the \*? seems like it could go away
# as of mypy 0.950
async_connection = asyncio.run(async_connect())
assert_type(async_connection, AsyncConnection)
async_session = AsyncSession(async_connection)
# (variable) users1: Sequence[User]
users1 = session.scalars(select(User)).all()
# (variable) user: User
user = session.query(User).one()
user_iter = iter(session.scalars(select(User)))
assert_type(async_session, AsyncSession)
single_stmt = select(User.name).where(User.name == "foo")
assert_type(single_stmt, Select[str])
multi_stmt = select(User.id, User.name).where(User.name == "foo")
assert_type(multi_stmt, Select[int, str])
def t_result_ctxmanager() -> None:
with connection.execute(select(column("q", Integer))) as r1:
assert_type(r1, CursorResult[int])
with r1.mappings() as r1m:
assert_type(r1m, MappingResult)
with connection.scalars(select(column("q", Integer))) as r2:
assert_type(r2, ScalarResult[int])
with session.execute(select(User.id)) as r3:
assert_type(r3, Result[int])
with session.scalars(select(User.id)) as r4:
assert_type(r4, ScalarResult[int])
def t_mappings() -> None:
r = connection.execute(select(t_user)).mappings().one()
r["name"] # string
r.get(t_user.c.id) # column
r2 = connection.execute(select(User)).mappings().one()
r2[User.id] # orm attribute
r2[User.__table__.c.id] # form clause column
m2 = User.id * 2
s2 = User.__table__.c.id + 2
fn = func.abs(User.id)
r3 = connection.execute(select(m2, s2, fn)).mappings().one()
r3[m2] # col element
r3[s2] # also col element
r3[fn] # function
def t_entity_varieties() -> None:
a1 = aliased(User)
s1 = select(User.id, User, User.name).where(User.name == "foo")
r1 = session.execute(s1)
assert_type(r1, Result[int, User, str])
s2 = select(User, a1).where(User.name == "foo")
r2 = session.execute(s2)
assert_type(r2, Result[User, User])
row = r2.t.one()
assert_type(row[0], User)
assert_type(row[1], User)
# testing that plain Mapped[x] gets picked up as well as
# aliased class
# there is unfortunately no way for attributes on an AliasedClass to be
# automatically typed since they are dynamically generated
a1_id = cast(Mapped[int], a1.id)
s3 = select(User.id, a1_id, a1, User).where(User.name == "foo")
assert_type(s3, Select[int, int, User, User])
# testing Mapped[entity]
some_mp = cast(Mapped[User], object())
s4 = select(some_mp, a1, User).where(User.name == "foo")
# NOTEXPECTED_RE_TYPE: sqlalchemy..*Select\*?\[User\*?, User\*?, User\*?\]
# sqlalchemy.sql._gen_overloads.Select[User, User, User]
assert_type(s4, Select[User, User, User])
# test plain core expressions
x = Column("x", Integer)
y = x + 5
s5 = select(x, y, User.name + "hi")
assert_type(s5, Select[int, int, str])
def t_ambiguous_result_type_one() -> None:
stmt = select(column("q", Integer), table("x", column("y")))
assert_type(stmt, Select[Unpack[tuple[Any, ...]]])
result = session.execute(stmt)
assert_type(result, Result[Unpack[tuple[Any, ...]]])
def t_ambiguous_result_type_two() -> None:
stmt = select(column("q"))
assert_type(stmt, Select[Any])
result = session.execute(stmt)
assert_type(result, Result[Unpack[tuple[Any, ...]]])
def t_aliased() -> None:
a1 = aliased(User)
s1 = select(a1)
assert_type(s1, Select[User])
s4 = select(a1.name, a1, a1, User).where(User.name == "foo")
assert_type(s4, Select[str, User, User, User])
def t_result_scalar_accessors() -> None:
result = connection.execute(single_stmt)
r1 = result.scalar()
assert_type(r1, str | None)
r2 = result.scalar_one()
assert_type(r2, str)
r3 = result.scalar_one_or_none()
assert_type(r3, str | None)
r4 = result.scalars()
assert_type(r4, ScalarResult[str])
r5 = result.scalars(0)
assert_type(r5, ScalarResult[str])
async def t_async_result_scalar_accessors() -> None:
result = await async_connection.stream(single_stmt)
r1 = await result.scalar()
assert_type(r1, str | None)
r2 = await result.scalar_one()
assert_type(r2, str)
r3 = await result.scalar_one_or_none()
assert_type(r3, str | None)
r4 = result.scalars()
assert_type(r4, AsyncScalarResult[str])
r5 = result.scalars(0)
assert_type(r5, AsyncScalarResult[str])
def t_result_insertmanyvalues_scalars() -> None:
stmt = insert(User).returning(User.id)
uids1 = connection.scalars(
stmt,
[
{"name": "n1"},
{"name": "n2"},
{"name": "n3"},
],
).all()
assert_type(uids1, Sequence[int])
uids2 = (
connection.execute(
stmt,
[
{"name": "n1"},
{"name": "n2"},
{"name": "n3"},
],
)
.scalars()
.all()
)
assert_type(uids2, Sequence[int])
async def t_async_result_insertmanyvalues_scalars() -> None:
stmt = insert(User).returning(User.id)
uids1 = (
await async_connection.scalars(
stmt,
[
{"name": "n1"},
{"name": "n2"},
{"name": "n3"},
],
)
).all()
assert_type(uids1, Sequence[int])
uids2 = (
(
await async_connection.execute(
stmt,
[
{"name": "n1"},
{"name": "n2"},
{"name": "n3"},
],
)
)
.scalars()
.all()
)
assert_type(uids2, Sequence[int])
def t_connection_execute_multi_row_t() -> None:
result = connection.execute(multi_stmt)
assert_type(result, CursorResult[int, str])
row = result.one()
assert_type(row, Row[int, str])
x, y = row.t
assert_type(x, int)
assert_type(y, str)
def t_connection_execute_multi() -> None:
result = connection.execute(multi_stmt).t
assert_type(result, TupleResult[tuple[int, str]])
row = result.one()
assert_type(row, tuple[int, str])
x, y = row
assert_type(x, int)
assert_type(y, str)
def t_connection_execute_single() -> None:
result = connection.execute(single_stmt).t
assert_type(result, TupleResult[tuple[str]])
row = result.one()
assert_type(row, tuple[str])
(x,) = row
assert_type(x, str)
def t_connection_execute_single_row_scalar() -> None:
result = connection.execute(single_stmt).t
assert_type(result, TupleResult[tuple[str]])
x = result.scalar()
assert_type(x, str | None)
def t_connection_scalar() -> None:
obj = connection.scalar(single_stmt)
assert_type(obj, str | None)
def t_connection_scalars() -> None:
result = connection.scalars(single_stmt)
assert_type(result, ScalarResult[str])
data = result.all()
assert_type(data, Sequence[str])
def t_session_execute_multi() -> None:
result = session.execute(multi_stmt).t
assert_type(result, TupleResult[tuple[int, str]])
row = result.one()
assert_type(row, tuple[int, str])
x, y = row
assert_type(x, int)
assert_type(y, str)
def t_session_execute_single() -> None:
result = session.execute(single_stmt).t
assert_type(result, TupleResult[tuple[str]])
row = result.one()
assert_type(row, tuple[str])
(x,) = row
assert_type(x, str)
def t_session_scalar() -> None:
obj = session.scalar(single_stmt)
assert_type(obj, str | None)
def t_session_scalars() -> None:
result = session.scalars(single_stmt)
assert_type(result, ScalarResult[str])
data = result.all()
assert_type(data, Sequence[str])
async def t_async_connection_execute_multi() -> None:
result = (await async_connection.execute(multi_stmt)).t
assert_type(result, TupleResult[tuple[int, str]])
row = result.one()
assert_type(row, tuple[int, str])
x, y = row
assert_type(x, int)
assert_type(y, str)
async def t_async_connection_execute_single() -> None:
result = (await async_connection.execute(single_stmt)).t
assert_type(result, TupleResult[tuple[str]])
row = result.one()
assert_type(row, tuple[str])
(x,) = row
assert_type(x, str)
async def t_async_connection_scalar() -> None:
obj = await async_connection.scalar(single_stmt)
assert_type(obj, str | None)
async def t_async_connection_scalars() -> None:
result = await async_connection.scalars(single_stmt)
assert_type(result, ScalarResult[str])
data = result.all()
assert_type(data, Sequence[str])
async def t_async_session_execute_multi() -> None:
result = (await async_session.execute(multi_stmt)).t
assert_type(result, TupleResult[tuple[int, str]])
row = result.one()
assert_type(row, tuple[int, str])
x, y = row
assert_type(x, int)
assert_type(y, str)
async def t_async_session_execute_single() -> None:
result = (await async_session.execute(single_stmt)).t
assert_type(result, TupleResult[tuple[str]])
row = result.one()
assert_type(row, tuple[str])
(x,) = row
assert_type(x, str)
async def t_async_session_scalar() -> None:
obj = await async_session.scalar(single_stmt)
assert_type(obj, str | None)
async def t_async_session_scalars() -> None:
result = await async_session.scalars(single_stmt)
assert_type(result, ScalarResult[str])
data = result.all()
assert_type(data, Sequence[str])
async def t_async_connection_stream_multi() -> None:
result = (await async_connection.stream(multi_stmt)).t
assert_type(result, AsyncTupleResult[tuple[int, str]])
row = await result.one()
assert_type(row, tuple[int, str])
x, y = row
assert_type(x, int)
assert_type(y, str)
async def t_async_connection_stream_single() -> None:
result = (await async_connection.stream(single_stmt)).t
assert_type(result, AsyncTupleResult[tuple[str]])
row = await result.one()
assert_type(row, tuple[str])
(x,) = row
assert_type(x, str)
async def t_async_connection_stream_scalars() -> None:
result = await async_connection.stream_scalars(single_stmt)
assert_type(result, AsyncScalarResult[str])
data = await result.all()
assert_type(data, Sequence[str])
async def t_async_session_stream_multi() -> None:
result = (await async_session.stream(multi_stmt)).t
assert_type(result, AsyncTupleResult[tuple[int, str]])
row = await result.one()
assert_type(row, tuple[int, str])
x, y = row
assert_type(x, int)
assert_type(y, str)
async def t_async_session_stream_single() -> None:
result = (await async_session.stream(single_stmt)).t
assert_type(result, AsyncTupleResult[tuple[str]])
row = await result.one()
assert_type(row, tuple[str])
(x,) = row
assert_type(x, str)
async def t_async_session_stream_scalars() -> None:
result = await async_session.stream_scalars(single_stmt)
assert_type(result, AsyncScalarResult[str])
data = await result.all()
assert_type(data, Sequence[str])
def test_outerjoin_10173() -> None:
class Other(Base):
__tablename__ = "other"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
stmt: Select[User, Other] = select(User, Other).outerjoin(
Other, User.id == Other.id
)
stmt2: Select[User, Optional[Other]] = select(
User, Nullable(Other)
).outerjoin(Other, User.id == Other.id)
stmt3: Select[int, Optional[str]] = select(
User.id, Nullable(Other.name)
).outerjoin(Other, User.id == Other.id)
def go(W: Optional[Type[Other]]) -> None:
stmt4: Select[str, Other] = select(
NotNullable(User.value), NotNullable(W)
).where(User.value.is_not(None))
print(stmt4)
print(stmt, stmt2, stmt3)
| User |
python | numba__numba | numba/tests/test_withlifting.py | {
"start": 5668,
"end": 8021
} | class ____(BaseTestWithLifting):
def check_same_semantic(self, func):
"""Ensure same semantic with non-jitted code
"""
jitted = njit(func)
with captured_stdout() as got:
jitted()
with captured_stdout() as expect:
func()
self.assertEqual(got.getvalue(), expect.getvalue())
def test_liftcall1(self):
self.check_extracted_with(liftcall1, expect_count=1,
expected_stdout="A 1\nB 2\n")
self.check_same_semantic(liftcall1)
def test_liftcall2(self):
self.check_extracted_with(liftcall2, expect_count=2,
expected_stdout="A 1\nB 2\nC 12\n")
self.check_same_semantic(liftcall2)
def test_liftcall3(self):
self.check_extracted_with(liftcall3, expect_count=2,
expected_stdout="A 1\nB 2\nC 47\n")
self.check_same_semantic(liftcall3)
def test_liftcall4(self):
accept = (errors.TypingError, errors.NumbaRuntimeError,
errors.NumbaValueError, errors.CompilerError)
with self.assertRaises(accept) as raises:
njit(liftcall4)()
# Known error. We only support one context manager per function
# for body that are lifted.
msg = ("compiler re-entrant to the same function signature")
self.assertIn(msg, str(raises.exception))
@expected_failure_py311
@expected_failure_py312
@expected_failure_py313
@expected_failure_py314
def test_liftcall5(self):
self.check_extracted_with(liftcall5, expect_count=1,
expected_stdout="0\n1\n2\n3\n4\n5\nA\n")
self.check_same_semantic(liftcall5)
def expected_failure_for_list_arg(fn):
def core(self, *args, **kwargs):
with self.assertRaises(errors.TypingError) as raises:
fn(self, *args, **kwargs)
self.assertIn('Does not support list type',
str(raises.exception))
return core
def expected_failure_for_function_arg(fn):
def core(self, *args, **kwargs):
with self.assertRaises(errors.TypingError) as raises:
fn(self, *args, **kwargs)
self.assertIn('Does not support function type',
str(raises.exception))
return core
| TestLiftCall |
python | django__django | tests/gis_tests/test_fields.py | {
"start": 477,
"end": 1553
} | class ____(SimpleTestCase):
def test_deconstruct_empty(self):
field = GeometryField()
*_, kwargs = field.deconstruct()
self.assertEqual(kwargs, {"srid": 4326})
def test_deconstruct_values(self):
field = GeometryField(
srid=4067,
dim=3,
geography=True,
extent=(
50199.4814,
6582464.0358,
-50000.0,
761274.6247,
7799839.8902,
50000.0,
),
tolerance=0.01,
)
*_, kwargs = field.deconstruct()
self.assertEqual(
kwargs,
{
"srid": 4067,
"dim": 3,
"geography": True,
"extent": (
50199.4814,
6582464.0358,
-50000.0,
761274.6247,
7799839.8902,
50000.0,
),
"tolerance": 0.01,
},
)
| GeometryFieldTests |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_dag_run.py | {
"start": 2285,
"end": 10118
} | class ____(CronDataIntervalTimetable):
"""Custom timetable that generates custom run IDs."""
def generate_run_id(
self,
*,
run_type: DagRunType,
run_after,
data_interval: DataInterval | None,
**kwargs,
) -> str:
if data_interval:
return f"custom_{data_interval.start.strftime('%Y%m%d%H%M%S')}"
return f"custom_manual_{run_after.strftime('%Y%m%d%H%M%S')}"
@pytest.fixture
def custom_timetable_plugin(monkeypatch):
"""Fixture to register CustomTimetable for serialization."""
from airflow import plugins_manager
from airflow.utils.module_loading import qualname
timetable_class_name = qualname(CustomTimetable)
existing_timetables = getattr(plugins_manager, "timetable_classes", None) or {}
monkeypatch.setattr(plugins_manager, "initialize_timetables_plugins", lambda: None)
monkeypatch.setattr(
plugins_manager,
"timetable_classes",
{**existing_timetables, timetable_class_name: CustomTimetable},
)
DAG1_ID = "test_dag1"
DAG1_DISPLAY_NAME = "test_dag1"
DAG2_ID = "test_dag2"
DAG1_RUN1_ID = "dag_run_1"
DAG1_RUN2_ID = "dag_run_2"
DAG2_RUN1_ID = "dag_run_3"
DAG2_RUN2_ID = "dag_run_4"
DAG1_RUN1_STATE = DagRunState.SUCCESS
DAG1_RUN2_STATE = DagRunState.FAILED
DAG2_RUN1_STATE = DagRunState.SUCCESS
DAG2_RUN2_STATE = DagRunState.SUCCESS
DAG1_RUN1_RUN_TYPE = DagRunType.MANUAL
DAG1_RUN2_RUN_TYPE = DagRunType.SCHEDULED
DAG2_RUN1_RUN_TYPE = DagRunType.BACKFILL_JOB
DAG2_RUN2_RUN_TYPE = DagRunType.ASSET_TRIGGERED
DAG1_RUN1_TRIGGERED_BY = DagRunTriggeredByType.UI
DAG1_RUN2_TRIGGERED_BY = DagRunTriggeredByType.ASSET
DAG2_RUN1_TRIGGERED_BY = DagRunTriggeredByType.CLI
DAG2_RUN2_TRIGGERED_BY = DagRunTriggeredByType.REST_API
START_DATE1 = datetime(2024, 1, 15, 0, 0, tzinfo=timezone.utc)
LOGICAL_DATE1 = datetime(2024, 2, 16, 0, 0, tzinfo=timezone.utc)
LOGICAL_DATE2 = datetime(2024, 2, 20, 0, 0, tzinfo=timezone.utc)
RUN_AFTER1 = datetime(2024, 2, 16, 0, 0, tzinfo=timezone.utc)
RUN_AFTER2 = datetime(2024, 2, 20, 0, 0, tzinfo=timezone.utc)
START_DATE2 = datetime(2024, 4, 15, 0, 0, tzinfo=timezone.utc)
LOGICAL_DATE3 = datetime(2024, 5, 16, 0, 0, tzinfo=timezone.utc)
LOGICAL_DATE4 = datetime(2024, 5, 25, 0, 0, tzinfo=timezone.utc)
DAG1_RUN1_NOTE = "test_note"
DAG2_PARAM = {"validated_number": Param(1, minimum=1, maximum=10)}
DAG_RUNS_LIST = [DAG1_RUN1_ID, DAG1_RUN2_ID, DAG2_RUN1_ID, DAG2_RUN2_ID]
@pytest.fixture(autouse=True)
@provide_session
def setup(request, dag_maker, session=None):
clear_db_connections()
clear_db_runs()
clear_db_dags()
clear_db_dag_bundles()
clear_db_serialized_dags()
clear_db_logs()
if "no_setup" in request.keywords:
return
with dag_maker(DAG1_ID, schedule=None, start_date=START_DATE1, serialized=True):
task1 = EmptyOperator(task_id="task_1")
task2 = EmptyOperator(task_id="task_2")
dag_run1 = dag_maker.create_dagrun(
run_id=DAG1_RUN1_ID,
state=DAG1_RUN1_STATE,
run_type=DAG1_RUN1_RUN_TYPE,
triggered_by=DAG1_RUN1_TRIGGERED_BY,
logical_date=LOGICAL_DATE1,
)
# Set triggering_user_name for testing
dag_run1.triggering_user_name = "alice_admin"
dag_run1.note = (DAG1_RUN1_NOTE, "not_test")
# Set end_date for testing duration filter
dag_run1.end_date = dag_run1.start_date + timedelta(seconds=101)
# Set conf for testing conf_contains filter (values ordered for predictable sorting)
dag_run1.conf = {"env": "development", "version": "1.0"}
for i, task in enumerate([task1, task2], start=1):
ti = dag_run1.get_task_instance(task_id=task.task_id)
ti.task = task
ti.state = State.SUCCESS
session.merge(ti)
ti.xcom_push("return_value", f"result_{i}")
dag_run2 = dag_maker.create_dagrun(
run_id=DAG1_RUN2_ID,
state=DAG1_RUN2_STATE,
run_type=DAG1_RUN2_RUN_TYPE,
triggered_by=DAG1_RUN2_TRIGGERED_BY,
logical_date=LOGICAL_DATE2,
)
# Set triggering_user_name for testing
dag_run2.triggering_user_name = "bob_service"
# Set end_date for testing duration filter
dag_run2.end_date = dag_run2.start_date + timedelta(seconds=201)
# Set conf for testing conf_contains filter
dag_run2.conf = {"env": "production", "debug": True}
ti1 = dag_run2.get_task_instance(task_id=task1.task_id)
ti1.task = task1
ti1.state = State.SUCCESS
ti2 = dag_run2.get_task_instance(task_id=task2.task_id)
ti2.task = task2
ti2.state = State.FAILED
with dag_maker(DAG2_ID, schedule=None, start_date=START_DATE2, params=DAG2_PARAM, serialized=True):
EmptyOperator(task_id="task_2")
dag_run3 = dag_maker.create_dagrun(
run_id=DAG2_RUN1_ID,
state=DAG2_RUN1_STATE,
run_type=DAG2_RUN1_RUN_TYPE,
triggered_by=DAG2_RUN1_TRIGGERED_BY,
logical_date=LOGICAL_DATE3,
)
# Set triggering_user_name for testing
dag_run3.triggering_user_name = "service_account"
# Set end_date for testing duration filter
dag_run3.end_date = dag_run3.start_date + timedelta(seconds=51)
# Set conf for testing conf_contains filter
dag_run3.conf = {"env": "staging", "test_mode": True}
dag_run4 = dag_maker.create_dagrun(
run_id=DAG2_RUN2_ID,
state=DAG2_RUN2_STATE,
run_type=DAG2_RUN2_RUN_TYPE,
triggered_by=DAG2_RUN2_TRIGGERED_BY,
logical_date=LOGICAL_DATE4,
)
# Leave triggering_user_name as None for testing
dag_run4.triggering_user_name = None
# Set end_date for testing duration filter
dag_run4.end_date = dag_run4.start_date + timedelta(seconds=150)
# Set conf for testing conf_contains filter
dag_run4.conf = {"env": "testing", "mode": "ci"}
dag_maker.sync_dagbag_to_db()
dag_maker.dag_model.has_task_concurrency_limits = True
session.merge(ti1)
session.merge(ti2)
session.merge(dag_maker.dag_model)
session.commit()
def get_dag_versions_dict(dag_versions: list[DagVersion]) -> list[dict]:
return [
# must set mode="json" or the created_at and id will be python datetime and UUID instead of string
DagVersionResponse.model_validate(dag_version, from_attributes=True).model_dump(mode="json")
for dag_version in dag_versions
]
def get_dag_run_dict(run: DagRun):
return {
"bundle_version": None,
"dag_display_name": run.dag_model.dag_display_name,
"dag_run_id": run.run_id,
"dag_id": run.dag_id,
"logical_date": from_datetime_to_zulu_without_ms(run.logical_date) if run.logical_date else None,
"queued_at": from_datetime_to_zulu(run.queued_at) if run.queued_at else None,
"run_after": from_datetime_to_zulu_without_ms(run.run_after),
"start_date": from_datetime_to_zulu_without_ms(run.start_date) if run.start_date else None,
"end_date": from_datetime_to_zulu_without_ms(run.end_date) if run.end_date else None,
"duration": run.duration,
"data_interval_start": from_datetime_to_zulu_without_ms(run.data_interval_start)
if run.data_interval_start
else None,
"data_interval_end": from_datetime_to_zulu_without_ms(run.data_interval_end)
if run.data_interval_end
else None,
"last_scheduling_decision": (
from_datetime_to_zulu(run.last_scheduling_decision) if run.last_scheduling_decision else None
),
"run_type": run.run_type,
"state": run.state,
"triggered_by": run.triggered_by.value if run.triggered_by else None,
"triggering_user_name": run.triggering_user_name,
"conf": run.conf,
"note": run.note,
"dag_versions": get_dag_versions_dict(run.dag_versions),
"partition_key": None,
}
| CustomTimetable |
python | huggingface__transformers | src/transformers/models/siglip/modeling_siglip.py | {
"start": 9430,
"end": 11752
} | class ____(nn.Module):
def __init__(self, config: SiglipTextConfig):
super().__init__()
embed_dim = config.hidden_size
self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
) -> torch.Tensor:
seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
max_position_embedding = self.position_embedding.weight.shape[0]
if seq_length > max_position_embedding:
raise ValueError(
f"Sequence length must be less than max_position_embeddings (got `sequence length`: "
f"{seq_length} and max_position_embeddings: {max_position_embedding}"
)
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if inputs_embeds is None:
inputs_embeds = self.token_embedding(input_ids)
position_embeddings = self.position_embedding(position_ids)
embeddings = inputs_embeds + position_embeddings
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| SiglipTextEmbeddings |
python | huggingface__transformers | src/transformers/models/granitemoe/modeling_granitemoe.py | {
"start": 19862,
"end": 20754
} | class ____(PreTrainedModel):
config: GraniteMoeConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["GraniteMoeDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": GraniteMoeDecoderLayer,
"attentions": GraniteMoeAttention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, GraniteMoeParallelExperts):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
@auto_docstring
| GraniteMoePreTrainedModel |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/base_test.py | {
"start": 7901,
"end": 8618
} | class ____(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing single segment."""
n = inp
c = constant_op.constant(1.0, name="c")
n = math_ops.add(n, c, name="add")
n = math_ops.mul(n, n, name="mul")
n = math_ops.add(n, n, name="add1")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {"TRTEngineOp_000": ["c", "add", "add1", "mul"]}
| ConstDataInputSingleEngineTest |
python | numpy__numpy | numpy/lib/tests/test__iotools.py | {
"start": 9815,
"end": 13830
} | class ____:
def test_has_nested_dtype(self):
"Test has_nested_dtype"
ndtype = np.dtype(float)
assert_equal(has_nested_fields(ndtype), False)
ndtype = np.dtype([('A', '|S3'), ('B', float)])
assert_equal(has_nested_fields(ndtype), False)
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
assert_equal(has_nested_fields(ndtype), True)
def test_easy_dtype(self):
"Test ndtype on dtypes"
# Simple case
ndtype = float
assert_equal(easy_dtype(ndtype), np.dtype(float))
# As string w/o names
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype),
np.dtype([('f0', "i4"), ('f1', "f8")]))
# As string w/o names but different default format
assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
np.dtype([('field_000', "i4"), ('field_001', "f8")]))
# As string w/ names
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names="a, b"),
np.dtype([('a', "i4"), ('b', "f8")]))
# As string w/ names (too many)
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([('a', "i4"), ('b', "f8")]))
# As string w/ names (not enough)
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names=", b"),
np.dtype([('f0', "i4"), ('b', "f8")]))
# ... (with different default format)
assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
np.dtype([('a', "i4"), ('f00', "f8")]))
# As list of tuples w/o names
ndtype = [('A', int), ('B', float)]
assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
# As list of tuples w/ names
assert_equal(easy_dtype(ndtype, names="a,b"),
np.dtype([('a', int), ('b', float)]))
# As list of tuples w/ not enough names
assert_equal(easy_dtype(ndtype, names="a"),
np.dtype([('a', int), ('f0', float)]))
# As list of tuples w/ too many names
assert_equal(easy_dtype(ndtype, names="a,b,c"),
np.dtype([('a', int), ('b', float)]))
# As list of types w/o names
ndtype = (int, float, float)
assert_equal(easy_dtype(ndtype),
np.dtype([('f0', int), ('f1', float), ('f2', float)]))
# As list of types w names
ndtype = (int, float, float)
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([('a', int), ('b', float), ('c', float)]))
# As simple dtype w/ names
ndtype = np.dtype(float)
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
# As simple dtype w/o names (but multiple fields)
ndtype = np.dtype(float)
assert_equal(
easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))
def test_flatten_dtype(self):
"Testing flatten_dtype"
# Standard dtype
dt = np.dtype([("a", "f8"), ("b", "f8")])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, float])
# Recursive dtype
dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
# dtype with shaped fields
dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, int])
dt_flat = flatten_dtype(dt, True)
assert_equal(dt_flat, [float] * 2 + [int] * 3)
# dtype w/ titles
dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, float])
| TestMiscFunctions |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_C.py | {
"start": 13073,
"end": 14303
} | class ____(Benchmark):
r"""
Cross-Leg-Table objective function.
This class defines the Cross-Leg-Table [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{CrossLegTable}}(x) = - \frac{1}{\left(\left|{e^{\left|{100
- \frac{\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi}}\right|}
\sin\left(x_{1}\right) \sin\left(x_{2}\right)}\right| + 1\right)^{0.1}}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -1`. The global minimum is found on the
planes :math:`x_1 = 0` and :math:`x_2 = 0`
..[1] Mishra, S. Global Optimization by Differential Evolution and Particle
Swarm Methods: Evaluation on Some Benchmark Functions Munich University,
2006
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[0., 0.]]
self.fglob = -1.0
def fun(self, x, *args):
self.nfev += 1
u = 100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi
v = sin(x[0]) * sin(x[1])
return -(abs(v * exp(abs(u))) + 1) ** (-0.1)
| CrossLegTable |
python | doocs__leetcode | solution/0700-0799/0733.Flood Fill/Solution2.py | {
"start": 0,
"end": 619
} | class ____:
def floodFill(
self, image: List[List[int]], sr: int, sc: int, color: int
) -> List[List[int]]:
if image[sr][sc] == color:
return image
q = deque([(sr, sc)])
oc = image[sr][sc]
image[sr][sc] = color
dirs = (-1, 0, 1, 0, -1)
while q:
i, j = q.popleft()
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < len(image) and 0 <= y < len(image[0]) and image[x][y] == oc:
q.append((x, y))
image[x][y] = color
return image
| Solution |
python | vyperlang__vyper | vyper/builtins/functions.py | {
"start": 15799,
"end": 16811
} | class ____(BuiltinFunctionT):
_id = "len"
_inputs = [("b", (StringT.any(), BytesT.any(), DArrayT.any()))]
_return_type = UINT256_T
def _try_fold(self, node):
validate_call_args(node, 1)
arg = node.args[0].get_folded_value()
if isinstance(arg, (vy_ast.Str, vy_ast.Bytes, vy_ast.HexBytes)):
length = len(arg.value)
elif isinstance(arg, vy_ast.Hex):
length = len(arg.bytes_value)
else:
raise UnfoldableNode
return vy_ast.Int.from_node(node, value=length)
def infer_arg_types(self, node, expected_return_typ=None):
self._validate_arg_types(node)
# return a concrete type
typ = get_possible_types_from_node(node.args[0]).pop()
return [typ]
def build_IR(self, node, context):
arg = Expr(node.args[0], context).ir_node
if arg.value == "~calldata":
return IRnode.from_list(["calldatasize"], typ=UINT256_T)
return get_bytearray_length(arg)
| Len |
python | Netflix__metaflow | test/core/tests/card_default_editable_with_id.py | {
"start": 72,
"end": 4652
} | class ____(MetaflowTest):
"""
`current.card.append` should add to default editable card and not the one with `id`
when a card with `id` and non id are present
- Access of `current.card` with nonexistent id should not fail.
"""
PRIORITY = 3
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag('environment(vars={"METAFLOW_CARD_NO_WARNING": "True"})')
@tag('card(type="test_editable_card",id="abc")')
@tag('card(type="test_editable_card")')
@steps(0, ["start"])
def step_start(self):
from metaflow import current
from metaflow.plugins.cards.card_modules.test_cards import TestStringComponent
import random
self.random_number = random.randint(0, 100)
current.card.append(current.pathspec)
# This should not fail user code.
current.card["xyz"].append(TestStringComponent(str(self.random_number)))
current.card.append(TestStringComponent(str(self.random_number)))
@steps(0, ["end"], required=True)
def step_end(self):
self.here = True
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
run = checker.get_run()
if run is None:
# This means CliCheck is in context.
for step in flow:
if step.name == "end":
# Ensure we reach the `end` even when a wrong `id` is used with `current.card`
checker.assert_artifact(step.name, "here", True)
continue
elif step.name != "start":
continue
cli_check_dict = checker.artifact_dict(step.name, "random_number")
for task_pathspec in cli_check_dict:
task_id = task_pathspec.split("/")[-1]
cards_info = checker.list_cards(step.name, task_id)
number = cli_check_dict[task_pathspec]["random_number"]
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 2,
True,
)
# Find the card without the id
default_editable_cards = [
c for c in cards_info["cards"] if c["id"] is None
]
assert_equals(len(default_editable_cards) == 1, True)
card = default_editable_cards[0]
checker.assert_card(
step.name,
task_id,
"test_editable_card",
"%d" % number,
card_hash=card["hash"],
exact_match=True,
)
else:
# This means MetadataCheck is in context.
for step in flow:
if step.name == "end":
# Ensure we reach the `end` even when a wrong `id` is used with `current.card`
checker.assert_artifact(step.name, "here", True)
continue
elif step.name != "start":
continue
meta_check_dict = checker.artifact_dict(step.name, "random_number")
for task_id in meta_check_dict:
random_number = meta_check_dict[task_id]["random_number"]
cards_info = checker.list_cards(step.name, task_id)
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 2,
True,
)
default_editable_cards = [
c for c in cards_info["cards"] if c["id"] is None
]
assert_equals(len(default_editable_cards) == 1, True)
card = default_editable_cards[0]
checker.assert_card(
step.name,
task_id,
"test_editable_card",
"%d" % random_number,
card_hash=card["hash"],
exact_match=False,
)
| DefaultEditableCardWithIdTest |
python | ray-project__ray | python/ray/serve/tests/unit/test_constants_utils.py | {
"start": 362,
"end": 1154
} | class ____:
def test_str_to_list_basic(self):
assert str_to_list("a,b,c") == ["a", "b", "c"]
def test_str_to_list_with_whitespace(self):
assert str_to_list(" a , b , c ") == ["a", "b", "c"]
def test_str_to_list_empty_string(self):
assert str_to_list("") == []
def test_str_to_list_with_empty_entries(self):
assert str_to_list("a,,b,c,") == ["a", "b", "c"]
def test_str_to_list_only_whitespace(self):
assert str_to_list(" ") == []
def test_str_to_list_single_entry(self):
assert str_to_list("single") == ["single"]
def test_str_to_list_only_commas(self):
assert str_to_list(",,,,") == []
def test_str_to_list_whitespace_entries(self):
assert str_to_list("a, ,b") == ["a", "b"]
| TestStrToList |
python | chroma-core__chroma | chromadb/execution/expression/operator.py | {
"start": 9177,
"end": 9394
} | class ____(Where):
"""Logical AND of multiple where conditions"""
conditions: List[Where]
def to_dict(self) -> Dict[str, Any]:
return {"$and": [c.to_dict() for c in self.conditions]}
@dataclass
| And |
python | pytorch__pytorch | test/distributed/_composable/test_replicate_training.py | {
"start": 41677,
"end": 43566
} | class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(torch.get_device_module(device_type).device_count(), 2)
@skip_if_lt_x_gpu(2)
def test_register_fsdp_forward_method(self):
class VisionTransformer(nn.Module):
def __init__(self) -> None:
super().__init__()
self.patch_proj = nn.Conv2d(3, 1024, kernel_size=14, stride=14)
def forward_features(self, imgs: torch.Tensor) -> torch.Tensor:
return self.patch_proj(imgs).flatten(2).transpose(1, 2)
def forward(self, imgs: torch.Tensor) -> torch.Tensor:
return self.forward_features(imgs).sum(dim=1)
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
self.vit, self.projector = VisionTransformer(), nn.Linear(1024, 256)
def forward(self, imgs: torch.Tensor) -> torch.Tensor:
# Run `vit.forward_features`, which is not `forward`!
patch_embeddings = self.vit.forward_features(imgs)
return self.projector(patch_embeddings)
torch.manual_seed(42)
model = Model()
ref_model = copy.deepcopy(model).to(device_type)
replicate(model.vit)
replicate(model.projector)
replicate(model)
register_fsdp_forward_method(model.vit, "forward_features")
torch.manual_seed(42 + self.rank + 1)
inp = torch.randn(4, 3, 224, 224, device=device_type.type)
ref_loss = ref_model(inp).sum()
loss = model(inp).sum()
self.assertEqual(ref_loss, loss)
ref_loss.backward()
loss.backward()
for param in ref_model.parameters():
dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
check_sharded_parity(self, ref_model, model)
| TestReplicateCustomForwardMethod |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_idaho_zip.py | {
"start": 732,
"end": 1727
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_idaho_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_idaho_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidIdahoZip |
python | kamyu104__LeetCode-Solutions | Python/count-ways-to-choose-coprime-integers-from-rows.py | {
"start": 1684,
"end": 2275
} | class ____(object):
def countCoprime(self, mat):
"""
:type mat: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
def gcd(a, b):
while b:
a, b = b, a%b
return a
dp = collections.defaultdict(int)
dp[0] = 1
for row in mat:
new_dp = collections.defaultdict(int)
for x in row:
for g, c in dp.iteritems():
ng = gcd(g, x)
new_dp[ng] = (new_dp[ng]+c)%MOD
dp = new_dp
return dp[1]
| Solution2 |
python | ray-project__ray | python/ray/_private/test_utils.py | {
"start": 21556,
"end": 36152
} | class ____:
"""A collection of timeseries from multiple addresses. Each timeseries is a
collection of samples with the same metric name and labels. Concretely:
- components_dict: a dictionary of addresses to the Component labels
- metric_descriptors: a dictionary of metric names to the Metric object
- metric_samples: the latest value of each label
"""
components_dict: Dict[str, Set[str]] = field(default_factory=dict)
metric_descriptors: Dict[str, Metric] = field(default_factory=dict)
metric_samples: Dict[frozenset, Sample] = field(default_factory=dict)
def flush(self):
self.components_dict.clear()
self.metric_descriptors.clear()
self.metric_samples.clear()
def get_metric_check_condition(
metrics_to_check: List[MetricSamplePattern],
timeseries: PrometheusTimeseries,
export_addr: Optional[str] = None,
) -> Callable[[], bool]:
"""A condition to check if a prometheus metrics reach a certain value.
This is a blocking check that can be passed into a `wait_for_condition`
style function.
Args:
metrics_to_check: A list of MetricSamplePattern. The fields that
aren't `None` will be matched.
timeseries: A PrometheusTimeseries object to store the metrics.
export_addr: Optional address to export metrics to.
Returns:
A function that returns True if all the metrics are emitted.
"""
node_info = ray.nodes()[0]
metrics_export_port = node_info["MetricsExportPort"]
addr = node_info["NodeManagerAddress"]
prom_addr = export_addr or build_address(addr, metrics_export_port)
def f():
for metric_pattern in metrics_to_check:
metric_samples = fetch_prometheus_timeseries(
[prom_addr], timeseries
).metric_samples.values()
for metric_sample in metric_samples:
if metric_pattern.matches(metric_sample):
break
else:
print(
f"Didn't find {metric_pattern}",
"all samples",
metric_samples,
)
return False
return True
return f
def wait_until_succeeded_without_exception(
func, exceptions, *args, timeout_ms=1000, retry_interval_ms=100, raise_last_ex=False
):
"""A helper function that waits until a given function
completes without exceptions.
Args:
func: A function to run.
exceptions: Exceptions that are supposed to occur.
args: arguments to pass for a given func
timeout_ms: Maximum timeout in milliseconds.
retry_interval_ms: Retry interval in milliseconds.
raise_last_ex: Raise the last exception when timeout.
Return:
Whether exception occurs within a timeout.
"""
if isinstance(type(exceptions), tuple):
raise Exception("exceptions arguments should be given as a tuple")
time_elapsed = 0
start = time.time()
last_ex = None
while time_elapsed <= timeout_ms:
try:
func(*args)
return True
except exceptions as ex:
last_ex = ex
time_elapsed = (time.time() - start) * 1000
time.sleep(retry_interval_ms / 1000.0)
if raise_last_ex:
ex_stack = (
traceback.format_exception(type(last_ex), last_ex, last_ex.__traceback__)
if last_ex
else []
)
ex_stack = "".join(ex_stack)
raise Exception(f"Timed out while testing, {ex_stack}")
return False
def recursive_fnmatch(dirpath, pattern):
"""Looks at a file directory subtree for a filename pattern.
Similar to glob.glob(..., recursive=True) but also supports 2.7
"""
matches = []
for root, dirnames, filenames in os.walk(dirpath):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
def generate_system_config_map(**kwargs):
ray_kwargs = {
"_system_config": kwargs,
}
return ray_kwargs
def same_elements(elems_a, elems_b):
"""Checks if two iterables (such as lists) contain the same elements. Elements
do not have to be hashable (this allows us to compare sets of dicts for
example). This comparison is not necessarily efficient.
"""
a = list(elems_a)
b = list(elems_b)
for x in a:
if x not in b:
return False
for x in b:
if x not in a:
return False
return True
@ray.remote
def _put(obj):
return obj
def put_object(obj, use_ray_put):
if use_ray_put:
return ray.put(obj)
else:
return _put.remote(obj)
def wait_until_server_available(address, timeout_ms=5000, retry_interval_ms=100):
ip, port_str = parse_address(address)
port = int(port_str)
time_elapsed = 0
start = time.time()
while time_elapsed <= timeout_ms:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
try:
s.connect((ip, port))
except Exception:
time_elapsed = (time.time() - start) * 1000
time.sleep(retry_interval_ms / 1000.0)
s.close()
continue
s.close()
return True
return False
def get_other_nodes(cluster, exclude_head=False):
"""Get all nodes except the one that we're connected to."""
return [
node
for node in cluster.list_all_nodes()
if node._raylet_socket_name
!= ray._private.worker._global_node._raylet_socket_name
and (exclude_head is False or node.head is False)
]
def get_non_head_nodes(cluster):
"""Get all non-head nodes."""
return list(filter(lambda x: x.head is False, cluster.list_all_nodes()))
def init_error_pubsub():
"""Initialize error info pub/sub"""
s = ray._raylet.GcsErrorSubscriber(
address=ray._private.worker.global_worker.gcs_client.address
)
s.subscribe()
return s
def get_error_message(subscriber, num=1e6, error_type=None, timeout=20):
"""Gets errors from GCS subscriber.
Returns maximum `num` error strings within `timeout`.
Only returns errors of `error_type` if specified.
"""
deadline = time.time() + timeout
msgs = []
while time.time() < deadline and len(msgs) < num:
_, error_data = subscriber.poll(timeout=deadline - time.time())
if not error_data:
# Timed out before any data is received.
break
if error_type is None or error_type == error_data["type"]:
msgs.append(error_data)
else:
time.sleep(0.01)
return msgs
def init_log_pubsub():
"""Initialize log pub/sub"""
s = ray._raylet.GcsLogSubscriber(
address=ray._private.worker.global_worker.gcs_client.address
)
s.subscribe()
return s
def get_log_data(
subscriber,
num: int = 1e6,
timeout: float = 20,
job_id: Optional[str] = None,
matcher=None,
) -> List[dict]:
deadline = time.time() + timeout
msgs = []
while time.time() < deadline and len(msgs) < num:
logs_data = subscriber.poll(timeout=deadline - time.time())
if not logs_data:
# Timed out before any data is received.
break
if job_id and job_id != logs_data["job"]:
continue
if matcher and all(not matcher(line) for line in logs_data["lines"]):
continue
msgs.append(logs_data)
return msgs
def get_log_message(
subscriber,
num: int = 1e6,
timeout: float = 20,
job_id: Optional[str] = None,
matcher=None,
) -> List[List[str]]:
"""Gets log lines through GCS subscriber.
Returns maximum `num` of log messages, within `timeout`.
If `job_id` or `match` is specified, only returns log lines from `job_id`
or when `matcher` is true.
"""
msgs = get_log_data(subscriber, num, timeout, job_id, matcher)
return [msg["lines"] for msg in msgs]
def get_log_sources(
subscriber,
num: int = 1e6,
timeout: float = 20,
job_id: Optional[str] = None,
matcher=None,
):
"""Get the source of all log messages"""
msgs = get_log_data(subscriber, num, timeout, job_id, matcher)
return {msg["pid"] for msg in msgs}
def get_log_batch(
subscriber,
num: int,
timeout: float = 20,
job_id: Optional[str] = None,
matcher=None,
) -> List[str]:
"""Gets log batches through GCS subscriber.
Returns maximum `num` batches of logs. Each batch is a dict that includes
metadata such as `pid`, `job_id`, and `lines` of log messages.
If `job_id` or `match` is specified, only returns log batches from `job_id`
or when `matcher` is true.
"""
deadline = time.time() + timeout
batches = []
while time.time() < deadline and len(batches) < num:
logs_data = subscriber.poll(timeout=deadline - time.time())
if not logs_data:
# Timed out before any data is received.
break
if job_id and job_id != logs_data["job"]:
continue
if matcher and not matcher(logs_data):
continue
batches.append(logs_data)
return batches
def format_web_url(url):
"""Format web url."""
url = url.replace("localhost", "http://127.0.0.1")
if not url.startswith("http://"):
return "http://" + url
return url
def client_test_enabled() -> bool:
return ray._private.client_mode_hook.is_client_mode_enabled
def object_memory_usage() -> bool:
"""Returns the number of bytes used in the object store."""
total = ray.cluster_resources().get("object_store_memory", 0)
avail = ray.available_resources().get("object_store_memory", 0)
return total - avail
def fetch_raw_prometheus(prom_addresses):
# Local import so minimal dependency tests can run without requests
import requests
for address in prom_addresses:
try:
response = requests.get(f"http://{address}/metrics")
yield address, response.text
except requests.exceptions.ConnectionError:
continue
def fetch_prometheus(prom_addresses):
components_dict = {}
metric_descriptors = {}
metric_samples = []
for address in prom_addresses:
if address not in components_dict:
components_dict[address] = set()
for address, response in fetch_raw_prometheus(prom_addresses):
for metric in text_string_to_metric_families(response):
for sample in metric.samples:
metric_descriptors[sample.name] = metric
metric_samples.append(sample)
if "Component" in sample.labels:
components_dict[address].add(sample.labels["Component"])
return components_dict, metric_descriptors, metric_samples
def fetch_prometheus_timeseries(
prom_addreses: List[str],
result: PrometheusTimeseries,
) -> PrometheusTimeseries:
components_dict, metric_descriptors, metric_samples = fetch_prometheus(
prom_addreses
)
for address, components in components_dict.items():
if address not in result.components_dict:
result.components_dict[address] = set()
result.components_dict[address].update(components)
result.metric_descriptors.update(metric_descriptors)
for sample in metric_samples:
# udpate sample to the latest value
result.metric_samples[
frozenset(list(sample.labels.items()) + [("_metric_name_", sample.name)])
] = sample
return result
def fetch_prometheus_metrics(prom_addresses: List[str]) -> Dict[str, List[Any]]:
"""Return prometheus metrics from the given addresses.
Args:
prom_addresses: List of metrics_agent addresses to collect metrics from.
Returns:
Dict mapping from metric name to list of samples for the metric.
"""
_, _, samples = fetch_prometheus(prom_addresses)
samples_by_name = defaultdict(list)
for sample in samples:
samples_by_name[sample.name].append(sample)
return samples_by_name
def fetch_prometheus_metric_timeseries(
prom_addresses: List[str], result: PrometheusTimeseries
) -> Dict[str, List[Any]]:
samples = fetch_prometheus_timeseries(
prom_addresses, result
).metric_samples.values()
samples_by_name = defaultdict(list)
for sample in samples:
samples_by_name[sample.name].append(sample)
return samples_by_name
def raw_metric_timeseries(
info: RayContext, result: PrometheusTimeseries
) -> Dict[str, List[Any]]:
"""Return prometheus timeseries from a RayContext"""
metrics_page = "localhost:{}".format(info.address_info["metrics_export_port"])
print("Fetch metrics from", metrics_page)
return fetch_prometheus_metric_timeseries([metrics_page], result)
def get_system_metric_for_component(
system_metric: str, component: str, prometheus_server_address: str
) -> List[float]:
"""Get the system metric for a given component from a Prometheus server address.
Please note:
- This function requires the availability of the Prometheus server. Therefore, it
requires the server address.
- It assumes the system metric has a `Component` label and `pid` label. `pid` is the
process id, so it can be used to uniquely identify the process.
"""
session_name = os.path.basename(
ray._private.worker._global_node.get_session_dir_path()
)
query = f"sum({system_metric}{{Component='{component}',SessionName='{session_name}'}}) by (pid)"
resp = requests.get(
f"{prometheus_server_address}/api/v1/query?query={quote(query)}"
)
if resp.status_code != 200:
raise Exception(f"Failed to query Prometheus: {resp.status_code}")
result = resp.json()
return [float(item["value"][1]) for item in result["data"]["result"]]
def get_test_config_path(config_file_name):
"""Resolve the test config path from the config file dir"""
here = os.path.realpath(__file__)
path = pathlib.Path(here)
grandparent = path.parent.parent
return os.path.join(grandparent, "tests/test_cli_patterns", config_file_name)
def load_test_config(config_file_name):
"""Loads a config yaml from tests/test_cli_patterns."""
config_path = get_test_config_path(config_file_name)
config = yaml.safe_load(open(config_path).read())
return config
def set_setup_func():
import ray._private.runtime_env as runtime_env
runtime_env.VAR = "hello world"
| PrometheusTimeseries |
python | kamyu104__LeetCode-Solutions | Python/partition-string-into-minimum-beautiful-substrings.py | {
"start": 698,
"end": 1292
} | class ____(object):
def minimumBeautifulSubstrings(self, s):
"""
:type s: str
:rtype: int
"""
max_pow_5 = 1
while max_pow_5*5 <= (1<<len(s))-1:
max_pow_5 *= 5
dp = [float("inf")]*(len(s)+1)
dp[0] = 0
for i in xrange(len(s)):
curr = 0
for j in reversed(xrange(i+1)):
curr += int(s[j])<<(i-j)
if s[j] == '1' and max_pow_5%curr == 0:
dp[i+1] = min(dp[i+1], dp[(j-1)+1]+1)
return dp[-1] if dp[-1] != float("inf") else -1
| Solution2 |
python | pallets__click | src/click/_winconsole.py | {
"start": 4695,
"end": 5699
} | class ____(_WindowsConsoleRawIOBase):
def writable(self) -> t.Literal[True]:
return True
@staticmethod
def _get_error_message(errno: int) -> str:
if errno == ERROR_SUCCESS:
return "ERROR_SUCCESS"
elif errno == ERROR_NOT_ENOUGH_MEMORY:
return "ERROR_NOT_ENOUGH_MEMORY"
return f"Windows error {errno}"
def write(self, b: Buffer) -> int:
bytes_to_be_written = len(b)
buf = get_buffer(b)
code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2
code_units_written = c_ulong()
WriteConsoleW(
HANDLE(self.handle),
buf,
code_units_to_be_written,
byref(code_units_written),
None,
)
bytes_written = 2 * code_units_written.value
if bytes_written == 0 and bytes_to_be_written > 0:
raise OSError(self._get_error_message(GetLastError()))
return bytes_written
| _WindowsConsoleWriter |
python | huggingface__transformers | src/transformers/models/led/modeling_led.py | {
"start": 83612,
"end": 90959
} | class ____(LEDPreTrainedModel):
_tied_weights_keys = {
"encoder.embed_tokens.weight": "shared.weight",
"decoder.embed_tokens.weight": "shared.weight",
}
def __init__(self, config: LEDConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = LEDEncoder(config)
self.decoder = LEDDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
global_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple[torch.Tensor], LEDSeq2SeqModelOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_led._prepare_decoder_inputs`] and modify
to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the
default strategy.
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given on each token, local attention or global attention for the encoder.
Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Using this like Bart, as LED is derived from it. So far
# No checkpoint on the hub exists that uses that in practice.
# https://github.com/huggingface/transformers/blob/ac3cb660cad283163f7c73cad511124e845ca388/src/transformers/models/bart/modeling_bart.py#L1153
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
input_ids, self.config.pad_token_id, self.config.decoder_start_token_id
)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a LEDEncoderBaseModelOutput when return_dict=False
elif return_dict and not isinstance(encoder_outputs, LEDEncoderBaseModelOutput):
encoder_outputs = LEDEncoderBaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
global_attentions=encoder_outputs[3] if len(encoder_outputs) > 3 else None,
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return LEDSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
encoder_global_attentions=encoder_outputs.global_attentions,
)
@auto_docstring(
custom_intro="""
The LED Model with a language modeling head. Can be used for summarization.
"""
)
| LEDModel |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 64859,
"end": 65129
} | class ____(Elemwise):
_parameters = ["frame", "_expr", "expr_kwargs"]
_defaults = {"expr_kwargs": {}}
_keyword_only = ["expr_kwargs"]
operation = M.eval
@functools.cached_property
def _kwargs(self) -> dict:
return {**self.expr_kwargs}
| Eval |
python | marshmallow-code__marshmallow | src/marshmallow/fields.py | {
"start": 25409,
"end": 27815
} | class ____(Field[list[_InternalT | None]]):
"""A list field, composed with another `Field` class or
instance.
Example: ::
numbers = fields.List(fields.Float())
:param cls_or_instance: A field class or instance.
:param kwargs: The same keyword arguments that :class:`Field` receives.
.. versionchanged:: 3.0.0rc9
Does not serialize scalar values to single-item lists.
"""
#: Default error messages.
default_error_messages = {"invalid": "Not a valid list."}
def __init__(
self,
cls_or_instance: Field[_InternalT] | type[Field[_InternalT]],
**kwargs: Unpack[_BaseFieldKwargs],
):
super().__init__(**kwargs)
try:
self.inner: Field[_InternalT] = _resolve_field_instance(cls_or_instance)
except _FieldInstanceResolutionError as error:
raise ValueError(
"The list elements must be a subclass or instance of "
"marshmallow.fields.Field."
) from error
if isinstance(self.inner, Nested):
self.only = self.inner.only
self.exclude = self.inner.exclude
def _bind_to_schema(self, field_name: str, parent: Schema | Field) -> None:
super()._bind_to_schema(field_name, parent)
self.inner = copy.deepcopy(self.inner)
self.inner._bind_to_schema(field_name, self)
if isinstance(self.inner, Nested):
self.inner.only = self.only
self.inner.exclude = self.exclude
def _serialize(self, value, attr, obj, **kwargs) -> list[_InternalT] | None:
if value is None:
return None
return [self.inner._serialize(each, attr, obj, **kwargs) for each in value]
def _deserialize(self, value, attr, data, **kwargs) -> list[_InternalT | None]:
if not utils.is_collection(value):
raise self.make_error("invalid")
result = []
errors = {}
for idx, each in enumerate(value):
try:
result.append(self.inner.deserialize(each, **kwargs))
except ValidationError as error:
if error.valid_data is not None:
result.append(typing.cast("_InternalT", error.valid_data))
errors.update({idx: error.messages})
if errors:
raise ValidationError(errors, valid_data=result)
return result
| List |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 8688,
"end": 8774
} | class ____(PolymorphicModel):
some_data = models.CharField(max_length=128)
| ProxyBase |
python | celery__celery | celery/utils/serialization.py | {
"start": 3129,
"end": 8209
} | class ____(Exception):
"""Wraps unpickleable exceptions.
Arguments:
exc_module (str): See :attr:`exc_module`.
exc_cls_name (str): See :attr:`exc_cls_name`.
exc_args (Tuple[Any, ...]): See :attr:`exc_args`.
Example:
>>> def pickle_it(raising_function):
... try:
... raising_function()
... except Exception as e:
... exc = UnpickleableExceptionWrapper(
... e.__class__.__module__,
... e.__class__.__name__,
... e.args,
... )
... pickle.dumps(exc) # Works fine.
"""
#: The module of the original exception.
exc_module = None
#: The name of the original exception class.
exc_cls_name = None
#: The arguments for the original exception.
exc_args = None
def __init__(self, exc_module, exc_cls_name, exc_args, text=None):
safe_exc_args = ensure_serializable(
exc_args, lambda v: pickle.loads(pickle.dumps(v))
)
self.exc_module = exc_module
self.exc_cls_name = exc_cls_name
self.exc_args = safe_exc_args
self.text = text
super().__init__(exc_module, exc_cls_name, safe_exc_args,
text)
def restore(self):
return create_exception_cls(self.exc_cls_name,
self.exc_module)(*self.exc_args)
def __str__(self):
return self.text
@classmethod
def from_exception(cls, exc):
res = cls(
exc.__class__.__module__,
exc.__class__.__name__,
getattr(exc, 'args', []),
safe_repr(exc)
)
if hasattr(exc, "__traceback__"):
res = res.with_traceback(exc.__traceback__)
return res
def get_pickleable_exception(exc):
"""Make sure exception is pickleable."""
try:
pickle.loads(pickle.dumps(exc))
except Exception: # pylint: disable=broad-except
pass
else:
return exc
nearest = find_pickleable_exception(exc)
if nearest:
return nearest
return UnpickleableExceptionWrapper.from_exception(exc)
def get_pickleable_etype(cls, loads=pickle.loads, dumps=pickle.dumps):
"""Get pickleable exception type."""
try:
loads(dumps(cls))
except Exception: # pylint: disable=broad-except
return Exception
else:
return cls
def get_pickled_exception(exc):
"""Reverse of :meth:`get_pickleable_exception`."""
if isinstance(exc, UnpickleableExceptionWrapper):
return exc.restore()
return exc
def b64encode(s):
return bytes_to_str(base64encode(str_to_bytes(s)))
def b64decode(s):
return base64decode(str_to_bytes(s))
def strtobool(term, table=None):
"""Convert common terms for true/false to bool.
Examples (true/false/yes/no/on/off/1/0).
"""
if table is None:
table = STRTOBOOL_DEFAULT_TABLE
if isinstance(term, str):
try:
return table[term.lower()]
except KeyError:
raise TypeError(f'Cannot coerce {term!r} to type bool')
return term
def _datetime_to_json(dt):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(dt, datetime.datetime):
r = dt.isoformat()
if dt.microsecond:
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(dt, datetime.time):
r = dt.isoformat()
if dt.microsecond:
r = r[:12]
return r
else:
return dt.isoformat()
def jsonify(obj,
builtin_types=(numbers.Real, str), key=None,
keyfilter=None,
unknown_type_filter=None):
"""Transform object making it suitable for json serialization."""
from kombu.abstract import Object as KombuDictType
_jsonify = partial(jsonify, builtin_types=builtin_types, key=key,
keyfilter=keyfilter,
unknown_type_filter=unknown_type_filter)
if isinstance(obj, KombuDictType):
obj = obj.as_dict(recurse=True)
if obj is None or isinstance(obj, builtin_types):
return obj
elif isinstance(obj, (tuple, list)):
return [_jsonify(v) for v in obj]
elif isinstance(obj, dict):
return {
k: _jsonify(v, key=k) for k, v in obj.items()
if (keyfilter(k) if keyfilter else 1)
}
elif isinstance(obj, (datetime.date, datetime.time)):
return _datetime_to_json(obj)
elif isinstance(obj, datetime.timedelta):
return str(obj)
else:
if unknown_type_filter is None:
raise ValueError(
f'Unsupported type: {type(obj)!r} {obj!r} (parent: {key})'
)
return unknown_type_filter(obj)
def raise_with_context(exc):
exc_info = sys.exc_info()
if not exc_info:
raise exc
elif exc_info[1] is exc:
raise
raise exc from exc_info[1]
| UnpickleableExceptionWrapper |
python | scikit-learn__scikit-learn | sklearn/utils/_repr_html/base.py | {
"start": 4773,
"end": 6166
} | class ____:
"""Mixin to handle consistently the HTML representation.
When inheriting from this class, you need to define an attribute `_html_repr`
which is a callable that returns the HTML representation to be shown.
"""
@property
def _repr_html_(self):
"""HTML representation of estimator.
This is redundant with the logic of `_repr_mimebundle_`. The latter
should be favored in the long term, `_repr_html_` is only
implemented for consumers who do not interpret `_repr_mimbundle_`.
"""
if get_config()["display"] != "diagram":
raise AttributeError(
"_repr_html_ is only defined when the "
"'display' configuration option is set to "
"'diagram'"
)
return self._repr_html_inner
def _repr_html_inner(self):
"""This function is returned by the @property `_repr_html_` to make
`hasattr(estimator, "_repr_html_") return `True` or `False` depending
on `get_config()["display"]`.
"""
return self._html_repr()
def _repr_mimebundle_(self, **kwargs):
"""Mime bundle used by jupyter kernels to display estimator"""
output = {"text/plain": repr(self)}
if get_config()["display"] == "diagram":
output["text/html"] = self._html_repr()
return output
| ReprHTMLMixin |
python | django__django | tests/admin_views/tests.py | {
"start": 322639,
"end": 323861
} | class ____(TestCase):
"""
Tests group CRUD functionality.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_save_button(self):
group_count = Group.objects.count()
response = self.client.post(
reverse("admin:auth_group_add"),
{
"name": "newgroup",
},
)
Group.objects.order_by("-id")[0]
self.assertRedirects(response, reverse("admin:auth_group_changelist"))
self.assertEqual(Group.objects.count(), group_count + 1)
def test_group_permission_performance(self):
g = Group.objects.create(name="test_group")
# Ensure no queries are skipped due to cached content type for Group.
ContentType.objects.clear_cache()
with self.assertNumQueries(6):
response = self.client.get(reverse("admin:auth_group_change", args=(g.pk,)))
self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF="admin_views.urls")
| GroupAdminTest |
python | numba__numba | numba/tests/test_mixed_tuple_unroller.py | {
"start": 57418,
"end": 58255
} | class ____(CompilerBase):
""" Simple pipeline that wraps passes with the ResultCapturer pass"""
def define_pipelines(self):
pm = PassManager("Capturing Compiler")
def add_pass(x, y):
return pm.add_pass(capture(x), y)
add_pass(TranslateByteCode, "analyzing bytecode")
add_pass(FixupArgs, "fix up args")
add_pass(IRProcessing, "processing IR")
add_pass(LiteralUnroll, "handles literal_unroll")
# typing
add_pass(NopythonTypeInference, "nopython frontend")
# legalise
add_pass(IRLegalization,
"ensure IR is legal prior to lowering")
# lower
add_pass(NativeLowering, "native lowering")
add_pass(NoPythonBackend, "nopython mode backend")
pm.finalize()
return [pm]
| CapturingCompiler |
python | tensorflow__tensorflow | tensorflow/python/summary/writer/fake_summary_writer.py | {
"start": 989,
"end": 5502
} | class ____(object):
"""Fake summary writer."""
_replaced_summary_writer = None
@classmethod
def install(cls):
if cls._replaced_summary_writer:
raise ValueError('FakeSummaryWriter already installed.')
cls._replaced_summary_writer = writer.FileWriter
writer.FileWriter = FakeSummaryWriter
writer_cache.FileWriter = FakeSummaryWriter
@classmethod
def uninstall(cls):
if not cls._replaced_summary_writer:
raise ValueError('FakeSummaryWriter not installed.')
writer.FileWriter = cls._replaced_summary_writer
writer_cache.FileWriter = cls._replaced_summary_writer
cls._replaced_summary_writer = None
def __init__(self, logdir, graph=None):
self._logdir = logdir
self._graph = graph
self._summaries = {}
self._added_graphs = []
self._added_meta_graphs = []
self._added_session_logs = []
self._added_run_metadata = {}
@property
def summaries(self):
return self._summaries
def assert_summaries(self,
test_case,
expected_logdir=None,
expected_graph=None,
expected_summaries=None,
expected_added_graphs=None,
expected_added_meta_graphs=None,
expected_session_logs=None):
"""Assert expected items have been added to summary writer."""
if expected_logdir is not None:
test_case.assertEqual(expected_logdir, self._logdir)
if expected_graph is not None:
test_case.assertTrue(expected_graph is self._graph)
expected_summaries = expected_summaries or {}
for step in expected_summaries:
test_case.assertTrue(
step in self._summaries,
msg='Missing step %s from %s.' % (step, self._summaries.keys()))
actual_simple_values = {}
for step_summary in self._summaries[step]:
for v in step_summary.value:
# Ignore global_step/sec since it's written by Supervisor in a
# separate thread, so it's non-deterministic how many get written.
if 'global_step/sec' != v.tag:
actual_simple_values[v.tag] = v.simple_value
test_case.assertEqual(expected_summaries[step], actual_simple_values)
if expected_added_graphs is not None:
test_case.assertEqual(expected_added_graphs, self._added_graphs)
if expected_added_meta_graphs is not None:
test_case.assertEqual(len(expected_added_meta_graphs),
len(self._added_meta_graphs))
for expected, actual in zip(expected_added_meta_graphs,
self._added_meta_graphs):
test_util.assert_meta_graph_protos_equal(test_case, expected, actual)
if expected_session_logs is not None:
test_case.assertEqual(expected_session_logs, self._added_session_logs)
def add_summary(self, summ, current_global_step):
"""Add summary."""
if isinstance(summ, bytes):
summary_proto = summary_pb2.Summary()
summary_proto.ParseFromString(summ)
summ = summary_proto
if current_global_step in self._summaries:
step_summaries = self._summaries[current_global_step]
else:
step_summaries = []
self._summaries[current_global_step] = step_summaries
step_summaries.append(summ)
# NOTE: Ignore global_step since its value is non-deterministic.
def add_graph(self, graph, global_step=None, graph_def=None):
"""Add graph."""
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
if graph_def is not None:
raise ValueError('Unexpected graph_def %s.' % graph_def)
self._added_graphs.append(graph)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Add metagraph."""
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
self._added_meta_graphs.append(meta_graph_def)
# NOTE: Ignore global_step since its value is non-deterministic.
def add_session_log(self, session_log, global_step=None):
# pylint: disable=unused-argument
self._added_session_logs.append(session_log)
def add_run_metadata(self, run_metadata, tag, global_step=None):
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
self._added_run_metadata[tag] = run_metadata
def flush(self):
pass
def reopen(self):
pass
def close(self):
pass
| FakeSummaryWriter |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_test.py | {
"start": 199945,
"end": 229433
} | class ____(test.TestCase, parameterized.TestCase):
def test_defaults(self):
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = fc._embedding_column(
categorical_column, dimension=embedding_dimension)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('mean', embedding_column.combiner)
self.assertIsNone(embedding_column.ckpt_to_load_from)
self.assertIsNone(embedding_column.tensor_name_in_ckpt)
self.assertIsNone(embedding_column.max_norm)
self.assertTrue(embedding_column.trainable)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
self.assertEqual(
(embedding_dimension,), embedding_column._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
def test_all_constructor_args(self):
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
ckpt_to_load_from='my_ckpt',
tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42.,
trainable=False)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
self.assertEqual(42., embedding_column.max_norm)
self.assertFalse(embedding_column.trainable)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
self.assertEqual(
(embedding_dimension,), embedding_column._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
def test_deep_copy(self):
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
original = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
ckpt_to_load_from='my_ckpt',
tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42.,
trainable=False)
for embedding_column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', embedding_column.categorical_column.name)
self.assertEqual(3, embedding_column.categorical_column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column.categorical_column._parse_example_spec)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
self.assertEqual(42., embedding_column.max_norm)
self.assertFalse(embedding_column.trainable)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual(
(embedding_dimension,), embedding_column._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
def test_invalid_initializer(self):
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=3)
with self.assertRaisesRegex(ValueError, 'initializer must be callable'):
fc._embedding_column(
categorical_column, dimension=2, initializer='not_fn')
def test_parse_example(self):
a = fc._categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_embedded = fc._embedding_column(a, dimension=2)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a_embedded]))
self.assertIn('aaa', features)
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]), self.evaluate(features['aaa']))
def test_transform_feature(self):
with ops.Graph().as_default():
a = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
a_embedded = fc._embedding_column(a, dimension=2)
features = {
'aaa': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2))
}
outputs = _transform_features(features, [a, a_embedded])
output_a = outputs[a]
output_embedded = outputs[a_embedded]
with _initialized_session():
_assert_sparse_tensor_value(self, self.evaluate(output_a),
self.evaluate(output_embedded))
@parameterized.named_parameters(
{
'testcase_name': 'use_safe_embedding_lookup',
'use_safe_embedding_lookup': True,
'partition_variables': False,
}, {
'testcase_name': 'dont_use_safe_embedding_lookup',
'use_safe_embedding_lookup': False,
'partition_variables': False,
}, {
'testcase_name': 'use_safe_embedding_lookup_partitioned',
'use_safe_embedding_lookup': True,
'partition_variables': True,
}, {
'testcase_name': 'dont_use_safe_embedding_lookup_partitioned',
'use_safe_embedding_lookup': False,
'partition_variables': True,
})
def test_get_dense_tensor(self, use_safe_embedding_lookup,
partition_variables):
with ops.Graph().as_default():
# Inputs.
vocabulary_size = 4
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.), # id 2
(9., 13.) # id 3
)
def _initializer(shape, dtype, partition_info=None):
self.assertEqual(dtypes.float32, dtype)
if partition_variables:
assert partition_info is not None
self.assertEqual([vocabulary_size, embedding_dimension],
partition_info.full_shape)
self.assertAllEqual((2, embedding_dimension), shape)
return array_ops.slice(
embedding_values, partition_info.var_offset, shape
)
else:
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
partitioner = None
if partition_variables:
partitioner = partitioned_variables.fixed_size_partitioner(2, axis=0)
with variable_scope.variable_scope('vars', partitioner=partitioner):
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer,
use_safe_embedding_lookup=use_safe_embedding_lookup)
# Provide sparse input and get dense result.
embedding_lookup = embedding_column._get_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
if partition_variables:
self.assertCountEqual(('vars/embedding_weights/part_0:0',
'vars/embedding_weights/part_1:0'),
tuple([v.name for v in global_vars]))
else:
self.assertCountEqual(('vars/embedding_weights:0',),
tuple([v.name for v in global_vars]))
for v in global_vars:
self.assertIsInstance(v, variables_lib.Variable)
with _initialized_session():
if partition_variables:
self.assertAllEqual(
embedding_values, array_ops.concat(global_vars, axis=0)
)
else:
self.assertAllEqual(embedding_values, global_vars[0])
self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))
if use_safe_embedding_lookup:
self.assertIn(
'SparseFillEmptyRows',
[x.type for x in ops.get_default_graph().get_operations()])
else:
self.assertNotIn(
'SparseFillEmptyRows',
[x.type for x in ops.get_default_graph().get_operations()])
def test_get_dense_tensor_3d(self):
with ops.Graph().as_default():
# Inputs.
vocabulary_size = 4
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0, 0), (1, 1, 0), (1, 1, 4), (3, 0, 0), (3, 1, 2)),
values=(2, 0, 1, 1, 2),
dense_shape=(4, 2, 5))
# Embedding variable.
embedding_dimension = 3
embedding_values = (
(1., 2., 4.), # id 0
(3., 5., 1.), # id 1
(7., 11., 2.), # id 2
(2., 7., 12.) # id 3
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [[2], []], embedding = [[7, 11, 2], [0, 0, 0]]
((7., 11., 2.), (0., 0., 0.)),
# example 1, ids [[], [0, 1]], embedding
# = mean([[], [1, 2, 4] + [3, 5, 1]]) = [[0, 0, 0], [2, 3.5, 2.5]]
((0., 0., 0.), (2., 3.5, 2.5)),
# example 2, ids [[], []], embedding = [[0, 0, 0], [0, 0, 0]]
((0., 0., 0.), (0., 0., 0.)),
# example 3, ids [[1], [2]], embedding = [[3, 5, 1], [7, 11, 2]]
((3., 5., 1.), (7., 11., 2.)),
)
# Build columns.
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
embedding_lookup = embedding_column._get_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0])
self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))
def test_get_dense_tensor_weight_collections(self):
with ops.Graph().as_default():
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Build columns.
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_column = fc._embedding_column(categorical_column, dimension=2)
# Provide sparse input and get dense result.
embedding_column._get_dense_tensor(
_LazyBuilder({'aaa': sparse_input}), weight_collections=('my_vars',))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
my_vars = ops.get_collection('my_vars')
self.assertCountEqual(('embedding_weights:0',),
tuple([v.name for v in my_vars]))
@test_util.run_deprecated_v1
# Placeholders are TF1. Replacing with tf.function not feasible because of V1
# variable creation.
def test_get_dense_tensor_placeholder_inputs(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
input_indices = array_ops.placeholder(dtype=dtypes.int64)
input_values = array_ops.placeholder(dtype=dtypes.int64)
input_shape = array_ops.placeholder(dtype=dtypes.int64)
embedding_lookup = embedding_column._get_dense_tensor(
_LazyBuilder({
'aaa':
sparse_tensor.SparseTensorValue(
indices=input_indices,
values=input_values,
dense_shape=input_shape)
}))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0])
self.assertAllEqual(expected_lookups, embedding_lookup.eval(
feed_dict={
input_indices: sparse_input.indices,
input_values: sparse_input.values,
input_shape: sparse_input.dense_shape,
}))
def test_get_dense_tensor_restore_from_ckpt(self):
with ops.Graph().as_default():
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable. The checkpoint file contains _embedding_values.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
ckpt_path = test.test_src_dir_path(
'python/feature_column/testdata/embedding.ckpt')
ckpt_tensor = 'my_embedding'
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
ckpt_to_load_from=ckpt_path,
tensor_name_in_ckpt=ckpt_tensor)
# Provide sparse input and get dense result.
embedding_lookup = embedding_column._get_dense_tensor(
_LazyBuilder({'aaa': sparse_input}))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0])
self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))
def test_linear_model(self):
# Inputs.
batch_size = 4
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(batch_size, 5))
# Embedding variable.
embedding_dimension = 2
embedding_shape = (vocabulary_size, embedding_dimension)
zeros_embedding_values = np.zeros(embedding_shape)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual(embedding_shape, shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return zeros_embedding_values
# Build columns.
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
with ops.Graph().as_default():
predictions = fc.linear_model({
categorical_column.name: sparse_input
}, (embedding_column,))
expected_var_names = (
'linear_model/bias_weights:0',
'linear_model/aaa_embedding/weights:0',
'linear_model/aaa_embedding/embedding_weights:0',
)
self.assertCountEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
trainable_vars = {
v.name: v for v in ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
}
self.assertCountEqual(expected_var_names, trainable_vars.keys())
bias = trainable_vars['linear_model/bias_weights:0']
embedding_weights = trainable_vars[
'linear_model/aaa_embedding/embedding_weights:0']
linear_weights = trainable_vars[
'linear_model/aaa_embedding/weights:0']
with _initialized_session():
# Predictions with all zero weights.
self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
self.assertAllClose(zeros_embedding_values,
self.evaluate(embedding_weights))
self.assertAllClose(
np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
self.assertAllClose(
np.zeros((batch_size, 1)), self.evaluate(predictions))
# Predictions with all non-zero weights.
embedding_weights.assign((
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)).eval()
linear_weights.assign(((4.,), (6.,))).eval()
# example 0, ids [2], embedding[0] = [7, 11]
# example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
# example 2, ids [], embedding[2] = [0, 0]
# example 3, ids [1], embedding[3] = [3, 5]
# sum(embeddings * linear_weights)
# = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
self.evaluate(predictions))
def test_keras_linear_model(self):
# Inputs.
batch_size = 4
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(batch_size, 5))
# Embedding variable.
embedding_dimension = 2
embedding_shape = (vocabulary_size, embedding_dimension)
zeros_embedding_values = np.zeros(embedding_shape)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual(embedding_shape, shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return zeros_embedding_values
# Build columns.
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
categorical_column.name: sparse_input
}, (embedding_column,))
expected_var_names = (
'linear_model/bias_weights:0',
'linear_model/aaa_embedding/weights:0',
'linear_model/aaa_embedding/embedding_weights:0',
)
self.assertCountEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
trainable_vars = {
v.name: v
for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
}
self.assertCountEqual(expected_var_names, trainable_vars.keys())
bias = trainable_vars['linear_model/bias_weights:0']
embedding_weights = trainable_vars[
'linear_model/aaa_embedding/embedding_weights:0']
linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']
with _initialized_session():
# Predictions with all zero weights.
self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
self.assertAllClose(zeros_embedding_values,
self.evaluate(embedding_weights))
self.assertAllClose(
np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
self.assertAllClose(
np.zeros((batch_size, 1)), self.evaluate(predictions))
# Predictions with all non-zero weights.
embedding_weights.assign((
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)).eval()
linear_weights.assign(((4.,), (6.,))).eval()
# example 0, ids [2], embedding[0] = [7, 11]
# example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
# example 2, ids [], embedding[2] = [0, 0]
# example 3, ids [1], embedding[3] = [3, 5]
# sum(embeddings * linear_weights)
# = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
self.evaluate(predictions))
def test_input_layer(self):
with ops.Graph().as_default():
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
input_layer = fc.input_layer({'aaa': sparse_input}, (embedding_column,))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(('input_layer/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertCountEqual(('input_layer/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in trainable_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, trainable_vars[0])
self.assertAllEqual(expected_lookups, self.evaluate(input_layer))
def test_input_layer_not_trainable(self):
with ops.Graph().as_default():
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc._categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer,
trainable=False)
# Provide sparse input and get dense result.
input_layer = fc.input_layer({'aaa': sparse_input}, (embedding_column,))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(('input_layer/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
self.assertCountEqual([],
ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0])
self.assertAllEqual(expected_lookups, self.evaluate(input_layer))
| EmbeddingColumnTest |
python | django__django | django/db/migrations/operations/special.py | {
"start": 5266,
"end": 8000
} | class ____(Operation):
"""
Run Python code in a context suitable for doing versioned ORM operations.
"""
category = OperationCategory.PYTHON
reduces_to_sql = False
def __init__(
self, code, reverse_code=None, atomic=None, hints=None, elidable=False
):
self.atomic = atomic
# Forwards code
if not callable(code):
raise ValueError("RunPython must be supplied with a callable")
self.code = code
# Reverse code
if reverse_code is None:
self.reverse_code = None
else:
if not callable(reverse_code):
raise ValueError("RunPython must be supplied with callable arguments")
self.reverse_code = reverse_code
self.hints = hints or {}
self.elidable = elidable
def deconstruct(self):
kwargs = {
"code": self.code,
}
if self.reverse_code is not None:
kwargs["reverse_code"] = self.reverse_code
if self.atomic is not None:
kwargs["atomic"] = self.atomic
if self.hints:
kwargs["hints"] = self.hints
return (self.__class__.__qualname__, [], kwargs)
@property
def reversible(self):
return self.reverse_code is not None
def state_forwards(self, app_label, state):
# RunPython objects have no state effect. To add some, combine this
# with SeparateDatabaseAndState.
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
# RunPython has access to all models. Ensure that all models are
# reloaded in case any are delayed.
from_state.clear_delayed_apps_cache()
if router.allow_migrate(
schema_editor.connection.alias, app_label, **self.hints
):
# We now execute the Python code in a context that contains a
# 'models' object, representing the versioned models as an app
# registry. We could try to override the global cache, but then
# people will still use direct imports, so we go with a
# documentation approach instead.
self.code(from_state.apps, schema_editor)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if self.reverse_code is None:
raise NotImplementedError("You cannot reverse this operation")
if router.allow_migrate(
schema_editor.connection.alias, app_label, **self.hints
):
self.reverse_code(from_state.apps, schema_editor)
def describe(self):
return "Raw Python operation"
@staticmethod
def noop(apps, schema_editor):
return None
| RunPython |
python | tensorflow__tensorflow | tensorflow/python/ops/init_ops_v2.py | {
"start": 38593,
"end": 40249
} | class ____:
"""Random generator that selects appropriate random ops."""
def __init__(self, seed=None):
super(_RandomGenerator, self).__init__()
if seed is not None:
# Stateless random ops requires 2-int seed.
self.seed = [seed, 0]
else:
self.seed = None
def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):
"""A deterministic random normal if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_random_normal
else:
op = random_ops.random_normal
return op(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
def random_uniform(self, shape, minval, maxval, dtype):
"""A deterministic random uniform if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_random_uniform
else:
op = random_ops.random_uniform
return op(
shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)
def truncated_normal(self, shape, mean, stddev, dtype):
"""A deterministic truncated normal if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_truncated_normal
else:
op = random_ops.truncated_normal
return op(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
# Compatibility aliases
# pylint: disable=invalid-name
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
glorot_normal = GlorotNormal
glorot_uniform = GlorotUniform
| _RandomGenerator |
python | dagster-io__dagster | examples/with_pyspark_emr/with_pyspark_emr/definitions.py | {
"start": 382,
"end": 2115
} | class ____(ConfigurableIOManager):
pyspark: PySparkResource
path_prefix: str
def _get_path(self, context) -> str:
return "/".join([context.resource_config["path_prefix"], *context.asset_key.path])
def handle_output(self, context, obj):
obj.write.parquet(self._get_path(context))
def load_input(self, context):
spark = self.pyspark.spark_session
return spark.read.parquet(self._get_path(context.upstream_output))
@asset
def people(pyspark: PySparkResource, pyspark_step_launcher: ResourceParam[Any]) -> DataFrame:
schema = StructType([StructField("name", StringType()), StructField("age", IntegerType())])
rows = [Row(name="Thom", age=51), Row(name="Jonny", age=48), Row(name="Nigel", age=49)]
return pyspark.spark_session.createDataFrame(rows, schema)
emr_pyspark = PySparkResource(spark_config={"spark.executor.memory": "2g"})
@asset
def people_over_50(pyspark_step_launcher: ResourceParam[Any], people: DataFrame) -> DataFrame:
return people.filter(people["age"] > 50)
defs = Definitions(
assets=[people, people_over_50],
resources={
"pyspark_step_launcher": emr_pyspark_step_launcher.configured(
{
"cluster_id": {"env": "EMR_CLUSTER_ID"},
"local_pipeline_package_path": str(Path(__file__).parent),
"deploy_local_pipeline_package": True,
"region_name": "us-west-1",
"staging_bucket": "my_staging_bucket",
"wait_for_logs": True,
}
),
"pyspark": emr_pyspark,
"s3": S3Resource(),
"io_manager": ParquetIOManager(pyspark=emr_pyspark, path_prefix="s3://my-s3-bucket"),
},
)
| ParquetIOManager |
python | google__jax | jax/_src/sourcemap.py | {
"start": 1282,
"end": 4705
} | class ____:
version: int
# file: str
# source_root: str
sources: Sequence[str]
sources_content: Sequence[str]
names: Sequence[str]
mappings: Mappings
@classmethod
def from_json(cls, json_data: str) -> SourceMap:
"""Deserialize a source map from JSON."""
data = json.loads(json_data)
return cls(
version=data["version"],
sources=data["sources"],
sources_content=data["sourcesContent"],
names=data["names"],
mappings=deserialize_mappings(data["mappings"]),
)
def to_json(self) -> str:
"""Serialize a source map to JSON."""
data = {
"version": self.version,
"sources": self.sources,
"sourcesContent": self.sources_content,
"names": self.names,
"mappings": serialize_mappings(self.mappings),
}
return json.dumps(data)
VLQ_SIGN_MASK = 0x01
VLQ_MORE_MASK = 0x20
VLQ_VALUE_MASK = 0x1F
VLQ_VALUE_BITWIDTH = 5
VLQ_ALPHABET = (
list(range(ord("A"), ord("Z") + 1))
+ list(range(ord("a"), ord("z") + 1))
+ list(range(ord("0"), ord("9") + 1))
+ [ord("+"), ord("/")]
)
def make_vlq_decode_table():
lookup = {c: d for d, c in enumerate(VLQ_ALPHABET)}
return [lookup.get(i, None) for i in range(256)]
VLQ_DECODE_TABLE = make_vlq_decode_table()
def decode_vlq(enc: Iterable[int]) -> int:
"""Decode a Base-64-VLQ into an integer."""
enc_iter = iter(enc)
d = VLQ_DECODE_TABLE[next(enc_iter)]
sign = bool(d & VLQ_SIGN_MASK)
value = (d & VLQ_VALUE_MASK) >> 1
# Compensate for first quantum containing sign as LSB:
shift = -1
while d & VLQ_MORE_MASK:
shift += VLQ_VALUE_BITWIDTH
d = VLQ_DECODE_TABLE[next(enc_iter)]
value |= (d & VLQ_VALUE_MASK) << shift
return -value if sign else value
def encode_vlq(value: int) -> bytes:
"""Encode an integer into a Base-64-VLQ."""
# Move sign to LSB
value = ((-value) << 1 | 1) if value < 0 else value << 1
buf = []
while True:
d = value & VLQ_VALUE_MASK
value >>= VLQ_VALUE_BITWIDTH
more = value > 0
if more:
d |= VLQ_MORE_MASK
buf.append(VLQ_ALPHABET[d])
if not more:
break
return bytes(buf)
def decode_segment(enc: Iterable[int]) -> Segment:
"""Decode a sequence of VLQs into a segment."""
enc_iter = iter(enc)
col = decode_vlq(enc_iter)
try:
source = decode_vlq(enc_iter)
except StopIteration:
# Stopping here is fine (1-segment).
return (col,)
source_line = decode_vlq(enc_iter)
source_col = decode_vlq(enc_iter)
try:
name = decode_vlq(enc_iter)
except StopIteration:
# Stopping here is fine too (4-segment).
return col, source, source_line, source_col
# (5-segment)
return col, source, source_line, source_col, name
def encode_segment(seg: Segment) -> bytes:
"""Encode a segment into a sequence of VLQs."""
return b"".join(encode_vlq(value) for value in seg)
def deserialize_mappings(mappings_str: str) -> Mappings:
"""Decode a string of TC39 mapping data."""
mappings_bytes = bytes(mappings_str, encoding="ascii")
return [
list(map(decode_segment, mapping.split(b","))) if mapping else []
for mapping in mappings_bytes.split(b";")
]
def serialize_mappings(mappings: Mappings) -> str:
"""Encode mappings into a string of TC39 mapping data."""
enc = b";".join(
b",".join(encode_segment(seg) for seg in segs) for segs in mappings
)
return enc.decode("ascii")
| SourceMap |
python | python__mypy | mypyc/irbuild/statement.py | {
"start": 30062,
"end": 46667
} | class ____(mypy.traverser.TraverserVisitor):
def __init__(self) -> None:
super().__init__()
self.has_await = False
def visit_await_expr(self, o: mypy.nodes.AwaitExpr) -> None:
self.has_await = True
super().visit_await_expr(o)
def transform_try_stmt(builder: IRBuilder, t: TryStmt) -> None:
# Our compilation strategy for try/except/else/finally is to
# treat try/except/else and try/finally as separate language
# constructs that we compile separately. When we have a
# try/except/else/finally, we treat the try/except/else as the
# body of a try/finally block.
if t.is_star:
builder.error("Exception groups and except* cannot be compiled yet", t.line)
# Check if we're in an async function with a finally block that contains await
use_async_version = False
if t.finally_body and builder.fn_info.is_coroutine:
detector = AwaitDetector()
t.finally_body.accept(detector)
if detector.has_await:
# Use the async version that handles exceptions correctly
use_async_version = True
if t.finally_body:
def transform_try_body() -> None:
if t.handlers:
transform_try_except_stmt(builder, t)
else:
builder.accept(t.body)
body = t.finally_body
if use_async_version:
transform_try_finally_stmt_async(
builder, transform_try_body, lambda: builder.accept(body), t.line
)
else:
transform_try_finally_stmt(
builder, transform_try_body, lambda: builder.accept(body), t.line
)
else:
transform_try_except_stmt(builder, t)
def get_sys_exc_info(builder: IRBuilder) -> list[Value]:
exc_info = builder.call_c(get_exc_info_op, [], -1)
return [builder.add(TupleGet(exc_info, i, -1)) for i in range(3)]
def transform_with(
builder: IRBuilder,
expr: Expression,
target: Lvalue | None,
body: GenFunc,
is_async: bool,
line: int,
) -> None:
# This is basically a straight transcription of the Python code in PEP 343.
# I don't actually understand why a bunch of it is the way it is.
# We could probably optimize the case where the manager is compiled by us,
# but that is not our common case at all, so.
al = "a" if is_async else ""
mgr_v = builder.accept(expr)
is_native = isinstance(mgr_v.type, RInstance)
if is_native:
value = builder.add(MethodCall(mgr_v, f"__{al}enter__", args=[], line=line))
exit_ = None
else:
typ = builder.primitive_op(type_op, [mgr_v], line)
exit_ = builder.maybe_spill(builder.py_get_attr(typ, f"__{al}exit__", line))
value = builder.py_call(builder.py_get_attr(typ, f"__{al}enter__", line), [mgr_v], line)
mgr = builder.maybe_spill(mgr_v)
exc = builder.maybe_spill_assignable(builder.true())
if is_async:
value = emit_await(builder, value, line)
def maybe_natively_call_exit(exc_info: bool) -> Value:
if exc_info:
args = get_sys_exc_info(builder)
else:
none = builder.none_object()
args = [none, none, none]
if is_native:
assert isinstance(mgr_v.type, RInstance), mgr_v.type
exit_val = builder.gen_method_call(
builder.read(mgr),
f"__{al}exit__",
arg_values=args,
line=line,
result_type=none_rprimitive,
)
else:
assert exit_ is not None
exit_val = builder.py_call(builder.read(exit_), [builder.read(mgr)] + args, line)
if is_async:
return emit_await(builder, exit_val, line)
else:
return exit_val
def try_body() -> None:
if target:
builder.assign(builder.get_assignment_target(target), value, line)
body()
def except_body() -> None:
builder.assign(exc, builder.false(), line)
out_block, reraise_block = BasicBlock(), BasicBlock()
builder.add_bool_branch(maybe_natively_call_exit(exc_info=True), out_block, reraise_block)
builder.activate_block(reraise_block)
builder.call_c(reraise_exception_op, [], NO_TRACEBACK_LINE_NO)
builder.add(Unreachable())
builder.activate_block(out_block)
def finally_body() -> None:
out_block, exit_block = BasicBlock(), BasicBlock()
builder.add(Branch(builder.read(exc), exit_block, out_block, Branch.BOOL))
builder.activate_block(exit_block)
maybe_natively_call_exit(exc_info=False)
builder.goto_and_activate(out_block)
transform_try_finally_stmt(
builder,
lambda: transform_try_except(builder, try_body, [(None, None, except_body)], None, line),
finally_body,
line,
)
def transform_with_stmt(builder: IRBuilder, o: WithStmt) -> None:
# Generate separate logic for each expr in it, left to right
def generate(i: int) -> None:
if i >= len(o.expr):
builder.accept(o.body)
else:
transform_with(
builder, o.expr[i], o.target[i], lambda: generate(i + 1), o.is_async, o.line
)
generate(0)
def transform_assert_stmt(builder: IRBuilder, a: AssertStmt) -> None:
if builder.options.strip_asserts:
return
cond = builder.accept(a.expr)
ok_block, error_block = BasicBlock(), BasicBlock()
builder.add_bool_branch(cond, ok_block, error_block)
builder.activate_block(error_block)
if a.msg is None:
# Special case (for simpler generated code)
builder.add(RaiseStandardError(RaiseStandardError.ASSERTION_ERROR, None, a.line))
elif isinstance(a.msg, StrExpr):
# Another special case
builder.add(RaiseStandardError(RaiseStandardError.ASSERTION_ERROR, a.msg.value, a.line))
else:
# The general case -- explicitly construct an exception instance
message = builder.accept(a.msg)
exc_type = builder.load_module_attr_by_fullname("builtins.AssertionError", a.line)
exc = builder.py_call(exc_type, [message], a.line)
builder.call_c(raise_exception_op, [exc], a.line)
builder.add(Unreachable())
builder.activate_block(ok_block)
def transform_del_stmt(builder: IRBuilder, o: DelStmt) -> None:
transform_del_item(builder, builder.get_assignment_target(o.expr), o.line)
def transform_del_item(builder: IRBuilder, target: AssignmentTarget, line: int) -> None:
if isinstance(target, AssignmentTargetIndex):
builder.gen_method_call(
target.base, "__delitem__", [target.index], result_type=None, line=line
)
elif isinstance(target, AssignmentTargetAttr):
if isinstance(target.obj_type, RInstance):
cl = target.obj_type.class_ir
if not cl.is_deletable(target.attr):
builder.error(f'"{target.attr}" cannot be deleted', line)
builder.note(
'Using "__deletable__ = '
+ '[\'<attr>\']" in the class body enables "del obj.<attr>"',
line,
)
key = builder.load_str(target.attr)
builder.primitive_op(py_delattr_op, [target.obj, key], line)
elif isinstance(target, AssignmentTargetRegister):
# Delete a local by assigning an error value to it, which will
# prompt the insertion of uninit checks.
builder.add(
Assign(target.register, builder.add(LoadErrorValue(target.type, undefines=True)))
)
elif isinstance(target, AssignmentTargetTuple):
for subtarget in target.items:
transform_del_item(builder, subtarget, line)
# yield/yield from/await
# These are really expressions, not statements... but they depend on try/except/finally
def emit_yield(builder: IRBuilder, val: Value, line: int) -> Value:
retval = builder.coerce(val, builder.ret_types[-1], line)
cls = builder.fn_info.generator_class
# Create a new block for the instructions immediately following the yield expression, and
# set the next label so that the next time '__next__' is called on the generator object,
# the function continues at the new block.
next_block = BasicBlock()
next_label = len(cls.continuation_blocks)
cls.continuation_blocks.append(next_block)
builder.assign(cls.next_label_target, Integer(next_label), line)
builder.add(Return(retval, yield_target=next_block))
builder.activate_block(next_block)
add_raise_exception_blocks_to_generator_class(builder, line)
assert cls.send_arg_reg is not None
return cls.send_arg_reg
def emit_yield_from_or_await(
builder: IRBuilder, val: Value, line: int, *, is_await: bool
) -> Value:
# This is basically an implementation of the code in PEP 380.
# TODO: do we want to use the right types here?
result = Register(object_rprimitive)
to_yield_reg = Register(object_rprimitive)
received_reg = Register(object_rprimitive)
helper_method = GENERATOR_HELPER_NAME
if (
isinstance(val, (Call, MethodCall))
and isinstance(val.type, RInstance)
and val.type.class_ir.has_method(helper_method)
):
# This is a generated native generator class, and we can use a fast path.
# This allows two optimizations:
# 1) No need to call CPy_GetCoro() or iter() since for native generators
# it just returns the generator object (implemented here).
# 2) Instead of calling next(), call generator helper method directly,
# since next() just calls __next__ which calls the helper method.
iter_val: Value = val
else:
get_op = coro_op if is_await else iter_op
if isinstance(get_op, PrimitiveDescription):
iter_val = builder.primitive_op(get_op, [val], line)
else:
iter_val = builder.call_c(get_op, [val], line)
iter_reg = builder.maybe_spill_assignable(iter_val)
stop_block, main_block, done_block = BasicBlock(), BasicBlock(), BasicBlock()
if isinstance(iter_reg.type, RInstance) and iter_reg.type.class_ir.has_method(helper_method):
# Second fast path optimization: call helper directly (see also comment above).
#
# Calling a generated generator, so avoid raising StopIteration by passing
# an extra PyObject ** argument to helper where the stop iteration value is stored.
fast_path = True
obj = builder.read(iter_reg)
nn = builder.none_object()
stop_iter_val = Register(object_rprimitive)
err = builder.add(LoadErrorValue(object_rprimitive, undefines=True))
builder.assign(stop_iter_val, err, line)
ptr = builder.add(LoadAddress(object_pointer_rprimitive, stop_iter_val))
m = MethodCall(obj, helper_method, [nn, nn, nn, nn, ptr], line)
# Generators have custom error handling, so disable normal error handling.
m.error_kind = ERR_NEVER
_y_init = builder.add(m)
else:
fast_path = False
_y_init = builder.call_c(next_raw_op, [builder.read(iter_reg)], line)
builder.add(Branch(_y_init, stop_block, main_block, Branch.IS_ERROR))
builder.activate_block(stop_block)
if fast_path:
builder.primitive_op(propagate_if_error_op, [stop_iter_val], line)
builder.assign(result, stop_iter_val, line)
else:
# Try extracting a return value from a StopIteration and return it.
# If it wasn't, this reraises the exception.
builder.assign(result, builder.call_c(check_stop_op, [], line), line)
# Clear the spilled iterator/coroutine so that it will be freed.
# Otherwise, the freeing of the spilled register would likely be delayed.
err = builder.add(LoadErrorValue(iter_reg.type))
builder.assign(iter_reg, err, line)
builder.goto(done_block)
builder.activate_block(main_block)
builder.assign(to_yield_reg, _y_init, line)
# OK Now the main loop!
loop_block = BasicBlock()
builder.goto_and_activate(loop_block)
def try_body() -> None:
builder.assign(received_reg, emit_yield(builder, builder.read(to_yield_reg), line), line)
def except_body() -> None:
# The body of the except is all implemented in a C function to
# reduce how much code we need to generate. It returns a value
# indicating whether to break or yield (or raise an exception).
val = Register(object_rprimitive)
val_address = builder.add(LoadAddress(object_pointer_rprimitive, val))
to_stop = builder.call_c(yield_from_except_op, [builder.read(iter_reg), val_address], line)
ok, stop = BasicBlock(), BasicBlock()
builder.add(Branch(to_stop, stop, ok, Branch.BOOL))
# The exception got swallowed. Continue, yielding the returned value
builder.activate_block(ok)
builder.assign(to_yield_reg, val, line)
builder.nonlocal_control[-1].gen_continue(builder, line)
# The exception was a StopIteration. Stop iterating.
builder.activate_block(stop)
builder.assign(result, val, line)
builder.nonlocal_control[-1].gen_break(builder, line)
def else_body() -> None:
# Do a next() or a .send(). It will return NULL on exception
# but it won't automatically propagate.
_y = builder.call_c(send_op, [builder.read(iter_reg), builder.read(received_reg)], line)
ok, stop = BasicBlock(), BasicBlock()
builder.add(Branch(_y, stop, ok, Branch.IS_ERROR))
# Everything's fine. Yield it.
builder.activate_block(ok)
builder.assign(to_yield_reg, _y, line)
builder.nonlocal_control[-1].gen_continue(builder, line)
# Try extracting a return value from a StopIteration and return it.
# If it wasn't, this rereaises the exception.
builder.activate_block(stop)
builder.assign(result, builder.call_c(check_stop_op, [], line), line)
builder.nonlocal_control[-1].gen_break(builder, line)
builder.push_loop_stack(loop_block, done_block)
transform_try_except(builder, try_body, [(None, None, except_body)], else_body, line)
builder.pop_loop_stack()
builder.goto_and_activate(done_block)
return builder.read(result)
def emit_await(builder: IRBuilder, val: Value, line: int) -> Value:
return emit_yield_from_or_await(builder, val, line, is_await=True)
def transform_yield_expr(builder: IRBuilder, expr: YieldExpr) -> Value:
if builder.fn_info.is_coroutine:
builder.error("async generators are unimplemented", expr.line)
if expr.expr:
retval = builder.accept(expr.expr)
else:
retval = builder.builder.none()
return emit_yield(builder, retval, expr.line)
def transform_yield_from_expr(builder: IRBuilder, o: YieldFromExpr) -> Value:
return emit_yield_from_or_await(builder, builder.accept(o.expr), o.line, is_await=False)
def transform_await_expr(builder: IRBuilder, o: AwaitExpr) -> Value:
return emit_yield_from_or_await(builder, builder.accept(o.expr), o.line, is_await=True)
def transform_match_stmt(builder: IRBuilder, m: MatchStmt) -> None:
m.accept(MatchVisitor(builder, m))
def transform_type_alias_stmt(builder: IRBuilder, s: TypeAliasStmt) -> None:
line = s.line
# Use "_typing" to avoid importing "typing", as the latter can be expensive.
# "_typing" includes everything we need here.
mod = builder.call_c(import_op, [builder.load_str("_typing")], line)
type_params = create_type_params(builder, mod, s.type_args, s.line)
type_alias_type = builder.py_get_attr(mod, "TypeAliasType", line)
args = [builder.load_str(s.name.name), builder.none()]
arg_names: list[str | None] = [None, None]
arg_kinds = [ARG_POS, ARG_POS]
if s.type_args:
args.append(builder.new_tuple(type_params, line))
arg_names.append("type_params")
arg_kinds.append(ARG_NAMED)
alias = builder.py_call(type_alias_type, args, line, arg_names=arg_names, arg_kinds=arg_kinds)
# Use primitive to set function used to lazily compute type alias type value.
# The value needs to be lazily computed to match Python runtime behavior, but
# Python public APIs don't support this, so we use a C primitive.
compute_fn = s.value.accept(builder.visitor)
builder.builder.primitive_op(set_type_alias_compute_function_op, [alias, compute_fn], line)
target = builder.get_assignment_target(s.name)
builder.assign(target, alias, line)
| AwaitDetector |
python | automl__auto-sklearn | test/test_metalearning/test_metalearning.py | {
"start": 622,
"end": 5836
} | class ____(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.X_train, self.Y_train, self.X_test, self.Y_test = get_dataset("iris")
eliminate_class_two = self.Y_train != 2
self.X_train = self.X_train[eliminate_class_two]
self.Y_train = self.Y_train[eliminate_class_two]
@unittest.skip("TODO refactor!")
def test_metalearning(self):
dataset_name_classification = "digits"
initial_challengers_classification = {
"ACC_METRIC": '--initial-challengers " '
"-balancing:strategy 'weighting' "
"-classifier:__choice__ 'proj_logit'",
"AUC_METRIC": '--initial-challengers " '
"-balancing:strategy 'weighting' "
"-classifier:__choice__ 'liblinear_svc'",
"BAC_METRIC": '--initial-challengers " '
"-balancing:strategy 'weighting' "
"-classifier:__choice__ 'proj_logit'",
"F1_METRIC": '--initial-challengers " '
"-balancing:strategy 'weighting' "
"-classifier:__choice__ 'proj_logit'",
"PAC_METRIC": '--initial-challengers " '
"-balancing:strategy 'none' "
"-classifier:__choice__ 'random_forest'",
}
dataset_name_regression = "diabetes"
initial_challengers_regression = {
"A_METRIC": '--initial-challengers " '
"-imputation:strategy 'mean' "
"-one_hot_encoding:minimum_fraction '0.01' "
"-one_hot_encoding:use_minimum_fraction 'True' "
"-preprocessor:__choice__ 'no_preprocessing' "
"-regressor:__choice__ 'random_forest'",
"R2_METRIC": '--initial-challengers " '
"-imputation:strategy 'mean' "
"-one_hot_encoding:minimum_fraction '0.01' "
"-one_hot_encoding:use_minimum_fraction 'True' "
"-preprocessor:__choice__ 'no_preprocessing' "
"-regressor:__choice__ 'random_forest'",
}
for dataset_name, task, initial_challengers in [
(dataset_name_regression, REGRESSION, initial_challengers_regression),
(
dataset_name_classification,
MULTICLASS_CLASSIFICATION,
initial_challengers_classification,
),
]:
for metric in initial_challengers:
configuration_space = get_configuration_space(
{"metric": metric, "task": task, "is_sparse": False},
include={"feature_preprocessor": ["no_preprocessing"]},
)
X_train, Y_train, X_test, Y_test = get_dataset(dataset_name)
categorical = {i: False for i in range(X_train.shape[1])}
meta_features_label = _calculate_metafeatures(
X_train, Y_train, categorical, dataset_name, task
)
meta_features_encoded_label = _calculate_metafeatures_encoded(
X_train, Y_train, categorical, dataset_name, task
)
initial_configuration_strings_for_smac = suggest_via_metalearning(
meta_features_label,
meta_features_encoded_label,
configuration_space,
dataset_name,
metric,
task,
False,
1,
None,
)
print(metric)
print(initial_configuration_strings_for_smac[0])
self.assertTrue(
initial_configuration_strings_for_smac[0].startswith(
initial_challengers[metric]
)
)
def test_metadata_directory(self):
# Test that metadata directory is set correctly (if user specifies,
# Auto-sklearn should check that the directory exists. If not, it
# should use the default directory.
dask_client = unittest.mock.Mock()
automl1 = AutoSklearnClassifier(
time_left_for_this_task=30,
per_run_time_limit=5,
metadata_directory="pyMetaLearn/metadata_dir", # user metadata_dir
dask_client=dask_client,
)
self.assertEqual(automl1.metadata_directory, "pyMetaLearn/metadata_dir")
automl2 = AutoSklearnClassifier( # default metadata_dir
time_left_for_this_task=30,
per_run_time_limit=5,
dask_client=dask_client,
)
self.assertIsNone(automl2.metadata_directory)
nonexistent_dir = "nonexistent_dir"
automl3 = AutoSklearnClassifier(
time_left_for_this_task=30,
per_run_time_limit=5,
metadata_directory=nonexistent_dir, # user specified metadata_dir
dask_client=dask_client,
ensemble_class=None,
)
X, y = load_breast_cancer(return_X_y=True)
self.assertRaisesRegex(
ValueError,
"The specified metadata directory "
"'%s' does not exist!" % nonexistent_dir,
automl3.fit,
X=X,
y=y,
)
| Test |
python | pytorch__pytorch | torch/_inductor/fx_passes/group_batch_fusion.py | {
"start": 4804,
"end": 4954
} | class ____(GroupBatchFusionBase):
"""
Fuse ops in a group way, e.g, fuse mm/addmm of arbitrary input shapes with fbgemm.gmm.
"""
| GroupFusion |
python | django__django | tests/model_fields/test_uuid.py | {
"start": 11727,
"end": 12125
} | class ____(TransactionTestCase):
# Need a TransactionTestCase to avoid deferring FK constraint checking.
available_apps = ["model_fields"]
@skipUnlessDBFeature("supports_foreign_keys")
def test_unsaved_fk(self):
u1 = PrimaryKeyUUIDModel()
with self.assertRaises(IntegrityError):
RelatedToUUIDModel.objects.create(uuid_fk=u1)
| TestAsPrimaryKeyTransactionTests |
python | tensorflow__tensorflow | tensorflow/tools/ci_build/linux/mkl/set-build-env.py | {
"start": 6633,
"end": 7267
} | class ____(IntelPlatform):
def __init__(self):
IntelPlatform.__init__(self, 8, 4)
def get_bazel_gcc_flags(self):
ICELAKE_ARCH_OLD = "skylake-avx512"
ICELAKE_ARCH_NEW = "icelake-client"
AVX512_FLAGS = ["avx512f", "avx512cd"]
if IntelPlatform.use_old_arch_names(self, 8, 4):
ret_val = self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
ICELAKE_ARCH_OLD + " "
for flag in AVX512_FLAGS:
ret_val += self.BAZEL_PREFIX_ + self.FLAG_PREFIX_ + flag + " "
return ret_val
else:
return self.BAZEL_PREFIX_ + self.ARCH_PREFIX_ + \
ICELAKE_ARCH_NEW + " "
| IcelakeClientPlatform |
python | pypa__pip | src/pip/_vendor/idna/codec.py | {
"start": 2939,
"end": 3422
} | class ____(Codec, codecs.StreamReader):
pass
def search_function(name: str) -> Optional[codecs.CodecInfo]:
if name != "idna2008":
return None
return codecs.CodecInfo(
name=name,
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
codecs.register(search_function)
| StreamReader |
python | spack__spack | lib/spack/spack/util/spack_yaml.py | {
"start": 17515,
"end": 18355
} | class ____(spack.error.SpackError):
"""Raised when there are issues with YAML parsing."""
def __init__(self, msg, yaml_error, filename=None):
self.filename = filename
super().__init__(msg, str(yaml_error))
def get_mark_from_yaml_data(obj):
"""Try to get ``spack.util.spack_yaml`` mark from YAML data.
We try the object, and if that fails we try its first member (if it's a container).
Returns:
mark if one is found, otherwise None.
"""
# mark of object itelf
mark = getattr(obj, "_start_mark", None)
if mark:
return mark
# mark of first member if it is a container
if isinstance(obj, (list, dict)):
first_member = next(iter(obj), None)
if first_member:
mark = getattr(first_member, "_start_mark", None)
return mark
| SpackYAMLError |
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 1195,
"end": 2788
} | class ____:
def test_types(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {}})
assert isinstance(metadata.core, CoreMetadata)
assert isinstance(metadata.hatch, HatchMetadata)
assert isinstance(metadata.build, BuildMetadata)
def test_missing_core_metadata(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {})
with pytest.raises(ValueError, match="Missing `project` metadata table in configuration"):
_ = metadata.core
def test_core_metadata_not_table(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": "foo"})
with pytest.raises(TypeError, match="The `project` configuration must be a table"):
_ = metadata.core
def test_tool_metadata_not_table(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"tool": "foo"})
with pytest.raises(TypeError, match="The `tool` configuration must be a table"):
_ = metadata.hatch
def test_hatch_metadata_not_table(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"tool": {"hatch": "foo"}})
with pytest.raises(TypeError, match="The `tool.hatch` configuration must be a table"):
_ = metadata.hatch
def test_build_metadata_not_table(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"build-system": "foo"})
with pytest.raises(TypeError, match="The `build-system` configuration must be a table"):
_ = metadata.build
| TestInterface |
python | virgili0__Virgilio | Tools/regex-bin/regexPrinter.py | {
"start": 4450,
"end": 9288
} | class ____(TreeNode):
def __init__(self):
TreeNode.__init__(self, None, None, None)
def print(self):
yield ""
def tokenize(s):
tokens = []
for char in s:
for token in Token:
if char == token.value:
tokens.append((token, char))
break
else:
if char in "0123456789":
tokens.append((Token.DIGIT, char))
else:
tokens.append((Token.CHARACTER, char))
tokens.append((Token.EOF, "eof"))
i = 0
while i < len(tokens):
if tokens[i][0] == Token.LCURLY:
l, r, j = 0, 0, i+1
acc = ""
while j < len(tokens) and tokens[j][0] == Token.DIGIT:
acc += tokens[j][1]
tokens = tokens[:j] + tokens[j+1:]
try:
l = int(acc)
except ValueError:
raise SyntaxError("Curly quantifier with no left integer")
if j >= len(tokens) or tokens[j][0] != Token.COLON:
raise SyntaxError("Could not tokenize the expression")
tokens = tokens[:j] + tokens[j+1:]
acc = ""
while j < len(tokens) and tokens[j][0] == Token.DIGIT:
acc += tokens[j][1]
tokens = tokens[:j] + tokens[j+1:]
try:
r = int(acc)
except ValueError:
raise SyntaxError("Curly quantifier with no right integer")
if j >= len(tokens) or tokens[j][0] != Token.RCURLY:
raise SyntaxError("Could not tokenize the expression")
tokens = tokens[:j] + tokens[j+1:]
tokens[i] = (Token.CURLYQUANT, (l,r))
i += 1
return tokens
def printRegex(r):
return parse_expr(tokenize(r))
def parse_expr(tokens):
tree, tokens = parse_orexpr(tokens)
if tokens[0][0] == Token.EOF:
tree.next_node = EOFNode()
return tree
else:
raise SyntaxError("expected eof!")
def parse_orexpr(tokens):
temp, tokens = parse_word(tokens)
topOrNode = OrNode(Token.OR, Token.OR.value, [temp], NoQuantifierNode(), EOFNode())
ornode = topOrNode
while tokens[0][0] == Token.OR:
temp, tokens = parse_word(tokens[1:])
if temp is not None:
topOrNode.children.append(temp)
return topOrNode, tokens
def parse_word(tokens):
topnode, tokens = parse_quantchar(tokens)
if topnode is None:
raise SyntaxError("Empty word :/")
sub = node = topnode
# while the next token could start a quantified character, grab it
while tokens[0][0] in (Token.CHARACTER, Token.DIGIT, Token.LPARENS, Token.LCHOOSE):
sub, tokens = parse_quantchar(tokens)
node.next_node = sub
node = sub
return topnode, tokens
def parse_quantchar(tokens):
if tokens[0][0] == Token.DIGIT:
node, tokens = parse_digit(tokens)
else:
node, tokens = parse_char(tokens)
if node is None:
return None, tokens
quant, tokens = parse_quant(tokens)
node.quantifier = quant
return node, tokens
def parse_char(tokens):
    """Parse one character atom: a literal, a parenthesized group, or a choose set.

    Returns (node, remaining_tokens).  When the leading token cannot start an
    atom, returns (None, tokens) — mirroring parse_digit's contract — so that
    callers unpacking a 2-tuple get a clean "no atom" signal instead of a
    TypeError.
    """
    if tokens[0][0] == Token.CHARACTER:
        return LiteralNode(tokens[0][0], tokens[0][1], NoQuantifierNode(), EOFNode()), tokens[1:]
    elif tokens[0][0] == Token.LPARENS:
        # "(" expr ")" — recurse into a full or-expression.
        node, temptokens = parse_orexpr(tokens[1:])
        if temptokens[0][0] == Token.RPARENS:
            return node, temptokens[1:]
        else:
            raise SyntaxError("Could not parse parenthesized expression")
    elif tokens[0][0] == Token.LCHOOSE:
        # Choose section: accumulate raw token text until the closing bracket.
        i = 1
        value_range = ""
        while i < len(tokens) and tokens[i][0] != Token.RCHOOSE:
            value_range += tokens[i][1]
            i += 1
        if i >= len(tokens):
            raise SyntaxError("Could not close CHOOSE section")
        tokens = tokens[i+1:]
        return ChooseNode(Token.LCHOOSE, Token.LCHOOSE.value, NoQuantifierNode(), value_range, EOFNode()), tokens
    # Bug fix: the original fell off the end here and returned a bare None,
    # which crashed any caller doing `node, tokens = parse_char(...)`.
    return None, tokens
def parse_quant(tokens):
    """Parse an optional quantifier (+, *, ?, or {l:r}) following an atom.

    Returns (quantifier_node, remaining_tokens); when no quantifier token is
    present, yields a NoQuantifierNode and leaves the stream untouched.
    """
    head = tokens[0]
    if head[0] not in (Token.PLUS, Token.TIMES, Token.QUESTION, Token.CURLYQUANT):
        return NoQuantifierNode(), tokens
    return QuantifierNode(head[0], head[1]), tokens[1:]
def parse_digit(tokens):
    """Parse a single digit literal; return (None, tokens) when absent."""
    head = tokens[0]
    if head[0] != Token.DIGIT:
        return None, tokens
    return LiteralNode(head[0], head[1], NoQuantifierNode(), EOFNode()), tokens[1:]
if __name__ == "__main__":
print("regexPrinter: a printer of regular expressions")
print("\tjust write a regex to get all its matches printed")
print("\tto exit just hit Enter in the prompt")
while True:
s = input(" >> ")
if not s:
break
i = 1
for v in printRegex(s).print():
print("{}: {}".format(i, v))
i += 1 | EOFNode |
python | sympy__sympy | sympy/polys/domains/groundtypes.py | {
"start": 517,
"end": 580
} | class ____:
def __init__(self, obj):
pass
| _GMPYInteger |
python | euske__pdfminer | pdfminer/layout.py | {
"start": 1570,
"end": 1777
} | class ____:
def __repr__(self):
return ('<%s %r>' %
(self.__class__.__name__, self.get_text()))
def get_text(self):
raise NotImplementedError
## LTComponent
##
| LTText |
python | lazyprogrammer__machine_learning_examples | rl2/cartpole/theano_warmup.py | {
"start": 291,
"end": 1025
} | class ____:
def __init__(self, D):
print("Hello Theano!")
w = np.random.randn(D) / np.sqrt(D)
self.w = theano.shared(w)
self.lr = 0.1
X = T.matrix('X')
Y = T.vector('Y')
Y_hat = X.dot(self.w)
delta = Y - Y_hat
cost = delta.dot(delta)
grad = T.grad(cost, self.w)
updates = [(self.w, self.w - self.lr*grad)]
self.train_op = theano.function(
inputs=[X, Y],
updates=updates,
)
self.predict_op = theano.function(
inputs=[X],
outputs=Y_hat,
)
def partial_fit(self, X, Y):
self.train_op(X, Y)
def predict(self, X):
return self.predict_op(X)
if __name__ == '__main__':
q_learning.SGDRegressor = SGDRegressor
q_learning.main()
| SGDRegressor |
python | kamyu104__LeetCode-Solutions | Python/check-if-array-pairs-are-divisible-by-k.py | {
"start": 50,
"end": 410
} | class ____(object):
def canArrange(self, arr, k):
"""
:type arr: List[int]
:type k: int
:rtype: bool
"""
count = collections.Counter(i%k for i in arr)
return (0 not in count or not count[0]%2) and \
all(k-i in count and count[i] == count[k-i] for i in xrange(1, k) if i in count)
| Solution |
python | dagster-io__dagster | examples/starlift-demo/dbt_example/migrating_airflow_dags/dags.py | {
"start": 647,
"end": 2805
} | class ____(BaseOperator):
def __init__(
self,
csv_path: Path,
db_path: Path,
columns: list[str],
columns_for_min: list[str],
*args,
**kwargs,
):
self._csv_path = csv_path
self._db_path = db_path
self._column_names = columns
self._columns_for_min = columns_for_min
super().__init__(*args, **kwargs)
def execute(self, context) -> None:
load_csv_to_duckdb(
csv_path=self._csv_path,
db_path=self._db_path,
columns=self._column_names,
)
for column in self._columns_for_min:
min_value = get_min_value(
db_path=self._db_path,
csv_path=self._csv_path,
column=column,
)
dagster_json_metadata = json.dumps(
{
f"{column}_min": min_value,
}
)
print(f"DAGSTER_START{dagster_json_metadata}DAGSTER_END") # noqa: T201
DBT_DIR = os.getenv("DBT_PROJECT_DIR")
args = f"--project-dir {DBT_DIR} --profiles-dir {DBT_DIR}"
dag = DAG(
"rebuild_iris_models",
default_args=default_args,
# daily schedule interval
schedule_interval="0 0 * * *",
is_paused_upon_creation=False,
)
load_iris = LoadToLakehouseOperator(
task_id="load_iris",
dag=dag,
csv_path=CSV_PATH,
db_path=DB_PATH,
columns=IRIS_COLUMNS,
columns_for_min=["sepal_length_cm"],
)
run_dbt_model = BashOperator(task_id="build_dbt_models", bash_command=f"dbt build {args}", dag=dag)
load_iris >> run_dbt_model # type: ignore
spark_dag = DAG(
dag_id="spark_dag",
default_args=default_args,
schedule_interval=None,
is_paused_upon_creation=False,
)
# Fake run spark job. Actually just echo to cmd line
run_spark_job = BashOperator(
task_id="run_spark_job",
bash_command="echo 'Running spark job'",
dag=spark_dag,
)
MIGRATING = True
if MIGRATING:
proxying_to_dagster(
global_vars=globals(),
proxied_state=load_proxied_state_from_yaml(Path(__file__).parent / "proxied_state"),
)
| LoadToLakehouseOperator |
python | automl__auto-sklearn | autosklearn/pipeline/components/feature_preprocessing/feature_agglomeration.py | {
"start": 545,
"end": 3144
} | class ____(AutoSklearnPreprocessingAlgorithm):
def __init__(self, n_clusters, affinity, linkage, pooling_func, random_state=None):
self.n_clusters = n_clusters
self.affinity = affinity
self.linkage = linkage
self.pooling_func = pooling_func
self.random_state = random_state
self.pooling_func_mapping = dict(mean=np.mean, median=np.median, max=np.max)
def fit(self, X, Y=None):
import sklearn.cluster
self.n_clusters = int(self.n_clusters)
n_clusters = min(self.n_clusters, X.shape[1])
if not callable(self.pooling_func):
self.pooling_func = self.pooling_func_mapping[self.pooling_func]
self.preprocessor = sklearn.cluster.FeatureAgglomeration(
n_clusters=n_clusters,
affinity=self.affinity,
linkage=self.linkage,
pooling_func=self.pooling_func,
)
self.preprocessor.fit(X)
return self
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "Feature Agglomeration",
"name": "Feature Agglomeration",
"handles_regression": True,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": True,
"is_deterministic": True,
"input": (DENSE, UNSIGNED_DATA),
"output": (INPUT,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
n_clusters = UniformIntegerHyperparameter("n_clusters", 2, 400, 25)
affinity = CategoricalHyperparameter(
"affinity", ["euclidean", "manhattan", "cosine"], "euclidean"
)
linkage = CategoricalHyperparameter(
"linkage", ["ward", "complete", "average"], "ward"
)
pooling_func = CategoricalHyperparameter(
"pooling_func", ["mean", "median", "max"]
)
cs.add_hyperparameters([n_clusters, affinity, linkage, pooling_func])
affinity_and_linkage = ForbiddenAndConjunction(
ForbiddenInClause(affinity, ["manhattan", "cosine"]),
ForbiddenEqualsClause(linkage, "ward"),
)
cs.add_forbidden_clause(affinity_and_linkage)
return cs
| FeatureAgglomeration |
python | tensorflow__tensorflow | tensorflow/python/tools/api/generator/create_python_api_test.py | {
"start": 1119,
"end": 1221
} | class ____(object):
pass
_TEST_CONSTANT = 5
_MODULE_NAME = 'tensorflow.python.test_module'
| TestClass |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 41062,
"end": 41253
} | class ____(Callback):
def on_train_epoch_start(self, trainer, pl_module):
if trainer.current_epoch == 1:
raise RuntimeError("Trouble!")
| TroubledCallbackOnTrainEpochStart |
python | jd__tenacity | tests/test_tenacity.py | {
"start": 35909,
"end": 44965
} | class ____(unittest.TestCase):
def test_with_wait(self):
start = current_time_ms()
result = _retryable_test_with_wait(NoneReturnUntilAfterCount(5))
t = current_time_ms() - start
self.assertGreaterEqual(t, 250)
self.assertTrue(result)
def test_with_stop_on_return_value(self):
try:
_retryable_test_with_stop(NoneReturnUntilAfterCount(5))
self.fail("Expected RetryError after 3 attempts")
except RetryError as re:
self.assertFalse(re.last_attempt.failed)
self.assertEqual(3, re.last_attempt.attempt_number)
self.assertTrue(re.last_attempt.result() is None)
print(re)
def test_with_stop_on_exception(self):
try:
_retryable_test_with_stop(NoIOErrorAfterCount(5))
self.fail("Expected IOError")
except OSError as re:
self.assertTrue(isinstance(re, IOError))
print(re)
def test_retry_if_exception_of_type(self):
self.assertTrue(_retryable_test_with_exception_type_io(NoIOErrorAfterCount(5)))
try:
_retryable_test_with_exception_type_io(NoNameErrorAfterCount(5))
self.fail("Expected NameError")
except NameError as n:
self.assertTrue(isinstance(n, NameError))
print(n)
self.assertTrue(
_retryable_test_with_exception_type_custom(NoCustomErrorAfterCount(5))
)
try:
_retryable_test_with_exception_type_custom(NoNameErrorAfterCount(5))
self.fail("Expected NameError")
except NameError as n:
self.assertTrue(isinstance(n, NameError))
print(n)
def test_retry_except_exception_of_type(self):
self.assertTrue(
_retryable_test_if_not_exception_type_io(NoNameErrorAfterCount(5))
)
try:
_retryable_test_if_not_exception_type_io(NoIOErrorAfterCount(5))
self.fail("Expected IOError")
except OSError as err:
self.assertTrue(isinstance(err, IOError))
print(err)
def test_retry_until_exception_of_type_attempt_number(self):
try:
self.assertTrue(
_retryable_test_with_unless_exception_type_name(NameErrorUntilCount(5))
)
except NameError as e:
s = _retryable_test_with_unless_exception_type_name.statistics
self.assertTrue(s["attempt_number"] == 6)
print(e)
else:
self.fail("Expected NameError")
def test_retry_until_exception_of_type_no_type(self):
try:
# no input should catch all subclasses of Exception
self.assertTrue(
_retryable_test_with_unless_exception_type_no_input(
NameErrorUntilCount(5)
)
)
except NameError as e:
s = _retryable_test_with_unless_exception_type_no_input.statistics
self.assertTrue(s["attempt_number"] == 6)
print(e)
else:
self.fail("Expected NameError")
def test_retry_until_exception_of_type_wrong_exception(self):
try:
# two iterations with IOError, one that returns True
_retryable_test_with_unless_exception_type_name_attempt_limit(
IOErrorUntilCount(2)
)
self.fail("Expected RetryError")
except RetryError as e:
self.assertTrue(isinstance(e, RetryError))
print(e)
def test_retry_if_exception_message(self):
try:
self.assertTrue(
_retryable_test_if_exception_message_message(NoCustomErrorAfterCount(3))
)
except CustomError:
print(_retryable_test_if_exception_message_message.statistics)
self.fail("CustomError should've been retried from errormessage")
def test_retry_if_not_exception_message(self):
try:
self.assertTrue(
_retryable_test_if_not_exception_message_message(
NoCustomErrorAfterCount(2)
)
)
except CustomError:
s = _retryable_test_if_not_exception_message_message.statistics
self.assertTrue(s["attempt_number"] == 1)
def test_retry_if_not_exception_message_delay(self):
try:
self.assertTrue(
_retryable_test_not_exception_message_delay(NameErrorUntilCount(3))
)
except NameError:
s = _retryable_test_not_exception_message_delay.statistics
print(s["attempt_number"])
self.assertTrue(s["attempt_number"] == 4)
def test_retry_if_exception_message_match(self):
try:
self.assertTrue(
_retryable_test_if_exception_message_match(NoCustomErrorAfterCount(3))
)
except CustomError:
self.fail("CustomError should've been retried from errormessage")
def test_retry_if_not_exception_message_match(self):
try:
self.assertTrue(
_retryable_test_if_not_exception_message_message(
NoCustomErrorAfterCount(2)
)
)
except CustomError:
s = _retryable_test_if_not_exception_message_message.statistics
self.assertTrue(s["attempt_number"] == 1)
def test_retry_if_exception_cause_type(self):
self.assertTrue(
_retryable_test_with_exception_cause_type(NoNameErrorCauseAfterCount(5))
)
try:
_retryable_test_with_exception_cause_type(NoIOErrorCauseAfterCount(5))
self.fail("Expected exception without NameError as cause")
except NameError:
pass
def test_retry_preserves_argument_defaults(self):
def function_with_defaults(a=1):
return a
def function_with_kwdefaults(*, a=1):
return a
retrying = Retrying(
wait=tenacity.wait_fixed(0.01), stop=tenacity.stop_after_attempt(3)
)
wrapped_defaults_function = retrying.wraps(function_with_defaults)
wrapped_kwdefaults_function = retrying.wraps(function_with_kwdefaults)
self.assertEqual(
function_with_defaults.__defaults__, wrapped_defaults_function.__defaults__
)
self.assertEqual(
function_with_kwdefaults.__kwdefaults__,
wrapped_kwdefaults_function.__kwdefaults__,
)
def test_defaults(self):
self.assertTrue(_retryable_default(NoNameErrorAfterCount(5)))
self.assertTrue(_retryable_default_f(NoNameErrorAfterCount(5)))
self.assertTrue(_retryable_default(NoCustomErrorAfterCount(5)))
self.assertTrue(_retryable_default_f(NoCustomErrorAfterCount(5)))
def test_retry_function_object(self):
"""Test that funсtools.wraps doesn't cause problems with callable objects.
It raises an error upon trying to wrap it in Py2, because __name__
attribute is missing. It's fixed in Py3 but was never backported.
"""
class Hello:
def __call__(self):
return "Hello"
retrying = Retrying(
wait=tenacity.wait_fixed(0.01), stop=tenacity.stop_after_attempt(3)
)
h = retrying.wraps(Hello())
self.assertEqual(h(), "Hello")
def test_retry_function_attributes(self):
"""Test that the wrapped function attributes are exposed as intended.
- statistics contains the value for the latest function run
- retry object can be modified to change its behaviour (useful to patch in tests)
- retry object statistics do not contain valid information
"""
self.assertTrue(_retryable_test_with_stop(NoneReturnUntilAfterCount(2)))
expected_stats = {
"attempt_number": 3,
"delay_since_first_attempt": mock.ANY,
"idle_for": mock.ANY,
"start_time": mock.ANY,
}
self.assertEqual(_retryable_test_with_stop.statistics, expected_stats)
self.assertEqual(_retryable_test_with_stop.retry.statistics, {})
with mock.patch.object(
_retryable_test_with_stop.retry, "stop", tenacity.stop_after_attempt(1)
):
try:
self.assertTrue(_retryable_test_with_stop(NoneReturnUntilAfterCount(2)))
except RetryError as exc:
expected_stats = {
"attempt_number": 1,
"delay_since_first_attempt": mock.ANY,
"idle_for": mock.ANY,
"start_time": mock.ANY,
}
self.assertEqual(_retryable_test_with_stop.statistics, expected_stats)
self.assertEqual(exc.last_attempt.attempt_number, 1)
self.assertEqual(_retryable_test_with_stop.retry.statistics, {})
else:
self.fail("RetryError should have been raised after 1 attempt")
| TestDecoratorWrapper |
python | huggingface__transformers | src/transformers/models/glm46v/modular_glm46v.py | {
"start": 4952,
"end": 5218
} | class ____(Glm4vModel):
_no_split_modules = None
def __init__(self, config):
super().__init__(config)
self.visual = AutoModel.from_config(config.vision_config)
self.language_model = AutoModel.from_config(config.text_config)
| Glm46VModel |
python | optuna__optuna | optuna/_gp/search_space.py | {
"start": 601,
"end": 677
} | class ____(IntEnum):
LINEAR = 0
LOG = 1
CATEGORICAL = 2
| _ScaleType |
python | pypa__pipenv | pipenv/patched/pip/_internal/network/download.py | {
"start": 10668,
"end": 11228
} | class ____:
def __init__(
self,
session: PipSession,
progress_bar: str,
resume_retries: int,
) -> None:
self._downloader = Downloader(session, progress_bar, resume_retries)
def __call__(
self, links: Iterable[Link], location: str
) -> Iterable[Tuple[Link, Tuple[str, str]]]:
"""Download the files given by links into location."""
for link in links:
filepath, content_type = self._downloader(link, location)
yield link, (filepath, content_type)
| BatchDownloader |
python | mwaskom__seaborn | tests/test_categorical.py | {
"start": 117381,
"end": 118296
} | class ____:
@pytest.fixture
def container(self, wide_array):
ax = mpl.figure.Figure().subplots()
artist_dict = ax.boxplot(wide_array)
return BoxPlotContainer(artist_dict)
def test_repr(self, container, wide_array):
n = wide_array.shape[1]
assert str(container) == f"<BoxPlotContainer object with {n} boxes>"
def test_iteration(self, container):
for artist_tuple in container:
for attr in ["box", "median", "whiskers", "caps", "fliers", "mean"]:
assert hasattr(artist_tuple, attr)
def test_label(self, container):
label = "a box plot"
container.set_label(label)
assert container.get_label() == label
def test_children(self, container):
children = container.get_children()
for child in children:
assert isinstance(child, mpl.artist.Artist)
| TestBoxPlotContainer |
python | django__django | django/utils/text.py | {
"start": 11070,
"end": 15148
} | class ____(BytesIO):
def read(self):
ret = self.getvalue()
self.seek(0)
self.truncate()
return ret
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence, *, max_random_bytes=None):
buf = StreamingBuffer()
filename = _get_random_filename(max_random_bytes) if max_random_bytes else None
with GzipFile(
filename=filename, mode="wb", compresslevel=6, fileobj=buf, mtime=0
) as zfile:
# Output headers...
yield buf.read()
for item in sequence:
zfile.write(item)
data = buf.read()
if data:
yield data
yield buf.read()
async def acompress_sequence(sequence, *, max_random_bytes=None):
buf = StreamingBuffer()
filename = _get_random_filename(max_random_bytes) if max_random_bytes else None
with GzipFile(
filename=filename, mode="wb", compresslevel=6, fileobj=buf, mtime=0
) as zfile:
# Output headers...
yield buf.read()
async for item in sequence:
zfile.write(item)
data = buf.read()
if data:
yield data
yield buf.read()
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = _lazy_re_compile(
r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""",
re.VERBOSE,
)
def smart_split(text):
r"""
Generator that splits a string by spaces, leaving quoted phrases together.
Supports both single and double quotes, and supports escaping quotes with
backslashes. In the output, strings will keep their initial and trailing
quote marks and escaped quotes will remain escaped (the results can then
be further processed with unescape_string_literal()).
>>> list(smart_split(r'This is "a person\'s" test.'))
['This', 'is', '"a person\\\'s"', 'test.']
>>> list(smart_split(r"Another 'person\'s' test."))
['Another', "'person\\'s'", 'test.']
>>> list(smart_split(r'A "\"funky\" style" test.'))
['A', '"\\"funky\\" style"', 'test.']
"""
for bit in smart_split_re.finditer(str(text)):
yield bit[0]
@keep_lazy_text
def unescape_string_literal(s):
r"""
Convert quoted string literals to unquoted strings with escaped quotes and
backslashes unquoted::
>>> unescape_string_literal('"abc"')
'abc'
>>> unescape_string_literal("'abc'")
'abc'
>>> unescape_string_literal('"a \"bc\""')
'a "bc"'
>>> unescape_string_literal("'\'ab\' c'")
"'ab' c"
"""
if not s or s[0] not in "\"'" or s[-1] != s[0]:
raise ValueError("Not a string literal: %r" % s)
quote = s[0]
return s[1:-1].replace(r"\%s" % quote, quote).replace(r"\\", "\\")
@keep_lazy_text
def slugify(value, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, or hyphens. Convert to lowercase. Also strip leading and
trailing whitespace, dashes, and underscores.
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = (
unicodedata.normalize("NFKD", value)
.encode("ascii", "ignore")
.decode("ascii")
)
value = re.sub(r"[^\w\s-]", "", value.lower())
return re.sub(r"[-\s]+", "-", value).strip("-_")
def camel_case_to_spaces(value):
"""
Split CamelCase and convert to lowercase. Strip surrounding whitespace.
"""
return re_camel_case.sub(r" \1", value).strip().lower()
def _format_lazy(format_string, *args, **kwargs):
"""
Apply str.format() on 'format_string' where format_string, args,
and/or kwargs might be lazy.
"""
return format_string.format(*args, **kwargs)
format_lazy = lazy(_format_lazy, str)
| StreamingBuffer |
python | pytorch__pytorch | torch/_dynamo/exc.py | {
"start": 9512,
"end": 9672
} | class ____(ObservedLookupError):
# A KeyError exception to be raised from inside Dynamo tracing. This can happen on dict __getitem__
pass
| ObservedKeyError |
python | facebook__pyre-check | client/tests/dataclasses_merge_test.py | {
"start": 484,
"end": 596
} | class ____:
x: Optional[int] = None
y: Optional[str] = None
@dataclass_merge
@dataclass(frozen=True)
| Basic |
python | mlflow__mlflow | mlflow/genai/labeling/stores.py | {
"start": 10361,
"end": 18164
} | class ____(AbstractLabelingStore):
"""
Databricks store that provides labeling functionality through the Databricks API.
This store delegates all labeling operations to the Databricks agents API.
"""
def _get_backend_session(
self, labeling_session: LabelingSession
) -> "_DatabricksLabelingSession":
"""
Get the backend session for a labeling session.
Note: We have to list all sessions and match by ID because the Databricks
agents API doesn't provide a direct get/fetch API for individual labeling sessions.
"""
app = get_databricks_review_app(labeling_session.experiment_id)
backend_sessions = app.get_labeling_sessions()
backend_session = next(
(
session
for session in backend_sessions
if session.labeling_session_id == labeling_session.labeling_session_id
),
None,
)
if backend_session is None:
raise MlflowException(
f"Labeling session {labeling_session.labeling_session_id} not found",
error_code=RESOURCE_DOES_NOT_EXIST,
)
return backend_session
def _databricks_session_to_labeling_session(
self, databricks_session: "_DatabricksLabelingSession"
) -> LabelingSession:
"""Create a LabelingSession from a Databricks backend session object."""
return LabelingSession(
name=databricks_session.name,
assigned_users=databricks_session.assigned_users,
agent=databricks_session.agent,
label_schemas=databricks_session.label_schemas,
labeling_session_id=databricks_session.labeling_session_id,
mlflow_run_id=databricks_session.mlflow_run_id,
review_app_id=databricks_session.review_app_id,
experiment_id=databricks_session.experiment_id,
url=databricks_session.url,
enable_multi_turn_chat=databricks_session.enable_multi_turn_chat,
custom_inputs=databricks_session.custom_inputs,
)
def get_labeling_session(self, run_id: str) -> LabelingSession:
"""Get a labeling session by MLflow run ID."""
labeling_sessions = self.get_labeling_sessions()
labeling_session = next(
(
labeling_session
for labeling_session in labeling_sessions
if labeling_session.mlflow_run_id == run_id
),
None,
)
if labeling_session is None:
raise MlflowException(f"Labeling session with run_id `{run_id}` not found")
return labeling_session
def get_labeling_sessions(self, experiment_id: str | None = None) -> list[LabelingSession]:
"""Get all labeling sessions for an experiment."""
app = get_databricks_review_app(experiment_id)
sessions = app.get_labeling_sessions()
return [self._databricks_session_to_labeling_session(session) for session in sessions]
def create_labeling_session(
self,
name: str,
*,
assigned_users: list[str] | None = None,
agent: str | None = None,
label_schemas: list[str] | None = None,
enable_multi_turn_chat: bool = False,
custom_inputs: dict[str, Any] | None = None,
experiment_id: str | None = None,
) -> LabelingSession:
"""Create a new labeling session."""
app = get_databricks_review_app(experiment_id)
backend_session = app.create_labeling_session(
name=name,
assigned_users=assigned_users or [],
agent=agent,
label_schemas=label_schemas or [],
enable_multi_turn_chat=enable_multi_turn_chat,
custom_inputs=custom_inputs,
)
return self._databricks_session_to_labeling_session(backend_session)
def delete_labeling_session(self, labeling_session: LabelingSession) -> None:
"""Delete a labeling session."""
backend_session = self._get_backend_session(labeling_session)
app = get_databricks_review_app(labeling_session.experiment_id)
app.delete_labeling_session(backend_session)
def get_label_schema(self, name: str) -> LabelSchema:
"""Get a label schema by name."""
app = get_databricks_review_app()
label_schema = next(
(label_schema for label_schema in app.label_schemas if label_schema.name == name),
None,
)
if label_schema is None:
raise MlflowException(f"Label schema with name `{name}` not found")
return LabelSchema._from_databricks_label_schema(label_schema)
def create_label_schema(
self,
name: str,
*,
type: str,
title: str,
input: Any,
instruction: str | None = None,
enable_comment: bool = False,
overwrite: bool = False,
) -> LabelSchema:
"""Create a new label schema."""
app = get_databricks_review_app()
return app.create_label_schema(
name=name,
type=type,
title=title,
input=input._to_databricks_input(),
instruction=instruction,
enable_comment=enable_comment,
overwrite=overwrite,
)
def delete_label_schema(self, name: str) -> None:
"""Delete a label schema."""
app = get_databricks_review_app()
app.delete_label_schema(name)
def add_dataset_to_session(
self,
labeling_session: LabelingSession,
dataset_name: str,
record_ids: list[str] | None = None,
) -> LabelingSession:
"""Add a dataset to a labeling session."""
backend_session = self._get_backend_session(labeling_session)
updated_session = backend_session.add_dataset(dataset_name, record_ids)
return self._databricks_session_to_labeling_session(updated_session)
def add_traces_to_session(
self,
labeling_session: LabelingSession,
traces: list[Trace],
) -> LabelingSession:
"""Add traces to a labeling session."""
backend_session = self._get_backend_session(labeling_session)
updated_session = backend_session.add_traces(traces)
return self._databricks_session_to_labeling_session(updated_session)
def sync_session_expectations(self, labeling_session: LabelingSession, dataset: str) -> None:
"""Sync traces and expectations from a labeling session to a dataset."""
backend_session = self._get_backend_session(labeling_session)
backend_session.sync_expectations(dataset)
def set_session_assigned_users(
self, labeling_session: LabelingSession, assigned_users: list[str]
) -> LabelingSession:
"""Set the assigned users for a labeling session."""
backend_session = self._get_backend_session(labeling_session)
updated_session = backend_session.set_assigned_users(assigned_users)
return self._databricks_session_to_labeling_session(updated_session)
# Create the global labeling store registry instance
_labeling_store_registry = LabelingStoreRegistry()
def _register_labeling_stores() -> None:
"""Register the default labeling store implementations"""
# Register Databricks store
_labeling_store_registry.register("databricks", DatabricksLabelingStore)
# Register entrypoints for custom implementations
_labeling_store_registry.register_entrypoints()
# Register the default stores
_register_labeling_stores()
def _get_labeling_store(tracking_uri: str | None = None) -> AbstractLabelingStore:
"""Get a labeling store from the registry"""
return _labeling_store_registry.get_store(tracking_uri)
| DatabricksLabelingStore |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/vertex_ai/auto_ml.py | {
"start": 1980,
"end": 75331
} | class ____(GoogleBaseHook, OperationHelper):
"""Hook for Google Cloud Vertex AI Auto ML APIs."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self._job: None | (
AutoMLForecastingTrainingJob
| AutoMLImageTrainingJob
| AutoMLTabularTrainingJob
| AutoMLTextTrainingJob
| AutoMLVideoTrainingJob
) = None
def get_pipeline_service_client(
self,
region: str | None = None,
) -> PipelineServiceClient:
"""Return PipelineServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return PipelineServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_job_service_client(
self,
region: str | None = None,
) -> JobServiceClient:
"""Return JobServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return JobServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_auto_ml_tabular_training_job(
self,
display_name: str,
optimization_prediction_type: str,
optimization_objective: str | None = None,
column_specs: dict[str, str] | None = None,
column_transformations: list[dict[str, dict[str, str]]] | None = None,
optimization_objective_recall_value: float | None = None,
optimization_objective_precision_value: float | None = None,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
) -> AutoMLTabularTrainingJob:
"""Return AutoMLTabularTrainingJob object."""
return AutoMLTabularTrainingJob(
display_name=display_name,
optimization_prediction_type=optimization_prediction_type,
optimization_objective=optimization_objective,
column_specs=column_specs,
column_transformations=column_transformations,
optimization_objective_recall_value=optimization_objective_recall_value,
optimization_objective_precision_value=optimization_objective_precision_value,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
def get_auto_ml_forecasting_training_job(
self,
display_name: str,
optimization_objective: str | None = None,
column_specs: dict[str, str] | None = None,
column_transformations: list[dict[str, dict[str, str]]] | None = None,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
) -> AutoMLForecastingTrainingJob:
"""Return AutoMLForecastingTrainingJob object."""
return AutoMLForecastingTrainingJob(
display_name=display_name,
optimization_objective=optimization_objective,
column_specs=column_specs,
column_transformations=column_transformations,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
def get_auto_ml_image_training_job(
self,
display_name: str,
prediction_type: str = "classification",
multi_label: bool = False,
model_type: str = "CLOUD",
base_model: models.Model | None = None,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
) -> AutoMLImageTrainingJob:
"""Return AutoMLImageTrainingJob object."""
return AutoMLImageTrainingJob(
display_name=display_name,
prediction_type=prediction_type,
multi_label=multi_label,
model_type=model_type,
base_model=base_model,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
def get_auto_ml_video_training_job(
self,
display_name: str,
prediction_type: str = "classification",
model_type: str = "CLOUD",
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
) -> AutoMLVideoTrainingJob:
"""Return AutoMLVideoTrainingJob object."""
return AutoMLVideoTrainingJob(
display_name=display_name,
prediction_type=prediction_type,
model_type=model_type,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
@staticmethod
def extract_model_id(obj: dict) -> str:
"""Return unique id of the Model."""
return obj["name"].rpartition("/")[-1]
@staticmethod
def extract_training_id(resource_name: str) -> str:
"""Return unique id of the Training pipeline."""
return resource_name.rpartition("/")[-1]
def cancel_auto_ml_job(self) -> None:
"""Cancel Auto ML Job for training pipeline."""
if self._job:
self._job.cancel()
    @GoogleBaseHook.fallback_to_default_project_id
    def create_auto_ml_tabular_training_job(
        self,
        project_id: str,
        region: str,
        display_name: str,
        dataset: datasets.TabularDataset,
        target_column: str,
        optimization_prediction_type: str,
        optimization_objective: str | None = None,
        column_specs: dict[str, str] | None = None,
        column_transformations: list[dict[str, dict[str, str]]] | None = None,
        optimization_objective_recall_value: float | None = None,
        optimization_objective_precision_value: float | None = None,
        labels: dict[str, str] | None = None,
        training_encryption_spec_key_name: str | None = None,
        model_encryption_spec_key_name: str | None = None,
        training_fraction_split: float | None = None,
        validation_fraction_split: float | None = None,
        test_fraction_split: float | None = None,
        predefined_split_column_name: str | None = None,
        timestamp_split_column_name: str | None = None,
        weight_column: str | None = None,
        budget_milli_node_hours: int = 1000,
        model_display_name: str | None = None,
        model_labels: dict[str, str] | None = None,
        disable_early_stopping: bool = False,
        export_evaluated_data_items: bool = False,
        export_evaluated_data_items_bigquery_destination_uri: str | None = None,
        export_evaluated_data_items_override_destination: bool = False,
        sync: bool = True,
        parent_model: str | None = None,
        is_default_version: bool | None = None,
        model_version_aliases: list[str] | None = None,
        model_version_description: str | None = None,
    ) -> tuple[models.Model | None, str]:
        """
        Create an AutoML Tabular Training Job.
        :param project_id: Required. Project to run training in.
        :param region: Required. Location to run training in.
        :param display_name: Required. The user-defined name of this TrainingPipeline.
        :param dataset: Required. The dataset within the same Project from which data will be used to train
            the Model. The Dataset must use schema compatible with Model being trained, and what is
            compatible should be described in the used TrainingPipeline's [training_task_definition]
            [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For tabular
            Datasets, all their data is exported to training, to pick and choose from.
        :param target_column: Required. The name of the column values of which the Model is to predict.
        :param parent_model: Optional. The resource name or model ID of an existing model.
            The new model uploaded by this job will be a version of `parent_model`.
            Only set this field when training a new version of an existing model.
        :param is_default_version: Optional. When set to True, the newly uploaded model version will
            automatically have alias "default" included. Subsequent uses of
            the model produced by this job without a version specified will
            use this "default" version.
            When set to False, the "default" alias will not be moved.
            Actions targeting the model version produced by this job will need
            to specifically reference this version by ID or alias.
            New model uploads, i.e. version 1, will always be "default" aliased.
        :param model_version_aliases: Optional. User provided version aliases so that the model version
            uploaded by this job can be referenced via alias instead of
            auto-generated version ID. A default version alias will be created
            for the first version of the model.
            The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]
        :param model_version_description: Optional. The description of the model version
            being uploaded by this job.
        :param optimization_prediction_type: The type of prediction the Model is to produce.
            "classification" - Predict one out of multiple target values is picked for each row.
            "regression" - Predict a value based on its relation to other values. This type is available only
            to columns that contain semantically numeric values, i.e. integers or floating point number, even
            if stored as e.g. strings.
        :param optimization_objective: Optional. Objective function the Model is to be optimized towards.
            The training task creates a Model that maximizes/minimizes the value of the objective function
            over the validation set.
            The supported optimization objectives depend on the prediction type, and in the case of
            classification also the number of distinct values in the target column (two distinct values
            -> binary, 3 or more distinct values -> multi class). If the field is not set, the default
            objective function is used.
            Classification (binary):
            "maximize-au-roc" (default) - Maximize the area under the receiver operating characteristic (ROC)
            curve.
            "minimize-log-loss" - Minimize log loss.
            "maximize-au-prc" - Maximize the area under the precision-recall curve.
            "maximize-precision-at-recall" - Maximize precision for a specified recall value.
            "maximize-recall-at-precision" - Maximize recall for a specified precision value.
            Classification (multi class):
            "minimize-log-loss" (default) - Minimize log loss.
            Regression:
            "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE).
            "minimize-mae" - Minimize mean-absolute error (MAE).
            "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE).
        :param column_specs: Optional. Alternative to column_transformations where the keys of the dict are
            column names and their respective values are one of AutoMLTabularTrainingJob.column_data_types.
            When creating transformation for BigQuery Struct column, the column should be flattened using "."
            as the delimiter. Only columns with no child should have a transformation. If an input column has
            no transformations on it, such a column is ignored by the training, except for the targetColumn,
            which should have no transformations defined on. Only one of column_transformations or
            column_specs should be passed.
        :param column_transformations: Optional. Transformations to apply to the input columns (i.e. columns
            other than the targetColumn). Each transformation may produce multiple result values from the
            column's value, and all are used for training. When creating transformation for BigQuery Struct
            column, the column should be flattened using "." as the delimiter. Only columns with no child
            should have a transformation. If an input column has no transformations on it, such a column is
            ignored by the training, except for the targetColumn, which should have no transformations
            defined on. Only one of column_transformations or column_specs should be passed. Consider using
            column_specs as column_transformations will be deprecated eventually.
        :param optimization_objective_recall_value: Optional. Required when maximize-precision-at-recall
            optimizationObjective was picked, represents the recall value at which the optimization is done.
            The minimum value is 0 and the maximum is 1.0.
        :param optimization_objective_precision_value: Optional. Required when maximize-recall-at-precision
            optimizationObjective was picked, represents the precision value at which the optimization is
            done.
            The minimum value is 0 and the maximum is 1.0.
        :param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
            keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
            lowercase letters, numeric characters, underscores and dashes. International characters are
            allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
        :param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
            managed encryption key used to protect the training pipeline. Has the form:
            ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
            in the same region as where the compute resource is created. If set, this TrainingPipeline will
            be secured by this key.
            Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
            is not set separately.
        :param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
            managed encryption key used to protect the model. Has the form:
            ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
            in the same region as where the compute resource is created. If set, the trained Model will be
            secured by this key.
        :param training_fraction_split: Optional. The fraction of the input data that is to be used to train
            the Model. This is ignored if Dataset is not provided.
        :param validation_fraction_split: Optional. The fraction of the input data that is to be used to
            validate the Model. This is ignored if Dataset is not provided.
        :param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
            the Model. This is ignored if Dataset is not provided.
        :param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
            columns. The value of the key (either the label's value or value in the column) must be one of
            {``training``, ``validation``, ``test``}, and it defines to which set the given piece of data is
            assigned. If for a piece of data the key is not present or has an invalid value, that piece is
            ignored by the pipeline. Supported only for tabular and time series Datasets.
        :param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data columns.
            The value of the key values of the key (the values in the column) must be in RFC 3339 `date-time`
            format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the
            key is not present or has an invalid value, that piece is ignored by the pipeline. Supported only
            for tabular and time series Datasets. This parameter must be used with training_fraction_split,
            validation_fraction_split and test_fraction_split.
        :param weight_column: Optional. Name of the column that should be used as the weight column. Higher
            values in this column give more importance to the row during Model training. The column must have
            numeric values between 0 and 10000 inclusively, and 0 value means that the row is ignored. If the
            weight column field is not set, then all rows are assumed to have equal weight of 1.
        :param budget_milli_node_hours (int): Optional. The train budget of creating this Model, expressed in
            milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model
            will not exceed this budget. The final cost will be attempted to be close to the budget, though
            may end up being (even) noticeably smaller - at the backend's discretion. This especially may
            happen when further model training ceases to provide any improvements. If the budget is set to a
            value known to be insufficient to train a Model for the given training set, the training won't be
            attempted and will error. The minimum value is 1000 and the maximum is 72000.
        :param model_display_name: Optional. If the script produces a managed Vertex AI Model. The display
            name of the Model. The name can be up to 128 characters long and can be consist of any UTF-8
            characters. If not provided upon creation, the job's display_name is used.
        :param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
            keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
            lowercase letters, numeric characters, underscores and dashes. International characters are
            allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
        :param disable_early_stopping: Required. If true, the entire budget is used. This disables the early
            stopping feature. By default, the early stopping feature is enabled, which means that training
            might stop before the entire training budget has been used, if further training does no longer
            brings significant improvement to the model.
        :param export_evaluated_data_items: Whether to export the test set predictions to a BigQuery table.
            If False, then the export is not performed.
        :param export_evaluated_data_items_bigquery_destination_uri: Optional. URI of desired destination
            BigQuery table for exported test set predictions.
            Expected format: ``bq://<project_id>:<dataset_id>:<table>``
            If not specified, then results are exported to the following auto-created BigQuery table:
            ``<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>
            .evaluated_examples``
            Applies only if [export_evaluated_data_items] is True.
        :param export_evaluated_data_items_override_destination: Whether to override the contents of
            [export_evaluated_data_items_bigquery_destination_uri], if the table exists, for exported test
            set predictions. If False, and the table exists, then the training job will fail. Applies only if
            [export_evaluated_data_items] is True and [export_evaluated_data_items_bigquery_destination_uri]
            is specified.
        :param sync: Whether to execute this method synchronously. If False, this method will be executed in
            concurrent Future and any downstream object will be immediately returned and synced when the
            Future has completed.
        :return: Tuple of the trained Model (or None when the pipeline is not configured to upload
            a Model) and the unique id of the training pipeline.
        """
        # column_transformations is still accepted for backward compatibility, but
        # column_specs is the preferred way to describe input transformations.
        if column_transformations:
            warnings.warn(
                "Consider using column_specs as column_transformations will be deprecated eventually.",
                AirflowProviderDeprecationWarning,
                stacklevel=2,
            )
        # Build the training job and keep it on the hook so cancel_auto_ml_job()
        # can stop the pipeline while it is running.
        self._job = self.get_auto_ml_tabular_training_job(
            project=project_id,
            location=region,
            display_name=display_name,
            optimization_prediction_type=optimization_prediction_type,
            optimization_objective=optimization_objective,
            column_specs=column_specs,
            column_transformations=column_transformations,
            optimization_objective_recall_value=optimization_objective_recall_value,
            optimization_objective_precision_value=optimization_objective_precision_value,
            labels=labels,
            training_encryption_spec_key_name=training_encryption_spec_key_name,
            model_encryption_spec_key_name=model_encryption_spec_key_name,
        )
        if not self._job:
            raise AirflowException("AutoMLTabularTrainingJob was not created")
        # Run the pipeline; returns the uploaded Model, or None when no Model upload
        # is configured for this pipeline.
        model = self._job.run(
            dataset=dataset,
            target_column=target_column,
            training_fraction_split=training_fraction_split,
            validation_fraction_split=validation_fraction_split,
            test_fraction_split=test_fraction_split,
            predefined_split_column_name=predefined_split_column_name,
            timestamp_split_column_name=timestamp_split_column_name,
            weight_column=weight_column,
            budget_milli_node_hours=budget_milli_node_hours,
            model_display_name=model_display_name,
            model_labels=model_labels,
            disable_early_stopping=disable_early_stopping,
            export_evaluated_data_items=export_evaluated_data_items,
            export_evaluated_data_items_bigquery_destination_uri=(
                export_evaluated_data_items_bigquery_destination_uri
            ),
            export_evaluated_data_items_override_destination=export_evaluated_data_items_override_destination,
            sync=sync,
            parent_model=parent_model,
            is_default_version=is_default_version,
            model_version_aliases=model_version_aliases,
            model_version_description=model_version_description,
        )
        training_id = self.extract_training_id(self._job.resource_name)
        if model:
            # Block until the Model resource is fully created before returning it.
            model.wait()
        else:
            self.log.warning(
                "Training did not produce a Managed Model returning None. Training Pipeline is not "
                "configured to upload a Model."
            )
        return model, training_id
    @GoogleBaseHook.fallback_to_default_project_id
    def create_auto_ml_forecasting_training_job(
        self,
        project_id: str,
        region: str,
        display_name: str,
        dataset: datasets.TimeSeriesDataset,
        target_column: str,
        time_column: str,
        time_series_identifier_column: str,
        unavailable_at_forecast_columns: list[str],
        available_at_forecast_columns: list[str],
        forecast_horizon: int,
        data_granularity_unit: str,
        data_granularity_count: int,
        optimization_objective: str | None = None,
        column_specs: dict[str, str] | None = None,
        column_transformations: list[dict[str, dict[str, str]]] | None = None,
        labels: dict[str, str] | None = None,
        training_encryption_spec_key_name: str | None = None,
        model_encryption_spec_key_name: str | None = None,
        training_fraction_split: float | None = None,
        validation_fraction_split: float | None = None,
        test_fraction_split: float | None = None,
        predefined_split_column_name: str | None = None,
        weight_column: str | None = None,
        time_series_attribute_columns: list[str] | None = None,
        context_window: int | None = None,
        export_evaluated_data_items: bool = False,
        export_evaluated_data_items_bigquery_destination_uri: str | None = None,
        export_evaluated_data_items_override_destination: bool = False,
        quantiles: list[float] | None = None,
        validation_options: str | None = None,
        budget_milli_node_hours: int = 1000,
        model_display_name: str | None = None,
        model_labels: dict[str, str] | None = None,
        sync: bool = True,
        parent_model: str | None = None,
        is_default_version: bool | None = None,
        model_version_aliases: list[str] | None = None,
        model_version_description: str | None = None,
        window_stride_length: int | None = None,
        window_max_count: int | None = None,
        holiday_regions: list[str] | None = None,
    ) -> tuple[models.Model | None, str]:
        """
        Create an AutoML Forecasting Training Job.
        :param project_id: Required. Project to run training in.
        :param region: Required. Location to run training in.
        :param display_name: Required. The user-defined name of this TrainingPipeline.
        :param dataset: Required. The dataset within the same Project from which data will be used to train
            the Model. The Dataset must use schema compatible with Model being trained, and what is
            compatible should be described in the used TrainingPipeline's [training_task_definition]
            [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For time series
            Datasets, all their data is exported to training, to pick and choose from.
        :param target_column: Required. Name of the column that the Model is to predict values for.
        :param time_column: Required. Name of the column that identifies time order in the time series.
        :param parent_model: Optional. The resource name or model ID of an existing model.
            The new model uploaded by this job will be a version of `parent_model`.
            Only set this field when training a new version of an existing model.
        :param is_default_version: Optional. When set to True, the newly uploaded model version will
            automatically have alias "default" included. Subsequent uses of
            the model produced by this job without a version specified will
            use this "default" version.
            When set to False, the "default" alias will not be moved.
            Actions targeting the model version produced by this job will need
            to specifically reference this version by ID or alias.
            New model uploads, i.e. version 1, will always be "default" aliased.
        :param model_version_aliases: Optional. User provided version aliases so that the model version
            uploaded by this job can be referenced via alias instead of
            auto-generated version ID. A default version alias will be created
            for the first version of the model.
            The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]
        :param model_version_description: Optional. The description of the model version
            being uploaded by this job.
        :param time_series_identifier_column: Required. Name of the column that identifies the time series.
        :param unavailable_at_forecast_columns: Required. Column names of columns that are unavailable at
            forecast. Each column contains information for the given entity (identified by the
            [time_series_identifier_column]) that is unknown before the forecast (e.g. population of a city
            in a given year, or weather on a given day).
        :param available_at_forecast_columns: Required. Column names of columns that are available at
            forecast. Each column contains information for the given entity (identified by the
            [time_series_identifier_column]) that is known at forecast.
        :param forecast_horizon: Required. The amount of time into the future for which forecasted values for
            the target are returned. Expressed in number of units defined by the [data_granularity_unit] and
            [data_granularity_count] field. Inclusive.
        :param data_granularity_unit: Required. The data granularity unit. Accepted values are ``minute``,
            ``hour``, ``day``, ``week``, ``month``, ``year``.
        :param data_granularity_count: Required. The number of data granularity units between data points in
            the training data. If [data_granularity_unit] is `minute`, can be 1, 5, 10, 15, or 30. For all
            other values of [data_granularity_unit], must be 1.
        :param optimization_objective: Optional. Objective function the model is to be optimized towards. The
            training process creates a Model that optimizes the value of the objective function over the
            validation set. The supported optimization objectives:
            "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE).
            "minimize-mae" - Minimize mean-absolute error (MAE).
            "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE).
            "minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE).
            "minimize-wape-mae" - Minimize the combination of weighted absolute percentage error (WAPE) and
            mean-absolute-error (MAE).
            "minimize-quantile-loss" - Minimize the quantile loss at the defined quantiles. (Set this
            objective to build quantile forecasts.)
        :param column_specs: Optional. Alternative to column_transformations where the keys of the dict are
            column names and their respective values are one of AutoMLTabularTrainingJob.column_data_types.
            When creating transformation for BigQuery Struct column, the column should be flattened using "."
            as the delimiter. Only columns with no child should have a transformation. If an input column has
            no transformations on it, such a column is ignored by the training, except for the targetColumn,
            which should have no transformations defined on. Only one of column_transformations or
            column_specs should be passed.
        :param column_transformations: Optional. Transformations to apply to the input columns (i.e. columns
            other than the targetColumn). Each transformation may produce multiple result values from the
            column's value, and all are used for training. When creating transformation for BigQuery Struct
            column, the column should be flattened using "." as the delimiter. Only columns with no child
            should have a transformation. If an input column has no transformations on it, such a column is
            ignored by the training, except for the targetColumn, which should have no transformations
            defined on. Only one of column_transformations or column_specs should be passed. Consider using
            column_specs as column_transformations will be deprecated eventually.
        :param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
            keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
            lowercase letters, numeric characters, underscores and dashes. International characters are
            allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
        :param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
            managed encryption key used to protect the training pipeline. Has the form:
            ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
            in the same region as where the compute resource is created. If set, this TrainingPipeline will
            be secured by this key.
            Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
            is not set separately.
        :param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
            managed encryption key used to protect the model. Has the form:
            ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
            in the same region as where the compute resource is created.
            If set, the trained Model will be secured by this key.
        :param training_fraction_split: Optional. The fraction of the input data that is to be used to train
            the Model. This is ignored if Dataset is not provided.
        :param validation_fraction_split: Optional. The fraction of the input data that is to be used to
            validate the Model. This is ignored if Dataset is not provided.
        :param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
            the Model. This is ignored if Dataset is not provided.
        :param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
            columns. The value of the key (either the label's value or value in the column) must be one of
            {``TRAIN``, ``VALIDATE``, ``TEST``}, and it defines to which set the given piece of data is
            assigned. If for a piece of data the key is not present or has an invalid value, that piece is
            ignored by the pipeline.
            Supported only for tabular and time series Datasets.
        :param weight_column: Optional. Name of the column that should be used as the weight column. Higher
            values in this column give more importance to the row during Model training. The column must have
            numeric values between 0 and 10000 inclusively, and 0 value means that the row is ignored. If the
            weight column field is not set, then all rows are assumed to have equal weight of 1.
        :param time_series_attribute_columns: Optional. Column names that should be used as attribute
            columns. Each column is constant within a time series.
        :param context_window: Optional. The amount of time into the past training and prediction data is
            used for model training and prediction respectively. Expressed in number of units defined by the
            [data_granularity_unit] and [data_granularity_count] fields. When not provided uses the default
            value of 0 which means the model sets each series context window to be 0 (also known as "cold
            start"). Inclusive.
        :param export_evaluated_data_items: Whether to export the test set predictions to a BigQuery table.
            If False, then the export is not performed.
        :param export_evaluated_data_items_bigquery_destination_uri: Optional. URI of desired destination
            BigQuery table for exported test set predictions. Expected format:
            ``bq://<project_id>:<dataset_id>:<table>``
            If not specified, then results are exported to the following auto-created BigQuery table:
            ``<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>
            .evaluated_examples``
            Applies only if [export_evaluated_data_items] is True.
        :param export_evaluated_data_items_override_destination: Whether to override the contents of
            [export_evaluated_data_items_bigquery_destination_uri], if the table exists, for exported test
            set predictions. If False, and the table exists, then the training job will fail.
            Applies only if [export_evaluated_data_items] is True and
            [export_evaluated_data_items_bigquery_destination_uri] is specified.
        :param quantiles: Quantiles to use for the `minizmize-quantile-loss`
            [AutoMLForecastingTrainingJob.optimization_objective]. This argument is required in this case.
            Accepts up to 5 quantiles in the form of a double from 0 to 1, exclusive. Each quantile must be
            unique.
        :param validation_options: Validation options for the data validation component. The available
            options are: "fail-pipeline" - (default), will validate against the validation and fail the
            pipeline if it fails. "ignore-validation" - ignore the results of the validation and continue the
            pipeline
        :param budget_milli_node_hours: Optional. The train budget of creating this Model, expressed in milli
            node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will
            not exceed this budget. The final cost will be attempted to be close to the budget, though may
            end up being (even) noticeably smaller - at the backend's discretion. This especially may happen
            when further model training ceases to provide any improvements. If the budget is set to a value
            known to be insufficient to train a Model for the given training set, the training won't be
            attempted and will error. The minimum value is 1000 and the maximum is 72000.
        :param model_display_name: Optional. If the script produces a managed Vertex AI Model. The display
            name of the Model. The name can be up to 128 characters long and can be consist of any UTF-8
            characters. If not provided upon creation, the job's display_name is used.
        :param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
            keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
            lowercase letters, numeric characters, underscores and dashes. International characters are
            allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
        :param sync: Whether to execute this method synchronously. If False, this method will be executed in
            concurrent Future and any downstream object will be immediately returned and synced when the
            Future has completed.
        :param window_stride_length: Optional. Step length used to generate input examples. Every
            ``window_stride_length`` rows will be used to generate a sliding window.
        :param window_max_count: Optional. Number of rows that should be used to generate input examples. If the
            total row count is larger than this number, the input data will be randomly sampled to hit the count.
        :param holiday_regions: Optional. You can select one or more geographical
            regions to enable holiday effect modeling. During training, Vertex AI
            creates holiday categorical features within the model based on the date
            from TIME_COLUMN and the specified geographical regions.
        :return: Tuple of the trained Model (or None when the pipeline is not configured to upload
            a Model) and the unique id of the training pipeline.
        """
        # column_transformations is still accepted for backward compatibility, but
        # column_specs is the preferred way to describe input transformations.
        if column_transformations:
            warnings.warn(
                "Consider using column_specs as column_transformations will be deprecated eventually.",
                AirflowProviderDeprecationWarning,
                stacklevel=2,
            )
        # Build the training job and keep it on the hook so cancel_auto_ml_job()
        # can stop the pipeline while it is running.
        self._job = self.get_auto_ml_forecasting_training_job(
            project=project_id,
            location=region,
            display_name=display_name,
            optimization_objective=optimization_objective,
            column_specs=column_specs,
            column_transformations=column_transformations,
            labels=labels,
            training_encryption_spec_key_name=training_encryption_spec_key_name,
            model_encryption_spec_key_name=model_encryption_spec_key_name,
        )
        if not self._job:
            raise AirflowException("AutoMLForecastingTrainingJob was not created")
        # Run the pipeline; returns the uploaded Model, or None when no Model upload
        # is configured for this pipeline.
        model = self._job.run(
            dataset=dataset,
            target_column=target_column,
            time_column=time_column,
            time_series_identifier_column=time_series_identifier_column,
            unavailable_at_forecast_columns=unavailable_at_forecast_columns,
            available_at_forecast_columns=available_at_forecast_columns,
            forecast_horizon=forecast_horizon,
            data_granularity_unit=data_granularity_unit,
            data_granularity_count=data_granularity_count,
            training_fraction_split=training_fraction_split,
            validation_fraction_split=validation_fraction_split,
            test_fraction_split=test_fraction_split,
            predefined_split_column_name=predefined_split_column_name,
            weight_column=weight_column,
            time_series_attribute_columns=time_series_attribute_columns,
            context_window=context_window,
            export_evaluated_data_items=export_evaluated_data_items,
            export_evaluated_data_items_bigquery_destination_uri=(
                export_evaluated_data_items_bigquery_destination_uri
            ),
            export_evaluated_data_items_override_destination=export_evaluated_data_items_override_destination,
            quantiles=quantiles,
            validation_options=validation_options,
            budget_milli_node_hours=budget_milli_node_hours,
            model_display_name=model_display_name,
            model_labels=model_labels,
            sync=sync,
            parent_model=parent_model,
            is_default_version=is_default_version,
            model_version_aliases=model_version_aliases,
            model_version_description=model_version_description,
            window_stride_length=window_stride_length,
            window_max_count=window_max_count,
            holiday_regions=holiday_regions,
        )
        training_id = self.extract_training_id(self._job.resource_name)
        if model:
            # Block until the Model resource is fully created before returning it.
            model.wait()
        else:
            self.log.warning(
                "Training did not produce a Managed Model returning None. Training Pipeline is not "
                "configured to upload a Model."
            )
        return model, training_id
@GoogleBaseHook.fallback_to_default_project_id
def create_auto_ml_image_training_job(
    self,
    project_id: str,
    region: str,
    display_name: str,
    dataset: datasets.ImageDataset,
    prediction_type: str = "classification",
    multi_label: bool = False,
    model_type: str = "CLOUD",
    base_model: models.Model | None = None,
    labels: dict[str, str] | None = None,
    training_encryption_spec_key_name: str | None = None,
    model_encryption_spec_key_name: str | None = None,
    training_fraction_split: float | None = None,
    validation_fraction_split: float | None = None,
    test_fraction_split: float | None = None,
    training_filter_split: str | None = None,
    validation_filter_split: str | None = None,
    test_filter_split: str | None = None,
    budget_milli_node_hours: int | None = None,
    model_display_name: str | None = None,
    model_labels: dict[str, str] | None = None,
    disable_early_stopping: bool = False,
    sync: bool = True,
    parent_model: str | None = None,
    is_default_version: bool | None = None,
    model_version_aliases: list[str] | None = None,
    model_version_description: str | None = None,
) -> tuple[models.Model | None, str]:
    """
    Create an AutoML Image Training Job and run it.

    :param project_id: Required. Project to run training in.
    :param region: Required. Location to run training in.
    :param display_name: Required. User-defined name of this TrainingPipeline.
    :param dataset: Required. The image dataset (same project, schema compatible with the
        model being trained) whose data is used as training input.
    :param prediction_type: The type of prediction the Model is to produce:
        "classification" or "object_detection".
    :param multi_label: Only relevant for "classification". If True, a multi-label model
        is trained (multiple annotations may apply per image); if False, a single-label
        (multi-class) model is trained.
    :param model_type: One of "CLOUD" (classification default), "CLOUD_HIGH_ACCURACY_1"
        (object-detection default), "CLOUD_LOW_LATENCY_1", "MOBILE_TF_LOW_LATENCY_1",
        "MOBILE_TF_VERSATILE_1" or "MOBILE_TF_HIGH_ACCURACY_1". The MOBILE_* variants can
        additionally be exported as TensorFlow or Core ML models for edge devices; cloud
        variants cannot be exported.
    :param base_model: Optional. Image classification only: train the new model based on
        this existing model (same project, location and model_type) rather than from
        scratch.
    :param labels: Optional. User-defined metadata labels organizing TrainingPipelines.
        Keys and values are at most 64 Unicode codepoints; lowercase letters, digits,
        underscores and dashes; international characters allowed.
    :param training_encryption_spec_key_name: Optional. Customer-managed KMS key
        (``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``,
        same region as the compute resource) securing the training pipeline; also secures
        the trained model unless ``model_to_upload`` is set separately.
    :param model_encryption_spec_key_name: Optional. Customer-managed KMS key (same form
        and region constraint as above) securing the trained Model.
    :param training_fraction_split: Optional. Fraction of input data used to train the
        Model; ignored if no Dataset is provided.
    :param validation_fraction_split: Optional. Fraction of input data used to validate
        the Model; ignored if no Dataset is provided.
    :param test_fraction_split: Optional. Fraction of input data used to evaluate the
        Model; ignored if no Dataset is provided.
    :param training_filter_split: Optional. DataItem filter (DatasetService.ListDataItems
        syntax) selecting the training set. A DataItem matching several FilterSplit
        filters is assigned to the first applicable set in training, validation, test
        order. Ignored if no Dataset is provided.
    :param validation_filter_split: Optional. DataItem filter selecting the validation
        set; same matching rules as above. Ignored if no Dataset is provided.
    :param test_filter_split: Optional. DataItem filter selecting the test set; same
        matching rules as above. Ignored if no Dataset is provided.
    :param budget_milli_node_hours: Optional. Training budget in milli node hours
        (1,000 = one node hour). Per prediction_type: classification requires
        8,000-800,000 (default 192,000); object_detection requires 20,000-900,000
        (default 216,000). The final cost will not exceed the budget and may be
        noticeably smaller; a budget known to be insufficient makes training error
        without being attempted.
    :param model_display_name: Optional. Display name of the managed Vertex AI Model
        (up to 128 UTF-8 characters); defaults to the job's display_name.
    :param model_labels: Optional. User-defined metadata labels organizing Models; same
        constraints as ``labels``.
    :param disable_early_stopping: If True, the entire budget is used (early stopping
        disabled). By default training may stop early once further training no longer
        brings significant improvement.
    :param sync: Whether to execute synchronously. If False, the call runs in a
        concurrent Future and downstream objects are returned immediately and synced
        once the Future completes.
    :param parent_model: Optional. Resource name or model ID of an existing model; the
        model uploaded by this job becomes a version of it. Set only when training a new
        version of an existing model.
    :param is_default_version: Optional. If True, the uploaded version receives the
        "default" alias (used when callers reference the model without a version); if
        False, the alias is not moved. Version 1 uploads are always "default".
    :param model_version_aliases: Optional. User-provided version aliases
        (format [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]) for referencing the uploaded version.
    :param model_version_description: Optional. Description of the uploaded model
        version.
    :return: Tuple of (trained Model or None, training pipeline ID).
    """
    # Build the pipeline object first; ``run`` below actually launches training.
    self._job = self.get_auto_ml_image_training_job(
        project=project_id,
        location=region,
        display_name=display_name,
        prediction_type=prediction_type,
        multi_label=multi_label,
        model_type=model_type,
        base_model=base_model,
        labels=labels,
        training_encryption_spec_key_name=training_encryption_spec_key_name,
        model_encryption_spec_key_name=model_encryption_spec_key_name,
    )
    if not self._job:
        raise AirflowException("AutoMLImageTrainingJob was not created")
    trained_model = self._job.run(
        dataset=dataset,
        training_fraction_split=training_fraction_split,
        validation_fraction_split=validation_fraction_split,
        test_fraction_split=test_fraction_split,
        training_filter_split=training_filter_split,
        validation_filter_split=validation_filter_split,
        test_filter_split=test_filter_split,
        budget_milli_node_hours=budget_milli_node_hours,
        model_display_name=model_display_name,
        model_labels=model_labels,
        disable_early_stopping=disable_early_stopping,
        sync=sync,
        parent_model=parent_model,
        is_default_version=is_default_version,
        model_version_aliases=model_version_aliases,
        model_version_description=model_version_description,
    )
    training_id = self.extract_training_id(self._job.resource_name)
    if not trained_model:
        # Pipelines not configured to upload a Model legitimately return None.
        self.log.warning(
            "Training did not produce a Managed Model returning None. AutoML Image Training "
            "Pipeline is not configured to upload a Model."
        )
    else:
        # Block until the (possibly async) model upload is complete.
        trained_model.wait()
    return trained_model, training_id
@GoogleBaseHook.fallback_to_default_project_id
def create_auto_ml_video_training_job(
    self,
    project_id: str,
    region: str,
    display_name: str,
    dataset: datasets.VideoDataset,
    prediction_type: str = "classification",
    model_type: str = "CLOUD",
    labels: dict[str, str] | None = None,
    training_encryption_spec_key_name: str | None = None,
    model_encryption_spec_key_name: str | None = None,
    training_fraction_split: float | None = None,
    test_fraction_split: float | None = None,
    training_filter_split: str | None = None,
    test_filter_split: str | None = None,
    model_display_name: str | None = None,
    model_labels: dict[str, str] | None = None,
    sync: bool = True,
    parent_model: str | None = None,
    is_default_version: bool | None = None,
    model_version_aliases: list[str] | None = None,
    model_version_description: str | None = None,
) -> tuple[models.Model | None, str]:
    """
    Create an AutoML Video Training Job and run it.

    :param project_id: Required. Project to run training in.
    :param region: Required. Location to run training in.
    :param display_name: Required. User-defined name of this TrainingPipeline.
    :param dataset: Required. The video dataset (same project, schema compatible with the
        model being trained) whose data is used as training input.
    :param prediction_type: The type of prediction the Model is to produce:
        "classification" (classify shots/segments with your own labels),
        "object_tracking" (detect and track multiple objects across shots/segments), or
        "action_recognition" (pinpoint short, ~1 second, actions).
    :param model_type: One of "CLOUD" (all prediction types; not exportable),
        "MOBILE_VERSATILE_1" (all prediction types; exportable via
        ModelService.ExportModel as TensorFlow / TensorFlow Lite for mobile or edge
        devices), or the "object_tracking"-only exportable variants
        "MOBILE_CORAL_VERSATILE_1", "MOBILE_CORAL_LOW_LATENCY_1" (Google Coral) and
        "MOBILE_JETSON_VERSATILE_1", "MOBILE_JETSON_LOW_LATENCY_1" (NVIDIA Jetson);
        LOW_LATENCY variants trade quality for latency.
    :param labels: Optional. User-defined metadata labels organizing TrainingPipelines.
        Keys and values are at most 64 Unicode codepoints; lowercase letters, digits,
        underscores and dashes; international characters allowed.
    :param training_encryption_spec_key_name: Optional. Customer-managed KMS key
        (``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``,
        same region as the compute resource) securing the training pipeline; also secures
        the trained model unless ``model_to_upload`` is set separately.
    :param model_encryption_spec_key_name: Optional. Customer-managed KMS key (same form
        and region constraint as above) securing the trained Model.
    :param training_fraction_split: Optional. Fraction of input data used to train the
        Model; ignored if no Dataset is provided.
    :param test_fraction_split: Optional. Fraction of input data used to evaluate the
        Model; ignored if no Dataset is provided.
    :param training_filter_split: Optional. DataItem filter (DatasetService.ListDataItems
        syntax) selecting the training set. A DataItem matching several FilterSplit
        filters is assigned to the first applicable set in training, validation, test
        order. Ignored if no Dataset is provided.
    :param test_filter_split: Optional. DataItem filter selecting the test set; same
        matching rules as above. Ignored if no Dataset is provided.
    :param model_display_name: Optional. Display name of the managed Vertex AI Model
        (up to 128 UTF-8 characters); defaults to the job's display_name.
    :param model_labels: Optional. User-defined metadata labels organizing Models; same
        constraints as ``labels``.
    :param sync: Whether to execute synchronously. If False, the call runs in a
        concurrent Future and downstream objects are returned immediately and synced
        once the Future completes.
    :param parent_model: Optional. Resource name or model ID of an existing model; the
        model uploaded by this job becomes a version of it. Set only when training a new
        version of an existing model.
    :param is_default_version: Optional. If True, the uploaded version receives the
        "default" alias (used when callers reference the model without a version); if
        False, the alias is not moved. Version 1 uploads are always "default".
    :param model_version_aliases: Optional. User-provided version aliases
        (format [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]) for referencing the uploaded version.
    :param model_version_description: Optional. Description of the uploaded model
        version.
    :return: Tuple of (trained Model or None, training pipeline ID).
    """
    # Build the pipeline object first; ``run`` below actually launches training.
    self._job = self.get_auto_ml_video_training_job(
        project=project_id,
        location=region,
        display_name=display_name,
        prediction_type=prediction_type,
        model_type=model_type,
        labels=labels,
        training_encryption_spec_key_name=training_encryption_spec_key_name,
        model_encryption_spec_key_name=model_encryption_spec_key_name,
    )
    if not self._job:
        raise AirflowException("AutoMLVideoTrainingJob was not created")
    trained_model = self._job.run(
        dataset=dataset,
        training_fraction_split=training_fraction_split,
        test_fraction_split=test_fraction_split,
        training_filter_split=training_filter_split,
        test_filter_split=test_filter_split,
        model_display_name=model_display_name,
        model_labels=model_labels,
        sync=sync,
        parent_model=parent_model,
        is_default_version=is_default_version,
        model_version_aliases=model_version_aliases,
        model_version_description=model_version_description,
    )
    training_id = self.extract_training_id(self._job.resource_name)
    if not trained_model:
        # Pipelines not configured to upload a Model legitimately return None.
        self.log.warning(
            "Training did not produce a Managed Model returning None. AutoML Video Training "
            "Pipeline is not configured to upload a Model."
        )
    else:
        # Block until the (possibly async) model upload is complete.
        trained_model.wait()
    return trained_model, training_id
@GoogleBaseHook.fallback_to_default_project_id
def delete_training_pipeline(
    self,
    project_id: str,
    region: str,
    training_pipeline: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
    """
    Delete a TrainingPipeline.

    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param training_pipeline: Required. The name of the TrainingPipeline resource to be deleted.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :return: The long-running Operation tracking the deletion.
    """
    client = self.get_pipeline_service_client(region)
    # Expand the short pipeline ID into a fully-qualified resource name.
    pipeline_name = client.training_pipeline_path(project_id, region, training_pipeline)
    return client.delete_training_pipeline(
        request={"name": pipeline_name},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def get_training_pipeline(
    self,
    project_id: str,
    region: str,
    training_pipeline: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> TrainingPipeline:
    """
    Get a TrainingPipeline.

    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param training_pipeline: Required. The name of the TrainingPipeline resource.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :return: The requested TrainingPipeline resource.
    """
    client = self.get_pipeline_service_client(region)
    # Expand the short pipeline ID into a fully-qualified resource name.
    pipeline_name = client.training_pipeline_path(project_id, region, training_pipeline)
    return client.get_training_pipeline(
        request={"name": pipeline_name},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def list_training_pipelines(
    self,
    project_id: str,
    region: str,
    page_size: int | None = None,
    page_token: str | None = None,
    filter: str | None = None,
    read_mask: str | None = None,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> ListTrainingPipelinesPager:
    """
    List TrainingPipelines in a Location.

    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param page_size: Optional. The standard list page size.
    :param page_token: Optional. The standard list page token, typically taken from
        ``ListTrainingPipelinesResponse.next_page_token`` of a previous
        ``PipelineService.ListTrainingPipelines`` call.
    :param filter: Optional. The standard list filter. ``display_name`` and ``state``
        support ``=`` and ``!=``, combinable with AND/OR/NOT, e.g.
        ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` or
        ``NOT display_name="my_pipeline"``.
        (The name shadows the ``filter`` builtin on purpose, mirroring the API field.)
    :param read_mask: Optional. Mask specifying which fields to read.
    :param retry: Designation of what errors, if any, should be retried.
    :param timeout: The timeout for this request.
    :param metadata: Strings which should be sent along with the request as metadata.
    :return: A pager over the matching TrainingPipelines.
    """
    client = self.get_pipeline_service_client(region)
    parent = client.common_location_path(project_id, region)
    request = {
        "parent": parent,
        "page_size": page_size,
        "page_token": page_token,
        "filter": filter,
        "read_mask": read_mask,
    }
    return client.list_training_pipelines(
        request=request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
| AutoMLHook |
python | spack__spack | lib/spack/spack/container/writers.py | {
"start": 11048,
"end": 11910
} | class ____(PathContext):
"""Context used to instantiate a Dockerfile"""
#: Name of the template used for Dockerfiles
template_name = "container/Dockerfile"
@tengine.context_property
def manifest(self):
manifest_str = super().manifest
# Docker doesn't support HEREDOC, so we need to resort to
# a horrible echo trick to have the manifest in the Dockerfile
echoed_lines = []
for idx, line in enumerate(manifest_str.split("\n")):
quoted_line = shlex.quote(line)
if idx == 0:
echoed_lines.append("&& (echo " + quoted_line + " \\")
continue
echoed_lines.append("&& echo " + quoted_line + " \\")
echoed_lines[-1] = echoed_lines[-1].replace(" \\", ")")
return "\n".join(echoed_lines)
@writer("singularity")
| DockerContext |
python | google__jax | jax/_src/state/types.py | {
"start": 2099,
"end": 2152
} | class ____(RefEffect):
name: str = "Write"
| WriteEffect |
python | redis__redis-py | redis/asyncio/http/http_client.py | {
"start": 349,
"end": 3196
} | class ____(ABC):
@abstractmethod
async def get(
self,
path: str,
params: Optional[
Mapping[str, Union[None, str, int, float, bool, list, tuple]]
] = None,
headers: Optional[Mapping[str, str]] = None,
timeout: Optional[float] = None,
expect_json: bool = True,
) -> Union[HttpResponse, Any]:
"""
Invoke HTTP GET request."""
pass
@abstractmethod
async def delete(
self,
path: str,
params: Optional[
Mapping[str, Union[None, str, int, float, bool, list, tuple]]
] = None,
headers: Optional[Mapping[str, str]] = None,
timeout: Optional[float] = None,
expect_json: bool = True,
) -> Union[HttpResponse, Any]:
"""
Invoke HTTP DELETE request."""
pass
@abstractmethod
async def post(
self,
path: str,
json_body: Optional[Any] = None,
data: Optional[Union[bytes, str]] = None,
params: Optional[
Mapping[str, Union[None, str, int, float, bool, list, tuple]]
] = None,
headers: Optional[Mapping[str, str]] = None,
timeout: Optional[float] = None,
expect_json: bool = True,
) -> Union[HttpResponse, Any]:
"""
Invoke HTTP POST request."""
pass
@abstractmethod
async def put(
self,
path: str,
json_body: Optional[Any] = None,
data: Optional[Union[bytes, str]] = None,
params: Optional[
Mapping[str, Union[None, str, int, float, bool, list, tuple]]
] = None,
headers: Optional[Mapping[str, str]] = None,
timeout: Optional[float] = None,
expect_json: bool = True,
) -> Union[HttpResponse, Any]:
"""
Invoke HTTP PUT request."""
pass
@abstractmethod
async def patch(
self,
path: str,
json_body: Optional[Any] = None,
data: Optional[Union[bytes, str]] = None,
params: Optional[
Mapping[str, Union[None, str, int, float, bool, list, tuple]]
] = None,
headers: Optional[Mapping[str, str]] = None,
timeout: Optional[float] = None,
expect_json: bool = True,
) -> Union[HttpResponse, Any]:
"""
Invoke HTTP PATCH request."""
pass
@abstractmethod
async def request(
self,
method: str,
path: str,
params: Optional[
Mapping[str, Union[None, str, int, float, bool, list, tuple]]
] = None,
headers: Optional[Mapping[str, str]] = None,
body: Optional[Union[bytes, str]] = None,
timeout: Optional[float] = None,
) -> HttpResponse:
"""
Invoke HTTP request with given method."""
pass
| AsyncHTTPClient |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/screen.py | {
"start": 4153,
"end": 9620
} | class ____:
"""
Two dimensional buffer of :class:`.Char` instances.
"""
def __init__(
self,
default_char: Char | None = None,
initial_width: int = 0,
initial_height: int = 0,
) -> None:
if default_char is None:
default_char2 = _CHAR_CACHE[" ", Transparent]
else:
default_char2 = default_char
self.data_buffer: defaultdict[int, defaultdict[int, Char]] = defaultdict(
lambda: defaultdict(lambda: default_char2)
)
#: Escape sequences to be injected.
self.zero_width_escapes: defaultdict[int, defaultdict[int, str]] = defaultdict(
lambda: defaultdict(str)
)
#: Position of the cursor.
self.cursor_positions: dict[
Window, Point
] = {} # Map `Window` objects to `Point` objects.
#: Visibility of the cursor.
self.show_cursor = True
#: (Optional) Where to position the menu. E.g. at the start of a completion.
#: (We can't use the cursor position, because we don't want the
#: completion menu to change its position when we browse through all the
#: completions.)
self.menu_positions: dict[
Window, Point
] = {} # Map `Window` objects to `Point` objects.
#: Currently used width/height of the screen. This will increase when
#: data is written to the screen.
self.width = initial_width or 0
self.height = initial_height or 0
# Windows that have been drawn. (Each `Window` class will add itself to
# this list.)
self.visible_windows_to_write_positions: dict[Window, WritePosition] = {}
# List of (z_index, draw_func)
self._draw_float_functions: list[tuple[int, Callable[[], None]]] = []
@property
def visible_windows(self) -> list[Window]:
return list(self.visible_windows_to_write_positions.keys())
def set_cursor_position(self, window: Window, position: Point) -> None:
"""
Set the cursor position for a given window.
"""
self.cursor_positions[window] = position
def set_menu_position(self, window: Window, position: Point) -> None:
"""
Set the cursor position for a given window.
"""
self.menu_positions[window] = position
def get_cursor_position(self, window: Window) -> Point:
"""
Get the cursor position for a given window.
Returns a `Point`.
"""
try:
return self.cursor_positions[window]
except KeyError:
return Point(x=0, y=0)
def get_menu_position(self, window: Window) -> Point:
"""
Get the menu position for a given window.
(This falls back to the cursor position if no menu position was set.)
"""
try:
return self.menu_positions[window]
except KeyError:
try:
return self.cursor_positions[window]
except KeyError:
return Point(x=0, y=0)
def draw_with_z_index(self, z_index: int, draw_func: Callable[[], None]) -> None:
"""
Add a draw-function for a `Window` which has a >= 0 z_index.
This will be postponed until `draw_all_floats` is called.
"""
self._draw_float_functions.append((z_index, draw_func))
def draw_all_floats(self) -> None:
"""
Draw all float functions in order of z-index.
"""
# We keep looping because some draw functions could add new functions
# to this list. See `FloatContainer`.
while self._draw_float_functions:
# Sort the floats that we have so far by z_index.
functions = sorted(self._draw_float_functions, key=lambda item: item[0])
# Draw only one at a time, then sort everything again. Now floats
# might have been added.
self._draw_float_functions = functions[1:]
functions[0][1]()
def append_style_to_content(self, style_str: str) -> None:
"""
For all the characters in the screen.
Set the style string to the given `style_str`.
"""
b = self.data_buffer
char_cache = _CHAR_CACHE
append_style = " " + style_str
for y, row in b.items():
for x, char in row.items():
row[x] = char_cache[char.char, char.style + append_style]
def fill_area(
self, write_position: WritePosition, style: str = "", after: bool = False
) -> None:
"""
Fill the content of this area, using the given `style`.
The style is prepended before whatever was here before.
"""
if not style.strip():
return
xmin = write_position.xpos
xmax = write_position.xpos + write_position.width
char_cache = _CHAR_CACHE
data_buffer = self.data_buffer
if after:
append_style = " " + style
prepend_style = ""
else:
append_style = ""
prepend_style = style + " "
for y in range(
write_position.ypos, write_position.ypos + write_position.height
):
row = data_buffer[y]
for x in range(xmin, xmax):
cell = row[x]
row[x] = char_cache[
cell.char, prepend_style + cell.style + append_style
]
| Screen |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 84542,
"end": 84855
} | class ____(BaseModel):
hits: List["ScoredPoint"] = Field(..., description="Scored points that have the same value of the group_by key")
id: "GroupId" = Field(..., description="")
lookup: Optional["Record"] = Field(default=None, description="Record that has been looked up using the group id")
| PointGroup |
python | pytorch__pytorch | torch/ao/nn/quantized/reference/modules/rnn.py | {
"start": 1368,
"end": 5238
} | class ____(nn.RNNCellBase):
def __init__(
self,
input_size: int,
hidden_size: int,
bias: bool,
num_chunks: int,
device=None,
dtype=None,
weight_qparams_dict=None,
) -> None:
super().__init__(
input_size, hidden_size, bias, num_chunks, device=device, dtype=dtype
)
# TODO(jerryzh168): maybe make this arg a required arg
if weight_qparams_dict is None:
weight_qparams = {
"qscheme": torch.per_tensor_affine,
"dtype": torch.quint8,
"scale": 1.0,
"zero_point": 0,
}
weight_qparams_dict = {
"weight_ih": weight_qparams,
"weight_hh": weight_qparams,
"is_decomposed": False,
}
assert len(weight_qparams_dict) == 3, (
"Expected length for weight_qparams_dict to be 3 for QuantizedRNNCellBase(Reference)"
)
self._init_weight_qparams_dict(weight_qparams_dict, device)
def _init_weight_qparams_dict(self, weight_qparams_dict, device):
assert weight_qparams_dict is not None
self.is_decomposed = weight_qparams_dict["is_decomposed"]
for key, weight_qparams in weight_qparams_dict.items():
if key == "is_decomposed":
continue
# TODO: refactor the duplicated code to utils.py
weight_qscheme = weight_qparams["qscheme"]
weight_dtype = weight_qparams["dtype"]
setattr(self, key + "_qscheme", weight_qscheme)
setattr(self, key + "_dtype", weight_dtype)
assert weight_qscheme in [
None,
torch.per_tensor_affine,
torch.per_channel_affine,
], Exception(
f"qscheme: {weight_qscheme} is not support in {self._get_name()}"
)
if weight_qscheme is not None:
scale = weight_qparams["scale"]
scale_tensor = (
scale.detach().clone()
if isinstance(scale, torch.Tensor)
else torch.tensor(scale, dtype=torch.float, device=device)
)
self.register_buffer(key + "_scale", scale_tensor)
zp = weight_qparams["zero_point"]
zp_tensor = (
zp.detach().clone()
if isinstance(zp, torch.Tensor)
else torch.tensor(zp, dtype=torch.int, device=device)
)
self.register_buffer(key + "_zero_point", zp_tensor)
if weight_qscheme == torch.per_channel_affine:
axis = weight_qparams["axis"]
axis_tensor = (
axis.detach().clone()
if isinstance(axis, torch.Tensor)
else torch.tensor(axis, dtype=torch.int, device=device)
)
self.register_buffer(key + "_axis", axis_tensor)
else:
# added for TorchScriptability, not used
self.register_buffer(
key + "_axis", torch.tensor(0, dtype=torch.int, device=device)
)
setattr(self, key + "_axis_int", getattr(self, key + "_axis").item())
def _get_name(self):
return "QuantizedRNNCellBase(Reference)"
def get_quantized_weight_ih(self):
return get_quantized_weight(self, "weight_ih")
def get_quantized_weight_hh(self):
return get_quantized_weight(self, "weight_hh")
def get_weight_ih(self):
return _get_quantize_and_dequantized_weight(self, "weight_ih")
def get_weight_hh(self):
return _get_quantize_and_dequantized_weight(self, "weight_hh")
| RNNCellBase |
python | scikit-learn__scikit-learn | sklearn/utils/_param_validation.py | {
"start": 13103,
"end": 18045
} | class ____(_Constraint):
"""Constraint representing a typed interval.
Parameters
----------
type : {numbers.Integral, numbers.Real, RealNotInt}
The set of numbers in which to set the interval.
If RealNotInt, only reals that don't have the integer type
are allowed. For example 1.0 is allowed but 1 is not.
left : float or int or None
The left bound of the interval. None means left bound is -∞.
right : float, int or None
The right bound of the interval. None means right bound is +∞.
closed : {"left", "right", "both", "neither"}
Whether the interval is open or closed. Possible choices are:
- `"left"`: the interval is closed on the left and open on the right.
It is equivalent to the interval `[ left, right )`.
- `"right"`: the interval is closed on the right and open on the left.
It is equivalent to the interval `( left, right ]`.
- `"both"`: the interval is closed.
It is equivalent to the interval `[ left, right ]`.
- `"neither"`: the interval is open.
It is equivalent to the interval `( left, right )`.
Notes
-----
Setting a bound to `None` and setting the interval closed is valid. For instance,
strictly speaking, `Interval(Real, 0, None, closed="both")` corresponds to
`[0, +∞) U {+∞}`.
"""
def __init__(self, type, left, right, *, closed):
super().__init__()
self.type = type
self.left = left
self.right = right
self.closed = closed
self._check_params()
def _check_params(self):
if self.type not in (Integral, Real, RealNotInt):
raise ValueError(
"type must be either numbers.Integral, numbers.Real or RealNotInt."
f" Got {self.type} instead."
)
if self.closed not in ("left", "right", "both", "neither"):
raise ValueError(
"closed must be either 'left', 'right', 'both' or 'neither'. "
f"Got {self.closed} instead."
)
if self.type is Integral:
suffix = "for an interval over the integers."
if self.left is not None and not isinstance(self.left, Integral):
raise TypeError(f"Expecting left to be an int {suffix}")
if self.right is not None and not isinstance(self.right, Integral):
raise TypeError(f"Expecting right to be an int {suffix}")
if self.left is None and self.closed in ("left", "both"):
raise ValueError(
f"left can't be None when closed == {self.closed} {suffix}"
)
if self.right is None and self.closed in ("right", "both"):
raise ValueError(
f"right can't be None when closed == {self.closed} {suffix}"
)
else:
if self.left is not None and not isinstance(self.left, Real):
raise TypeError("Expecting left to be a real number.")
if self.right is not None and not isinstance(self.right, Real):
raise TypeError("Expecting right to be a real number.")
if self.right is not None and self.left is not None and self.right <= self.left:
raise ValueError(
f"right can't be less than left. Got left={self.left} and "
f"right={self.right}"
)
def __contains__(self, val):
if not isinstance(val, Integral) and np.isnan(val):
return False
left_cmp = operator.lt if self.closed in ("left", "both") else operator.le
right_cmp = operator.gt if self.closed in ("right", "both") else operator.ge
left = -np.inf if self.left is None else self.left
right = np.inf if self.right is None else self.right
if left_cmp(val, left):
return False
if right_cmp(val, right):
return False
return True
def is_satisfied_by(self, val):
if not isinstance(val, self.type):
return False
return val in self
def __str__(self):
type_str = "an int" if self.type is Integral else "a float"
left_bracket = "[" if self.closed in ("left", "both") else "("
left_bound = "-inf" if self.left is None else self.left
right_bound = "inf" if self.right is None else self.right
right_bracket = "]" if self.closed in ("right", "both") else ")"
# better repr if the bounds were given as integers
if not self.type == Integral and isinstance(self.left, Real):
left_bound = float(left_bound)
if not self.type == Integral and isinstance(self.right, Real):
right_bound = float(right_bound)
return (
f"{type_str} in the range "
f"{left_bracket}{left_bound}, {right_bound}{right_bracket}"
)
| Interval |
python | kubernetes-client__python | kubernetes/client/models/v1_token_review_status.py | {
"start": 383,
"end": 7097
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'audiences': 'list[str]',
'authenticated': 'bool',
'error': 'str',
'user': 'V1UserInfo'
}
attribute_map = {
'audiences': 'audiences',
'authenticated': 'authenticated',
'error': 'error',
'user': 'user'
}
def __init__(self, audiences=None, authenticated=None, error=None, user=None, local_vars_configuration=None): # noqa: E501
"""V1TokenReviewStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._audiences = None
self._authenticated = None
self._error = None
self._user = None
self.discriminator = None
if audiences is not None:
self.audiences = audiences
if authenticated is not None:
self.authenticated = authenticated
if error is not None:
self.error = error
if user is not None:
self.user = user
@property
def audiences(self):
"""Gets the audiences of this V1TokenReviewStatus. # noqa: E501
Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server. # noqa: E501
:return: The audiences of this V1TokenReviewStatus. # noqa: E501
:rtype: list[str]
"""
return self._audiences
@audiences.setter
def audiences(self, audiences):
"""Sets the audiences of this V1TokenReviewStatus.
Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server. # noqa: E501
:param audiences: The audiences of this V1TokenReviewStatus. # noqa: E501
:type: list[str]
"""
self._audiences = audiences
@property
def authenticated(self):
"""Gets the authenticated of this V1TokenReviewStatus. # noqa: E501
Authenticated indicates that the token was associated with a known user. # noqa: E501
:return: The authenticated of this V1TokenReviewStatus. # noqa: E501
:rtype: bool
"""
return self._authenticated
@authenticated.setter
def authenticated(self, authenticated):
"""Sets the authenticated of this V1TokenReviewStatus.
Authenticated indicates that the token was associated with a known user. # noqa: E501
:param authenticated: The authenticated of this V1TokenReviewStatus. # noqa: E501
:type: bool
"""
self._authenticated = authenticated
@property
def error(self):
"""Gets the error of this V1TokenReviewStatus. # noqa: E501
Error indicates that the token couldn't be checked # noqa: E501
:return: The error of this V1TokenReviewStatus. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this V1TokenReviewStatus.
Error indicates that the token couldn't be checked # noqa: E501
:param error: The error of this V1TokenReviewStatus. # noqa: E501
:type: str
"""
self._error = error
@property
def user(self):
"""Gets the user of this V1TokenReviewStatus. # noqa: E501
:return: The user of this V1TokenReviewStatus. # noqa: E501
:rtype: V1UserInfo
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this V1TokenReviewStatus.
:param user: The user of this V1TokenReviewStatus. # noqa: E501
:type: V1UserInfo
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1TokenReviewStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1TokenReviewStatus):
return True
return self.to_dict() != other.to_dict()
| V1TokenReviewStatus |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol3.py | {
"start": 3737,
"end": 3798
} | class ____(Protocol):
val1: ClassVar[Sequence[int]]
| Proto11 |
python | great-expectations__great_expectations | tests/actions/test_core_actions.py | {
"start": 8421,
"end": 9894
} | class ____:
@pytest.mark.unit
def test_create_payload(self, mock_context):
mock_validation_results = []
expected_payload = {
"test_suite_name": "my_suite",
"data_asset_name": "my_schema.my_table",
"validation_results": [],
}
api_notification_action = APINotificationAction(
name="my_api_notification", url="http://www.example.com"
)
payload = api_notification_action.create_payload(
"my_schema.my_table", "my_suite", mock_validation_results
)
assert payload == expected_payload
@pytest.mark.unit
def test_run(self, checkpoint_result: CheckpointResult):
url = "http://www.example.com"
action = APINotificationAction(name="my_action", url=url)
with mock.patch.object(requests, "post") as mock_post:
action.run(checkpoint_result=checkpoint_result)
mock_post.assert_called_once_with(
url,
headers={"Content-Type": "application/json"},
data=[
{
"data_asset_name": BATCH_ID_A,
"test_suite_name": SUITE_A,
"validation_results": [],
},
{
"data_asset_name": BATCH_ID_B,
"test_suite_name": SUITE_B,
"validation_results": [],
},
],
)
| TestAPINotificationAction |
python | matplotlib__matplotlib | lib/matplotlib/ticker.py | {
"start": 10788,
"end": 11464
} | class ____(Formatter):
"""
Use a user-defined function for formatting.
The function should take in two inputs (a tick value ``x`` and a
position ``pos``), and return a string containing the corresponding
tick label.
"""
def __init__(self, func):
self.func = func
self.offset_string = ""
def __call__(self, x, pos=None):
"""
Return the value of the user defined function.
*x* and *pos* are passed through as-is.
"""
return self.func(x, pos)
def get_offset(self):
return self.offset_string
def set_offset_string(self, ofs):
self.offset_string = ofs
| FuncFormatter |
python | falconry__falcon | falcon/media/multipart.py | {
"start": 16906,
"end": 20808
} | class ____(BaseHandler):
"""Multipart form (content type ``multipart/form-data``) media handler.
The ``multipart/form-data`` media type for HTML5 forms is defined in
`RFC 7578 <https://tools.ietf.org/html/rfc7578>`_.
The multipart media type itself is defined in
`RFC 2046 section 5.1 <https://tools.ietf.org/html/rfc2046#section-5.1>`_.
.. note::
Unlike many form parsing implementations in other frameworks, this
handler does not consume the stream immediately. Rather, the stream is
consumed on-demand and parsed into individual body parts while iterating
over the media object.
For examples on parsing the request form, see also: :ref:`multipart`.
"""
_ASGI_MULTIPART_FORM: ClassVar[type[AsgiMultipartForm]]
parse_options: MultipartParseOptions
"""Configuration options for the multipart form parser and instances of
:class:`~falcon.media.multipart.BodyPart` it yields.
See also: :ref:`multipart_parser_conf`.
"""
def __init__(self, parse_options: MultipartParseOptions | None = None) -> None:
self.parse_options = parse_options or MultipartParseOptions()
@overload
def _deserialize_form(
self,
stream: ReadableIO,
content_type: str | None,
content_length: int | None,
form_cls: type[MultipartForm] = ...,
) -> MultipartForm: ...
@overload
def _deserialize_form(
self,
stream: AsyncReadableIO,
content_type: str | None,
content_length: int | None,
form_cls: type[AsgiMultipartForm] = ...,
) -> AsgiMultipartForm: ...
def _deserialize_form(
self,
stream: ReadableIO | AsyncReadableIO,
content_type: str | None,
content_length: int | None,
form_cls: type[MultipartForm | AsgiMultipartForm] = MultipartForm,
) -> MultipartForm | AsgiMultipartForm:
assert content_type is not None
_, options = parse_header(content_type)
try:
boundary = options['boundary']
except KeyError:
raise errors.HTTPInvalidHeader(
'No boundary specifier found in {!r}'.format(content_type),
'Content-Type',
)
# NOTE(vytas): RFC 2046, section 5.1.
# If a boundary delimiter line appears to end with white space, the
# white space must be presumed to have been added by a gateway, and
# must be deleted.
boundary = boundary.rstrip()
# NOTE(vytas): RFC 2046, section 5.1.
# The boundary parameter consists of 1 to 70 characters from a set of
# characters known to be very robust through mail gateways, and NOT
# ending with white space.
if not 1 <= len(boundary) <= 70:
raise errors.HTTPInvalidHeader(
'The boundary parameter must consist of 1 to 70 characters',
'Content-Type',
)
return form_cls(stream, boundary.encode(), content_length, self.parse_options) # type: ignore[arg-type]
def deserialize(
self,
stream: ReadableIO,
content_type: str | None,
content_length: int | None,
) -> MultipartForm:
return self._deserialize_form(stream, content_type, content_length)
async def deserialize_async(
self,
stream: AsyncReadableIO,
content_type: str | None,
content_length: int | None,
) -> AsgiMultipartForm:
return self._deserialize_form(
stream, content_type, content_length, form_cls=self._ASGI_MULTIPART_FORM
)
def serialize(self, media: object, content_type: str) -> NoReturn:
raise NotImplementedError('multipart form serialization unsupported')
# PERF(vytas): To avoid typos and improve storage space and speed over a dict.
# Inspired by RequestOptions.
| MultipartFormHandler |
python | python__mypy | mypyc/ir/ops.py | {
"start": 5304,
"end": 6120
} | class ____(Value):
"""A Register holds a value of a specific type, and it can be read and mutated.
A Register is always local to a function. Each local variable maps
to a Register, and they are also used for some (but not all)
temporary values.
Note that the term 'register' is overloaded and is sometimes used
to refer to arbitrary Values (for example, in RegisterOp).
"""
def __init__(self, type: RType, name: str = "", is_arg: bool = False, line: int = -1) -> None:
self.type = type
self.name = name
self.is_arg = is_arg
self.is_borrowed = is_arg
self.line = line
@property
def is_void(self) -> bool:
return False
def __repr__(self) -> str:
return f"<Register {self.name!r} at {hex(id(self))}>"
@final
| Register |
python | realpython__materials | python-constants/circle1.py | {
"start": 15,
"end": 381
} | class ____:
def __init__(self, radius):
self.radius = radius
def area(self):
return 3.14 * self.radius**2
def perimeter(self):
return 2 * 3.14 * self.radius
def projected_volume(self):
return 4 / 3 * 3.14 * self.radius**3
def __repr__(self):
return f"{self.__class__.__name__}(radius={self.radius})"
| Circle |
python | numpy__numpy | numpy/linalg/_linalg.py | {
"start": 2563,
"end": 114817
} | class ____(ValueError):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's ValueError
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def _raise_linalgerror_lstsq(err, flag):
raise LinAlgError("SVD did not converge in Linear Least Squares")
def _raise_linalgerror_qr(err, flag):
raise LinAlgError("Incorrect argument found while performing "
"QR factorization")
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_wrap__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single: single,
double: double,
csingle: single,
cdouble: double}
_complex_types_map = {single: csingle,
double: cdouble,
csingle: csingle,
cdouble: cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
type_ = a.dtype.type
if issubclass(type_, inexact):
if isComplexType(type_):
is_complex = True
rt = _realType(type_, default=None)
if rt is double:
result_type = double
elif rt is None:
# unsupported inexact scalar
raise TypeError(f"array type {a.dtype.name} is unsupported in linalg")
else:
result_type = double
if is_complex:
result_type = _complex_types_map[result_type]
return cdouble, result_type
else:
return double, result_type
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _assert_2d(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assert_stacked_2d(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assert_stacked_square(*arrays):
for a in arrays:
try:
m, n = a.shape[-2:]
except ValueError:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assert_finite(*arrays):
for a in arrays:
if not isfinite(a).all():
raise LinAlgError("Array must not contain infs or NaNs")
def _is_empty_2d(arr):
# check size first for efficiency
return arr.size == 0 and prod(arr.shape[-2:]) == 0
def transpose(a):
"""
Transpose each matrix in a stack of matrices.
Unlike np.transpose, this only swaps the last two axes, rather than all of
them
Parameters
----------
a : (...,M,N) array_like
Returns
-------
aT : (...,N,M) ndarray
"""
return swapaxes(a, -1, -2)
# Linear equations
def _tensorsolve_dispatcher(a, b, axes=None):
return (a, b)
@array_function_dispatch(_tensorsolve_dispatcher)
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=x.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> import numpy as np
>>> a = np.eye(2*3*4).reshape((2*3, 4, 2, 3, 4))
>>> rng = np.random.default_rng()
>>> b = rng.normal(size=(2*3, 4))
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an - b.ndim):]
prod = 1
for k in oldshape:
prod *= k
if a.size != prod ** 2:
raise LinAlgError(
"Input arrays must satisfy the requirement \
prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])"
)
a = a.reshape(prod, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def _solve_dispatcher(a, b):
return (a, b)
@array_function_dispatch(_solve_dispatcher)
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is (..., M) if b is
shape (M,) and (..., M, K) if b is (..., M, K), where the "..." part is
broadcasted between a and b.
Raises
------
LinAlgError
If `a` is singular or not square.
See Also
--------
scipy.linalg.solve : Similar function in SciPy.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine ``_gesv``.
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
.. versionchanged:: 2.0
The b array is only treated as a shape (M,) column vector if it is
exactly 1-dimensional. In all other instances it is treated as a stack
of (M, K) matrices. Previously b would be treated as a stack of (M,)
vectors if b.ndim was equal to a.ndim - 1.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations:
``x0 + 2 * x1 = 1`` and
``3 * x0 + 5 * x1 = 2``:
>>> import numpy as np
>>> a = np.array([[1, 2], [3, 5]])
>>> b = np.array([1, 2])
>>> x = np.linalg.solve(a, b)
>>> x
array([-1., 1.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assert_stacked_square(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
with errstate(call=_raise_linalgerror_singular, invalid='call',
over='ignore', divide='ignore', under='ignore'):
r = gufunc(a, b, signature=signature)
return wrap(r.astype(result_t, copy=False))
def _tensorinv_dispatcher(a, ind=None):
return (a,)
@array_function_dispatch(_tensorinv_dispatcher)
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> import numpy as np
>>> a = np.eye(4*6).reshape((4, 6, 8, 3))
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> rng = np.random.default_rng()
>>> b = rng.normal(size=(4, 6))
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6).reshape((24, 8, 3))
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> rng = np.random.default_rng()
>>> b = rng.normal(size=24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def _unary_dispatcher(a):
return (a,)
@array_function_dispatch(_unary_dispatcher)
def inv(a):
"""
Compute the inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``a @ ainv = ainv @ a = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
Inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
See Also
--------
scipy.linalg.inv : Similar function in SciPy.
numpy.linalg.cond : Compute the condition number of a matrix.
numpy.linalg.svd : Compute the singular value decomposition of a matrix.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
If `a` is detected to be singular, a `LinAlgError` is raised. If `a` is
ill-conditioned, a `LinAlgError` may or may not be raised, and results may
be inaccurate due to floating-point errors.
References
----------
.. [1] Wikipedia, "Condition number",
https://en.wikipedia.org/wiki/Condition_number
Examples
--------
>>> import numpy as np
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(a @ ainv, np.eye(2))
True
>>> np.allclose(ainv @ a, np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5 , -0.5 ]],
[[-1.25, 0.75],
[ 0.75, -0.25]]])
If a matrix is close to singular, the computed inverse may not satisfy
``a @ ainv = ainv @ a = eye(a.shape[0])`` even if a `LinAlgError`
is not raised:
>>> a = np.array([[2,4,6],[2,0,2],[6,8,14]])
>>> inv(a) # No errors raised
array([[-1.12589991e+15, -5.62949953e+14, 5.62949953e+14],
[-1.12589991e+15, -5.62949953e+14, 5.62949953e+14],
[ 1.12589991e+15, 5.62949953e+14, -5.62949953e+14]])
>>> a @ inv(a)
array([[ 0. , -0.5 , 0. ], # may vary
[-0.5 , 0.625, 0.25 ],
[ 0. , 0. , 1. ]])
To detect ill-conditioned matrices, you can use `numpy.linalg.cond` to
compute its *condition number* [1]_. The larger the condition number, the
more ill-conditioned the matrix is. As a rule of thumb, if the condition
number ``cond(a) = 10**k``, then you may lose up to ``k`` digits of
accuracy on top of what would be lost to the numerical method due to loss
of precision from arithmetic methods.
>>> from numpy.linalg import cond
>>> cond(a)
np.float64(8.659885634118668e+17) # may vary
It is also possible to detect ill-conditioning by inspecting the matrix's
singular values directly. The ratio between the largest and the smallest
singular value is the condition number:
>>> from numpy.linalg import svd
>>> sigma = svd(a, compute_uv=False) # Do not compute singular vectors
>>> sigma.max()/sigma.min()
8.659885634118668e+17 # may vary
"""
a, wrap = _makearray(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
with errstate(call=_raise_linalgerror_singular, invalid='call',
over='ignore', divide='ignore', under='ignore'):
ainv = _umath_linalg.inv(a, signature=signature)
return wrap(ainv.astype(result_t, copy=False))
def _matrix_power_dispatcher(a, n):
return (a,)
@array_function_dispatch(_matrix_power_dispatcher)
def matrix_power(a, n):
    """
    Raise a square matrix to the (integer) power `n`.

    Positive powers are evaluated by repeated matrix squaring and
    multiplication; ``n == 0`` yields the identity matrix of matching
    shape, and a negative ``n`` inverts the matrix first and then raises
    the inverse to ``abs(n)``.

    .. note:: Stacks of object matrices are not currently supported.

    Parameters
    ----------
    a : (..., M, M) array_like
        Matrix to be "powered".
    n : int
        The exponent; may be any integer, positive, negative, or zero.

    Returns
    -------
    a**n : (..., M, M) ndarray or matrix object
        Same shape and type as `a`. Element type matches `a` for
        non-negative exponents and is floating-point for negative ones.

    Raises
    ------
    LinAlgError
        For matrices that are not square or that (for negative powers)
        cannot be inverted numerically.

    Examples
    --------
    >>> import numpy as np
    >>> from numpy.linalg import matrix_power
    >>> i = np.array([[0, 1], [-1, 0]])  # matrix equiv. of the imaginary unit
    >>> matrix_power(i, 3)  # should = -i
    array([[ 0, -1],
           [ 1,  0]])
    >>> matrix_power(i, 0)
    array([[1, 0],
           [0, 1]])
    >>> matrix_power(i, -3)  # should = 1/(-i) = i, but w/ f.p. elements
    array([[ 0.,  1.],
           [-1.,  0.]])
    """
    a = asanyarray(a)
    _assert_stacked_square(a)

    try:
        n = operator.index(n)
    except TypeError as e:
        raise TypeError("exponent must be an integer") from e

    # matmul (implemented via einsum) cannot handle object dtype; a plain
    # 2-D object array falls back on dot, stacks of them are refused.
    if a.dtype != object:
        mm = matmul
    elif a.ndim == 2:
        mm = dot
    else:
        raise NotImplementedError(
            "matrix_power not supported for stacks of object arrays")

    if n == 0:
        # Identity of matching shape/dtype, broadcast into a fresh array.
        out = empty_like(a)
        out[...] = eye(a.shape[-2], dtype=a.dtype)
        return out
    if n < 0:
        a = inv(a)
        n = -n

    # Cheap shortcuts that skip the general loop below.
    if n == 1:
        return a
    if n == 2:
        return mm(a, a)
    if n == 3:
        return mm(mm(a, a), a)

    # Binary decomposition of n, LSB to MSB: `sq` holds `a` raised to
    # successive powers of two and is folded into `acc` for every set bit,
    # minimizing the number of matrix multiplications.
    sq = acc = None
    while n > 0:
        sq = a if sq is None else mm(sq, sq)
        n, bit = divmod(n, 2)
        if bit:
            acc = sq if acc is None else mm(acc, sq)
    return acc
# Cholesky decomposition
def _cholesky_dispatcher(a, /, *, upper=None):
return (a,)
@array_function_dispatch(_cholesky_dispatcher)
def cholesky(a, /, *, upper=False):
    """
    Cholesky decomposition.

    Return the lower or upper Cholesky factor of the square matrix ``a``:
    ``L`` with ``L @ L.H == a`` (``upper=False``, the default) or ``U``
    with ``U.H @ U == a``. Here ``.H`` is the conjugate transpose (the
    ordinary transpose for real input). ``a`` must be Hermitian
    (symmetric if real-valued) and positive-definite; no check is
    performed, and only the relevant triangle and the diagonal of ``a``
    are read. Only ``L`` or ``U`` is returned.

    Parameters
    ----------
    a : (..., M, M) array_like
        Hermitian (symmetric if all elements are real), positive-definite
        input matrix.
    upper : bool
        If ``True``, return the upper-triangular Cholesky factor;
        otherwise the lower-triangular one. Default: ``False``.

    Returns
    -------
    L : (..., M, M) array_like
        Lower or upper-triangular Cholesky factor of `a`. Returns a
        matrix object if `a` is a matrix object.

    Raises
    ------
    LinAlgError
        If the decomposition fails, for example, if `a` is not
        positive-definite.

    See Also
    --------
    scipy.linalg.cholesky : Similar function in SciPy.
    scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use
                              in `scipy.linalg.cho_solve`.

    Examples
    --------
    >>> import numpy as np
    >>> A = np.array([[1,-2j],[2j,5]])
    >>> L = np.linalg.cholesky(A)
    >>> L
    array([[1.+0.j, 0.+0.j],
           [0.+2.j, 1.+0.j]])
    >>> np.dot(L, L.T.conj())  # verify that L * L.H = A
    array([[1.+0.j, 0.-2.j],
           [0.+2.j, 5.+0.j]])
    """
    a, wrap = _makearray(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)
    sig = 'D->D' if isComplexType(t) else 'd->d'
    # Both triangular variants share the same gufunc signature.
    factor = _umath_linalg.cholesky_up if upper else _umath_linalg.cholesky_lo
    # Route LAPACK failure (non-positive-definite input) to LinAlgError.
    with errstate(call=_raise_linalgerror_nonposdef, invalid='call',
                  over='ignore', divide='ignore', under='ignore'):
        decomp = factor(a, signature=sig)
    return wrap(decomp.astype(result_t, copy=False))
# outer product
def _outer_dispatcher(x1, x2):
return (x1, x2)
@array_function_dispatch(_outer_dispatcher)
def outer(x1, x2, /):
"""
Compute the outer product of two vectors.
This function is Array API compatible. Compared to ``np.outer``
it accepts 1-dimensional inputs only.
Parameters
----------
x1 : (M,) array_like
One-dimensional input array of size ``N``.
Must have a numeric data type.
x2 : (N,) array_like
One-dimensional input array of size ``M``.
Must have a numeric data type.
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
outer
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.linalg.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.linalg.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.linalg.outer(x, [1, 2, 3])
array([['a', 'aa', 'aaa'],
['b', 'bb', 'bbb'],
['c', 'cc', 'ccc']], dtype=object)
"""
x1 = asanyarray(x1)
x2 = asanyarray(x2)
if x1.ndim != 1 or x2.ndim != 1:
raise ValueError(
"Input arrays must be one-dimensional, but they are "
f"{x1.ndim=} and {x2.ndim=}."
)
return _core_outer(x1, x2, out=None)
# QR decomposition
def _qr_dispatcher(a, mode=None):
return (a,)
@array_function_dispatch(_qr_dispatcher)
def qr(a, mode='reduced'):
    """
    Compute the qr factorization of a matrix.

    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
    upper-triangular.

    Parameters
    ----------
    a : array_like, shape (..., M, N)
        An array-like object with the dimensionality of at least 2.
    mode : {'reduced', 'complete', 'r', 'raw'}, optional, default: 'reduced'
        If K = min(M, N), then

        * 'reduced'  : returns Q, R with dimensions (..., M, K), (..., K, N)
        * 'complete' : returns Q, R with dimensions (..., M, M), (..., M, N)
        * 'r'        : returns R only with dimensions (..., K, N)
        * 'raw'      : returns h, tau with dimensions (..., N, M), (..., K,)

        The deprecated aliases 'full' (== 'reduced') and 'economic' are
        still accepted, also as single letters 'f'/'e', and emit a
        DeprecationWarning. Array h returned in 'raw' mode is transposed
        for calling Fortran.

    Returns
    -------
    Q : ndarray of float or complex, optional
        A matrix (or stack of matrices) with orthonormal columns; for
        mode = 'complete' it is orthogonal/unitary.
    R : ndarray of float or complex, optional
        The upper-triangular matrix or a stack of upper-triangular
        matrices.
    (h, tau) : ndarrays of np.double or np.cdouble, optional
        The Householder reflectors and their scaling factors ('raw'
        mode). In the deprecated 'economic' mode only h is returned.

    Raises
    ------
    LinAlgError
        If factoring fails.

    See Also
    --------
    scipy.linalg.qr : Similar function in SciPy.
    scipy.linalg.rq : Compute RQ decomposition of a matrix.

    Notes
    -----
    When mode is 'reduced' or 'complete', the result is a namedtuple
    with attributes ``Q`` and ``R``. This is an interface to the LAPACK
    routines ``dgeqrf``, ``zgeqrf``, ``dorgqr``, and ``zungqr``.
    Subclasses of `ndarray` are preserved except for the 'raw' mode.

    Examples
    --------
    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> a = rng.normal(size=(9, 6))
    >>> Q, R = np.linalg.qr(a)
    >>> np.allclose(a, np.dot(Q, R))  # a does equal QR
    True
    """
    # Normalize legacy mode spellings, warning on the deprecated ones.
    if mode not in ('reduced', 'complete', 'r', 'raw'):
        if mode in ('f', 'full'):
            # 2013-04-01, 1.8
            warnings.warn(
                "The 'full' option is deprecated in favor of 'reduced'.\n"
                "For backward compatibility let mode default.",
                DeprecationWarning, stacklevel=2)
            mode = 'reduced'
        elif mode in ('e', 'economic'):
            # 2013-04-01, 1.8
            warnings.warn("The 'economic' option is deprecated.",
                          DeprecationWarning, stacklevel=2)
            mode = 'economic'
        else:
            raise ValueError(f"Unrecognized mode '{mode}'")

    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    m, n = a.shape[-2:]
    t, result_t = _commonType(a)
    # LAPACK overwrites its input, hence the copy; it also requires
    # native byte order.
    a = _to_native_byte_order(a.astype(t, copy=True))
    k = min(m, n)

    # First LAPACK pass: Householder reflectors are stored in `a` itself,
    # the scaling factors land in `tau`.
    with errstate(call=_raise_linalgerror_qr, invalid='call',
                  over='ignore', divide='ignore', under='ignore'):
        tau = _umath_linalg.qr_r_raw(
            a, signature='D->D' if isComplexType(t) else 'd->d')

    # Modes that never materialize Q return straight away.
    if mode == 'r':
        return wrap(triu(a[..., :k, :]).astype(result_t, copy=False))
    if mode == 'raw':
        return (wrap(transpose(a).astype(result_t, copy=False)),
                tau.astype(result_t, copy=False))
    if mode == 'economic':
        return wrap(a.astype(result_t, copy=False))

    # Second pass materializes Q. Only 'complete' with m > n needs the
    # full square Q; otherwise the reduced K-column Q suffices.
    if mode == 'complete' and m > n:
        mc, gufunc = m, _umath_linalg.qr_complete
    else:
        mc, gufunc = k, _umath_linalg.qr_reduced
    with errstate(call=_raise_linalgerror_qr, invalid='call',
                  over='ignore', divide='ignore', under='ignore'):
        q = gufunc(a, tau, signature='DD->D' if isComplexType(t) else 'dd->d')
    r = triu(a[..., :mc, :])
    return QRResult(wrap(q.astype(result_t, copy=False)),
                    wrap(r.astype(result_t, copy=False)))
# Eigenvalues
@array_function_dispatch(_unary_dispatcher)
def eigvals(a):
    """
    Compute the eigenvalues of a general matrix.

    Main difference between `eigvals` and `eig`: the eigenvectors aren't
    returned.

    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues will be computed.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        They are not necessarily ordered, nor are they necessarily
        real for real matrices.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvalsh : eigenvalues of real symmetric or complex Hermitian
               (conjugate symmetric) arrays.
    eigh : eigenvalues and eigenvectors of real symmetric or complex
           Hermitian (conjugate symmetric) arrays.
    scipy.linalg.eigvals : Similar function in SciPy.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details. This is implemented using the ``_geev`` LAPACK routines.

    Examples
    --------
    >>> import numpy as np
    >>> from numpy import linalg as LA
    >>> D = np.diag((-1, 1))
    >>> LA.eigvals(D)
    array([-1.,  1.])
    """
    a, wrap = _makearray(a)
    _assert_stacked_square(a)
    _assert_finite(a)
    t, result_t = _commonType(a)
    with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence,
                  invalid='call', over='ignore', divide='ignore',
                  under='ignore'):
        w = _umath_linalg.eigvals(
            a, signature='D->D' if isComplexType(t) else 'd->D')
    # Real input whose eigenvalues all came out real is cast back down to
    # a real result type; everything else stays complex.
    if not isComplexType(t) and all(w.imag == 0):
        w = w.real
        result_t = _realType(result_t)
    else:
        result_t = _complexType(result_t)
    return w.astype(result_t, copy=False)
def _eigvalsh_dispatcher(a, UPLO=None):
return (a,)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigvalsh(a, UPLO='L'):
    """
    Compute the eigenvalues of a complex Hermitian or real symmetric matrix.

    Main difference from eigh: the eigenvectors are not computed.

    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues are to be
        computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
        Irrespective of this value only the real parts of the diagonal will
        be considered in the computation to preserve the notion of a
        Hermitian matrix; the imaginary part of the diagonal is always
        treated as zero.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigh : eigenvalues and eigenvectors of real symmetric or complex
           Hermitian (conjugate symmetric) arrays.
    eigvals : eigenvalues of general real or complex arrays.
    eig : eigenvalues and right eigenvectors of general real or complex
          arrays.
    scipy.linalg.eigvalsh : Similar function in SciPy.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details. Computed with the LAPACK routines ``_syevd``, ``_heevd``.

    Examples
    --------
    >>> import numpy as np
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> LA.eigvalsh(a)
    array([0.17157288, 5.82842712]) # may vary
    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")
    # Pick the gufunc matching the requested triangle.
    gufunc = (_umath_linalg.eigvalsh_lo if UPLO == 'L'
              else _umath_linalg.eigvalsh_up)

    a, wrap = _makearray(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)
    with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence,
                  invalid='call', over='ignore', divide='ignore',
                  under='ignore'):
        w = gufunc(a, signature='D->d' if isComplexType(t) else 'd->d')
    # Eigenvalues of a Hermitian matrix are real by construction.
    return w.astype(_realType(result_t), copy=False)
# Eigenvectors
@array_function_dispatch(_unary_dispatcher)
def eig(a):
    """
    Compute the eigenvalues and right eigenvectors of a square array.

    Parameters
    ----------
    a : (..., M, M) array
        Matrices for which the eigenvalues and right eigenvectors will
        be computed

    Returns
    -------
    A namedtuple with the following attributes:

    eigenvalues : (..., M) array
        The eigenvalues, each repeated according to its multiplicity,
        not necessarily ordered. The result is complex unless the
        imaginary part is zero, in which case it is cast to a real type.
    eigenvectors : (..., M, M) array
        The normalized (unit "length") eigenvectors, such that the
        column ``eigenvectors[:,i]`` is the eigenvector corresponding
        to the eigenvalue ``eigenvalues[i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvals : eigenvalues of a non-symmetric array.
    eigh : eigenvalues and eigenvectors of a real symmetric or complex
           Hermitian (conjugate symmetric) array.
    eigvalsh : eigenvalues of a real symmetric or complex Hermitian
               (conjugate symmetric) array.
    scipy.linalg.eig : Similar function in SciPy that also solves the
                       generalized eigenvalue problem.
    scipy.linalg.schur : Best choice for unitary and other non-Hermitian
                         normal matrices.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details. Implemented via the ``_geev`` LAPACK routines. The arrays
    satisfy ``a @ eigenvectors[:,i] = eigenvalues[i] * eigenvectors[:,i]``.
    The returned eigenvectors are the *right* eigenvectors and may not be
    of maximum rank.

    References
    ----------
    G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
    Academic Press, Inc., 1980, Various pp.

    Examples
    --------
    >>> import numpy as np
    >>> from numpy import linalg as LA
    >>> eigenvalues, eigenvectors = LA.eig(np.diag((1, 2, 3)))
    >>> eigenvalues
    array([1., 2., 3.])
    >>> eigenvectors
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])
    """
    a, wrap = _makearray(a)
    _assert_stacked_square(a)
    _assert_finite(a)
    t, result_t = _commonType(a)
    with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence,
                  invalid='call', over='ignore', divide='ignore',
                  under='ignore'):
        w, vt = _umath_linalg.eig(
            a, signature='D->DD' if isComplexType(t) else 'd->DD')

    # A real input whose spectrum came out purely real is demoted back to
    # a real result type; any nonzero imaginary part keeps it complex.
    real_result = not isComplexType(t) and all(w.imag == 0.0)
    if real_result:
        w = w.real
        vt = vt.real
        result_t = _realType(result_t)
    else:
        result_t = _complexType(result_t)

    return EigResult(w.astype(result_t, copy=False),
                     wrap(vt.astype(result_t, copy=False)))
@array_function_dispatch(_eigvalsh_dispatcher)
def eigh(a, UPLO='L'):
    """
    Return the eigenvalues and eigenvectors of a complex Hermitian
    (conjugate symmetric) or a real symmetric matrix.

    Returns two objects, a 1-D array containing the eigenvalues of `a`, and
    a 2-D square array or matrix (depending on the input type) of the
    corresponding eigenvectors (in columns).

    Parameters
    ----------
    a : (..., M, M) array
        Hermitian or real symmetric matrices whose eigenvalues and
        eigenvectors are to be computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
        Irrespective of this value only the real parts of the diagonal will
        be considered in the computation; the imaginary part of the
        diagonal is always treated as zero.

    Returns
    -------
    A namedtuple with the following attributes:

    eigenvalues : (..., M) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    eigenvectors : {(..., M, M) ndarray, (..., M, M) matrix}
        The column ``eigenvectors[:, i]`` is the normalized eigenvector
        corresponding to the eigenvalue ``eigenvalues[i]``. Will return a
        matrix object if `a` is a matrix object.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvalsh : eigenvalues of real symmetric or complex Hermitian
               (conjugate symmetric) arrays.
    eig : eigenvalues and right eigenvectors for non-symmetric arrays.
    eigvals : eigenvalues of non-symmetric arrays.
    scipy.linalg.eigh : Similar function in SciPy (but also solves the
                        generalized eigenvalue problem).

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details. Computed with the LAPACK routines ``_syevd``, ``_heevd``.
    The eigenvalues of real symmetric or complex Hermitian matrices are
    always real [1]_.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed.,
           Orlando, FL, Academic Press, Inc., 1980, pg. 222.

    Examples
    --------
    >>> import numpy as np
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> eigenvalues, eigenvectors = LA.eigh(a)
    >>> eigenvalues
    array([0.17157288, 5.82842712])
    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")

    a, wrap = _makearray(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)
    # Select the gufunc for the requested triangle.
    gufunc = _umath_linalg.eigh_lo if UPLO == 'L' else _umath_linalg.eigh_up
    with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence,
                  invalid='call', over='ignore', divide='ignore',
                  under='ignore'):
        w, vt = gufunc(a, signature='D->dD' if isComplexType(t) else 'd->dd')
    # Eigenvalues of a Hermitian matrix are real; eigenvectors keep the
    # (possibly complex) common type.
    return EighResult(w.astype(_realType(result_t), copy=False),
                      wrap(vt.astype(result_t, copy=False)))
# Singular value decomposition
def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None):
return (a,)
@array_function_dispatch(_svd_dispatcher)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
"""
Singular Value Decomposition.
When `a` is a 2D array, and ``full_matrices=False``, then it is
factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where
`u` and the Hermitian transpose of `vh` are 2D arrays with
orthonormal columns and `s` is a 1D array of `a`'s singular
values. When `a` is higher-dimensional, SVD is applied in
stacked mode as explained below.
Parameters
----------
a : (..., M, N) array_like
A real or complex array with ``a.ndim >= 2``.
full_matrices : bool, optional
If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
``(..., N, N)``, respectively. Otherwise, the shapes are
``(..., M, K)`` and ``(..., K, N)``, respectively, where
``K = min(M, N)``.
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
Returns
-------
U : { (..., M, M), (..., M, K) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
S : (..., K) array
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
Vh : { (..., N, N), (..., K, N) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
See Also
--------
scipy.linalg.svd : Similar function in SciPy.
scipy.linalg.svdvals : Compute singular values of a matrix.
Notes
-----
When `compute_uv` is True, the result is a namedtuple with the following
attribute names: `U`, `S`, and `Vh`.
The decomposition is performed using LAPACK routine ``_gesdd``.
SVD is usually described for the factorization of a 2D matrix :math:`A`.
The higher-dimensional case will be discussed below. In the 2D case, SVD is
written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
:math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
contains the singular values of `a` and `u` and `vh` are unitary. The rows
of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
the eigenvectors of :math:`A A^H`. In both cases the corresponding
(possibly non-zero) eigenvalues are given by ``s**2``.
If `a` has more than two dimensions, then broadcasting rules apply, as
explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
working in "stacked" mode: it iterates over all indices of the first
``a.ndim - 2`` dimensions and for each combination SVD is applied to the
last two indices. The matrix `a` can be reconstructed from the
decomposition with either ``(u * s[..., None, :]) @ vh`` or
``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
function ``np.matmul`` for python versions below 3.5.)
If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
all the return values.
Examples
--------
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> a = rng.normal(size=(9, 6)) + 1j*rng.normal(size=(9, 6))
>>> b = rng.normal(size=(2, 7, 8, 3)) + 1j*rng.normal(size=(2, 7, 8, 3))
Reconstruction based on full SVD, 2D case:
>>> U, S, Vh = np.linalg.svd(a, full_matrices=True)
>>> U.shape, S.shape, Vh.shape
((9, 9), (6,), (6, 6))
>>> np.allclose(a, np.dot(U[:, :6] * S, Vh))
True
>>> smat = np.zeros((9, 6), dtype=complex)
>>> smat[:6, :6] = np.diag(S)
>>> np.allclose(a, np.dot(U, np.dot(smat, Vh)))
True
Reconstruction based on reduced SVD, 2D case:
>>> U, S, Vh = np.linalg.svd(a, full_matrices=False)
>>> U.shape, S.shape, Vh.shape
((9, 6), (6,), (6, 6))
>>> np.allclose(a, np.dot(U * S, Vh))
True
>>> smat = np.diag(S)
>>> np.allclose(a, np.dot(U, np.dot(smat, Vh)))
True
Reconstruction based on full SVD, 4D case:
>>> U, S, Vh = np.linalg.svd(b, full_matrices=True)
>>> U.shape, S.shape, Vh.shape
((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(U[..., :3] * S[..., None, :], Vh))
True
>>> np.allclose(b, np.matmul(U[..., :3], S[..., None] * Vh))
True
Reconstruction based on reduced SVD, 4D case:
>>> U, S, Vh = np.linalg.svd(b, full_matrices=False)
>>> U.shape, S.shape, Vh.shape
((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(U * S[..., None, :], Vh))
True
>>> np.allclose(b, np.matmul(U, S[..., None] * Vh))
True
"""
import numpy as np
a, wrap = _makearray(a)
if hermitian:
# note: lapack svd returns eigenvalues with s ** 2 sorted descending,
# but eig returns s sorted ascending, so we re-order the eigenvalues
# and related arrays to have the correct order
if compute_uv:
s, u = eigh(a)
sgn = sign(s)
s = abs(s)
sidx = argsort(s)[..., ::-1]
sgn = np.take_along_axis(sgn, sidx, axis=-1)
s = np.take_along_axis(s, sidx, axis=-1)
u = np.take_along_axis(u, sidx[..., None, :], axis=-1)
# singular values are unsigned, move the sign into v
vt = transpose(u * sgn[..., None, :]).conjugate()
return SVDResult(wrap(u), s, wrap(vt))
else:
s = eigvalsh(a)
s = abs(s)
return sort(s)[..., ::-1]
_assert_stacked_2d(a)
t, result_t = _commonType(a)
m, n = a.shape[-2:]
if compute_uv:
if full_matrices:
gufunc = _umath_linalg.svd_f
else:
gufunc = _umath_linalg.svd_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
with errstate(call=_raise_linalgerror_svd_nonconvergence,
invalid='call', over='ignore', divide='ignore',
under='ignore'):
u, s, vh = gufunc(a, signature=signature)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vh = vh.astype(result_t, copy=False)
return SVDResult(wrap(u), s, wrap(vh))
else:
signature = 'D->d' if isComplexType(t) else 'd->d'
with errstate(call=_raise_linalgerror_svd_nonconvergence,
invalid='call', over='ignore', divide='ignore',
under='ignore'):
s = _umath_linalg.svd(a, signature=signature)
s = s.astype(_realType(result_t), copy=False)
return s
def _svdvals_dispatcher(x):
return (x,)
@array_function_dispatch(_svdvals_dispatcher)
def svdvals(x, /):
"""
Returns the singular values of a matrix (or a stack of matrices) ``x``.
When x is a stack of matrices, the function will compute the singular
values for each matrix in the stack.
This function is Array API compatible.
Calling ``np.svdvals(x)`` to get singular values is the same as
``np.svd(x, compute_uv=False, hermitian=False)``.
Parameters
----------
x : (..., M, N) array_like
Input array having shape (..., M, N) and whose last two
dimensions form matrices on which to perform singular value
decomposition. Should have a floating-point data type.
Returns
-------
out : ndarray
An array with shape (..., K) that contains the vector(s)
of singular values of length K, where K = min(M, N).
See Also
--------
scipy.linalg.svdvals : Compute singular values of a matrix.
Examples
--------
>>> np.linalg.svdvals([[1, 2, 3, 4, 5],
... [1, 4, 9, 16, 25],
... [1, 8, 27, 64, 125]])
array([146.68862757, 5.57510612, 0.60393245])
Determine the rank of a matrix using singular values:
>>> s = np.linalg.svdvals([[1, 2, 3],
... [2, 4, 6],
... [-1, 1, -1]]); s
array([8.38434191e+00, 1.64402274e+00, 2.31534378e-16])
>>> np.count_nonzero(s > 1e-10) # Matrix of rank 2
2
"""
return svd(x, compute_uv=False, hermitian=False)
def _cond_dispatcher(x, p=None):
return (x,)
@array_function_dispatch(_cond_dispatcher)
def cond(x, p=None):
    """
    Compute the condition number of a matrix.
    This function is capable of returning the condition number using
    one of seven different norms, depending on the value of `p` (see
    Parameters below).
    Parameters
    ----------
    x : (..., M, N) array_like
        The matrix whose condition number is sought.
    p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
        Order of the norm used in the condition number computation:
        =====  ============================
        p      norm for matrices
        =====  ============================
        None   2-norm, computed directly using the ``SVD``
        'fro'  Frobenius norm
        inf    max(sum(abs(x), axis=1))
        -inf   min(sum(abs(x), axis=1))
        1      max(sum(abs(x), axis=0))
        -1     min(sum(abs(x), axis=0))
        2      2-norm (largest sing. value)
        -2     smallest singular value
        =====  ============================
        inf means the `numpy.inf` object, and the Frobenius norm is
        the root-of-sum-of-squares norm.
    Returns
    -------
    c : {float, inf}
        The condition number of the matrix. May be infinite.
    See Also
    --------
    numpy.linalg.norm
    Notes
    -----
    The condition number of `x` is defined as the norm of `x` times the
    norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
    (root-of-sum-of-squares) or one of a number of other matrix norms.
    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
           Academic Press, Inc., 1980, pg. 285.
    Examples
    --------
    >>> import numpy as np
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
    >>> a
    array([[ 1, 0, -1],
           [ 0, 1, 0],
           [ 1, 0, 1]])
    >>> LA.cond(a)
    1.4142135623730951
    >>> LA.cond(a, 'fro')
    3.1622776601683795
    >>> LA.cond(a, np.inf)
    2.0
    >>> LA.cond(a, -np.inf)
    1.0
    >>> LA.cond(a, 1)
    2.0
    >>> LA.cond(a, -1)
    1.0
    >>> LA.cond(a, 2)
    1.4142135623730951
    >>> LA.cond(a, -2)
    0.70710678118654746 # may vary
    >>> (min(LA.svd(a, compute_uv=False)) *
    ... min(LA.svd(LA.inv(a), compute_uv=False)))
    0.70710678118654746 # may vary
    """
    x = asarray(x)  # in case we have a matrix
    if _is_empty_2d(x):
        raise LinAlgError("cond is not defined on empty arrays")
    if p is None or p in {2, -2}:
        # 2-norm orders come straight from the ratio of extreme singular
        # values; no inverse needs to be formed.
        s = svd(x, compute_uv=False)
        with errstate(all='ignore'):
            # A singular matrix divides by zero here, producing inf/nan,
            # which is handled by the nan-masking below.
            if p == -2:
                r = s[..., -1] / s[..., 0]
            else:
                r = s[..., 0] / s[..., -1]
    else:
        # Call inv(x) ignoring errors. The result array will
        # contain nans in the entries where inversion failed.
        _assert_stacked_square(x)
        t, result_t = _commonType(x)
        result_t = _realType(result_t)  # condition number is always real
        signature = 'D->D' if isComplexType(t) else 'd->d'
        with errstate(all='ignore'):
            invx = _umath_linalg.inv(x, signature=signature)
            r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
        r = r.astype(result_t, copy=False)
    # Convert nans to infs unless the original array had nan entries
    # (a failed inversion means an infinite condition number, but nans
    # already present in the input should propagate unchanged).
    nan_mask = isnan(r)
    if nan_mask.any():
        nan_mask &= ~isnan(x).any(axis=(-2, -1))
        if r.ndim > 0:
            r[nan_mask] = inf
        elif nan_mask:
            # Convention is to return scalars instead of 0d arrays.
            r = r.dtype.type(inf)
    return r
def _matrix_rank_dispatcher(A, tol=None, hermitian=None, *, rtol=None):
return (A,)
@array_function_dispatch(_matrix_rank_dispatcher)
def matrix_rank(A, tol=None, hermitian=False, *, rtol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of singular values of the array that are
greater than `tol`.
Parameters
----------
A : {(M,), (..., M, N)} array_like
Input vector or stack of matrices.
tol : (...) array_like, float, optional
Threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M, N) * eps``.
hermitian : bool, optional
If True, `A` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
rtol : (...) array_like, float, optional
Parameter for the relative tolerance component. Only ``tol`` or
``rtol`` can be set at a time. Defaults to ``max(M, N) * eps``.
.. versionadded:: 2.0.0
Returns
-------
rank : (...) array_like
Rank of A.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `A`. By default, we identify singular values
less than ``S.max() * max(M, N) * eps`` as indicating rank deficiency
(with the symbols defined above). This is the algorithm MATLAB uses [1]_.
It also appears in *Numerical recipes* in the discussion of SVD solutions
for linear least squares [2]_.
This default threshold is designed to detect rank deficiency accounting
for the numerical errors of the SVD computation. Imagine that there
is a column in `A` that is an exact (in floating point) linear combination
of other columns in `A`. Computing the SVD on `A` will not produce
a singular value exactly equal to 0 in general: any difference of
the smallest SVD value from 0 will be caused by numerical imprecision
in the calculation of the SVD. Our threshold for small SVD values takes
this numerical imprecision into account, and the default threshold will
detect such numerical rank deficiency. The threshold may declare a matrix
`A` rank deficient even if the linear combination of some columns of `A`
is not exactly equal to another column of `A` but only numerically very
close to another column of `A`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about
the sources of error in `A` that would make you consider other tolerance
values to detect *effective* rank deficiency. The most useful measure
of the tolerance depends on the operations you intend to use on your
matrix. For example, if your data come from uncertain measurements with
uncertainties greater than floating point epsilon, choosing a tolerance
near that uncertainty may be preferable. The tolerance may be absolute
if the uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
https://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> import numpy as np
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
if rtol is not None and tol is not None:
raise ValueError("`tol` and `rtol` can't be both set.")
A = asarray(A)
if A.ndim < 2:
return int(not all(A == 0))
S = svd(A, compute_uv=False, hermitian=hermitian)
if tol is None:
if rtol is None:
rtol = max(A.shape[-2:]) * finfo(S.dtype).eps
else:
rtol = asarray(rtol)[..., newaxis]
tol = S.max(axis=-1, keepdims=True) * rtol
else:
tol = asarray(tol)[..., newaxis]
return count_nonzero(S > tol, axis=-1)
# Generalized inverse
def _pinv_dispatcher(a, rcond=None, hermitian=None, *, rtol=None):
return (a,)
@array_function_dispatch(_pinv_dispatcher)
def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.
    Calculate the generalized inverse of a matrix using its
    singular-value decomposition (SVD) and including all
    *large* singular values.
    Parameters
    ----------
    a : (..., M, N) array_like
        Matrix or stack of matrices to be pseudo-inverted.
    rcond : (...) array_like of float, optional
        Cutoff for small singular values.
        Singular values less than or equal to
        ``rcond * largest_singular_value`` are set to zero.
        Broadcasts against the stack of matrices. Default: ``1e-15``.
    hermitian : bool, optional
        If True, `a` is assumed to be Hermitian (symmetric if real-valued),
        enabling a more efficient method for finding singular values.
        Defaults to False.
    rtol : (...) array_like of float, optional
        Same as `rcond`, but it's an Array API compatible parameter name.
        Only `rcond` or `rtol` can be set at a time. If none of them are
        provided then NumPy's ``1e-15`` default is used. If ``rtol=None``
        is passed then the API standard default is used.
        .. versionadded:: 2.0.0
    Returns
    -------
    B : (..., N, M) ndarray
        The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
        is `B`.
    Raises
    ------
    LinAlgError
        If the SVD computation does not converge.
    See Also
    --------
    scipy.linalg.pinv : Similar function in SciPy.
    scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a
                         Hermitian matrix.
    Notes
    -----
    The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
    defined as: "the matrix that 'solves' [the least-squares problem]
    :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
    :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
    It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
    value decomposition of A, then
    :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
    orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
    of A's so-called singular values, (followed, typically, by
    zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
    consisting of the reciprocals of A's singular values
    (again, followed by zeros). [1]_
    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pp. 139-142.
    Examples
    --------
    The following example checks that ``a * a+ * a == a`` and
    ``a+ * a * a+ == a+``:
    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> a = rng.normal(size=(9, 6))
    >>> B = np.linalg.pinv(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    """
    a, wrap = _makearray(a)
    # Resolve the cutoff: `rtol` uses the `_NoValue` sentinel so that an
    # explicit `rtol=None` (the Array API default) can be distinguished
    # from "rtol not passed at all" (NumPy's legacy 1e-15 default).
    if rcond is None:
        if rtol is _NoValue:
            rcond = 1e-15
        elif rtol is None:
            rcond = max(a.shape[-2:]) * finfo(a.dtype).eps
        else:
            rcond = rtol
    elif rtol is not _NoValue:
        raise ValueError("`rtol` and `rcond` can't be both set.")
    else:
        # NOTE: Deprecate `rcond` in a few versions.
        pass
    rcond = asarray(rcond)
    if _is_empty_2d(a):
        # The pseudo-inverse of an empty (M, N) matrix is an empty (N, M)
        # matrix; skip the SVD entirely.
        m, n = a.shape[-2:]
        res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
        return wrap(res)
    # Conjugate up front so that the plain transposes below together
    # amount to the conjugate transposes in A+ = V S+ U^H.
    a = a.conjugate()
    u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)
    # discard small singular values
    cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
    large = s > cutoff
    # Invert the retained singular values in place; the rest become 0.
    s = divide(1, s, where=large, out=s)
    s[~large] = 0
    res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
    return wrap(res)
# Determinant
@array_function_dispatch(_unary_dispatcher)
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
A namedtuple with the following attributes:
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logabsdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logabsdet`
will be -inf. In all cases, the determinant is equal to
``sign * np.exp(logabsdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> import numpy as np
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logabsdet) = np.linalg.slogdet(a)
>>> (sign, logabsdet)
(-1, 0.69314718055994529) # may vary
>>> sign * np.exp(logabsdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logabsdet = np.linalg.slogdet(a)
>>> (sign, logabsdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logabsdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
sign = sign.astype(result_t, copy=False)
logdet = logdet.astype(real_t, copy=False)
return SlogdetResult(sign, logdet)
@array_function_dispatch(_unary_dispatcher)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
scipy.linalg.det : Similar function in SciPy.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> import numpy as np
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0 # may vary
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def _lstsq_dispatcher(a, b, rcond=None):
return (a, b)
@array_function_dispatch(_lstsq_dispatcher)
def lstsq(a, b, rcond=None):
    r"""
    Return the least-squares solution to a linear matrix equation.
    Computes the vector `x` that approximately solves the equation
    ``a @ x = b``. The equation may be under-, well-, or over-determined
    (i.e., the number of linearly independent rows of `a` can be less than,
    equal to, or greater than its number of linearly independent columns).
    If `a` is square and of full rank, then `x` (but for round-off error)
    is the "exact" solution of the equation. Else, `x` minimizes the
    Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing
    solutions, the one with the smallest 2-norm :math:`||x||` is returned.
    Parameters
    ----------
    a : (M, N) array_like
        "Coefficient" matrix.
    b : {(M,), (M, K)} array_like
        Ordinate or "dependent variable" values. If `b` is two-dimensional,
        the least-squares solution is calculated for each of the `K` columns
        of `b`.
    rcond : float, optional
        Cut-off ratio for small singular values of `a`.
        For the purposes of rank determination, singular values are treated
        as zero if they are smaller than `rcond` times the largest singular
        value of `a`.
        The default uses the machine precision times ``max(M, N)``. Passing
        ``-1`` will use machine precision.
        .. versionchanged:: 2.0
            Previously, the default was ``-1``, but a warning was given that
            this would change.
    Returns
    -------
    x : {(N,), (N, K)} ndarray
        Least-squares solution. If `b` is two-dimensional,
        the solutions are in the `K` columns of `x`.
    residuals : {(1,), (K,), (0,)} ndarray
        Sums of squared residuals: Squared Euclidean 2-norm for each column in
        ``b - a @ x``.
        If the rank of `a` is < N or M <= N, this is an empty array.
        If `b` is 1-dimensional, this is a (1,) shape array.
        Otherwise the shape is (K,).
    rank : int
        Rank of matrix `a`.
    s : (min(M, N),) ndarray
        Singular values of `a`.
    Raises
    ------
    LinAlgError
        If computation does not converge.
    See Also
    --------
    scipy.linalg.lstsq : Similar function in SciPy.
    Notes
    -----
    If `b` is a matrix, then all array results are returned as matrices.
    Examples
    --------
    Fit a line, ``y = mx + c``, through some noisy data-points:
    >>> import numpy as np
    >>> x = np.array([0, 1, 2, 3])
    >>> y = np.array([-1, 0.2, 0.9, 2.1])
    By examining the coefficients, we see that the line should have a
    gradient of roughly 1 and cut the y-axis at, more or less, -1.
    We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
    and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
    >>> A = np.vstack([x, np.ones(len(x))]).T
    >>> A
    array([[ 0.,  1.],
           [ 1.,  1.],
           [ 2.,  1.],
           [ 3.,  1.]])
    >>> m, c = np.linalg.lstsq(A, y)[0]
    >>> m, c
    (1.0 -0.95) # may vary
    Plot the data along with the fitted line:
    >>> import matplotlib.pyplot as plt
    >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
    >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
    >>> _ = plt.legend()
    >>> plt.show()
    """
    a, _ = _makearray(a)
    b, wrap = _makearray(b)
    is_1d = b.ndim == 1
    # Promote a 1-D right-hand side to a single column; the extra axis is
    # squeezed away again before returning.
    if is_1d:
        b = b[:, newaxis]
    _assert_2d(a, b)
    m, n = a.shape[-2:]
    m2, n_rhs = b.shape[-2:]
    if m != m2:
        raise LinAlgError('Incompatible dimensions')
    t, result_t = _commonType(a, b)
    result_real_t = _realType(result_t)
    if rcond is None:
        rcond = finfo(t).eps * max(n, m)
    signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
    if n_rhs == 0:
        # lapack can't handle n_rhs = 0 - so allocate
        # the array one larger in that axis
        b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
    with errstate(call=_raise_linalgerror_lstsq, invalid='call',
                  over='ignore', divide='ignore', under='ignore'):
        x, resids, rank, s = _umath_linalg.lstsq(a, b, rcond,
                                                 signature=signature)
    if m == 0:
        # No equations: the minimum-norm solution is identically zero.
        x[...] = 0
    if n_rhs == 0:
        # remove the item we added
        x = x[..., :n_rhs]
        resids = resids[..., :n_rhs]
    # remove the axis we added
    if is_1d:
        x = x.squeeze(axis=-1)
        # we probably should squeeze resids too, but we can't
        # without breaking compatibility.
    # as documented: residuals are empty when `a` is rank deficient or
    # the system is not over-determined
    if rank != n or m <= n:
        resids = array([], result_real_t)
    # coerce output arrays
    s = s.astype(result_real_t, copy=False)
    resids = resids.astype(result_real_t, copy=False)
    # Copying lets the memory in r_parts be freed
    x = x.astype(result_t, copy=True)
    return wrap(x), wrap(resids), rank, s
def _multi_svd_norm(x, row_axis, col_axis, op, initial=None):
    """Compute a function of the singular values of the 2-D matrices in `x`.
    This is a private utility function used by `numpy.linalg.norm()`.
    Parameters
    ----------
    x : ndarray
    row_axis, col_axis : int
        The axes of `x` that hold the 2-D matrices.
    op : callable
        This should be either numpy.amin or `numpy.amax` or `numpy.sum`.
    initial : scalar, optional
        Forwarded as the ``initial`` argument of `op`, so the reduction
        has a well-defined starting value (e.g. 0 for max/sum).
    Returns
    -------
    result : float or ndarray
        If `x` is 2-D, the return values is a float.
        Otherwise, it is an array with ``x.ndim - 2`` dimensions.
        The return values are either the minimum or maximum or sum of the
        singular values of the matrices, depending on whether `op`
        is `numpy.amin` or `numpy.amax` or `numpy.sum`.
    """
    # Move the matrix axes to the end so svd broadcasts over the stack,
    # then reduce each matrix's singular values with `op`.
    y = moveaxis(x, (row_axis, col_axis), (-2, -1))
    result = op(svd(y, compute_uv=False), axis=-1, initial=initial)
    return result
def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
return (x,)
@array_function_dispatch(_norm_dispatcher)
def norm(x, ord=None, axis=None, keepdims=False):
    """
    Matrix or vector norm.
    This function is able to return one of eight different matrix norms,
    or one of an infinite number of vector norms (described below), depending
    on the value of the ``ord`` parameter.
    Parameters
    ----------
    x : array_like
        Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
        is None. If both `axis` and `ord` are None, the 2-norm of
        ``x.ravel`` will be returned.
    ord : {int, float, inf, -inf, 'fro', 'nuc'}, optional
        Order of the norm (see table under ``Notes`` for what values are
        supported for matrices and vectors respectively). inf means numpy's
        `inf` object. The default is None.
    axis : {None, int, 2-tuple of ints}, optional.
        If `axis` is an integer, it specifies the axis of `x` along which to
        compute the vector norms. If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed. If `axis` is None then either a vector norm (when `x`
        is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
        is None.
    keepdims : bool, optional
        If this is set to True, the axes which are normed over are left in the
        result as dimensions with size one. With this option the result will
        broadcast correctly against the original `x`.
    Returns
    -------
    n : float or ndarray
        Norm of the matrix or vector(s).
    See Also
    --------
    scipy.linalg.norm : Similar function in SciPy.
    Notes
    -----
    For values of ``ord < 1``, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various numerical
    purposes.
    The following norms can be calculated:
    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    'nuc'  nuclear norm                  --
    inf    max(sum(abs(x), axis=1))      max(abs(x))
    -inf   min(sum(abs(x), axis=1))      min(abs(x))
    0      --                            sum(x != 0)
    1      max(sum(abs(x), axis=0))      as below
    -1     min(sum(abs(x), axis=0))      as below
    2      2-norm (largest sing. value)  as below
    -2     smallest singular value       as below
    other  --                            sum(abs(x)**ord)**(1./ord)
    =====  ============================  ==========================
    The Frobenius norm is given by [1]_:
    :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
    The nuclear norm is the sum of the singular values.
    Both the Frobenius and nuclear norm orders are only defined for
    matrices and raise a ValueError when ``x.ndim != 2``.
    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
    Examples
    --------
    >>> import numpy as np
    >>> from numpy import linalg as LA
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, ...,  2,  3,  4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1,  0,  1],
           [ 2,  3,  4]])
    >>> LA.norm(a)
    7.745966692414834
    >>> LA.norm(b)
    7.745966692414834
    >>> LA.norm(b, 'fro')
    7.745966692414834
    >>> LA.norm(a, np.inf)
    4.0
    >>> LA.norm(b, np.inf)
    9.0
    >>> LA.norm(a, -np.inf)
    0.0
    >>> LA.norm(b, -np.inf)
    2.0
    >>> LA.norm(a, 1)
    20.0
    >>> LA.norm(b, 1)
    7.0
    >>> LA.norm(a, -1)
    -4.6566128774142013e-010
    >>> LA.norm(b, -1)
    6.0
    >>> LA.norm(a, 2)
    7.745966692414834
    >>> LA.norm(b, 2)
    7.3484692283495345
    >>> LA.norm(a, -2)
    0.0
    >>> LA.norm(b, -2)
    1.8570331885190563e-016 # may vary
    >>> LA.norm(a, 3)
    5.8480354764257312 # may vary
    >>> LA.norm(a, -3)
    0.0
    Using the `axis` argument to compute vector norms:
    >>> c = np.array([[ 1, 2, 3],
    ...               [-1, 1, 4]])
    >>> LA.norm(c, axis=0)
    array([ 1.41421356,  2.23606798,  5.        ])
    >>> LA.norm(c, axis=1)
    array([ 3.74165739,  4.24264069])
    >>> LA.norm(c, ord=1, axis=1)
    array([ 6.,  6.])
    Using the `axis` argument to compute matrix norms:
    >>> m = np.arange(8).reshape(2,2,2)
    >>> LA.norm(m, axis=(1,2))
    array([  3.74165739,  11.22497216])
    >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
    (3.7416573867739413, 11.224972160321824)
    """
    x = asarray(x)
    # Integer/bool input cannot represent the (generally non-integral)
    # norm value, so promote to float up front.
    if not issubclass(x.dtype.type, (inexact, object_)):
        x = x.astype(float)
    # Immediately handle some default, simple, fast, and common cases.
    if axis is None:
        ndim = x.ndim
        if (
            (ord is None) or
            (ord in ('f', 'fro') and ndim == 2) or
            (ord == 2 and ndim == 1)
        ):
            # Fast path: the Euclidean/Frobenius norm of the flattened
            # array, computed as sqrt of a dot product.
            x = x.ravel(order='K')
            if isComplexType(x.dtype.type):
                # sqrt(sum(|z|^2)) via the real and imaginary parts,
                # avoiding a complex-valued intermediate.
                x_real = x.real
                x_imag = x.imag
                sqnorm = x_real.dot(x_real) + x_imag.dot(x_imag)
            else:
                sqnorm = x.dot(x)
            ret = sqrt(sqnorm)
            if keepdims:
                ret = ret.reshape(ndim * [1])
            return ret
    # Normalize the `axis` argument to a tuple.
    nd = x.ndim
    if axis is None:
        axis = tuple(range(nd))
    elif not isinstance(axis, tuple):
        try:
            axis = int(axis)
        except Exception as e:
            raise TypeError(
                "'axis' must be None, an integer or a tuple of integers"
            ) from e
        axis = (axis,)
    if len(axis) == 1:
        # Vector norms along a single axis.
        if ord == inf:
            # initial=0 keeps max() well-defined for empty axes.
            return abs(x).max(axis=axis, keepdims=keepdims, initial=0)
        elif ord == -inf:
            return abs(x).min(axis=axis, keepdims=keepdims)
        elif ord == 0:
            # Zero norm
            return (
                (x != 0)
                .astype(x.real.dtype)
                .sum(axis=axis, keepdims=keepdims)
            )
        elif ord == 1:
            # special case for speedup
            return add.reduce(abs(x), axis=axis, keepdims=keepdims)
        elif ord is None or ord == 2:
            # special case for speedup
            s = (x.conj() * x).real
            return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
        # None of the str-type keywords for ord ('fro', 'nuc')
        # are valid for vectors
        elif isinstance(ord, str):
            raise ValueError(f"Invalid norm order '{ord}' for vectors")
        else:
            # General p-norm: sum(|x|**ord) ** (1/ord).
            absx = abs(x)
            absx **= ord
            ret = add.reduce(absx, axis=axis, keepdims=keepdims)
            ret **= reciprocal(ord, dtype=ret.dtype)
            return ret
    elif len(axis) == 2:
        # Matrix norms over a pair of axes.
        row_axis, col_axis = axis
        row_axis = normalize_axis_index(row_axis, nd)
        col_axis = normalize_axis_index(col_axis, nd)
        if row_axis == col_axis:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amax, 0)
        elif ord == -2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            # The inner reduce removes one axis, so the index of the
            # remaining axis shifts down when it came after the reduced one.
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis, initial=0)
        elif ord == inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis, initial=0)
        elif ord == -1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
        elif ord == -inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
        elif ord in [None, 'fro', 'f']:
            ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
        elif ord == 'nuc':
            # Nuclear norm: sum of singular values.
            ret = _multi_svd_norm(x, row_axis, col_axis, sum, 0)
        else:
            raise ValueError("Invalid norm order for matrices.")
        if keepdims:
            ret_shape = list(x.shape)
            ret_shape[axis[0]] = 1
            ret_shape[axis[1]] = 1
            ret = ret.reshape(ret_shape)
        return ret
    else:
        raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def _multidot_dispatcher(arrays, *, out=None):
yield from arrays
yield out
@array_function_dispatch(_multidot_dispatcher)
def multi_dot(arrays, *, out=None):
    """
    Compute the dot product of two or more arrays in a single function call,
    while automatically selecting the fastest evaluation order.

    `multi_dot` chains `numpy.dot` and picks the parenthesization that
    minimizes the total number of scalar multiplications [1]_ [2]_, which
    can speed up the computation considerably for badly shaped chains.

    If the first argument is 1-D it is treated as a row vector; if the last
    argument is 1-D it is treated as a column vector.  All other arguments
    must be 2-D.

    Parameters
    ----------
    arrays : sequence of array_like
        The matrices to multiply, in order.
    out : ndarray, optional
        Output argument.  This must have the exact kind that would be
        returned if it was not used (right type, C-contiguous, dtype of
        ``dot(a, b)``).  This is a performance feature; an exception is
        raised when the conditions are not met.

    Returns
    -------
    output : ndarray
        The dot product of the supplied arrays.

    See Also
    --------
    numpy.dot : dot multiplication with two arguments.

    References
    ----------
    .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
    .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication

    Examples
    --------
    >>> import numpy as np
    >>> from numpy.linalg import multi_dot
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> _ = multi_dot([A, B, C])
    """
    count = len(arrays)
    if count < 2:
        raise ValueError("Expecting at least two arrays.")
    if count == 2:
        # Nothing to optimize for exactly two operands.
        return dot(arrays[0], arrays[1], out=out)

    operands = [asanyarray(a) for a in arrays]

    # Remember the original dimensionality of the end points so the result
    # can be squeezed back into the caller's expected shape afterwards.
    first_was_1d = operands[0].ndim == 1
    last_was_1d = operands[-1].ndim == 1
    if first_was_1d:
        operands[0] = atleast_2d(operands[0])        # row vector
    if last_was_1d:
        operands[-1] = atleast_2d(operands[-1]).T    # column vector
    _assert_2d(*operands)

    if count == 3:
        # Direct cost comparison is much faster than the general
        # matrix-chain dynamic program for exactly three factors.
        result = _multi_dot_three(operands[0], operands[1], operands[2],
                                  out=out)
    else:
        order = _multi_dot_matrix_chain_order(operands)
        result = _multi_dot(operands, order, 0, count - 1, out=out)

    # Restore the caller-facing shape.
    if first_was_1d and last_was_1d:
        return result[0, 0]    # scalar
    if first_was_1d or last_was_1d:
        return result.ravel()  # 1-D
    return result
def _multi_dot_three(A, B, C, out=None):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C, out=out)
else:
return dot(A, dot(B, C), out=out)
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = inf
for k in range(i, j):
q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j, out=None):
"""Actually do the multiplication with the given order."""
if i == j:
# the initial call with non-None out should never get here
assert out is None
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j),
out=out)
# diagonal
def _diagonal_dispatcher(x, /, *, offset=None):
    # Dispatcher for __array_function__ overrides: expose the one operand.
    return (x,)


@array_function_dispatch(_diagonal_dispatcher)
def diagonal(x, /, *, offset=0):
    """
    Returns specified diagonals of a matrix (or a stack of matrices) ``x``.

    This function is Array API compatible: contrary to
    :py:func:`numpy.diagonal`, the matrix is assumed to be defined by the
    last two dimensions.

    Parameters
    ----------
    x : (...,M,N) array_like
        Input array whose innermost two dimensions form MxN matrices.
    offset : int, optional
        Offset of the diagonal relative to the main diagonal: ``0``
        selects the main diagonal, positive values the off-diagonals
        above it and negative values the off-diagonals below it.
        Default: ``0``.

    Returns
    -------
    out : (...,min(N,M)) ndarray
        The requested diagonals.  The shape is that of ``x`` with the
        last two dimensions replaced by one dimension holding the
        diagonal; the dtype matches ``x``.

    See Also
    --------
    numpy.diagonal

    Examples
    --------
    >>> a = np.arange(4).reshape(2, 2)
    >>> np.linalg.diagonal(a)
    array([0, 3])

    For a stack of matrices, the diagonal of each trailing 2-D slice is
    taken:

    >>> np.linalg.diagonal(np.arange(8).reshape(2, 2, 2))
    array([[0, 3],
           [4, 7]])

    Off-diagonals are selected with ``offset``:

    >>> a = np.arange(9).reshape(3, 3)
    >>> np.linalg.diagonal(a, offset=1)
    array([1, 5])
    >>> np.linalg.diagonal(a, offset=-2)
    array([6])

    The anti-diagonal can be obtained by first reversing the element
    order with `numpy.flipud` or `numpy.fliplr`.
    """
    # Array API semantics: the matrix axes are always the trailing two.
    return _core_diagonal(x, offset, axis1=-2, axis2=-1)
# trace
def _trace_dispatcher(x, /, *, offset=None, dtype=None):
    # Dispatcher for __array_function__ overrides: expose the one operand.
    return (x,)


@array_function_dispatch(_trace_dispatcher)
def trace(x, /, *, offset=0, dtype=None):
    """
    Returns the sum along the specified diagonals of a matrix
    (or a stack of matrices) ``x``.

    This function is Array API compatible: contrary to
    :py:func:`numpy.trace`, the matrix axes are the last two dimensions.

    Parameters
    ----------
    x : (...,M,N) array_like
        Input array whose innermost two dimensions form MxN matrices.
    offset : int, optional
        Offset of the diagonal relative to the main diagonal: ``0``
        selects the main diagonal, positive values the off-diagonals
        above it and negative values the off-diagonals below it.
        Default: ``0``.
    dtype : dtype, optional
        Data type of the returned array.

    Returns
    -------
    out : ndarray
        An array of traces whose shape is that of ``x`` with the last two
        dimensions removed; for ``x`` of shape (I, J, ..., L, M, N)::

            out[i, j, ..., l] = trace(a[i, j, ..., l, :, :])

    See Also
    --------
    numpy.trace

    Examples
    --------
    >>> np.linalg.trace(np.eye(3))
    3.0

    The trace of each trailing 2-D slice is computed, which differs from
    :py:func:`numpy.trace` (first two axes by default):

    >>> np.linalg.trace(np.arange(8).reshape((2, 2, 2)))
    array([3, 11])
    >>> np.linalg.trace(np.arange(24).reshape((3, 2, 2, 2))).shape
    (3, 2)

    Off-diagonal traces are selected with ``offset``:

    >>> a = np.arange(9).reshape((3, 3))
    >>> np.linalg.trace(a, offset=1)
    6
    >>> np.linalg.trace(a, offset=-2)
    6
    """
    # Array API semantics: the matrix axes are always the trailing two.
    return _core_trace(x, offset, axis1=-2, axis2=-1, dtype=dtype)
# cross
def _cross_dispatcher(x1, x2, /, *, axis=None):
    # Dispatcher for __array_function__ overrides: expose both operands.
    return (x1, x2,)


@array_function_dispatch(_cross_dispatcher)
def cross(x1, x2, /, *, axis=-1):
    """
    Returns the cross product of 3-element vectors.

    If ``x1`` and/or ``x2`` are multi-dimensional arrays, the
    cross-product of each pair of corresponding 3-element vectors is
    computed independently.  This function is Array API compatible,
    contrary to :func:`numpy.cross` (which also accepts 2-element
    vectors).

    Parameters
    ----------
    x1 : array_like
        The first input array.
    x2 : array_like
        The second input array.  Must be compatible with ``x1`` for all
        non-compute axes, and the same size as ``x1`` along ``axis``.
    axis : int, optional
        The axis (dimension) of ``x1`` and ``x2`` containing the vectors
        for which to compute the cross-product.  Default: ``-1``.

    Returns
    -------
    out : ndarray
        An array containing the cross products.

    See Also
    --------
    numpy.cross

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> y = np.array([4, 5, 6])
    >>> np.linalg.cross(x, y)
    array([-3,  6, -3])

    Batched, with the vectors along axis 0:

    >>> x = np.array([[1, 2], [3, 4], [5, 6]])
    >>> y = np.array([[4, 5], [6, 1], [2, 3]])
    >>> np.linalg.cross(x, y, axis=0)
    array([[-24,   6],
           [ 18,  24],
           [ -6, -18]])
    """
    x1 = asanyarray(x1)
    x2 = asanyarray(x2)
    # The Array API only defines the cross product for 3-element vectors;
    # reject anything else up front (numpy.cross would accept 2-element
    # vectors, which is deprecated behavior).
    if x1.shape[axis] != 3 or x2.shape[axis] != 3:
        raise ValueError(
            "Both input arrays must be (arrays of) 3-dimensional vectors, "
            f"but they are {x1.shape[axis]} and {x2.shape[axis]} "
            "dimensional instead."
        )
    return _core_cross(x1, x2, axis=axis)
# matmul
def _matmul_dispatcher(x1, x2, /):
    # Dispatcher for __array_function__ overrides: expose both operands.
    return (x1, x2)


@array_function_dispatch(_matmul_dispatcher)
def matmul(x1, x2, /):
    """
    Computes the matrix product.

    This function is Array API compatible, contrary to
    :func:`numpy.matmul`.

    Parameters
    ----------
    x1 : array_like
        The first input array.
    x2 : array_like
        The second input array.

    Returns
    -------
    out : ndarray
        The matrix product of the inputs.  This is a scalar only when
        both ``x1`` and ``x2`` are 1-d vectors.

    Raises
    ------
    ValueError
        If the last dimension of ``x1`` is not the same size as the
        second-to-last dimension of ``x2``, or if a scalar is passed in.

    See Also
    --------
    numpy.matmul

    Examples
    --------
    For 2-D arrays it is the matrix product:

    >>> a = np.array([[1, 0], [0, 1]])
    >>> b = np.array([[4, 1], [2, 2]])
    >>> np.linalg.matmul(a, b)
    array([[4, 1],
           [2, 2]])

    Mixing 2-D and 1-D gives the usual matrix-vector products:

    >>> np.linalg.matmul(a, np.array([1, 2]))
    array([1, 2])

    Stacks of matrices broadcast conventionally:

    >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
    >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
    >>> np.linalg.matmul(a, b).shape
    (2, 2, 2)

    Vector, vector returns the scalar inner product (no conjugation):

    >>> np.linalg.matmul([2j, 3j], [2j, 3j])
    (-13+0j)

    Scalar multiplication raises an error:

    >>> np.linalg.matmul([1, 2], 3)
    Traceback (most recent call last):
    ...
    ValueError: matmul: Input operand 1 does not have enough dimensions ...
    """
    # Thin Array API-compatible alias over the core ufunc.
    return _core_matmul(x1, x2)
# tensordot
def _tensordot_dispatcher(x1, x2, /, *, axes=None):
    # Dispatcher for __array_function__ overrides: expose both operands.
    return (x1, x2)
@array_function_dispatch(_tensordot_dispatcher)
def tensordot(x1, x2, /, *, axes=2):
    # Array API-compatible alias that defers to the core implementation.
    return _core_tensordot(x1, x2, axes=axes)
# Reuse the core function's docstring verbatim for the alias.
tensordot.__doc__ = _core_tensordot.__doc__
# matrix_transpose
def _matrix_transpose_dispatcher(x):
    # Dispatcher for __array_function__ overrides: expose the one operand.
    return (x,)
@array_function_dispatch(_matrix_transpose_dispatcher)
def matrix_transpose(x, /):
    # Array API-compatible alias: transposes the last two axes of ``x``.
    return _core_matrix_transpose(x)
matrix_transpose.__doc__ = f"""{_core_matrix_transpose.__doc__}
Notes
-----
This function is an alias of `numpy.matrix_transpose`.
"""
# matrix_norm
def _matrix_norm_dispatcher(x, /, *, keepdims=None, ord=None):
    # Dispatcher for __array_function__ overrides: expose the one operand.
    return (x,)


@array_function_dispatch(_matrix_norm_dispatcher)
def matrix_norm(x, /, *, keepdims=False, ord="fro"):
    """
    Computes the matrix norm of a matrix (or a stack of matrices) ``x``.

    This function is Array API compatible.

    Parameters
    ----------
    x : array_like
        Input array of shape (..., M, N); the two innermost dimensions
        form the ``MxN`` matrices.
    keepdims : bool, optional
        If this is set to True, the axes which are normed over are left
        in the result as dimensions with size one.  Default: False.
    ord : {1, -1, 2, -2, inf, -inf, 'fro', 'nuc'}, optional
        The order of the norm.  For details see the table under ``Notes``
        in `numpy.linalg.norm`.  Default: ``'fro'`` (Frobenius norm).

    See Also
    --------
    numpy.linalg.norm : Generic norm function

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> b = (np.arange(9) - 4).reshape((3, 3))
    >>> LA.matrix_norm(b)
    7.745966692414834
    >>> LA.matrix_norm(b, ord=np.inf)
    9.0
    >>> LA.matrix_norm(b, ord=1)
    7.0
    >>> LA.matrix_norm(b, ord=2)
    7.3484692283495345
    """
    # Delegate to the generic norm with the matrix axes pinned to the
    # trailing two dimensions, as required by the Array API.
    return norm(asanyarray(x), axis=(-2, -1), keepdims=keepdims, ord=ord)
# vector_norm
def _vector_norm_dispatcher(x, /, *, axis=None, keepdims=None, ord=None):
    # Dispatcher for __array_function__ overrides: expose the one operand.
    return (x,)


@array_function_dispatch(_vector_norm_dispatcher)
def vector_norm(x, /, *, axis=None, keepdims=False, ord=2):
    """
    Computes the vector norm of a vector (or batch of vectors) ``x``.

    This function is Array API compatible.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : {None, int, n-tuple of ints}, optional
        An integer selects the axis (dimension) along which to compute
        the vector norms; an n-tuple selects the axes over which batched
        vector norms are computed.  ``None`` computes a single norm over
        all values (equivalent to flattening first).  Default: ``None``.
    keepdims : bool, optional
        If this is set to True, the axes which are normed over are left
        in the result as dimensions with size one.  Default: False.
    ord : {int, float, inf, -inf}, optional
        The order of the norm.  For details see the table under ``Notes``
        in `numpy.linalg.norm`.  Default: 2.

    See Also
    --------
    numpy.linalg.norm : Generic norm function

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> b = (np.arange(9) + 1).reshape((3, 3))
    >>> LA.vector_norm(b)
    16.881943016134134
    >>> LA.vector_norm(b, ord=np.inf)
    9.0
    >>> LA.vector_norm(b, ord=1)
    45.0
    """
    x = asanyarray(x)
    original_shape = list(x.shape)
    if axis is None:
        # np.linalg.norm() cannot handle 0-D input, so flatten first.
        x = x.ravel()
        norm_axis = 0
    elif isinstance(axis, tuple):
        # np.linalg.norm() only supports a single axis for vector norms,
        # so collapse all requested axes into one leading axis and norm
        # along it.
        wanted = normalize_axis_tuple(axis, x.ndim)
        batch = tuple(i for i in range(x.ndim) if i not in wanted)
        x = _core_transpose(x, axis + batch).reshape(
            (
                prod([x.shape[i] for i in axis], dtype=int),
                *[x.shape[i] for i in batch]
            )
        )
        norm_axis = 0
    else:
        norm_axis = axis
    result = norm(x, axis=norm_axis, ord=ord)
    if keepdims:
        # norm(keepdims=True) cannot be reused because of the reshaping
        # above, so reinsert the collapsed axes by hand.
        kept = normalize_axis_tuple(
            range(len(original_shape)) if axis is None else axis,
            len(original_shape)
        )
        for i in kept:
            original_shape[i] = 1
        result = result.reshape(tuple(original_shape))
    return result
# vecdot
def _vecdot_dispatcher(x1, x2, /, *, axis=None):
return (x1, x2)
@array_function_dispatch(_vecdot_dispatcher)
def vecdot(x1, x2, /, *, axis=-1):
"""
Computes the vector dot product.
This function is restricted to arguments compatible with the Array API,
contrary to :func:`numpy.vecdot`.
Let :math:`\\mathbf{a}` be a vector in ``x1`` and :math:`\\mathbf{b}` be
a corresponding vector in ``x2``. The dot product is defined as:
.. math::
\\mathbf{a} \\cdot \\mathbf{b} = \\sum_{i=0}^{n-1} \\overline{a_i}b_i
over the dimension specified by ``axis`` and where :math:`\\overline{a_i}`
denotes the complex conjugate if :math:`a_i` is complex and the identity
otherwise.
Parameters
----------
x1 : array_like
First input array.
x2 : array_like
Second input array.
axis : int, optional
Axis over which to compute the dot product. Default: ``-1``.
Returns
-------
output : ndarray
The vector dot product of the input.
See Also
--------
numpy.vecdot
Examples
--------
Get the projected size along a given normal for an array of vectors.
>>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]])
>>> n = np.array([0., 0.6, 0.8])
>>> np.linalg.vecdot(v, n)
array([ 3., 8., 10.])
"""
return _core_vecdot(x1, x2, axis=axis)
| LinAlgError |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/model_serialization.py | {
"start": 1128,
"end": 2848
} | class ____:
batch_size_placeholder = "batch_size"
sequence_length_placeholder = "sequence_length"
vector_observation_placeholder = "vector_observation"
recurrent_in_placeholder = "recurrent_in"
visual_observation_placeholder_prefix = "visual_observation_"
observation_placeholder_prefix = "obs_"
previous_action_placeholder = "prev_action"
action_mask_placeholder = "action_masks"
random_normal_epsilon_placeholder = "epsilon"
value_estimate_output = "value_estimate"
recurrent_output = "recurrent_out"
memory_size = "memory_size"
version_number = "version_number"
continuous_action_output_shape = "continuous_action_output_shape"
discrete_action_output_shape = "discrete_action_output_shape"
continuous_action_output = "continuous_actions"
discrete_action_output = "discrete_actions"
deterministic_continuous_action_output = "deterministic_continuous_actions"
deterministic_discrete_action_output = "deterministic_discrete_actions"
# Deprecated TensorNames entries for backward compatibility
is_continuous_control_deprecated = "is_continuous_control"
action_output_deprecated = "action"
action_output_shape_deprecated = "action_output_shape"
@staticmethod
def get_visual_observation_name(index: int) -> str:
"""
Returns the name of the visual observation with a given index
"""
return TensorNames.visual_observation_placeholder_prefix + str(index)
@staticmethod
def get_observation_name(index: int) -> str:
"""
Returns the name of the observation with a given index
"""
return TensorNames.observation_placeholder_prefix + str(index)
| TensorNames |
python | google__jax | jax/experimental/colocated_python/func_backend.py | {
"start": 727,
"end": 1382
} | class ____:
"""Temporarily stores results from synchronous execution of functions."""
def __init__(self) -> None:
self._lock = threading.Lock()
self._storage: dict[int, Sequence[jax.Array]] = {}
def push(self, uid: int, out: Sequence[jax.Array]) -> None:
with self._lock:
if uid in self._storage:
raise ValueError(f"uid {uid} already exists")
self._storage[uid] = out
def pop(self, uid: int) -> Sequence[jax.Array]:
with self._lock:
if uid not in self._storage:
raise ValueError(f"uid {uid} does not exist")
return self._storage.pop(uid)
SINGLETON_RESULT_STORE = _ResultStore()
| _ResultStore |
python | getsentry__sentry | src/sentry/grouping/enhancer/matchers.py | {
"start": 11825,
"end": 12332
} | class ____(FrameMatch):
def _positive_frame_match(
self, match_frame: MatchFrame, exception_data: dict[str, Any], cache: ReturnValueCache
) -> bool:
if not self.field: # Shouldn't happen, but it keeps mypy happy
return False
value = match_frame[self.field]
if value is None:
return False
if value == self._encoded_pattern:
return True
return _cached(cache, glob_match, value, self._encoded_pattern)
| FrameFieldMatch |
python | numpy__numpy | numpy/typing/tests/data/pass/ufunc_config.py | {
"start": 403,
"end": 1205
} | class ____:
def write(self, a: str) -> int:
return 0
_err_default = np.geterr()
_bufsize_default = np.getbufsize()
_errcall_default = np.geterrcall()
try:
np.seterr(all=None)
np.seterr(divide="ignore")
np.seterr(over="warn")
np.seterr(under="call")
np.seterr(invalid="raise")
np.geterr()
np.setbufsize(4096)
np.getbufsize()
np.seterrcall(func1)
np.seterrcall(func2)
np.seterrcall(func3)
np.seterrcall(Write1())
np.seterrcall(Write2())
np.seterrcall(Write3())
np.geterrcall()
with np.errstate(call=func1, all="call"):
pass
with np.errstate(call=Write1(), divide="log", over="log"):
pass
finally:
np.seterr(**_err_default)
np.setbufsize(_bufsize_default)
np.seterrcall(_errcall_default)
| Write3 |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 8223,
"end": 9191
} | class ____(Operation):
def __init__(self, b=4, *, name=None):
super().__init__(name=name)
self.b = b
def call(self, x):
return backend.nn.squareplus(x, self.b)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.squareplus", "keras.ops.nn.squareplus"])
def squareplus(x, b=4):
"""Squareplus activation function.
The Squareplus activation function is defined as:
`f(x) = (x + sqrt(x^2 + b)) / 2`
Args:
x: Input tensor.
b: Smoothness parameter. Defaults to 4.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1.0, 0.0, 1.0])
>>> x_squareplus = keras.ops.squareplus(x)
>>> print(x_squareplus)
array([0.6180, 1.0000, 1.6180], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Squareplus(b).symbolic_call(x)
return backend.nn.squareplus(x, b)
| Squareplus |
python | vyperlang__vyper | vyper/ast/natspec.py | {
"start": 353,
"end": 5296
} | class ____:
userdoc: dict
devdoc: dict
def parse_natspec(annotated_vyper_module: vy_ast.Module) -> NatspecOutput:
try:
return _parse_natspec(annotated_vyper_module)
except NatSpecSyntaxException as e:
e.resolved_path = annotated_vyper_module.resolved_path
raise e
def _parse_natspec(annotated_vyper_module: vy_ast.Module) -> NatspecOutput:
"""
Parses NatSpec documentation from a contract.
Arguments
---------
annotated_vyper_module: Module
Module-level vyper ast node.
interface_codes: Dict, optional
Dict containing relevant data for any import statements related to
this contract.
Returns
-------
dict
NatSpec user documentation
dict
NatSpec developer documentation
"""
from vyper.semantics.types.function import FunctionVisibility
userdoc, devdoc = {}, {}
source: str = annotated_vyper_module.full_source_code
docstring = annotated_vyper_module.get("doc_string.value")
if docstring:
devdoc.update(_parse_docstring(source, docstring, ("param", "return")))
if "notice" in devdoc:
userdoc["notice"] = devdoc.pop("notice")
for node in [i for i in annotated_vyper_module.body if i.get("doc_string.value")]:
docstring = node.doc_string.value
func_type = node._metadata["func_type"]
if func_type.visibility != FunctionVisibility.EXTERNAL:
continue
if isinstance(node.returns, vy_ast.Tuple):
ret_len = len(node.returns.elements)
elif node.returns:
ret_len = 1
else:
ret_len = 0
args = tuple(i.arg for i in node.args.args)
invalid_fields = ("title", "license")
fn_natspec = _parse_docstring(source, docstring, invalid_fields, args, ret_len)
for method_id in func_type.method_ids:
if "notice" in fn_natspec:
userdoc.setdefault("methods", {})[method_id] = {"notice": fn_natspec.pop("notice")}
if fn_natspec:
devdoc.setdefault("methods", {})[method_id] = fn_natspec
return NatspecOutput(userdoc=userdoc, devdoc=devdoc)
def _parse_docstring(
source: str,
docstring: str,
invalid_fields: Tuple,
params: Optional[Tuple] = None,
return_length: int = 0,
) -> dict:
natspec: dict = {}
if params is None:
params = tuple()
line_no = LineNumbers(source)
start = source.index(docstring)
translate_map = {"return": "returns", "dev": "details", "param": "params"}
pattern = r"(?:^|\n)\s*@(\S+)\s*([\s\S]*?)(?=\n\s*@\S|\s*$)"
for match in re.finditer(pattern, docstring):
tag, value = match.groups()
err_args = (source, *line_no.offset_to_line(start + match.start(1)))
if tag not in SINGLE_FIELDS + PARAM_FIELDS and not tag.startswith("custom:"):
raise NatSpecSyntaxException(f"Unknown NatSpec field '@{tag}'", *err_args)
if tag in invalid_fields:
raise NatSpecSyntaxException(
f"'@{tag}' is not a valid field for this docstring", *err_args
)
if not value or value.startswith("@"):
raise NatSpecSyntaxException(f"No description given for tag '@{tag}'", *err_args)
if tag not in PARAM_FIELDS:
if tag in natspec:
raise NatSpecSyntaxException(f"Duplicate NatSpec field '@{tag}'", *err_args)
natspec[translate_map.get(tag, tag)] = " ".join(value.split())
continue
tag = translate_map.get(tag, tag)
natspec.setdefault(tag, {})
if tag == "params":
try:
key, value = value.split(maxsplit=1)
except ValueError as exc:
raise NatSpecSyntaxException(
f"No description given for parameter '{value}'", *err_args
) from exc
if key not in params:
raise NatSpecSyntaxException(f"Method has no parameter '{key}'", *err_args)
elif tag == "returns":
if not return_length:
raise NatSpecSyntaxException("Method does not return any values", *err_args)
if len(natspec["returns"]) >= return_length:
raise NatSpecSyntaxException(
"Number of documented return values exceeds actual number", *err_args
)
key = f"_{len(natspec['returns'])}"
if key in natspec[tag]:
raise NatSpecSyntaxException(f"Parameter '{key}' documented more than once", *err_args)
natspec[tag][key] = " ".join(value.split())
if not natspec:
natspec["notice"] = " ".join(docstring.split())
elif not docstring.strip().startswith("@"):
raise NatSpecSyntaxException(
"NatSpec docstring opens with untagged comment", source, *line_no.offset_to_line(start)
)
return natspec
| NatspecOutput |
python | Netflix__metaflow | test/core/tests/card_import.py | {
"start": 72,
"end": 5315
} | class ____(MetaflowTest):
"""
This test tries to check if the import scheme for cards works as intended.
- Importing a card and calling it via the `type` should work
- Importable cards could be editable.
- If the submodule has errors while importing then the rest of metaflow should not fail.
"""
PRIORITY = 4
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag('card(type="editable_import_test_card",save_errors=False)')
@tag('card(type="test_broken_card",save_errors=False)')
@tag('card(type="non_editable_import_test_card",save_errors=False)')
@steps(0, ["start"])
def step_start(self):
from metaflow import current
from metaflow.plugins.cards.card_modules.test_cards import TestStringComponent
import random
self.random_number = random.randint(0, 100)
# Adds a card to editable_import_test_card
current.card.append(TestStringComponent(str(self.random_number)))
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
run = checker.get_run()
if run is None:
# This means CliCheck is in context.
for step in flow:
if step.name != "start":
continue
cli_check_dict = checker.artifact_dict(step.name, "random_number")
for task_pathspec in cli_check_dict:
task_id = task_pathspec.split("/")[-1]
random_number = cli_check_dict[task_pathspec]["random_number"]
cards_info = checker.list_cards(step.name, task_id)
# Safely importable cards should be present.
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 2,
True,
)
impc_e = [
c
for c in cards_info["cards"]
if c["type"] == "editable_import_test_card"
]
impc_e = impc_e[0]
impc_ne = [
c
for c in cards_info["cards"]
if c["type"] == "non_editable_import_test_card"
]
impc_ne = impc_ne[0]
checker.assert_card(
step.name,
task_id,
impc_ne["type"],
"%s" % cards_info["pathspec"],
card_hash=impc_ne["hash"],
exact_match=True,
)
checker.assert_card(
step.name,
task_id,
impc_e["type"],
"%d" % random_number,
card_hash=impc_e["hash"],
exact_match=True,
)
else:
# This means MetadataCheck is in context.
for step in flow:
if step.name != "start":
continue
meta_check_dict = checker.artifact_dict(step.name, "random_number")
for task_id in meta_check_dict:
random_number = meta_check_dict[task_id]["random_number"]
cards_info = checker.list_cards(
step.name,
task_id,
)
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 2,
True,
)
impc_e = [
c
for c in cards_info["cards"]
if c["type"] == "editable_import_test_card"
]
impc_e = impc_e[0]
impc_ne = [
c
for c in cards_info["cards"]
if c["type"] == "non_editable_import_test_card"
]
impc_ne = impc_ne[0]
# print()
task_pathspec = cards_info["pathspec"]
checker.assert_card(
step.name,
task_id,
impc_ne["type"],
"%s" % task_pathspec,
card_hash=impc_ne["hash"],
exact_match=True,
)
checker.assert_card(
step.name,
task_id,
impc_e["type"],
"%d" % random_number,
card_hash=impc_e["hash"],
exact_match=True,
)
| CardImportTest |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 75069,
"end": 76428
} | class ____:
"""Test th_TH address provider methods"""
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in ThThAddressProvider.countries
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city_name()
assert isinstance(city, str)
assert city in ThThAddressProvider.cities
def test_province(self, faker, num_samples):
for _ in range(num_samples):
province = faker.province()
assert isinstance(province, str)
assert province in ThThAddressProvider.provinces
def test_amphoe(self, faker, num_samples):
for _ in range(num_samples):
amphoe = faker.amphoe()
assert isinstance(amphoe, str)
assert amphoe in ThThAddressProvider.amphoes
def test_tambon(self, faker, num_samples):
for _ in range(num_samples):
tambon = faker.tambon()
assert isinstance(tambon, str)
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"[1-9]\d{4}", postcode)
| TestThTh |
python | google__jax | jax/_src/pallas/fuser/fusible_dtype.py | {
"start": 6574,
"end": 18103
} | class ____:
avals_in: Sequence[Any]
avals_out: Sequence[Any]
def physicalize_interp(
jaxpr: core.Jaxpr, consts: Sequence[core.Value], *args: core.Value
):
"""Physicalizes a jaxpr by replacing fusible dtypes with physical types."""
# TODO: Merge into JAX core.
env: dict[core.Var, Any] = {}
def read_env(var: core.Atom):
if isinstance(var, core.Literal):
return var.val
return env[var]
def write_env(var: core.Var, val: Any):
env[var] = val
foreach(write_env, jaxpr.constvars, consts)
assert len(jaxpr.invars) == len(
args
), f"Length mismatch: {jaxpr.invars} != {args}"
foreach(write_env, jaxpr.invars, args)
for eqn in jaxpr.eqns:
invals = list(map(read_env, eqn.invars))
avals_in = tuple(x.aval for x in eqn.invars)
name_stack = (
source_info_util.current_name_stack() + eqn.source_info.name_stack
)
with (
source_info_util.user_context(
eqn.source_info.traceback, name_stack=name_stack
),
eqn.ctx.manager,
):
# need to check types and then invoke the correct rule.
ctx = Context(
avals_in=avals_in, avals_out=[var.aval for var in eqn.outvars]
)
custom_rule = _phys_find_rule(eqn.primitive, avals_in)
if custom_rule:
outvals = custom_rule(ctx, *invals, **eqn.params)
else:
subfuns, bind_params = eqn.primitive.get_bind_params(eqn.params)
outvals = eqn.primitive.bind(*subfuns, *invals, **bind_params)
if eqn.primitive.multiple_results:
assert len(outvals) == len(eqn.outvars), eqn
foreach(write_env, eqn.outvars, outvals)
else:
write_env(eqn.outvars[0], outvals)
return map(read_env, jaxpr.outvars)
def _is_fusion_type(aval: core.AbstractValue):
"""Returns whether an aval is an array containing fusion types."""
return (
isinstance(aval, (core.ShapedArray, state.AbstractRef))
and hasattr(aval, 'dtype')
and isinstance(aval.dtype, FusionDType)
)
def _phys_find_rule(primitive, avals: Sequence[core.AbstractValue]):
"""Finds the physicalization rule for a primitive."""
if primitive in _physicalize_rules:
return _physicalize_rules[primitive]
fusion_types = {aval.dtype for aval in avals if _is_fusion_type(aval)} # pytype: disable=attribute-error
if len(fusion_types) == 0:
return None
elif len(fusion_types) > 1:
raise ValueError(f"Multiple fusion types for primitive: {fusion_types}")
fusion_type = fusion_types.pop()
if primitive not in fusion_type._op_registry:
raise ValueError(
f"No implementation found for primitive {primitive} "
f"for custom type {fusion_type}"
)
return fusion_type.get_op_rule(primitive)
def _assert_no_fusion_types(avals: Sequence[core.AbstractValue]):
if any(_is_fusion_type(aval) for aval in avals):
raise NotImplementedError(f"Fusion type found in avals: {avals}")
def _pallas_call_physicalize_rule(
ctx: Context, *args, jaxpr, grid_mapping: pallas_core.GridMapping, **kwargs
):
_assert_no_fusion_types(ctx.avals_in)
_assert_no_fusion_types(ctx.avals_out)
with grid_mapping.trace_env():
new_jaxpr = physicalize_closed_jaxpr(core.ClosedJaxpr(jaxpr, ()))
if diff := len(new_jaxpr.jaxpr.invars) - len(jaxpr.invars):
num_scratch_avals = len(grid_mapping.scratch_avals) + diff
new_scratch_avals = tuple(v.aval for v in
new_jaxpr.jaxpr.invars[-num_scratch_avals:])
grid_mapping = grid_mapping.replace(
scratch_avals=new_scratch_avals
)
return pallas_call.pallas_call_p.bind(
*args, jaxpr=new_jaxpr.jaxpr, grid_mapping=grid_mapping, **kwargs
)
_physicalize_rules[pallas_call.pallas_call_p] = _pallas_call_physicalize_rule
def _cond_physicalize_rule(ctx: Context, *args, branches, **kwargs):
_assert_no_fusion_types(ctx.avals_out)
physicalized_branches = tuple(
physicalize_closed_jaxpr(branch) for branch in branches
)
flat_args = jax.tree.leaves(args)
return conditionals.cond_p.bind(
*flat_args, branches=physicalized_branches, **kwargs
)
_physicalize_rules[conditionals.cond_p] = _cond_physicalize_rule
@lu.transformation2
def _physicalize_transform(f, *args):
vals, zeros = args[::2], args[1::2]
assert len(vals) == len(zeros)
wrapper = lambda *inner_vals: f(
*it.chain.from_iterable(zip(inner_vals, zeros))
)
return physicalize(wrapper)(*vals)
@lu.transformation2
def _physicalize_transform_bwd(f, const_avals, *args):
return [custom_derivatives.Zero(a) for a in const_avals] + list(
physicalize(f)(*args)
)
def _custom_vjp_call_physicalize_rule(
ctx: Context, *args, call_jaxpr, num_consts, fwd_jaxpr_thunk, bwd, **kwargs
):
_assert_no_fusion_types(ctx.avals_out)
new_jaxpr = physicalize_closed_jaxpr(call_jaxpr)
fun = lu.wrap_init(core.jaxpr_as_fun(new_jaxpr),
debug_info=call_jaxpr.jaxpr.debug_info)
fwd = custom_derivatives.lift_fwd(num_consts, fwd_jaxpr_thunk)
fwd_physicalized = _physicalize_transform(fwd)
const_avals, _ = util.split_list(new_jaxpr.in_avals, [num_consts])
bwd_physicalized = _physicalize_transform_bwd(bwd, const_avals)
return custom_derivatives.custom_vjp_call_p.bind(
fun, fwd_physicalized, bwd_physicalized, *args, **kwargs
)
_physicalize_rules[custom_derivatives.custom_vjp_call_p] = _custom_vjp_call_physicalize_rule
def _run_state_rule(ctx: Context, *args, jaxpr, which_linear, is_initialized):
_assert_no_fusion_types(ctx.avals_in)
_assert_no_fusion_types(ctx.avals_out)
jaxpr = physicalize_jaxpr(jaxpr)
return state_discharge.run_state_p.bind(
*args,
jaxpr=jaxpr,
which_linear=which_linear,
is_initialized=is_initialized,
)
_physicalize_rules[state_discharge.run_state_p] = _run_state_rule
def _core_map_rule(ctx: Context, *args, jaxpr, **params):
_assert_no_fusion_types(ctx.avals_in)
_assert_no_fusion_types(ctx.avals_out)
assert not jaxpr.invars
with core.extend_axis_env_nd(params["mesh"].shape.items()):
jaxpr = physicalize_jaxpr(jaxpr)
return pallas_core.core_map_p.bind(*args, jaxpr=jaxpr, **params)
_physicalize_rules[pallas_core.core_map_p] = _core_map_rule
def _run_scoped_rule(ctx: Context, *args, jaxpr, **params):
_assert_no_fusion_types(ctx.avals_out)
jaxpr = physicalize_jaxpr(jaxpr)
flat_args = tree_util.tree_leaves(args)
assert len(flat_args) == len(
jaxpr.constvars
), f"Length mismatch: {len(flat_args)=} != {len(jaxpr.constvars)=}"
return pallas_primitives.run_scoped_p.bind(*flat_args, jaxpr=jaxpr, **params)
_physicalize_rules[pallas_primitives.run_scoped_p] = _run_scoped_rule
def _scan_rule(ctx: Context, *args, jaxpr, **params):
_assert_no_fusion_types(ctx.avals_in)
_assert_no_fusion_types(ctx.avals_out)
jaxpr = physicalize_closed_jaxpr(jaxpr)
return jax.lax.scan_p.bind(*args, jaxpr=jaxpr, **params)
_physicalize_rules[jax.lax.scan_p] = _scan_rule
def _while_rule(
ctx: Context, *args, body_jaxpr, cond_jaxpr, body_nconsts,
cond_nconsts, **params
):
_assert_no_fusion_types(ctx.avals_out)
cond_avals = [v.aval for v in cond_jaxpr.jaxpr.invars]
_, cond_in_avals = util.split_list(cond_avals, [cond_nconsts])
_assert_no_fusion_types(cond_in_avals)
new_cond_jaxpr = physicalize_closed_jaxpr(cond_jaxpr)
new_num_cond_consts = (
cond_nconsts
+ len(new_cond_jaxpr.jaxpr.invars)
- len(cond_jaxpr.jaxpr.invars)
)
body_avals = [v.aval for v in body_jaxpr.jaxpr.invars]
_, body_in_avals = util.split_list(body_avals, [body_nconsts])
_assert_no_fusion_types(body_in_avals)
new_body_jaxpr = physicalize_closed_jaxpr(body_jaxpr)
new_num_body_consts = (
body_nconsts
+ len(new_body_jaxpr.jaxpr.invars)
- len(body_jaxpr.jaxpr.invars)
)
flat_args = tree_util.tree_leaves(args)
cond_consts, body_consts, flat_args = util.split_list(
flat_args, [new_num_cond_consts, new_num_body_consts]
)
assert len(flat_args) + len(body_consts) == len(
new_body_jaxpr.jaxpr.invars), (
f"Length mismatch: {len(flat_args) + len(body_consts)} !="
f" {len(new_body_jaxpr.jaxpr.invars)=}"
)
assert len(flat_args) + len(cond_consts) == len(
new_cond_jaxpr.jaxpr.invars), (
f"Length mismatch: {len(flat_args) + len(cond_consts)} !="
f" {len(new_cond_jaxpr.jaxpr.invars)=}"
)
return jax.lax.while_p.bind(
*(cond_consts + body_consts + flat_args),
body_jaxpr=new_body_jaxpr,
cond_jaxpr=new_cond_jaxpr,
body_nconsts=new_num_body_consts,
cond_nconsts=new_num_cond_consts,
**params,
)
_physicalize_rules[jax.lax.while_p] = _while_rule
def _pack_rule(_, *args, dtype):
del dtype
return args
_physicalize_rules[pack_dtype_p] = _pack_rule
def _unpack_rule(_, arg):
return arg
_physicalize_rules[unpack_dtype_p] = _unpack_rule
def _swap_rule(ctx: Context, ref, val, *args, tree):
ref_aval, *_ = ctx.avals_in
if not _is_fusion_type(ref_aval):
return state_primitives.swap_p.bind(ref, val, *args, tree=tree)
return ref_aval.dtype.swap(ref, val, *args, tree=tree)
_physicalize_rules[state_primitives.swap_p] = _swap_rule
def _get_rule(ctx: Context, ref, *args, tree):
ref_aval, *_ = ctx.avals_in
if not _is_fusion_type(ref_aval):
return state_primitives.get_p.bind(ref, *args, tree=tree)
return ref_aval.dtype.get(ref, *args, tree=tree)
_physicalize_rules[state_primitives.get_p] = _get_rule
@block_spec.register_eval_rule(pack_dtype_p)
def _pack_dtype_eval_rule(eval_ctx: block_spec.KernelEvalContext, *args, dtype):
return dtype.pack_eval_rule(eval_ctx, *args)
@block_spec.register_pull_block_spec_rule(pack_dtype_p)
def _pack_dtype_pull_rule(
ctx: block_spec.PullRuleContext,
block_spec: pallas_core.BlockSpec,
*,
dtype: FusionDType,
):
aval_out = ctx.avals_out[0]
return dtype.pull_block_spec_one_step(aval_out, block_spec) # pytype: disable=attribute-error
@block_spec.register_push_block_spec_rule(unpack_dtype_p)
def _unpack_dtype_push_rule(
ctx: block_spec.PushRuleContext,
block_spec: pallas_core.BlockSpec,
):
aval_in = ctx.avals_in[0]
assert isinstance(aval_in, core.ShapedArray)
assert isinstance(aval_in.dtype, FusionDType), aval_in.dtype
return aval_in.dtype.unpack_push_block_spec(aval_in, block_spec) # pytype: disable=attribute-error
@block_spec.register_pull_block_spec_rule(unpack_dtype_p)
def _unpack_dtype_pull_rule(
ctx: block_spec.PushRuleContext,
block_specs: pallas_core.BlockSpec,
):
aval_in = ctx.avals_in[0]
assert isinstance(aval_in, core.ShapedArray)
assert isinstance(aval_in.dtype, FusionDType), aval_in.dtype
return aval_in.dtype.unpack_pull_block_spec(aval_in, *block_specs)
@block_spec.register_eval_rule(unpack_dtype_p)
def _unpack_dtype_eval_rule(eval_ctx: block_spec.KernelEvalContext, *args):
aval_in = eval_ctx.avals_in[0]
assert isinstance(aval_in, core.ShapedArray)
assert isinstance(aval_in.dtype, FusionDType), aval_in.dtype
return aval_in.dtype.unpack_eval_rule(eval_ctx, *args)
def _fusible_physicalize_rule(
_, *consts_and_args, jaxpr, num_consts, in_tree, out_tree, func
):
consts, _ = util.split_list(consts_and_args, [num_consts])
new_jaxpr = physicalize_closed_jaxpr(core.ClosedJaxpr(jaxpr, consts))
return fusible_p.bind(
*consts_and_args,
jaxpr=new_jaxpr.jaxpr,
num_consts=num_consts,
in_tree=in_tree,
out_tree=out_tree,
func=func,
)
_physicalize_rules[fusible_p] = _fusible_physicalize_rule
| Context |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.