language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | google__pytype | pytype/tests/test_stdlib2.py | {
"start": 3809,
"end": 18265
} | class ____(test_base.BaseTest, test_utils.TestCollectionsMixin):
"""Tests for files in typeshed/stdlib."""
def test_collections_smoke_test(self):
# These classes are not fully implemented in typing.py.
self.Check("""
import collections
collections.AsyncIterable
collections.AsyncIterator
collections.AsyncGenerator
collections.Awaitable
collections.Coroutine
""")
def test_collections_bytestring(self):
self._testCollectionsObject(
"ByteString",
"b'hello'",
"42",
r"Union\[bytearray, bytes, memoryview\].*int",
)
def test_collections_collection(self):
self._testCollectionsObject("Collection", "[]", "42", r"Collection.*int")
def test_collections_generator(self):
self._testCollectionsObject(
"Generator", "i for i in range(42)", "42", r"Generator.*int"
)
def test_collections_reversible(self):
self._testCollectionsObject("Reversible", "[]", "42", r"Reversible.*int")
def test_collections_mapping_view(self):
self._testCollectionsObject(
"MappingView", "{}.items()", "42", r"MappingView.*int"
)
def test_collections_items_view(self):
self._testCollectionsObject(
"ItemsView", "{}.items()", "42", r"ItemsView.*int"
)
def test_collections_keys_view(self):
self._testCollectionsObject("KeysView", "{}.keys()", "42", r"KeysView.*int")
def test_collections_values_view(self):
self._testCollectionsObject(
"ValuesView", "{}.values()", "42", r"ValuesView.*int"
)
def test_tempfile(self):
# TODO(b/63407497): Enabling --strict_parameter_checks leads to a bunch of
# str vs bytes wrong-arg-types errors.
self.options.tweak(strict_parameter_checks=False)
ty = self.Infer("""
import tempfile
import typing
import os
def f(fi: typing.IO):
fi.write("foobar")
pos = fi.tell()
fi.seek(0, os.SEEK_SET)
s = fi.read(6)
fi.close()
return s
f(tempfile.TemporaryFile("wb", suffix=".foo"))
f(tempfile.NamedTemporaryFile("wb", suffix=".foo"))
f(tempfile.SpooledTemporaryFile(1048576, "wb", suffix=".foo"))
""")
self.assertTypesMatchPytd(
ty,
"""
import os
import tempfile
import typing
from typing import Any, Union
def f(fi: typing.IO) -> Union[bytes, str]: ...
""",
)
def test_defaultdict(self):
self.Check("""
import collections
import itertools
ids = collections.defaultdict(itertools.count(17).__next__)
""")
def test_defaultdict_matches_dict(self):
self.Check("""
import collections
from typing import DefaultDict, Dict
def take_dict(d: Dict[int, str]): pass
def take_defaultdict(d: DefaultDict[int, str]): pass
d = collections.defaultdict(str, {1: "hello"})
take_dict(d)
take_defaultdict(d)
""")
def test_defaultdict_kwargs(self):
self.Check("""
import collections
from typing import DefaultDict, Union
def take_str_int_values(d: DefaultDict[str, Union[str, int]]): pass
d = collections.defaultdict(str, {'x': 'x'}, an_int = 1)
take_str_int_values(d)
def take_three_types(d: DefaultDict[str, Union[str, int, list]]): pass
e = collections.defaultdict(str, {'x': [1, 2]}, an_int = 3)
take_three_types(e)
collections.defaultdict(None, [(1, '2'), (3, '4')], a=1, b=2)
""")
def test_sys_version_info_lt(self):
ty = self.Infer("""
import sys
if sys.version_info[0] < 3:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
v = ... # type: str
""",
)
def test_sys_version_info_le(self):
ty = self.Infer("""
import sys
if sys.version_info[0] <= 3:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
v = ... # type: int
""",
)
def test_sys_version_info_eq(self):
ty = self.Infer("""
import sys
if sys.version_info[0] == 2:
v = 42
elif sys.version_info[0] == 3:
v = "hello world"
else:
v = None
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
v = ... # type: str
""",
)
def test_sys_version_info_ge(self):
ty = self.Infer("""
import sys
if sys.version_info[0] >= 3:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
v = ... # type: int
""",
)
def test_sys_version_info_gt(self):
ty = self.Infer("""
import sys
if sys.version_info[0] > 2:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
v = ... # type: int
""",
)
def test_sys_version_info_named_attribute(self):
ty = self.Infer("""
import sys
if sys.version_info.major == 2:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
v: str
""",
)
def test_sys_version_info_tuple(self):
ty = self.Infer("""
import sys
if sys.version_info >= (3, 5):
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
v: int
""",
)
def test_sys_version_info_slice(self):
ty = self.Infer("""
import sys
if sys.version_info[:2] >= (3, 5):
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
v: int
""",
)
def test_sys_platform(self):
self.options.tweak(platform="linux")
ty = self.Infer("""
import sys
if sys.platform == "linux":
x = 0
else:
x = "0"
""")
self.assertTypesMatchPytd(
ty,
"""
import sys
x: int
""",
)
def test_async(self):
"""Test various asyncio features."""
ty = self.Infer("""
import asyncio
async def log(x: str):
return x
class AsyncContextManager:
async def __aenter__(self):
await log("entering context")
async def __aexit__(self, exc_type, exc, tb):
await log("exiting context")
async def my_coroutine(seconds_to_sleep=0.4):
await asyncio.sleep(seconds_to_sleep)
async def test_with(x):
try:
async with x as y:
pass
finally:
pass
event_loop = asyncio.get_event_loop()
try:
event_loop.run_until_complete(my_coroutine())
finally:
event_loop.close()
""")
self.assertTypesMatchPytd(
ty,
"""
import asyncio
from typing import Any, Coroutine
event_loop: asyncio.events.AbstractEventLoop
class AsyncContextManager:
def __aenter__(self) -> Coroutine[Any, Any, None]: ...
def __aexit__(self, exc_type, exc, tb) -> Coroutine[Any, Any, None]: ...
def log(x: str) -> Coroutine[Any, Any, str]: ...
def my_coroutine(seconds_to_sleep = ...) -> Coroutine[Any, Any, None]: ...
def test_with(x) -> Coroutine[Any, Any, None]: ...
""",
)
def test_async_iter(self):
ty = self.Infer("""
import asyncio
class AsyncIterable:
def __aiter__(self):
return self
async def __anext__(self):
data = await self.fetch_data()
if data:
return data
else:
raise StopAsyncIteration
async def fetch_data(self):
return 1
async def iterate(x):
async for i in x:
pass
else:
pass
iterate(AsyncIterable())
""")
self.assertTypesMatchPytd(
ty,
"""
import asyncio
from typing import Any, Coroutine, TypeVar
_TAsyncIterable = TypeVar('_TAsyncIterable', bound=AsyncIterable)
class AsyncIterable:
def __aiter__(self: _TAsyncIterable) -> _TAsyncIterable: ...
def __anext__(self) -> Coroutine[Any, Any, int]: ...
def fetch_data(self) -> Coroutine[Any, Any, int]: ...
def iterate(x) -> Coroutine[Any, Any, None]: ...
""",
)
def test_subprocess(self):
# Test an attribute new in Python 3.
self.Check("""
import subprocess
subprocess.run
""")
def test_popen_bytes(self):
ty = self.Infer("""
import subprocess
def run(cmd):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
return stdout
""")
self.assertTypesMatchPytd(
ty,
"""
import subprocess
def run(cmd) -> bytes: ...
""",
)
def test_popen_bytes_no_encoding(self):
ty = self.Infer("""
import subprocess
def run(cmd):
proc = subprocess.Popen(cmd, encoding=None, stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
return stdout
""")
self.assertTypesMatchPytd(
ty,
"""
import subprocess
def run(cmd) -> bytes: ...
""",
)
def test_popen_bytes_no_universal_newlines(self):
ty = self.Infer("""
import subprocess
def run(cmd):
proc = subprocess.Popen(
cmd, universal_newlines=False, stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
return stdout
""")
self.assertTypesMatchPytd(
ty,
"""
import subprocess
def run(cmd) -> bytes: ...
""",
)
def test_popen_str_encoding(self):
ty = self.Infer("""
import subprocess
def run(cmd):
proc = subprocess.Popen(cmd, encoding='utf-8', stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
return stdout
""")
self.assertTypesMatchPytd(
ty,
"""
import subprocess
def run(cmd) -> str: ...
""",
)
def test_popen_str_universal_newlines(self):
ty = self.Infer("""
import subprocess
def run(cmd):
proc = subprocess.Popen(
cmd, universal_newlines=True, stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
return stdout
""")
self.assertTypesMatchPytd(
ty,
"""
import subprocess
def run(cmd) -> str: ...
""",
)
def test_popen_ambiguous_universal_newlines(self):
ty = self.Infer("""
import subprocess
from typing import Any
def run1(value: bool):
proc = subprocess.Popen(['ls'], universal_newlines=value)
stdout, _ = proc.communicate()
return stdout
def run2(value: Any):
proc = subprocess.Popen(['ls'], universal_newlines=value)
stdout, _ = proc.communicate()
return stdout
""")
self.assertTypesMatchPytd(
ty,
"""
import subprocess
from typing import Any
def run1(value: bool) -> Any: ...
def run2(value: Any) -> Any: ...
""",
)
def test_popen_kwargs(self):
self.Check("""
import subprocess
def popen(cmd: str, **kwargs):
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
process = subprocess.Popen(cmd, **kwargs)
stdout, _ = process.communicate()
assert_type(stdout, 'Any')
""")
def test_enum(self):
self.Check("""
import enum
class Foo(enum.Enum):
foo = 0
bar = enum.auto()
def f(x: Foo):
pass
f(Foo.foo)
""")
def test_contextlib(self):
self.Check("from contextlib import AbstractContextManager")
def test_chainmap(self):
ty = self.Infer("""
import collections
v1 = collections.ChainMap({'a': 'b'}, {b'c': 0})
v2 = v1.maps
v3 = v1.parents
v4 = v1.new_child()
""")
self.assertTypesMatchPytd(
ty,
"""
import collections
from typing import ChainMap, List, MutableMapping, Union
v1: ChainMap[Union[bytes, str], Union[int, str]]
v2: List[MutableMapping[Union[bytes, str], Union[int, str]]]
v3: ChainMap[Union[bytes, str], Union[int, str]]
v4: ChainMap[Union[bytes, str], Union[int, str]]
""",
)
def test_re(self):
ty = self.Infer("""
import re
pattern = re.compile('')
match = pattern.fullmatch('')
if match:
group = match[0]
""")
self.assertTypesMatchPytd(
ty,
"""
import re
from typing import Optional
pattern: re.Pattern[str]
match: Optional[re.Match[str]]
group: str
""",
)
def test_textio_buffer(self):
self.Check("""
import sys
sys.stdout.buffer
""")
def test_io_open(self):
ty = self.Infer("""
import io
def f(name):
return io.open(name, "rb").read()
""")
self.assertTypesMatchPytd(
ty,
"""
import io
def f(name) -> bytes: ...
""",
)
def test_array_frombytes(self):
self.Check("""
import array
def f(x: array.array, y: bytes):
return x.frombytes(y)
""")
def test_property_attributes(self):
self.Check("""
class C:
@property
def x(self):
pass
print(C.x.fget, C.x.fset, C.x.fdel)
""")
def test_re_and_typing(self):
self.CheckWithErrors("""
import re
from typing import Match, Optional, Pattern
ok1: Pattern = re.compile("")
ok2: Optional[Match] = re.match("", "")
no1: Pattern = 0 # annotation-type-mismatch
no2: Match = 0 # annotation-type-mismatch
""")
def test_contextmanager_keywordonly(self):
ty = self.Infer("""
from contextlib import contextmanager
@contextmanager
def myctx(*, msg=None):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, Iterator, ParamSpec, TypeVar
_P = ParamSpec('_P')
_T_co = TypeVar('_T_co')
def contextmanager(
func: Callable[_P, Iterator[_T_co]]
) -> Callable[_P, contextlib._GeneratorContextManager[_T_co]]: ...
def myctx(*, msg = ...) -> contextlib._GeneratorContextManager: ...
""",
)
if __name__ == "__main__":
test_base.main()
| StdlibTestsFeatures |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 6714,
"end": 8346
} | class ____(GeneratedAirbyteDestination):
@public
def __init__(
self,
name: str,
host: str,
routing_key: str,
ssl: Optional[bool] = None,
port: Optional[int] = None,
virtual_host: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
exchange: Optional[str] = None,
):
"""Airbyte Destination for Rabbitmq.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/rabbitmq
Args:
name (str): The name of the destination.
ssl (Optional[bool]): SSL enabled.
host (str): The RabbitMQ host name.
port (Optional[int]): The RabbitMQ port.
virtual_host (Optional[str]): The RabbitMQ virtual host name.
username (Optional[str]): The username to connect.
password (Optional[str]): The password to connect.
exchange (Optional[str]): The exchange name.
routing_key (str): The routing key.
"""
self.ssl = check.opt_bool_param(ssl, "ssl")
self.host = check.str_param(host, "host")
self.port = check.opt_int_param(port, "port")
self.virtual_host = check.opt_str_param(virtual_host, "virtual_host")
self.username = check.opt_str_param(username, "username")
self.password = check.opt_str_param(password, "password")
self.exchange = check.opt_str_param(exchange, "exchange")
self.routing_key = check.str_param(routing_key, "routing_key")
super().__init__("Rabbitmq", name)
| RabbitmqDestination |
python | protocolbuffers__protobuf | python/google/protobuf/descriptor.py | {
"start": 1669,
"end": 2974
} | class ____(object):
"""Wrapper class of threading.Lock(), which is allowed by 'with'."""
def __new__(cls):
self = object.__new__(cls)
self._lock = threading.Lock() # pylint: disable=protected-access
return self
def __enter__(self):
self._lock.acquire()
def __exit__(self, exc_type, exc_value, exc_tb):
self._lock.release()
_lock = threading.Lock()
def _Deprecated(
name,
alternative='get/find descriptors from generated code or query the descriptor_pool',
):
if _Deprecated.count > 0:
_Deprecated.count -= 1
warnings.warn(
'Call to deprecated %s, use %s instead.' % (name, alternative),
category=DeprecationWarning,
stacklevel=3,
)
# These must match the values in descriptor.proto, but we can't use them
# directly because we sometimes need to reference them in feature helpers
# below *during* the build of descriptor.proto.
_FEATURESET_MESSAGE_ENCODING_DELIMITED = 2
_FEATURESET_FIELD_PRESENCE_IMPLICIT = 2
_FEATURESET_FIELD_PRESENCE_LEGACY_REQUIRED = 3
_FEATURESET_REPEATED_FIELD_ENCODING_PACKED = 1
_FEATURESET_ENUM_TYPE_CLOSED = 2
# Deprecated warnings will print 100 times at most which should be enough for
# users to notice and do not cause timeout.
_Deprecated.count = 100
_internal_create_key = object()
| _Lock |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-meta/llama_index/llms/meta/base.py | {
"start": 150,
"end": 1390
} | class ____(OpenAILike):
"""
Llama LLM.
Examples:
`pip install llama-index-llms-meta`
```python
from llama_index.llms.meta import LlamaLLM
# set api key in env or in llm
# import os
# os.environ["LLAMA_API_KEY"] = "your api key"
llm = LlamaLLM(
model="Llama-3.3-8B-Instruct", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str = "Llama-3.3-8B-Instruct",
api_key: Optional[str] = None,
api_base: str = "https://api.llama.com/compat/v1",
is_chat_model: bool = True,
# Slightly lower to account for tokenization defaults
context_window: int = 120000,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("LLAMA_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
context_window=context_window,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "LlamaLLM"
| LlamaLLM |
python | pytorch__pytorch | test/test_fx_passes.py | {
"start": 17722,
"end": 18266
} | class ____:
@staticmethod
def forward(x):
x += 3
x = x.dequantize()
x = torch.sigmoid(x)
x = x.to(torch.float16)
return x
@staticmethod
def pattern(x):
x = x.dequantize()
x = torch.sigmoid(x)
x = x.to(torch.float16)
return x
test_cases = [
# match_output, match_placeholder, num_matches
TestCase(False, False, 1),
TestCase(True, False, 1),
TestCase(False, True, 0),
TestCase(True, True, 0)
]
| QuantizationModel |
python | jina-ai__jina | jina/serve/runtimes/gateway/streamer.py | {
"start": 19242,
"end": 22482
} | class ____:
def __init__(self, connection_pool: GrpcConnectionPool, executor_name: str) -> None:
self._connection_pool: GrpcConnectionPool = connection_pool
self.executor_name = executor_name
async def post(
self,
inputs: DocumentArray,
request_size: int = 100,
on: Optional[str] = None,
parameters: Optional[Dict] = None,
return_type: Type[DocumentArray] = DocumentArray,
**kwargs,
):
if not parameters:
parameters = {}
if not docarray_v2:
reqs = []
for docs_batch in inputs.batch(batch_size=request_size, shuffle=False):
req = DataRequest()
req.header.exec_endpoint = on
req.header.target_executor = self.executor_name
req.parameters = parameters
req.data.docs = docs_batch
reqs.append(req)
else:
from docarray import BaseDoc
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx : min(ndx + n, l)]
reqs = []
if len(inputs) > 0:
for docs_batch in batch(inputs, n=request_size):
req = DataRequest()
req.document_array_cls = DocList[docs_batch.doc_type]
req.data.docs = docs_batch
req.header.exec_endpoint = on
req.header.target_executor = self.executor_name
req.parameters = parameters
reqs.append(req)
else:
req = DataRequest()
req.document_array_cls = DocList[BaseDoc]
req.data.docs = DocList[BaseDoc]()
req.header.exec_endpoint = on
req.header.target_executor = self.executor_name
req.parameters = parameters
reqs.append(req)
tasks = [
self._connection_pool.send_requests_once(
requests=[req], deployment=self.executor_name, head=True, endpoint=on
)
for req in reqs
]
results = await asyncio.gather(*tasks)
if not docarray_v2:
docs = DocumentArray.empty()
for resp, _ in results:
docs.extend(resp.docs)
else:
docs = DocList[return_type.doc_type]()
for resp, _ in results:
resp.document_array_cls = return_type
docs.extend(resp.docs)
return docs
async def stream_doc(
self,
inputs: 'Document',
on: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs,
):
req: SingleDocumentRequest = SingleDocumentRequest(inputs.to_protobuf())
req.header.exec_endpoint = on
req.header.target_executor = self.executor_name
req.parameters = parameters
async_generator = self._connection_pool.send_single_document_request(
request=req, deployment=self.executor_name, head=True, endpoint=on
)
async for resp, _ in async_generator:
yield resp
| _ExecutorStreamer |
python | numba__numba | numba/core/errors.py | {
"start": 18817,
"end": 18913
} | class ____(NumbaError):
"""
Functionality is deprecated.
"""
pass
| DeprecationError |
python | ansible__ansible | lib/ansible/plugins/become/__init__.py | {
"start": 710,
"end": 5210
} | class ____(AnsiblePlugin):
name = None # type: str | None
# messages for detecting prompted password issues
fail = tuple() # type: tuple[str, ...]
missing = tuple() # type: tuple[str, ...]
# many connection plugins cannot provide tty, set to True if your become
# plugin requires a tty, i.e su
require_tty = False
# plugin allows for pipelining execution
pipelining = True
# prompt to match
prompt = ''
def __init__(self):
super(BecomeBase, self).__init__()
self._id = ''
self.success = ''
def get_option(self, option, hostvars=None, playcontext=None):
""" Overrides the base get_option to provide a fallback to playcontext vars in case a 3rd party plugin did not
implement the base become options required in Ansible. """
# TODO: add deprecation warning for ValueError in devel that removes the playcontext fallback
try:
return super(BecomeBase, self).get_option(option, hostvars=hostvars)
except KeyError:
pc_fallback = ['become_user', 'become_pass', 'become_flags', 'become_exe']
if option not in pc_fallback:
raise
return getattr(playcontext, option, None)
def expect_prompt(self) -> bool:
"""This function assists connection plugins in determining if they need to wait for
a prompt. Both a prompt and a password are required.
"""
return bool(self.prompt and self.get_option('become_pass'))
def _build_success_command(self, cmd, shell, noexe=False):
if not all((cmd, shell, self.success)):
return cmd
try:
cmd = shlex.quote('%s %s %s %s' % (shell.ECHO, self.success, shell.COMMAND_SEP, cmd))
except AttributeError as ex:
raise AnsibleError(f'The {shell._load_name!r} shell plugin does not support become. It is missing the {ex.name!r} attribute.')
exe = getattr(shell, 'executable', None)
if exe and not noexe:
cmd = '%s -c %s' % (exe, cmd)
return cmd
@abstractmethod
def build_become_command(self, cmd, shell):
self._id = _gen_id()
self.success = 'BECOME-SUCCESS-%s' % self._id
def strip_become_prompt(self, data: bytes) -> bytes:
"""
Strips the first found configured become prompt from `data`, trailing whitespace and anything that precedes the prompt, then returns the result.
If no prompt is expected, or the prompt is not `str` or `bytes`, `data` will be returned as-is.
"""
if not self.prompt or not isinstance(self.prompt, (str, bytes)) or not self.expect_prompt():
return data
return self._strip_through_prefix(self.prompt, data)
def strip_become_success(self, data: bytes) -> bytes:
"""Strips the first found success marker from `data`, trailing whitespace and anything that precedes the success marker, then returns the result."""
return self._strip_through_prefix(self.success, data)
@staticmethod
def _strip_through_prefix(match: str | bytes, data: bytes) -> bytes:
"""Strips the first occurrence of `match` from `data`, trailing whitespace and anything that precedes `match`, then returns the result."""
return re.sub(br'^.*?' + re.escape(to_bytes(match)) + br'\s*', b'', data, count=1, flags=re.DOTALL)
def check_success(self, b_output):
b_success = to_bytes(self.success)
return any(b_success in l.rstrip() for l in b_output.splitlines(True))
def check_password_prompt(self, b_output):
""" checks if the expected password prompt exists in b_output """
if self.prompt:
b_prompt = to_bytes(self.prompt).strip()
return any(l.strip().startswith(b_prompt) for l in b_output.splitlines())
return False
def _check_password_error(self, b_out, msg):
""" returns True/False if domain specific i18n version of msg is found in b_out """
b_fail = to_bytes(dgettext(self.name, msg))
return b_fail and b_fail in b_out
def check_incorrect_password(self, b_output):
for errstring in self.fail:
if self._check_password_error(b_output, errstring):
return True
return False
def check_missing_password(self, b_output):
for errstring in self.missing:
if self._check_password_error(b_output, errstring):
return True
return False
| BecomeBase |
python | pyodide__pyodide | tools/backport.py | {
"start": 2380,
"end": 3793
} | class ____:
"""Store the history of the GitHub PRs with a map from pr_number to CommitInfo"""
commits: dict[int, CommitInfo]
@classmethod
def from_git(self, *args):
result = run(["git", "log", "--oneline", *args], capture_output=True)
lines = result.stdout.splitlines()
return CommitHistory(lines)
def __init__(self, lines):
commits = {}
PR_NUMBER_RE = re.compile(r"\(#[0-9]+\)$")
for history_idx, line in enumerate(lines):
if not (m := PR_NUMBER_RE.search(line)):
continue
pr_number = int(m.group(0)[2:-1])
shorthash, shortlog = line.split(" ", 1)
commits[pr_number] = CommitInfo(pr_number, shorthash, shortlog, history_idx)
self.commits = commits
def lookup_pr(self, pr_number: int) -> CommitInfo:
return self.commits[pr_number]
def has_pr(self, pr_number: int) -> bool:
return pr_number in self.commits
@functools.cache
def get_commits() -> list[CommitInfo]:
"""Return the CommitInfo of the PRs we want to backport"""
pr_numbers = get_needs_backport_pr_numbers()
commit_history = CommitHistory.from_git("main")
commits = [commit_history.lookup_pr(x) for x in pr_numbers]
return sorted(commits, key=lambda c: -c.history_idx)
#
# Changelog parsing
#
# See tests in tools/tests/test_backports.py.
@dataclass
| CommitHistory |
python | realpython__materials | solid-principles-python/file_manager_srp.py | {
"start": 644,
"end": 909
} | class ____:
def __init__(self, filename):
self.path = Path(filename)
def read(self, encoding="utf-8"):
return self.path.read_text(encoding)
def write(self, data, encoding="utf-8"):
self.path.write_text(data, encoding)
| FileManager |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/scalarbool.py | {
"start": 498,
"end": 1393
} | class ____(int):
def __new__(cls, *args, **kw):
# type: (Any, Any, Any) -> Any
anchor = kw.pop('anchor', None)
b = int.__new__(cls, *args, **kw)
if anchor is not None:
b.yaml_set_anchor(anchor, always_dump=True)
return b
@property
def anchor(self):
# type: () -> Any
if not hasattr(self, Anchor.attrib):
setattr(self, Anchor.attrib, Anchor())
return getattr(self, Anchor.attrib)
def yaml_anchor(self, any=False):
# type: (bool) -> Any
if not hasattr(self, Anchor.attrib):
return None
if any or self.anchor.always_dump:
return self.anchor
return None
def yaml_set_anchor(self, value, always_dump=False):
# type: (Any, bool) -> None
self.anchor.value = value
self.anchor.always_dump = always_dump
| ScalarBoolean |
python | getsentry__sentry | src/sentry/issue_detection/detectors/experiments/n_plus_one_api_calls_detector.py | {
"start": 952,
"end": 11039
} | class ____(PerformanceDetector):
"""
Detect parallel network calls to the same parameterized endpoint.
[-------- transaction -----------]
[-------- parent span -----------]
[n0] https://service.io/resources/1/?id=12443
[n1] https://service.io/resources/2/?id=13342
[n2] https://service.io/resources/3/?id=13441
...
"""
type = DetectorType.EXPERIMENTAL_N_PLUS_ONE_API_CALLS
settings_key = DetectorType.EXPERIMENTAL_N_PLUS_ONE_API_CALLS
def __init__(self, settings: dict[DetectorType, Any], event: dict[str, Any]) -> None:
super().__init__(settings, event)
# TODO: Only store the span IDs and timestamps instead of entire span objects
self.spans: list[Span] = []
@classmethod
def is_detection_allowed_for_system(cls) -> bool:
# Defer to the issue platform for whether to create issues
# See https://develop.sentry.dev/backend/issue-platform/#releasing-your-issue-type
return True
def visit_span(self, span: Span) -> None:
if not self._is_span_eligible(span):
return
op = span.get("op", None)
if op not in self.settings.get("allowed_span_ops", []):
return
previous_span = self.spans[-1] if len(self.spans) > 0 else None
if previous_span is None:
self.spans.append(span)
elif self._spans_are_concurrent(previous_span, span) and self._spans_are_similar(
previous_span, span
):
self.spans.append(span)
else:
self._maybe_store_problem()
self.spans = [span]
def is_creation_allowed_for_organization(self, organization: Organization) -> bool:
return features.has(
"organizations:experimental-n-plus-one-api-detector-rollout", organization
)
def is_creation_allowed_for_project(self, project: Project) -> bool:
return self.settings["detection_enabled"]
@classmethod
def is_event_eligible(cls, event: dict[str, Any], project: Project | None = None) -> bool:
trace_op = event.get("contexts", {}).get("trace", {}).get("op")
if trace_op and trace_op not in ["navigation", "pageload", "ui.load", "ui.action"]:
return False
return True
def _is_span_eligible(self, span: Span) -> bool:
span_id = span.get("span_id", None)
op = span.get("op", None)
hash = span.get("hash", None)
if not span_id or not op or not hash:
return False
description = span.get("description")
if not description:
return False
if description.strip()[:3].upper() != "GET":
return False
url = get_url_from_span(span)
# GraphQL URLs have complicated queries in them. Until we parse those
# queries to check for what's duplicated, we can't tell what is being
# duplicated. Ignore them for now
if "graphql" in url:
return False
# Next.js infixes its data URLs with a build ID. (e.g.,
# /_next/data/<uuid>/some-endpoint) This causes a fingerprinting
# explosion, since every deploy would change this ID and create new
# fingerprints. Since we're not parameterizing URLs yet, we need to
# exclude them
if "_next/data" in url:
return False
# Next.js error pages cause an N+1 API Call that isn't useful to anyone
if "__nextjs_original-stack-frame" in url:
return False
# LaunchDarkly SDK calls are not useful
if "https://app.launchdarkly.com/sdk/" in url:
return False
if not url:
return False
if is_filtered_url(url):
return False
# Once most users update their SDKs to use the latest standard, we
# won't have to do this, since the URLs will be sent in as `span.data`
# in a parsed format
parsed_url = urlparse(str(url))
# Ignore anything that looks like an asset. Some frameworks (and apps)
# fetch assets via XHR, which is not our concern
_pathname, extension = os.path.splitext(parsed_url.path)
if extension and extension in [".js", ".css", ".svg", ".png", ".mp3", ".jpg", ".jpeg"]:
return False
is_prefetch_span = get_path(span, "data", "http.request.prefetch")
if is_prefetch_span:
return False
return True
def on_complete(self) -> None:
self._maybe_store_problem()
self.spans = []
def _maybe_store_problem(self) -> None:
if len(self.spans) < 1:
return
if len(self.spans) < self.settings["count"]:
return
total_duration = get_total_span_duration(self.spans)
if total_duration < self.settings["total_duration"]:
return
last_span = self.spans[-1]
fingerprint = self._fingerprint()
if not fingerprint:
return
if self.stored_problems.get(fingerprint):
logging.info(
"Multiple occurrences detected for fingerprint",
extra={"detector": self.settings_key},
)
return
offender_span_ids = [span["span_id"] for span in self.spans]
problem_description = self._get_parameterized_url(self.spans[0])
if problem_description == "":
problem_description = os.path.commonprefix(
[span.get("description", "") or "" for span in self.spans]
)
parent_span_id = last_span.get("parent_span_id")
parameters = self._get_parameters()
self.stored_problems[fingerprint] = PerformanceProblem(
fingerprint=fingerprint,
op=last_span["op"],
desc=problem_description,
type=PerformanceNPlusOneAPICallsGroupType,
cause_span_ids=[],
parent_span_ids=[parent_span_id] if parent_span_id else [],
offender_span_ids=offender_span_ids,
evidence_data={
"op": last_span["op"],
"cause_span_ids": [],
"parent_span_ids": [parent_span_id] if parent_span_id else [],
"offender_span_ids": offender_span_ids,
"transaction_name": self._event.get("transaction", ""),
"num_repeating_spans": str(len(offender_span_ids)) if offender_span_ids else "",
"repeating_spans": self._get_path_prefix(self.spans[0]),
"repeating_spans_compact": get_span_evidence_value(self.spans[0], include_op=False),
"parameters": parameters["query_params"],
"path_parameters": parameters["path_params"],
"common_url": problem_description,
},
evidence_display=[
IssueEvidence(
name="Offending Spans",
value=get_notification_attachment_body(
op=last_span["op"], desc=problem_description
),
# Has to be marked important to be displayed in the notifications
important=True,
)
],
)
def _get_parameters(self) -> dict[str, list[str]]:
if not self.spans or len(self.spans) == 0:
return {"query_params": [], "path_params": []}
parameterized_urls = [
parameterize_url_with_result(get_url_from_span(span)) for span in self.spans
]
path_params = [param["path_params"] for param in parameterized_urls]
query_dict: dict[str, list[str]] = defaultdict(list)
for parameterized_url in parameterized_urls:
query_params = parameterized_url["query_params"]
for key, value in query_params.items():
query_dict[key] += value
# Note: dict.fromkeys() is just to deduplicate values and Python dicts are ordered
path_params_list: list[str] = list(
dict.fromkeys(
[f"{', '.join(param_group)}" for param_group in path_params if param_group]
).keys()
)
query_params_list: list[str] = list(
dict.fromkeys(
[f"{key}: {', '.join(values)}" for key, values in query_dict.items()]
).keys()
)
return {
# Use sets to deduplicate the lists, but still preserve the order.
"path_params": path_params_list,
"query_params": query_params_list,
}
def _get_parameterized_url(self, span: Span) -> str:
return parameterize_url(get_url_from_span(span))
def _get_path_prefix(self, repeating_span: Span | None) -> str:
if not repeating_span:
return ""
url = get_url_from_span(repeating_span)
parsed_url = urlparse(url)
return parsed_url.path or ""
def _fingerprint(self) -> str | None:
first_url = get_url_from_span(self.spans[0])
parameterized_first_url = parameterize_url(first_url)
# Check if we parameterized the URL at all. If not, do not attempt
# fingerprinting. Unparameterized URLs run too high a risk of
# fingerprinting explosions.
if parameterized_first_url == first_url:
return None
fingerprint = fingerprint_http_spans([self.spans[0]])
return f"1-{PerformanceNPlusOneAPICallsGroupType.type_id}-{fingerprint}"
def _spans_are_concurrent(self, span_a: Span, span_b: Span) -> bool:
span_a_start = span_a["start_timestamp"]
span_b_start = span_b["start_timestamp"]
return timedelta(seconds=abs(span_a_start - span_b_start)) < timedelta(
milliseconds=self.settings["concurrency_threshold"]
)
def _spans_are_similar(self, span_a: Span, span_b: Span) -> bool:
return (
self._get_parameterized_url(span_a) == self._get_parameterized_url(span_b)
and span_a["parent_span_id"] == span_b["parent_span_id"]
)
| NPlusOneAPICallsExperimentalDetector |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amplitude/integration_tests/integration_test.py | {
"start": 406,
"end": 4905
} | class ____(YamlDeclarativeSource):
def __init__(self):
with open("../manifest.yaml", "r") as yaml_file:
primary_manifest = yaml.safe_load(yaml_file)
test_manifest = primary_manifest
stream_list = []
# We are only testing the annotations and cohorts streams
for stream in primary_manifest["streams"]:
if stream["name"] in ["annotations", "cohorts"]:
stream_list.append(stream)
test_manifest["streams"] = stream_list
with open("test_manifest.yaml", "w") as test_yaml:
yaml.dump(test_manifest, test_yaml)
super().__init__("test_manifest.yaml")
if os.path.exists("test_manifest.yaml"):
os.remove("test_manifest.yaml")
@pytest.fixture(scope="module")
def config():
    """Load connector credentials from the local secrets file."""
    secrets_path = Path(__file__).parent.parent / "secrets/config.json"
    with open(secrets_path, "r") as file:
        return json.load(file)
@pytest.fixture(scope="module")
def streams(config):
    """Return the configured streams of the trimmed test source.

    NOTE(review): a catalog was previously built here via CatalogBuilder but
    never passed anywhere; it has been removed as dead code.
    """
    return SourceAmplitudeTest().streams(config=config)
@pytest.fixture(scope="module")
def annotations_stream(streams):
    """Return the declarative "annotations" stream from the test source."""
    return next(stream for stream in streams if stream.name == "annotations")
@pytest.fixture(scope="module")
def cohorts_stream(streams):
    """Return the declarative "cohorts" stream from the test source."""
    return next(stream for stream in streams if stream.name == "cohorts")
@pytest.mark.parametrize(
    "stream_fixture_name, url, expected_records",
    [
        (
            "annotations_stream",
            "https://amplitude.com/api/2/annotations",
            [
                {"date": "2023-09-22", "details": "vacate et scire", "id": 1, "label": "veritas"},
                {"date": "2023-09-22", "details": "valenter volenter", "id": 2, "label": "veritas"},
            ],
        ),
        (
            "cohorts_stream",
            "https://amplitude.com/api/3/cohorts",
            [
                {
                    "appId": 1,
                    "archived": False,
                    "chart_id": "27f310c471e8409797a18f18fe2884fb",
                    "createdAt": 1695394830,
                    "definition": {},
                    "description": "Arduus ad Solem",
                    "edit_id": "fab12bc14de641589630c2ceced1c197",
                    "finished": True,
                    "hidden": False,
                    "id": 1,
                    "is_official_content": True,
                    "is_predictive": True,
                    "last_viewed": 1695394946,
                    "lastComputed": 1695394830,
                    "lastMod": 1695394830,
                    "location_id": "517974113223461a8468400b6ce88383",
                    "metadata": ["me", "ta", "da", "ta"],
                    "name": "Solem",
                    "owners": ["me", "mom"],
                    "popularity": 100,
                    "published": True,
                    "shortcut_ids": ["solem"],
                    "size": 186,
                    "type": "one",
                    "view_count": 2,
                    "viewers": ["me", "mom"],
                }
            ],
        ),
    ],
)
def test_empty_streams(stream_fixture_name, url, expected_records, request, requests_mock):
    """
    A test with synthetic data since we are not able to test `annotations_stream` and `cohorts_stream` streams
    due to free subscription plan for the sandbox
    """
    stream = request.getfixturevalue(stream_fixture_name)
    empty_stream_slice = StreamSlice(partition={}, cursor_slice={})
    # NOTE(review): read_records is assumed to return a lazy iterator, so the
    # HTTP mock registered on the next line is in place before any request is
    # actually made — confirm if read_records ever becomes eager.
    records_reader = stream.read_records(sync_mode=SyncMode.full_refresh, cursor_field=None, stream_slice=empty_stream_slice)
    requests_mock.get(url, status_code=200, json={"data": expected_records})
    # Sort actual and expected records by ID.
    # Prepare pairs of the actual and expected versions of the same record.
    pairs = zip(*[sorted(record, key=operator.itemgetter("id")) for record in (list(records_reader), expected_records)])
    # Calculate unmatched records and return their key, actual value and expected value
    unmatched = [
        [(key, _actual[key], _expected[key]) for key in _actual if _actual[key] != _expected[key]]
        for _actual, _expected in pairs
        if _actual != _expected
    ]
    # Ensure we don't have any unmatched records
    assert not any(unmatched)
| SourceAmplitudeTest |
python | joke2k__faker | faker/providers/person/zh_TW/__init__.py | {
"start": 81,
"end": 27338
} | class ____(PersonProvider):
# update: 2025 04 30
# source:
# 中華民國(ROC)人口 2025 3月: 23,374,742
# (As of March 2025, the total population of the Republic of China (Taiwan) is 23,374,742.)
# https://www.ris.gov.tw/app/portal/346
# 臺灣原住民人口 2024 12月 612,000
# (As of December 2024, the indigenous population in Taiwan is approximately 612,000, accounting for 2.7% of the
# total population.)
# https://www.moi.gov.tw/News_Content.aspx?n=2905&sms=10305&s=325345
# Although most Taiwanese people are ethnically Han, their culture has diverged significantly from mainland China
# over centuries.
# Taiwan’s Han communities—like Hoklo and Hakka—have developed unique languages, customs, and identities distinct
# from Chinese people today.
# *Taiwanese Indigenous peoples traditionally have their own naming systems*,
# which are different from Han Chinese names—they often reflect tribal identity, family lineage, or personal traits.
formats_female = OrderedDict(
(
("{{last_name}}{{first_name_female}}", 1), # 漢人 Han
# ("{{first_indigenous_name_female}} {{last_indigenous_name}}", 0.027), # 原住民 Taiwanese Indigenous Peoples
)
)
formats_male = OrderedDict(
(
("{{last_name}}{{first_name_male}}", 1), # 漢人 Han
# ("{{first_indigenous_name_male}} {{last_indigenous_name}}", 0.027), # 原住民 Taiwanese Indigenous Peoples
)
)
formats = formats_male.copy()
formats.update(formats_female)
# =============================================================================
# source:
# 中華民國(ROC)全國姓名統計 2023/6月
# (National Name Statistics of the Republic of China (Taiwan), June 2023)
# https://www.ris.gov.tw/documents/data/5/2/112namestat.pdf
# page 267: TOP 100 female first name
# page 281: The top 10 most common female names by year of birth
first_names_female = OrderedDict(
(
# top 100 names in all ages
("淑芬", 0.14),
("淑惠", 0.13),
("美玲", 0.12),
("麗華", 0.11),
("美惠", 0.11),
("淑貞", 0.1),
("雅婷", 0.1),
("秀英", 0.1),
("淑娟", 0.1),
("秀琴", 0.1),
("秀美", 0.09),
("美華", 0.09),
("怡君", 0.09),
("淑華", 0.09),
("美玉", 0.09),
("雅惠", 0.08),
("秀蘭", 0.08),
("淑美", 0.08),
("秀鳳", 0.08),
("美珠", 0.07),
("麗珠", 0.07),
("麗娟", 0.07),
("淑玲", 0.07),
("美雲", 0.07),
("雅雯", 0.07),
("雅玲", 0.07),
("美麗", 0.06),
("玉蘭", 0.06),
("月娥", 0.06),
("麗卿", 0.06),
("惠美", 0.06),
("麗美", 0.06),
("秀珠", 0.06),
("淑珍", 0.05),
("欣怡", 0.05),
("素貞", 0.05),
("秀珍", 0.05),
("素珍", 0.05),
("惠玲", 0.05),
("玉梅", 0.05),
("玉英", 0.05),
("淑慧", 0.05),
("秀玲", 0.05),
("明珠", 0.05),
("秋香", 0.05),
("秀玉", 0.05),
("麗雲", 0.05),
("秀梅", 0.05),
("麗玉", 0.05),
("寶珠", 0.05),
("怡婷", 0.05),
("麗玲", 0.05),
("宜蓁", 0.04),
("月英", 0.04),
("淑芳", 0.04),
("玉玲", 0.04),
("秀雲", 0.04),
("慧玲", 0.04),
("春美", 0.04),
("碧霞", 0.04),
("麗香", 0.04),
("美鳳", 0.04),
("美珍", 0.04),
("美英", 0.04),
("碧珠", 0.04),
("碧雲", 0.04),
("佳蓉", 0.04),
("美蘭", 0.04),
("秀娟", 0.04),
("美娟", 0.04),
("淑敏", 0.04),
("玉珍", 0.04),
("淑卿", 0.04),
("美慧", 0.04),
("靜宜", 0.04),
("素珠", 0.04),
("雅慧", 0.04),
("靜怡", 0.04),
("玉美", 0.04),
("雅萍", 0.04),
("素卿", 0.04),
("素琴", 0.04),
("秀枝", 0.04),
("金蓮", 0.04),
("秋月", 0.04),
("麗雪", 0.04),
("惠珍", 0.04),
("心怡", 0.04),
("佳玲", 0.04),
("鈺婷", 0.04),
("詩涵", 0.04),
("秀霞", 0.04),
("秀華", 0.03),
("麗琴", 0.03),
("金鳳", 0.03),
("麗珍", 0.03),
("玉鳳", 0.03),
("玉琴", 0.03),
("秀蓮", 0.03),
("素蘭", 0.03),
# top n names in younger generation
("婉婷", 0.01),
("佩珊", 0.01),
("怡萱", 0.01),
("雅筑", 0.01),
("郁婷", 0.01),
("宜庭", 0.01),
("欣妤", 0.01),
("思妤", 0.01),
("佳穎", 0.01),
("品妤", 0.01),
("子涵", 0.01),
("品妍", 0.01),
("子晴", 0.01),
("詠晴", 0.01),
("禹彤", 0.01),
("羽彤", 0.01),
("芯語", 0.01),
("宥蓁", 0.01),
("語彤", 0.01),
("苡晴", 0.01),
("苡菲", 0.01),
("雨霏", 0.01),
("芸菲", 0.01),
("苡安", 0.01),
("玥彤", 0.01),
)
)
# source:
# 中華民國(ROC)全國姓名統計 2023/6月
# (National Name Statistics of the Republic of China (Taiwan), June 2023)
# https://www.ris.gov.tw/documents/data/5/2/112namestat.pdf
# page 266: TOP 100 male first name
# page 280: The top 10 most common male names by year of birth
first_names_male = OrderedDict(
(
# top 100 names in all ages
("家豪", 0.06),
("志明", 0.05),
("建宏", 0.05),
("俊傑", 0.05),
("俊宏", 0.05),
("志豪", 0.05),
("志偉", 0.05),
("承翰", 0.04),
("冠宇", 0.04),
("志強", 0.04),
("宗翰", 0.04),
("志宏", 0.04),
("冠廷", 0.04),
("志成", 0.04),
("文雄", 0.04),
("承恩", 0.04),
("金龍", 0.04),
("文彬", 0.03),
("正雄", 0.03),
("明輝", 0.03),
("柏翰", 0.03),
("彥廷", 0.03),
("明德", 0.03),
("文龍", 0.03),
("俊賢", 0.03),
("志忠", 0.03),
("國華", 0.03),
("信宏", 0.03),
("家銘", 0.03),
("俊雄", 0.03),
("宇翔", 0.03),
("建成", 0.03),
("冠霖", 0.03),
("志銘", 0.02),
("志雄", 0.02),
("進財", 0.02),
("明哲", 0.02),
("榮華", 0.02),
("柏宇", 0.02),
("志鴻", 0.02),
("志賢", 0.02),
("俊良", 0.02),
("建華", 0.02),
("家瑋", 0.02),
("家榮", 0.02),
("文祥", 0.02),
("建志", 0.02),
("文正", 0.02),
("文忠", 0.02),
("凱翔", 0.02),
("家宏", 0.02),
("國雄", 0.02),
("明宏", 0.02),
("文賢", 0.02),
("世昌", 0.02),
("哲瑋", 0.02),
("文傑", 0.02),
("正義", 0.02),
("武雄", 0.02),
("建興", 0.02),
("志文", 0.02),
("嘉宏", 0.02),
("文章", 0.02),
("明宗", 0.02),
("宇軒", 0.02),
("進興", 0.02),
("俊豪", 0.02),
("俊廷", 0.02),
("冠宏", 0.02),
("仁傑", 0.02),
("威廷", 0.02),
("哲維", 0.02),
("宗霖", 0.02),
("文欽", 0.02),
("博文", 0.02),
("俊男", 0.02),
("宗憲", 0.02),
("子豪", 0.02),
("俊宇", 0.02),
("勝雄", 0.02),
("柏諺", 0.02),
("建良", 0.02),
("俊明", 0.02),
("俊銘", 0.02),
("世明", 0.02),
("義雄", 0.02),
("建銘", 0.02),
("永昌", 0.02),
("文華", 0.02),
("子翔", 0.02),
("柏宏", 0.02),
("政宏", 0.02),
("進發", 0.02),
("柏霖", 0.02),
("建中", 0.02),
("國榮", 0.02),
("志誠", 0.02),
("聰明", 0.02),
("俊佑", 0.02),
("志遠", 0.02),
# top n names in younger generation
("宥廷", 0.01),
("品睿", 0.01),
("宸睿", 0.01),
("宇恩", 0.01),
("宥辰", 0.01),
("柏睿", 0.01),
("睿恩", 0.01),
("恩碩", 0.01),
("子睿", 0.01),
("子宸", 0.01),
("子恩", 0.01),
)
)
# source:
# 中華民國(ROC)全國姓名統計 2023/6月
# (National Name Statistics of the Republic of China (Taiwan), June 2023)
# https://www.ris.gov.tw/documents/data/5/2/112namestat.pdf
# page 282, 283, 284: TOP 200 last name
last_names = OrderedDict(
(
("陳", 11.2),
("林", 8.33),
("黃", 6),
("張", 5.3),
("李", 5.13),
("王", 4.09),
("吳", 4),
("劉", 3.16),
("蔡", 2.93),
("楊", 2.64),
("許", 2.31),
("鄭", 1.89),
("謝", 1.77),
("洪", 1.51),
("郭", 1.5),
("邱", 1.47),
("曾", 1.45),
("廖", 1.35),
("賴", 1.33),
("徐", 1.26),
("周", 1.21),
("葉", 1.18),
("蘇", 1.14),
("莊", 0.95),
("江", 0.92),
("呂", 0.91),
("何", 0.85),
("蕭", 0.83),
("羅", 0.83),
("高", 0.77),
("潘", 0.69),
("簡", 0.68),
("朱", 0.66),
("鍾", 0.65),
("游", 0.59),
("彭", 0.59),
("詹", 0.58),
("施", 0.54),
("胡", 0.54),
("沈", 0.51),
("余", 0.51),
("盧", 0.48),
("梁", 0.46),
("趙", 0.44),
("顏", 0.44),
("柯", 0.44),
("翁", 0.4),
("魏", 0.38),
("孫", 0.36),
("戴", 0.35),
("范", 0.34),
("方", 0.33),
("宋", 0.32),
("鄧", 0.27),
("杜", 0.23),
("侯", 0.23),
("傅", 0.22),
("曹", 0.22),
("薛", 0.21),
("阮", 0.21),
("丁", 0.21),
("卓", 0.19),
("馬", 0.18),
("温", 0.18),
("董", 0.18),
("藍", 0.18),
("古", 0.18),
("石", 0.18),
("紀", 0.17),
("唐", 0.17),
("蔣", 0.17),
("姚", 0.17),
("連", 0.17),
("歐", 0.16),
("馮", 0.16),
("程", 0.16),
("湯", 0.15),
("田", 0.15),
("康", 0.15),
("黄", 0.15),
("姜", 0.15),
("白", 0.14),
("汪", 0.14),
("尤", 0.14),
("鄒", 0.14),
("黎", 0.13),
("巫", 0.12),
("鐘", 0.12),
("涂", 0.12),
("龔", 0.11),
("嚴", 0.09),
("韓", 0.09),
("袁", 0.09),
("金", 0.08),
("童", 0.08),
("陸", 0.07),
("柳", 0.07),
("凃", 0.07),
("夏", 0.07),
("邵", 0.07),
("錢", 0.06),
("伍", 0.06),
("倪", 0.06),
("溫", 0.06),
("駱", 0.05),
("譚", 0.05),
("于", 0.05),
("甘", 0.05),
("熊", 0.05),
("任", 0.05),
("秦", 0.05),
("章", 0.05),
("毛", 0.05),
("官", 0.05),
("顧", 0.05),
("史", 0.05),
("萬", 0.05),
("俞", 0.05),
("粘", 0.04),
("雷", 0.04),
("饒", 0.04),
("張簡", 0.04),
("闕", 0.04),
("凌", 0.04),
("武", 0.03),
("孔", 0.03),
("尹", 0.03),
("崔", 0.03),
("辛", 0.03),
("歐陽", 0.03),
("辜", 0.03),
("陶", 0.03),
("段", 0.03),
("易", 0.03),
("龍", 0.03),
("韋", 0.03),
("池", 0.03),
("葛", 0.03),
("褚", 0.03),
("孟", 0.02),
("麥", 0.02),
("殷", 0.02),
("莫", 0.02),
("文", 0.02),
("賀", 0.02),
("賈", 0.02),
("管", 0.02),
("關", 0.02),
("包", 0.02),
("向", 0.02),
("丘", 0.02),
("范姜", 0.02),
("梅", 0.02),
("華", 0.02),
("裴", 0.02),
("利", 0.02),
("全", 0.02),
("樊", 0.02),
("房", 0.02),
("佘", 0.02),
("花", 0.01),
("安", 0.01),
("左", 0.01),
("魯", 0.01),
("塗", 0.01),
("穆", 0.01),
("鮑", 0.01),
("蒲", 0.01),
("郝", 0.01),
("谷", 0.01),
("成", 0.01),
("邢", 0.01),
("練", 0.01),
("閻", 0.01),
("鄔", 0.01),
("陽", 0.01),
("盛", 0.01),
("常", 0.01),
("符", 0.01),
("耿", 0.01),
("解", 0.01),
("繆", 0.01),
("申", 0.01),
("聶", 0.01),
("祝", 0.01),
("岳", 0.01),
("曲", 0.01),
("籃", 0.01),
("齊", 0.01),
("應", 0.01),
("舒", 0.01),
("單", 0.01),
("喬", 0.01),
("畢", 0.01),
("留", 0.01),
("鄞", 0.01),
("翟", 0.01),
("牛", 0.01),
("龎", 0.01),
("覃", 0.01),
)
)
first_names = first_names_male.copy()
first_names.update(first_names_female)
# =============================================================================
# From https://en.wikipedia.org/wiki/Chinese_given_name#Common_Chinese_names
# The above information is slightly incorrect.
# 使用 pypinyin 進行姓名翻譯:https://github.com/mozillazg/python-pinyin
# Using pypinyin for name translation: https://github.com/mozillazg/python-pinyin
# print(lazy_pinyin("許", style=Style.WADEGILES, v_to_u=True)[0].replace("'","").upper().replace("Ü","U"))
# 轉換過程有部分姓氏拼音剛好是重複的或是複姓的
# 因為重建過程字典的特性無法重複所以就被忽略了 目前懶得修ouo
# Some surnames result in duplicate transliterations during the conversion process.
# Due to the nature of dictionaries (no duplicate keys), duplicates are ignored during reconstruction.
# 使用威妥瑪拼音,而不是漢語拼音
# Using Wade–Giles romanization instead of Hanyu Pinyin
last_romanized_names = OrderedDict(
(
("CHEN", 11.2),
("LIN", 8.33),
("HUANG", 0.15),
("CHANG", 0.01),
("LI", 0.02),
("WANG", 0.14),
("WU", 0.01),
("LIU", 0.01),
("TSAI", 2.93),
("YANG", 0.01),
("HSU", 1.26),
("CHENG", 0.01),
("HSIEH", 1.77),
("HUNG", 1.51),
("KUO", 1.5),
("CHIU", 0.02),
("TSENG", 1.45),
("LIAO", 1.35),
("LEI", 0.04),
("CHOU", 1.21),
("YEH", 1.18),
("SU", 1.14),
("CHUANG", 0.95),
("CHIANG", 0.15),
("LU", 0.01),
("HO", 0.02),
("HSIAO", 0.83),
("LO", 0.05),
("KAO", 0.77),
("PAN", 0.69),
("CHIEN", 0.06),
("CHU", 0.01),
("CHUNG", 0.12),
("YU", 0.05),
("PENG", 0.59),
("CHAN", 0.04),
("SHIH", 0.05),
("HU", 0.54),
("SHEN", 0.01),
("LIANG", 0.46),
("CHAO", 0.44),
("YEN", 0.01),
("KO", 0.03),
("WENG", 0.4),
("WEI", 0.03),
("SUN", 0.36),
("TAI", 0.35),
("FAN", 0.02),
("FANG", 0.02),
("SUNG", 0.32),
("TENG", 0.27),
("TU", 0.01),
("HOU", 0.23),
("FU", 0.01),
("TSAO", 0.22),
("HSUEH", 0.21),
("JUAN", 0.21),
("TING", 0.21),
("CHO", 0.19),
("MA", 0.18),
("WEN", 0.02),
("TUNG", 0.08),
("LAN", 0.01),
("KU", 0.01),
("CHI", 0.01),
("TANG", 0.15),
("YAO", 0.17),
("LIEN", 0.01),
("OU", 0.03),
("FENG", 0.16),
("TIEN", 0.15),
("KANG", 0.15),
("PAI", 0.14),
("TSOU", 0.14),
("KUNG", 0.03),
("HAN", 0.09),
("YUAN", 0.09),
("CHIN", 0.05),
("HSIA", 0.07),
("SHAO", 0.07),
("NI", 0.06),
("TAN", 0.01),
("KAN", 0.05),
("HSIUNG", 0.05),
("JEN", 0.05),
("MAO", 0.05),
("KUAN", 0.02),
("WAN", 0.05),
("JAO", 0.04),
("CHUEH", 0.04),
("LING", 0.04),
("YIN", 0.01),
("TSUI", 0.03),
("HSIN", 0.03),
("TAO", 0.03),
("TUAN", 0.03),
("I", 0.03),
("LUNG", 0.03),
("CHIH", 0.03),
("MENG", 0.02),
("MEI", 0.02),
("MO", 0.02),
("CHIA", 0.02),
("PAO", 0.01),
("HSIANG", 0.02),
("HUA", 0.01),
("PEI", 0.02),
("CHUAN", 0.02),
("SHE", 0.02),
("AN", 0.01),
("TSO", 0.01),
("MU", 0.01),
("PU", 0.01),
("HAO", 0.01),
("HSING", 0.01),
("SHENG", 0.01),
("KENG", 0.01),
("CHIEH", 0.01),
("MOU", 0.01),
("NIEH", 0.01),
("YUEH", 0.01),
("YING", 0.01),
("SHU", 0.01),
("CHIAO", 0.01),
("PI", 0.01),
("TI", 0.01),
("NIU", 0.01),
("PANG", 0.01),
)
)
first_romanized_names_male = OrderedDict(
(
("CHIA-HAO", 0.06),
("CHIH-MING", 0.02),
("CHIEN-HUNG", 0.05),
("CHUN-CHIEH", 0.05),
("CHUN-HUNG", 0.05),
("CHIH-HAO", 0.05),
("CHIH-WEI", 0.05),
("CHENG-HAN", 0.04),
("KUAN-YU", 0.04),
("CHIH-CHIANG", 0.04),
("TSUNG-HAN", 0.04),
("CHIH-HUNG", 0.02),
("KUAN-TING", 0.04),
("CHIH-CHENG", 0.02),
("WEN-HSIUNG", 0.04),
("CHENG-EN", 0.04),
("CHIN-LUNG", 0.04),
("WEN-PIN", 0.03),
("CHENG-HSIUNG", 0.03),
("MING-HUI", 0.03),
("PAI-HAN", 0.03),
("YEN-TING", 0.03),
("MING-TE", 0.03),
("WEN-LUNG", 0.03),
("CHUN-HSIEN", 0.03),
("CHIH-CHUNG", 0.03),
("KUO-HUA", 0.03),
("HSIN-HUNG", 0.03),
("CHIA-MING", 0.03),
("CHUN-HSIUNG", 0.03),
("YU-HSIANG", 0.03),
("CHIEN-CHENG", 0.03),
("KUAN-LIN", 0.03),
("CHIH-HSIUNG", 0.02),
("CHIN-TSAI", 0.02),
("MING-CHE", 0.02),
("JUNG-HUA", 0.02),
("PAI-YU", 0.02),
("CHIH-HSIEN", 0.02),
("CHUN-LIANG", 0.02),
("CHIEN-HUA", 0.02),
("CHIA-WEI", 0.02),
("CHIA-JUNG", 0.02),
("WEN-HSIANG", 0.02),
("CHIEN-CHIH", 0.02),
("WEN-CHENG", 0.02),
("WEN-CHUNG", 0.02),
("KAI-HSIANG", 0.02),
("CHIA-HUNG", 0.02),
("KUO-HSIUNG", 0.02),
("MING-HUNG", 0.02),
("WEN-HSIEN", 0.02),
("SHIH-CHANG", 0.02),
("CHE-WEI", 0.02),
("WEN-CHIEH", 0.02),
("CHENG-I", 0.02),
("WU-HSIUNG", 0.02),
("CHIEN-HSING", 0.02),
("CHIH-WEN", 0.02),
("WEN-CHANG", 0.02),
("MING-TSUNG", 0.02),
("YU-HSUAN", 0.02),
("CHIN-HSING", 0.02),
("CHUN-HAO", 0.02),
("CHUN-TING", 0.02),
("KUAN-HUNG", 0.02),
("JEN-CHIEH", 0.02),
("WEI-TING", 0.02),
("TSUNG-LIN", 0.02),
("WEN-CHIN", 0.02),
("PO-WEN", 0.02),
("CHUN-NAN", 0.02),
("TSUNG-HSIEN", 0.02),
("TZU-HAO", 0.02),
("CHUN-YU", 0.02),
("SHENG-HSIUNG", 0.02),
("PAI-YEN", 0.02),
("CHIEN-LIANG", 0.02),
("CHUN-MING", 0.02),
("SHIH-MING", 0.02),
("I-HSIUNG", 0.02),
("CHIEN-MING", 0.02),
("YUNG-CHANG", 0.02),
("WEN-HUA", 0.02),
("TZU-HSIANG", 0.02),
("PAI-HUNG", 0.02),
("CHENG-HUNG", 0.02),
("CHIN-FA", 0.02),
("PAI-LIN", 0.02),
("CHIEN-CHUNG", 0.02),
("KUO-JUNG", 0.02),
("TSUNG-MING", 0.02),
("CHIH-YUAN", 0.02),
("YU-TING", 0.01),
("PIN-JUI", 0.01),
("CHEN-JUI", 0.01),
("YU-EN", 0.01),
("YU-CHEN", 0.01),
("PAI-JUI", 0.01),
("JUI-EN", 0.01),
("EN-SHO", 0.01),
("TZU-JUI", 0.01),
("TZU-CHEN", 0.01),
("TZU-EN", 0.01),
)
)
first_romanized_names_female = OrderedDict(
(
("SHU-FEN", 0.14),
("SHU-HUI", 0.05),
("MEI-LING", 0.12),
("LI-HUA", 0.11),
("MEI-HUI", 0.04),
("SHU-CHEN", 0.05),
("YA-TING", 0.1),
("HSIU-YING", 0.1),
("SHU-CHUAN", 0.1),
("HSIU-CHIN", 0.1),
("HSIU-MEI", 0.05),
("MEI-HUA", 0.09),
("I-CHUN", 0.09),
("SHU-HUA", 0.09),
("MEI-YU", 0.09),
("YA-HUI", 0.04),
("HSIU-LAN", 0.08),
("SHU-MEI", 0.08),
("HSIU-FENG", 0.08),
("MEI-CHU", 0.07),
("LI-CHU", 0.07),
("LI-CHUAN", 0.07),
("SHU-LING", 0.07),
("MEI-YUN", 0.07),
("YA-WEN", 0.07),
("YA-LING", 0.07),
("MEI-LI", 0.06),
("YU-LAN", 0.06),
("YUEH-O", 0.06),
("LI-CHING", 0.06),
("HUI-MEI", 0.06),
("LI-MEI", 0.06),
("HSIU-CHU", 0.06),
("HSIN-I", 0.04),
("SU-CHEN", 0.05),
("HSIU-CHEN", 0.05),
("HUI-LING", 0.04),
("YU-MEI", 0.04),
("YU-YING", 0.05),
("HSIU-LING", 0.05),
("MING-CHU", 0.05),
("CHIU-HSIANG", 0.05),
("HSIU-YU", 0.05),
("LI-YUN", 0.05),
("LI-YU", 0.05),
("PAO-CHU", 0.05),
("I-TING", 0.01),
("LI-LING", 0.05),
("I-CHEN", 0.04),
("YUEH-YING", 0.04),
("SHU-FANG", 0.04),
("YU-LING", 0.04),
("HSIU-YUN", 0.04),
("CHUN-MEI", 0.04),
("PI-HSIA", 0.04),
("LI-HSIANG", 0.04),
("MEI-FENG", 0.04),
("MEI-CHEN", 0.04),
("MEI-YING", 0.04),
("PI-CHU", 0.04),
("PI-YUN", 0.04),
("CHIA-JUNG", 0.04),
("MEI-LAN", 0.04),
("HSIU-CHUAN", 0.04),
("MEI-CHUAN", 0.04),
("SHU-MIN", 0.04),
("YU-CHEN", 0.01),
("SHU-CHING", 0.04),
("CHING-I", 0.04),
("SU-CHU", 0.04),
("YA-PING", 0.04),
("SU-CHING", 0.04),
("SU-CHIN", 0.04),
("HSIU-CHIH", 0.04),
("CHIN-LIEN", 0.04),
("CHIU-YUEH", 0.04),
("LI-HSUEH", 0.04),
("HUI-CHEN", 0.04),
("CHIA-LING", 0.04),
("YU-TING", 0.01),
("SHIH-HAN", 0.04),
("HSIU-HSIA", 0.04),
("HSIU-HUA", 0.03),
("LI-CHIN", 0.03),
("CHIN-FENG", 0.03),
("LI-CHEN", 0.03),
("YU-FENG", 0.03),
("YU-CHIN", 0.03),
("HSIU-LIEN", 0.03),
("SU-LAN", 0.03),
("WAN-TING", 0.01),
("PEI-SHAN", 0.01),
("I-HSUAN", 0.01),
("YA-CHU", 0.01),
("HSIN-YU", 0.01),
("SSU-YU", 0.01),
("CHIA-YING", 0.01),
("PIN-YU", 0.01),
("TZU-HAN", 0.01),
("PIN-YEN", 0.01),
("TZU-CHING", 0.01),
("YUNG-CHING", 0.01),
("YU-TUNG", 0.01),
("I-CHING", 0.01),
("I-FEI", 0.01),
("YU-FEI", 0.01),
("YUN-FEI", 0.01),
("I-AN", 0.01),
("YUEH-TUNG", 0.01),
)
)
first_romanized_names = first_romanized_names_male.copy()
first_romanized_names.update(first_romanized_names_female)
romanized_formats_female = OrderedDict(
(("{{last_romanized_name}} {{first_romanized_name_female}}", 1),) # 漢人 Han
)
romanized_formats_male = OrderedDict((("{{last_romanized_name}} {{first_romanized_name_male}}", 1),)) # 漢人 Han
romanized_formats = romanized_formats_male.copy()
romanized_formats.update(romanized_formats_female)
    def first_romanized_name_male(self) -> str:  # among locales, only ja_JP also implements romanized names
        """Return a random male given name in Wade-Giles romanization.

        :example: 'CHIA-HAO'
        """
        return self.random_element(self.first_romanized_names_male)
    def first_romanized_name_female(self) -> str:  # among locales, only ja_JP also implements romanized names
        """Return a random female given name in Wade-Giles romanization.

        :example: 'SHU-FEN'
        """
        return self.random_element(self.first_romanized_names_female)
def romanized_name(self) -> str: # 姓名
"""
:example: 'WANG SHU-FEN'
"""
pattern: str = self.random_element(self.romanized_formats)
return self.generator.parse(pattern)
    def first_romanized_name(self) -> str:  # given name only (previous comment/example had this swapped with the surname method)
        """Return a random romanized given name.

        :example: 'SHU-FEN'
        """
        return self.random_element(self.first_romanized_names)
    def last_romanized_name(self) -> str:  # surname only (previous comment/example had this swapped with the given-name method)
        """Return a random romanized surname.

        :example: 'WANG'
        """
        return self.random_element(self.last_romanized_names)
def romanized_name_male(self) -> str: # 男生姓名
"""
:example: 'WANG CHIH-MING'
"""
pattern: str = self.random_element(self.romanized_formats_male)
return self.generator.parse(pattern)
def romanized_name_female(self) -> str: # 女生姓名
"""
:example: 'WANG SHU-FEN'
"""
pattern: str = self.random_element(self.romanized_formats_female)
return self.generator.parse(pattern)
| Provider |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_assorted_poly.py | {
"start": 55797,
"end": 58297
} | class ____(fixtures.MappedTest):
    @classmethod
    def define_tables(cls, metadata):
        """Create joined-inheritance tables plus a many-to-many tag table.

        The Table objects are published as module-level globals because the
        test methods in this class refer to them directly.
        """
        global people, employees, tags, peopleTags
        people = Table(
            "people",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            # discriminator column used for polymorphic identity
            Column("_type", String(30), nullable=False),
        )
        employees = Table(
            "employees",
            metadata,
            Column("id", Integer, ForeignKey("people.id"), primary_key=True),
        )
        tags = Table(
            "tags",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("label", String(50), nullable=False),
        )
        # association table linking people to tags (many-to-many)
        peopleTags = Table(
            "peopleTags",
            metadata,
            Column("person_id", Integer, ForeignKey("people.id")),
            Column("tag_id", Integer, ForeignKey("tags.id")),
        )
    def test_basic(self):
        """test that Query uses the full set of mapper._eager_loaders
        when generating SQL"""
        class Person(ComparableEntity):
            pass
        class Employee(Person):
            def __init__(self, name="bob"):
                self.name = name
        class Tag(ComparableEntity):
            def __init__(self, label):
                self.label = label
        # Person eagerly (joined) loads its tags; Employee inherits the
        # relationship through the joined-table mapping below.
        self.mapper_registry.map_imperatively(
            Person,
            people,
            polymorphic_on=people.c._type,
            polymorphic_identity="person",
            properties={
                "tags": relationship(
                    Tag, secondary=peopleTags, backref="people", lazy="joined"
                )
            },
        )
        self.mapper_registry.map_imperatively(
            Employee,
            employees,
            inherits=Person,
            polymorphic_identity="employee",
        )
        self.mapper_registry.map_imperatively(Tag, tags)
        session = fixture_session()
        bob = Employee()
        session.add(bob)
        tag = Tag("crazy")
        bob.tags.append(tag)
        tag = Tag("funny")
        bob.tags.append(tag)
        session.flush()
        # Expunge so the query below must emit SQL (including the eager-load
        # join) instead of returning objects from the identity map.
        session.expunge_all()
        # query from Employee with limit, query needs to apply eager limiting
        # subquery
        instance = session.query(Employee).filter_by(id=1).limit(1).first()
        assert len(instance.tags) == 2
| InheritingEagerTest |
python | huggingface__transformers | src/transformers/models/swin2sr/modeling_swin2sr.py | {
"start": 6164,
"end": 6699
} | class ____(nn.Module):
r"""Image to Patch Unembedding"""
    def __init__(self, config):
        """Store the embedding dimension used when reshaping token sequences."""
        super().__init__()
        self.embed_dim = config.embed_dim
def forward(self, embeddings, x_size):
batch_size, height_width, num_channels = embeddings.shape
embeddings = embeddings.transpose(1, 2).view(batch_size, self.embed_dim, x_size[0], x_size[1]) # B Ph*Pw C
return embeddings
# Copied from transformers.models.swinv2.modeling_swinv2.Swinv2PatchMerging with Swinv2->Swin2SR
| Swin2SRPatchUnEmbeddings |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 967,
"end": 1096
} | class ____(BaseModel, extra="forbid"):
abort_transfer: "AbortShardTransfer" = Field(..., description="")
| AbortTransferOperation |
python | ApeWorX__ape | tests/functional/test_project.py | {
"start": 31758,
"end": 34434
} | class ____:
@pytest.fixture
def mock_github(self, mocker):
return mocker.MagicMock()
    @pytest.fixture(scope="class")
    def gitmodules(self):
        """Sample ``.gitmodules`` content; space indents are converted to tabs."""
        return """
        [submodule "lib/forge-std"]
            path = lib/forge-std
            url = https://github.com/foundry-rs/forge-std
            branch = v1.5.2
        [submodule "lib/openzeppelin-contracts"]
            path = lib/openzeppelin-contracts
            url = https://github.com/OpenZeppelin/openzeppelin-contracts
            release = v4.9.5
            branch = v4.9.5
        [submodule "lib/erc4626-tests"]
            path = lib/erc4626-tests
            url = https://github.com/a16z/erc4626-tests.git
        """.lstrip().replace("    ", "\t")
    def test_extract_config(self, foundry_toml, gitmodules, mock_github):
        """foundry.toml + .gitmodules are translated into ape config values."""
        with create_tempdir() as temp_dir:
            cfg_file = temp_dir / "foundry.toml"
            cfg_file.write_text(foundry_toml, encoding="utf8")
            gitmodules_file = temp_dir / ".gitmodules"
            gitmodules_file.write_text(gitmodules, encoding="utf8")
            temp_project = Project(temp_dir)
            api = temp_project.project_api
            # Stub the GitHub client so resolving a dependency's default
            # branch does not hit the real API.
            mock_github.get_repo.return_value = {"default_branch": "main"}
            api._github_client = mock_github  # type: ignore
            assert isinstance(api, FoundryProject)
            # Ensure solidity config migrated.
            actual = temp_project.config.model_dump(
                by_alias=True
            )  # Is result of ``api.extract_config()``.
            assert actual["contracts_folder"] == "src"
            assert "solidity" in actual, "Solidity failed to migrate"
            actual_sol = actual["solidity"]
            assert actual_sol["import_remapping"] == [
                "@openzeppelin=src/.cache/openzeppelin/v4.9.5/",
                "forge-std=src/.cache/forge-std/v1.5.2/src",
            ]
            assert actual_sol["version"] == "0.8.18"
            assert actual_sol["evm_version"] == "cancun"
            assert actual_sol["via_ir"] is True
            # Ensure dependencies migrated from .gitmodules.
            assert "dependencies" in actual, "Dependencies failed to migrate"
            actual_dependencies = actual["dependencies"]
            expected_dependencies = [
                {"github": "foundry-rs/forge-std", "name": "forge-std", "ref": "v1.5.2"},
                {
                    "github": "OpenZeppelin/openzeppelin-contracts",
                    "name": "openzeppelin",
                    "version": "v4.9.5",
                },
                {"github": "a16z/erc4626-tests", "name": "erc4626-tests", "ref": "main"},
            ]
            assert actual_dependencies == expected_dependencies
| TestFoundryProject |
python | spyder-ide__spyder | spyder/plugins/shortcuts/widgets/table.py | {
"start": 18535,
"end": 24115
} | class ____(QAbstractTableModel):
    def __init__(self, parent):
        """Table model backing the shortcuts summary table."""
        QAbstractTableModel.__init__(self)
        self._parent = parent
        self.shortcuts = []  # shortcut objects currently displayed
        self.scores = []  # fuzzy-match score per row
        self.rich_text = []  # HTML name text with matched letters bolded
        self.normal_text = []  # plain name text
        self.context_rich_text = []  # HTML context text with matches bolded
        self.letters = ''  # current search string
        self.label = QLabel()
        self.widths = []
        # Needed to compensate for the HTMLDelegate color selection unawarness
        self.text_color = SpyderPalette.COLOR_TEXT_1
        self.text_color_highlight = SpyderPalette.COLOR_TEXT_1
def current_index(self):
"""Get the currently selected index in the parent table view."""
i = self._parent.proxy_model.mapToSource(self._parent.currentIndex())
return i
def sortByName(self):
"""Qt Override."""
self.shortcuts = sorted(self.shortcuts,
key=lambda x: x.context+'/'+x.name)
self.reset()
def flags(self, index):
"""Qt Override."""
if not index.isValid():
return Qt.ItemFlag.ItemIsEnabled
return QAbstractTableModel.flags(self, index)
    def data(self, index, role=Qt.DisplayRole):
        """Qt Override: cell text and alignment for the shortcuts table."""
        row = index.row()
        if not index.isValid() or not (0 <= row < len(self.shortcuts)):
            return to_qvariant()
        shortcut = self.shortcuts[row]
        key = shortcut.key
        column = index.column()
        if role == Qt.DisplayRole:
            # Use the highlight color only for the selected row while this
            # table has keyboard focus (works around HTMLDelegate not being
            # selection-aware; see the note in __init__).
            color = self.text_color
            if self._parent == QApplication.focusWidget():
                if self.current_index().row() == row:
                    color = self.text_color_highlight
                else:
                    color = self.text_color
            if column == CONTEXT:
                # Prefer the search-highlighted HTML when a search is active.
                if len(self.context_rich_text) > 0:
                    text = self.context_rich_text[row]
                else:
                    text = shortcut.context
                text = '<p style="color:{0}">{1}</p>'.format(color, text)
                return to_qvariant(text)
            elif column == NAME:
                text = self.rich_text[row]
                text = '<p style="color:{0}">{1}</p>'.format(color, text)
                return to_qvariant(text)
            elif column == SEQUENCE:
                text = QKeySequence(key).toString(QKeySequence.NativeText)
                text = '<p style="color:{0}">{1}</p>'.format(color, text)
                return to_qvariant(text)
            elif column == SEARCH_SCORE:
                # Treating search scores as a table column simplifies the
                # sorting once a score for a specific string in the finder
                # has been defined. This column however should always remain
                # hidden.
                return to_qvariant(self.scores[row])
        elif role == Qt.TextAlignmentRole:
            return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
        return to_qvariant()
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Qt Override."""
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
return to_qvariant(int(Qt.AlignRight | Qt.AlignVCenter))
if role != Qt.DisplayRole:
return to_qvariant()
if orientation == Qt.Horizontal:
if section == CONTEXT:
return to_qvariant(_("Context"))
elif section == NAME:
return to_qvariant(_("Name"))
elif section == SEQUENCE:
return to_qvariant(_("Shortcut"))
elif section == SEARCH_SCORE:
return to_qvariant(_("Score"))
return to_qvariant()
    def rowCount(self, index=QModelIndex()):
        """Qt Override: one row per shortcut."""
        return len(self.shortcuts)
    def columnCount(self, index=QModelIndex()):
        """Qt Override: context, name, sequence and (hidden) search score."""
        return 4
def setData(self, index, value, role=Qt.EditRole):
"""Qt Override."""
if index.isValid() and 0 <= index.row() < len(self.shortcuts):
shortcut = self.shortcuts[index.row()]
column = index.column()
text = from_qvariant(value, str)
if column == SEQUENCE:
shortcut.key = text
self.dataChanged.emit(index, index)
return True
return False
    def update_search_letters(self, text):
        """Update search letters with text input in search box."""
        self.letters = text
        contexts = [shortcut.context for shortcut in self.shortcuts]
        names = [shortcut.name for shortcut in self.shortcuts]
        # Score the search text against both the context and the name of
        # every shortcut; matched letters get wrapped in <b> tags.
        context_results = get_search_scores(
            text, contexts, template='<b>{0}</b>')
        results = get_search_scores(text, names, template='<b>{0}</b>')
        # Each result is a (plain_text, rich_text, score) tuple; unzip them
        # into parallel per-row sequences.
        __, self.context_rich_text, context_scores = (
            zip(*context_results))
        self.normal_text, self.rich_text, self.scores = zip(*results)
        # Combined score (name match + context match) drives sorting.
        self.scores = [x + y for x, y in zip(self.scores, context_scores)]
        self.reset()
def update_active_row(self):
"""Update active row to update color in selected text."""
self.data(self.current_index())
    def row(self, row_num):
        """Get row based on model index. Needed for the custom proxy model."""
        # row_num indexes the unfiltered source data, not the proxy view.
        return self.shortcuts[row_num]
    def reset(self):
        """Reset model to take into account new search letters."""
        self.beginResetModel()
        self.endResetModel()
| ShortcutsModel |
python | pydantic__pydantic | pydantic-core/tests/validators/test_is_instance.py | {
"start": 183,
"end": 210
} | class ____(Foo):
pass
| Bar |
python | numba__numba | numba/cuda/cudadrv/nvvm.py | {
"start": 6589,
"end": 14569
} | class ____(object):
    def __init__(self):
        """Create an empty NVVM program (compilation unit) via the driver."""
        self.driver = NVVM()
        self._handle = nvvm_program()
        err = self.driver.nvvmCreateProgram(byref(self._handle))
        self.driver.check_error(err, 'Failed to create CU')
    def __del__(self):
        """Destroy the underlying NVVM program when garbage-collected."""
        # Re-acquires the driver instead of using self.driver — presumably to
        # be robust during interpreter teardown; TODO confirm.
        driver = NVVM()
        err = driver.nvvmDestroyProgram(byref(self._handle))
        driver.check_error(err, 'Failed to destroy CU', exit=True)
def add_module(self, buffer):
"""
Add a module level NVVM IR to a compilation unit.
- The buffer should contain an NVVM module IR either in the bitcode
representation (LLVM3.0) or in the text representation.
"""
err = self.driver.nvvmAddModuleToProgram(self._handle, buffer,
len(buffer), None)
self.driver.check_error(err, 'Failed to add module')
def lazy_add_module(self, buffer):
"""
Lazily add an NVVM IR module to a compilation unit.
The buffer should contain NVVM module IR either in the bitcode
representation or in the text representation.
"""
err = self.driver.nvvmLazyAddModuleToProgram(self._handle, buffer,
len(buffer), None)
self.driver.check_error(err, 'Failed to add module')
def compile(self, **options):
"""Perform Compilation.
Compilation options are accepted as keyword arguments, with the
following considerations:
- Underscores (`_`) in option names are converted to dashes (`-`), to
match NVVM's option name format.
- Options that take a value will be emitted in the form
"-<name>=<value>".
- Booleans passed as option values will be converted to integers.
- Options which take no value (such as `-gen-lto`) should have a value
of `None` passed in and will be emitted in the form "-<name>".
For documentation on NVVM compilation options, see the CUDA Toolkit
Documentation:
https://docs.nvidia.com/cuda/libnvvm-api/index.html#_CPPv418nvvmCompileProgram11nvvmProgramiPPKc
"""
def stringify_option(k, v):
k = k.replace('_', '-')
if v is None:
return f'-{k}'
if isinstance(v, bool):
v = int(v)
return f'-{k}={v}'
options = [stringify_option(k, v) for k, v in options.items()]
c_opts = (c_char_p * len(options))(*[c_char_p(x.encode('utf8'))
for x in options])
# verify
err = self.driver.nvvmVerifyProgram(self._handle, len(options), c_opts)
self._try_error(err, 'Failed to verify\n')
# compile
err = self.driver.nvvmCompileProgram(self._handle, len(options), c_opts)
self._try_error(err, 'Failed to compile\n')
# get result
reslen = c_size_t()
err = self.driver.nvvmGetCompiledResultSize(self._handle, byref(reslen))
self._try_error(err, 'Failed to get size of compiled result.')
output_buffer = (c_char * reslen.value)()
err = self.driver.nvvmGetCompiledResult(self._handle, output_buffer)
self._try_error(err, 'Failed to get compiled result.')
# get log
self.log = self.get_log()
if self.log:
warnings.warn(self.log, category=NvvmWarning)
return output_buffer[:]
def _try_error(self, err, msg):
self.driver.check_error(err, "%s\n%s" % (msg, self.get_log()))
def get_log(self):
reslen = c_size_t()
err = self.driver.nvvmGetProgramLogSize(self._handle, byref(reslen))
self.driver.check_error(err, 'Failed to get compilation log size.')
if reslen.value > 1:
logbuf = (c_char * reslen.value)()
err = self.driver.nvvmGetProgramLog(self._handle, logbuf)
self.driver.check_error(err, 'Failed to get compilation log.')
return logbuf.value.decode('utf8') # populate log attribute
return ''
# All compute capabilities (major, minor) known to this module, in
# ascending order.
COMPUTE_CAPABILITIES = (
    (3, 5), (3, 7),
    (5, 0), (5, 2), (5, 3),
    (6, 0), (6, 1), (6, 2),
    (7, 0), (7, 2), (7, 5),
    (8, 0), (8, 6), (8, 7), (8, 9),
    (9, 0)
)

# Maps CTK version -> (min supported cc, max supported cc) inclusive
CTK_SUPPORTED = {
    (11, 2): ((3, 5), (8, 6)),
    (11, 3): ((3, 5), (8, 6)),
    (11, 4): ((3, 5), (8, 7)),
    (11, 5): ((3, 5), (8, 7)),
    (11, 6): ((3, 5), (8, 7)),
    (11, 7): ((3, 5), (8, 7)),
    (11, 8): ((3, 5), (9, 0)),
    (12, 0): ((5, 0), (9, 0)),
    (12, 1): ((5, 0), (9, 0)),
    (12, 2): ((5, 0), (9, 0)),
    (12, 3): ((5, 0), (9, 0)),
    (12, 4): ((5, 0), (9, 0)),
}
def ccs_supported_by_ctk(ctk_version):
    """Return the tuple of compute capabilities supported by *ctk_version*.

    For toolkit versions present in ``CTK_SUPPORTED`` this is the inclusive
    CC range recorded there; for unknown (newer) toolkits, every known
    non-deprecated CC is assumed supported.
    """
    bounds = CTK_SUPPORTED.get(ctk_version)
    if bounds is not None:
        # Known toolkit: keep the CCs inside its inclusive support range.
        lo, hi = bounds
        return tuple(cc for cc in COMPUTE_CAPABILITIES if lo <= cc <= hi)

    # For unsupported CUDA toolkit versions, all we can do is assume all
    # non-deprecated versions we are aware of are supported.
    return tuple(cc for cc in COMPUTE_CAPABILITIES
                 if cc >= config.CUDA_DEFAULT_PTX_CC)
def get_supported_ccs():
    """Return the compute capabilities supported by the installed runtime.

    Returns an empty tuple when the runtime version cannot be queried or
    is older than the minimum supported toolkit (with a warning).
    """
    try:
        from numba.cuda.cudadrv.runtime import runtime
        cudart_version = runtime.get_version()
    except:  # noqa: E722
        # We can't support anything if there's an error getting the runtime
        # version (e.g. if it's not present or there's another issue)
        _supported_cc = ()
        return _supported_cc

    # Ensure the minimum CTK version requirement is met
    min_cudart = min(CTK_SUPPORTED)
    if cudart_version >= min_cudart:
        _supported_cc = ccs_supported_by_ctk(cudart_version)
        return _supported_cc

    # Toolkit too old: warn the user and report nothing supported.
    _supported_cc = ()
    ctk_ver = f"{cudart_version[0]}.{cudart_version[1]}"
    unsupported_ver = (f"CUDA Toolkit {ctk_ver} is unsupported by Numba - "
                       f"{min_cudart[0]}.{min_cudart[1]} is the minimum "
                       "required version.")
    warnings.warn(unsupported_ver)
    return _supported_cc
def find_closest_arch(mycc):
    """
    Given a compute capability, return the closest compute capability supported
    by the CUDA toolkit.

    :param mycc: Compute capability as a tuple ``(MAJOR, MINOR)``
    :return: Closest supported CC as a tuple ``(MAJOR, MINOR)``
    :raises NvvmSupportError: when no CCs are supported at all, or when
        *mycc* is lower than the lowest supported CC.
    """
    supported_ccs = NVVM().supported_ccs

    if not supported_ccs:
        msg = "No supported GPU compute capabilities found. " \
              "Please check your cudatoolkit version matches your CUDA version."
        raise NvvmSupportError(msg)

    for i, cc in enumerate(supported_ccs):
        if cc == mycc:
            # Matches
            return cc
        elif cc > mycc:
            # Exceeded
            if i == 0:
                # CC lower than supported.
                # BUGFIX: the original adjacent string literals concatenated
                # with no space: "...is not supported(requires >=...)".
                msg = ("GPU compute capability %d.%d is not supported "
                       "(requires >=%d.%d)" % (mycc + cc))
                raise NvvmSupportError(msg)
            else:
                # return the previous CC
                return supported_ccs[i - 1]

    # CC higher than supported
    return supported_ccs[-1]  # Choose the highest
def get_arch_option(major, minor):
    """Matches with the closest architecture option
    """
    # A forced CC from the configuration takes precedence; otherwise snap
    # to the closest toolkit-supported CC (short-circuit keeps the lookup
    # lazy, exactly like the original if/else).
    arch = config.FORCE_CUDA_CC or find_closest_arch((major, minor))
    return 'compute_%d%d' % arch
MISSING_LIBDEVICE_FILE_MSG = '''Missing libdevice file.
Please ensure you have a CUDA Toolkit 11.2 or higher.
For CUDA 12, ``cuda-nvcc`` and ``cuda-nvrtc`` are required:
$ conda install -c conda-forge cuda-nvcc cuda-nvrtc "cuda-version>=12.0"
For CUDA 11, ``cudatoolkit`` is required:
$ conda install -c conda-forge cudatoolkit "cuda-version>=11.2,<12.0"
'''
| CompilationUnit |
python | pytorch__pytorch | test/test_fake_tensor.py | {
"start": 2564,
"end": 40906
} | class ____(TestCase):
def checkType(self, t, device_str, size):
    """Check that *t* is a FakeTensor on device *device_str* with *size*."""
    is_fake = isinstance(t, FakeTensor)
    self.assertTrue(is_fake)
    self.assertEqual(t.device.type, device_str)
    self.assertEqual(list(t.size()), size)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_cuda_initialized(self):
# doesn't error
with FakeTensorMode():
p = torch.randn(4, 2, requires_grad=True, device="cuda")
x = torch.randn(8, 4, device="cuda")
y = torch.mm(x, p).square().sum()
y.backward()
def test_basic(self):
    # Convert two real CPU tensors to fakes, add them, and check the result
    # is a FakeTensor with the broadcast shape (4, 2, 2) on CPU.
    x = torch.empty(2, 2, device="cpu")
    y = torch.empty(4, 2, 2, device="cpu")
    with FakeTensorMode() as mode:
        x = mode.from_tensor(x)
        y = mode.from_tensor(y)
        z = x + y
        self.assertEqual(z.shape, (4, 2, 2))
        self.assertEqual(z.device, torch.device("cpu"))
        self.assertTrue(isinstance(z, FakeTensor))
def test_custom_op_fallback(self):
    # A custom op registered only with a CPU kernel has no meta/fake impl;
    # running it under FakeTensorMode must raise UnsupportedOperatorException
    # even with allow_fallback_kernels=True.
    from torch.library import impl, Library

    try:
        test_lib = Library("my_test_op", "DEF")  # noqa: TOR901
        test_lib.define("foo(Tensor self) -> Tensor")

        @impl(test_lib, "foo", "CPU")
        def foo_impl(self):
            return self.cos()

        x = torch.empty(2, 2, device="cpu")
        with self.assertRaisesRegex(
            UnsupportedOperatorException, "my_test_op.foo.default"
        ):
            with FakeTensorMode(allow_fallback_kernels=True) as mode:
                x = mode.from_tensor(x)
                torch.ops.my_test_op.foo(x)
    finally:
        # Always deregister the test library so the dispatcher stays clean
        # for subsequent tests.
        test_lib._destroy()
def test_parameter_instantiation(self):
with FakeTensorMode():
x = torch.rand([4])
y = torch.nn.parameter.Parameter(x)
self.assertTrue(isinstance(y, torch.nn.Parameter))
@unittest.skipIf(not dist.is_available(), "requires distributed")
def test_fsdp_flat_param(self):
from torch.distributed.fsdp._flat_param import FlatParameter
with FakeTensorMode() as m:
data = torch.randn(2, 2)
param = FlatParameter(data, requires_grad=True)
self.assertIsInstance(param, FlatParameter)
self.assertIsInstance(param, torch.nn.Parameter)
self.assertIsInstance(param, FakeTensor)
def test_non_parameter_grad(self):
mode = FakeTensorMode()
t = torch.rand([4], requires_grad=True)
fake_t = mode.from_tensor(t)
self.assertEqual(fake_t.requires_grad, t.requires_grad)
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
@parametrize(
"dtype",
all_types_complex_float8_and(),
)
def test_index_cuda_with_cpu(self, dtype):
with FakeTensorMode():
x = torch.ones([2048], device="cuda", dtype=dtype)
out = x[torch.zeros([36], dtype=torch.int64)]
self.checkType(out, "cuda", [36])
self.assertEqual(out.dtype, dtype)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_shape_take_not_device(self):
with FakeTensorMode():
x = torch.empty(1, device="cpu")
y = torch.empty(8, 8, device="cuda")
out = x.resize_as_(y)
self.assertEqual(out.shape, (8, 8))
self.assertEqual(out.device.type, "cpu")
self.assertTrue(isinstance(out, FakeTensor))
def test_repr(self):
    # FakeTensor repr elides the data; the device is shown only when it is
    # not the default CPU device.
    with FakeTensorMode():
        x = torch.empty(2, 2, device="cpu")
        self.assertEqual(repr(x), "FakeTensor(..., size=(2, 2))")
        x = torch.empty(2, 2, device="meta")
        self.assertEqual(repr(x), "FakeTensor(..., device='meta', size=(2, 2))")
def test_convert_fake_to_real(self):
x = torch.ones([20])
with FakeTensorMode(allow_non_fake_inputs=True) as m:
_ = x + 1
out = torch._subclasses.fake_utils.try_convert_fake_to_real([x[0:10]])
self.assertEqual(torch.ones([10]), out[0])
def test_conv_nhwc(self):
x = torch.randn([1, 1024, 16, 16]).to(memory_format=torch.channels_last)
w = torch.randn([256, 1024, 4, 4]).to(memory_format=torch.channels_last)
b = torch.randn([256])
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, w, b):
return torch.ops.aten.convolution(
x, w, b, [1, 1], [0, 0], [1, 1], False, [0, 0], 1
)
model = Model()
with FakeTensorMode(allow_non_fake_inputs=True) as mode:
fake_out = model.forward(x, w, b)
eager_out = model.forward(x, w, b)
self.assertEqual(fake_out.stride(), eager_out.stride())
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_zero_dim(self):
with FakeTensorMode() as mode:
x = torch.tensor(0.0)
y = torch.rand([4, 4], device="cuda")
out = x + y
self.assertEqual(out.shape, (4, 4))
self.assertEqual(out.device, y.device)
self.assertTrue(isinstance(out, FakeTensor))
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_op_with_zero_dim_bypassed(self):
if torch._functorch.config.fake_tensor_propagate_real_tensors:
self.skipTest("Propagate real tensor not supported")
shape_env = ShapeEnv()
mode = FakeTensorMode(shape_env=shape_env)
x = torch.tensor(1.0, device="cuda")
y = torch.tensor(2.0)
fake_x = mode.from_tensor(x)
fake_y = mode.from_tensor(y)
with self.assertRaisesRegex(
RuntimeError, "Unhandled FakeTensor Device Propagation for.*"
) as exc:
torch.nextafter(fake_x, fake_y)
def test_nan_to_num(self):
with FakeTensorMode():
for dtype in [torch.float16, torch.float32]:
x = torch.rand([4], dtype=dtype)
y = torch.nan_to_num(x, nan=None)
z = torch.nan_to_num(x, 0.0)
self.assertEqual(dtype, y.dtype)
self.assertEqual(dtype, z.dtype)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_throw(self):
x = torch.tensor(0.0) # TODO: tensor() errors
with FakeTensorMode() as mode:
x_conv = mode.from_tensor(x)
y = torch.rand([4, 4], device="cuda")
z = torch.rand([4, 4], device="cpu")
self.assertRaises(Exception, lambda: torch.lerp(x_conv, y, z))
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_type_as(self):
with FakeTensorMode():
x = torch.rand([16, 1], device="cpu")
y = torch.rand([4, 4], device="cuda")
out = x.type_as(y)
self.assertEqual(out.device.type, "cuda")
self.assertTrue(isinstance(out, FakeTensor))
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_setitem(self):
for device in ["cpu", "cuda"]:
with FakeTensorMode():
x = torch.rand([16, 1], device=device)
x[..., 0] = 0
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_device_inplace_copy(self):
with FakeTensorMode():
x = torch.rand([8, 8], device="cpu")
y = torch.rand([8, 8], device="cuda")
assert x.copy_(y).device.type == "cpu"
assert y.copy_(x).device.type == "cuda"
def test_fake_device(self):
t = torch.ones(3)
t = t.view(1, 3)
fake_mode1 = FakeTensorMode(allow_non_fake_inputs=True)
fake_t = fake_mode1.from_tensor(t)
fake_t.fake_device = torch.device("cuda")
fake_mode2 = FakeTensorMode(allow_non_fake_inputs=True)
new_fake_t = fake_mode2.from_tensor(fake_t)
self.assertEqual(new_fake_t.device, fake_t.device)
def test_fake_dispatch_keys(self):
with FakeTensorMode():
x = torch.rand([4])
f = (
FileCheck()
.check("CPU")
.check("ADInplaceOrView")
.check("AutogradCPU")
.check("AutocastCPU")
)
f.run(torch._C._dispatch_key_set(x))
with torch.inference_mode():
x = torch.rand([4])
y = x + x
FileCheck().check("CPU").check("AutocastCPU").run(
torch._C._dispatch_key_set(y)
)
FileCheck().check_not("ADInplaceOrView").check_not("Autograd").run(
torch._C._dispatch_key_set(y)
)
def test_batch_tensor(self):
x = torch.rand((3, 4, 5))
b = _add_batch_dim(x, 0, 0)
mode = FakeTensorMode()
fake_b = mode.from_tensor(b)
prims.utils.compare_tensor_meta(b, fake_b, check_strides=True)
b1 = _add_batch_dim(x, 1, 1)
b2 = _add_batch_dim(b1, 0, 2)
fake_b2 = mode.from_tensor(b2)
prims.utils.compare_tensor_meta(b2, fake_b2, check_strides=True)
self.assertTrue(is_batchedtensor(fake_b2))
fake_b1 = get_unwrapped(fake_b2)
self.assertTrue(is_batchedtensor(fake_b1))
fake_tensor = get_unwrapped(fake_b1)
self.assertIsInstance(fake_tensor, FakeTensor)
def test_constructor(self):
with FakeTensorMode():
x = torch.rand([4, 4], device="cpu")
self.assertTrue(isinstance(x, FakeTensor))
self.assertTrue(x.device.type == "cpu")
def test_mode(self):
with FakeTensorMode():
y = torch.rand([4], device="cpu")
out = y + y
self.assertTrue(isinstance(out, FakeTensor))
def test_full(self):
# Test torch.full returns tensor with correct dtype
with torch._subclasses.CrossRefFakeMode():
y = torch.full((4, 4), 1)
def check_function_with_fake(self, fn):
    """Run *fn* eagerly and under FakeTensorMode; compare the outputs.

    Non-tensor leaves must be non-tensor in both runs; tensor leaves must
    agree on metadata including strides.
    """
    out = fn()
    with torch._subclasses.FakeTensorMode():
        out_fake = fn()

    for a, b in zip(pytree.tree_leaves(out), pytree.tree_leaves(out_fake)):
        if not isinstance(a, torch.Tensor):
            self.assertTrue(not isinstance(b, torch.Tensor))
            continue

        prims.utils.compare_tensor_meta(a, b, check_strides=True)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_non_kwarg_device(self):
with FakeTensorMode():
x = torch.rand([16, 1], device="cpu")
y = x.to(torch.device("cpu"))
self.assertIs(x, y)
z = x.to(torch.device("cuda"))
self.assertEqual(z.device.type, "cuda")
def test_non_overlapping_stride_zero(self):
def foo():
x = torch.empty_strided([1, 3, 427, 640], (0, 1, 1920, 3))
return x.half()
self.check_function_with_fake(foo)
def test_fake_mode_error(self):
x = torch.rand([4, 4])
with self.assertRaisesRegex(Exception, "Please convert all Tensors"):
with FakeTensorMode():
y = x[0]
def test_no_tag_func(self):
import functools
from torch.nn.attention.flex_attention import _identity, flex_attention
def create_attention(score_mod, block_mask, enable_gqa=False):
return functools.partial(
flex_attention,
score_mod=score_mod,
block_mask=block_mask,
enable_gqa=enable_gqa,
)
input_shape = (4, 16, 128, 64)
q = torch.randn(
input_shape,
dtype=torch.bfloat16,
device="cpu",
requires_grad=False,
)
k = torch.randn(
input_shape,
dtype=torch.bfloat16,
device="cpu",
requires_grad=False,
)
v = torch.randn(
input_shape,
dtype=torch.bfloat16,
device="cpu",
requires_grad=False,
)
sdpa_partial = create_attention(_identity, None)
with FakeTensorMode(allow_non_fake_inputs=True):
sdpa_partial(q, k, v, return_lse=False)
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
def test_fake_grad_copy(self):
x = torch.rand([4, 4], requires_grad=True)
x.grad = torch.rand([4, 4])
mode = FakeTensorMode()
fake_x = mode.from_tensor(x)
prims.utils.compare_tensor_meta(fake_x, x)
prims.utils.compare_tensor_meta(fake_x.grad, x.grad)
self.assertTrue(isinstance(fake_x.grad, FakeTensor))
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_index_put_error(self):
mode = FakeTensorMode()
for context in [contextlib.nullcontext, lambda: mode]:
with context():
y = torch.randn(2, 2, 3)
x = torch.randn(2, 2, 3).to("cuda")
with self.assertRaises(RuntimeError):
x[[1, 1]] = y
with self.assertRaises(RuntimeError):
torch.ops.aten.index_put(x, torch.tensor([1, 1], device="cuda"), y)
# no error
torch.ops.aten.index_put(
x, torch.tensor([1, 1], device="cuda"), torch.tensor(5.0)
)
torch.ops.aten.index_put_(
x, torch.tensor([1, 1], device="cuda"), torch.tensor(5.0)
)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_like_constructor(self):
with FakeTensorMode():
x = torch.rand([4, 4])
y = torch.ones_like(x)
self.assertTrue(isinstance(y, FakeTensor))
self.assertEqual(y.device.type, "cpu")
z = torch.ones_like(x, device="cuda")
self.assertTrue(isinstance(z, FakeTensor))
self.assertEqual(z.device.type, "cuda")
def test_binary_op_type_promotion(self):
with FakeTensorMode():
x = torch.empty([2, 2], dtype=torch.float)
y = torch.empty([2, 2], dtype=torch.int64)
out = x / y
self.assertEqual(out.dtype, torch.float)
self.assertEqual(out.device.type, "cpu")
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
def test_from_numpy(self):
with FakeTensorMode():
x = torch.tensor(np.zeros([4, 4]))
self.checkType(x, "cpu", [4, 4])
def test_randperm(self):
x = torch.randperm(10)
y = torch.randperm(5, device="cpu")
with FakeTensorMode():
x1 = torch.randperm(10)
prims.utils.compare_tensor_meta(x, x1)
y1 = torch.randperm(5, device="cpu")
prims.utils.compare_tensor_meta(y, y1)
def test_print_in_fake_mode(self):
x = torch.zeros(2)
# does not fail
with FakeTensorMode():
out = str(x)
assert "FakeTensor" not in out
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_upsample_bilinear_small_channels(self):
out = []
mode = FakeTensorMode()
for context in [contextlib.nullcontext, lambda: mode]:
with context():
arg0_1 = torch.empty_strided(
(3, 427, 640), (1, 1920, 3), dtype=torch.float32, device="cuda"
)
unsqueeze = torch.ops.aten.unsqueeze.default(arg0_1, 0)
out.append(
torch.ops.aten.upsample_bilinear2d.default(
unsqueeze, [800, 1199], False
)
)
self.assertTrue(out[1].is_contiguous())
self.checkMetaProps(out[0], out[1])
def test_split_return_self(self):
def fn(x):
return torch.functional.split(x, 0)[0]
# meta should not return self
with FakeTensorMode(), enable_python_dispatcher():
out_fake = fn(torch.empty((0,)))
out_eager = fn(torch.empty((0,)))
self.checkMetaProps(out_fake, out_eager)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_cpu_fallback(self):
with FakeTensorMode(allow_fallback_kernels=False):
filters = torch.randn(8, 4, 3, 3).cuda()
inputs = torch.randn(1, 4, 5, 5).cuda()
out = torch.nn.functional.conv2d(inputs, filters, padding=1)
self.assertEqual(out.device.type, "cuda")
self.assertEqual(list(out.size()), [1, 8, 5, 5])
with FakeTensorMode(allow_fallback_kernels=True):
# intentionally bad inputs
filters = torch.randn(8, 20, 3, 3).cuda()
inputs = torch.randn(1, 7, 10, 5).cuda()
with self.assertRaises(RuntimeError):
torch.nn.functional.conv2d(inputs, filters, padding=1)
with FakeTensorMode(allow_fallback_kernels=True):
filters = torch.randn(8, 4, 3, 3).cuda()
inputs = torch.randn(1, 4, 5, 5).cuda()
out = torch.nn.functional.conv2d(inputs, filters, padding=1)
self.assertEqual(out.device.type, "cuda")
self.assertEqual(list(out.size()), [1, 8, 5, 5])
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_out_multi_device(self):
with FakeTensorMode():
x = torch.rand([4])
y = torch.rand([4], device="cuda")
with self.assertRaisesRegex(Exception, "found.+two.+devices"):
torch.sin(x, out=y)
with self.assertRaisesRegex(Exception, "found.+two.+devices"):
x.add_(y)
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_normalize_device(self):
with FakeTensorMode():
x = torch.empty(1, device="cuda")
y = torch.empty(1, device=f"cuda:{torch.cuda.current_device()}")
out = x + y
self.checkType(out, "cuda", [1])
def test_recursive_invocation(self):
mode = FakeTensorMode()
with mode:
x = torch.tensor(2)
mode.in_kernel_invocation = True
y = x + x
self.assertTrue(mode.in_kernel_invocation)
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
@skipIfRocm
@parametrize(
"allow_fallback_kernels",
[False, True],
lambda a: "with_fallback" if a else "without_fallback",
)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_cudnn_rnn(self, allow_fallback_kernels):
def fn(
a0,
b0,
b1,
b2,
b3,
b4,
b5,
b6,
b7,
b8,
b9,
b10,
b11,
b12,
b13,
b14,
b15,
a3,
a4,
a5,
):
a1 = [
b0,
b1,
b2,
b3,
b4,
b5,
b6,
b7,
b8,
b9,
b10,
b11,
b12,
b13,
b14,
b15,
]
return torch.ops.aten._cudnn_rnn(
a0,
a1,
4,
a3,
a4,
a5,
2,
2048,
0,
2,
False,
0.0,
False,
True,
[],
None,
)
mode = FakeTensorMode(allow_fallback_kernels=allow_fallback_kernels)
for i, context in enumerate([contextlib.nullcontext, lambda: mode]):
with context():
inps1 = [
torch.randn([92, 8, 2048]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192, 4096]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192, 4096]).cuda(),
torch.randn([8192, 2048]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([8192]).cuda(),
torch.randn([167837696]).cuda(),
torch.randn([4, 8, 2048]).cuda(),
torch.randn([4, 8, 2048]).cuda(),
]
inps2 = inps1
inps2[len(inps2) - 1] = None # argument `cx` can be None
for inps in [inps1, inps2]:
out = fn(*inps)
self.assertIs(out[4], inps[-3])
for ten in out:
if i == 1:
self.assertTrue(isinstance(ten, FakeTensor))
self.assertEqual(ten.device.type, "cuda")
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_cuda_lstm(self):
# Ensure CUDA (non-cuDNN) impl succeeds with fake tensors.
with torch.backends.cudnn.flags(enabled=False):
fake_tensor_mode = FakeTensorMode(allow_fallback_kernels=False)
with fake_tensor_mode:
N = 5
L = 4
H_in = 2
hidden_size = 3
proj_size = 2
num_layers = 2
bidir = False
D = 2 if bidir else 1
H_out = proj_size if proj_size > 0 else hidden_size
lstm = torch.nn.LSTM(
input_size=H_in,
hidden_size=hidden_size,
num_layers=num_layers,
proj_size=proj_size,
batch_first=False,
bias=True,
bidirectional=bidir,
device="cuda",
)
h_0 = torch.randn((num_layers * D, N, H_out), device="cuda")
c_0 = torch.randn((num_layers * D, N, hidden_size), device="cuda")
inp = torch.randn((L, N, H_in), device="cuda")
(output, (h_n, c_n)) = lstm(inp, (h_0, c_0))
output.sum().backward()
self.assertEqual(output.shape, (L, N, D * H_out))
self.assertEqual(h_n.shape, (D * num_layers, N, H_out))
self.assertEqual(c_n.shape, (D * num_layers, N, hidden_size))
def test_data_dependent_operator(self):
with FakeTensorMode(allow_fallback_kernels=False):
x = torch.rand([10, 10])
self.assertRaises(DynamicOutputShapeException, lambda: torch.nonzero(x))
def test_parameter_view(self):
x = torch.nn.Parameter(torch.randn(4))
x_view = x.view(4)
mode = FakeTensorMode()
fake_x_view = mode.from_tensor(x_view)
fake_x = mode.from_tensor(x)
self.assertFalse(isinstance(fake_x_view, torch.nn.Parameter))
self.assertTrue(isinstance(fake_x, torch.nn.Parameter))
def test_tolist(self):
shape_env = ShapeEnv()
with FakeTensorMode(allow_fallback_kernels=False, shape_env=shape_env):
x = torch.rand([10])
x.tolist()
# Propagate real tensors doesn't work with fake-on-fake
@expectedFailurePropagateRealTensors
def test_same_shape_env_preserved(self):
shape_env = ShapeEnv()
mode1 = FakeTensorMode(shape_env=shape_env)
t1 = mode1.from_tensor(
torch.randn(10),
symbolic_context=StatelessSymbolicContext(
dynamic_sizes=[DimDynamic.DYNAMIC], constraint_sizes=[None]
),
)
mode2 = FakeTensorMode(shape_env=shape_env)
t2 = mode2.from_tensor(t1)
# t2.size(0) is still dynamic, even though we didn't pass DYNAMIC here
self.assertIsNot(t2, t1)
self.assertIs(t1.fake_mode, mode1)
self.assertIs(t2.fake_mode, mode2)
self.assertIs(t2.size(0).node.shape_env, t1.size(0).node.shape_env)
self.assertEqual(str(t2.size(0)), str(t1.size(0)))
# TODO: Support NJT. There's also some funny business with dynamic shapes
# which would need to be dealt with as well
@expectedFailurePropagateRealTensors
def test_jagged_fake_to_fake_preserved(self):
from torch.nested._internal.nested_tensor import jagged_from_list
S0, S1, S2 = 3, 4, 5
D = 4
a = torch.randn(S0, D, requires_grad=True, dtype=torch.float64)
b = torch.randn(S1, D, requires_grad=True, dtype=torch.float64)
c = torch.randn(S2, D, requires_grad=True, dtype=torch.float64)
offsets = None
jt, _ = jagged_from_list([a, b, c], offsets)
shape_env = ShapeEnv()
mode1 = FakeTensorMode(shape_env=shape_env)
t1 = mode1.from_tensor(jt)
mode2 = FakeTensorMode(shape_env=shape_env)
t2 = mode2.from_tensor(t1)
# It's not obvious that the invocation above makes it dynamic but it
# does!
self.assertTrue(free_symbols(t1.size()))
self.assertIsNot(t2, t1)
self.assertIs(t1.offsets().fake_mode, mode1)
self.assertIs(t2.offsets().fake_mode, mode2)
self.assertIs(t2.size(1).node.shape_env, t1.size(1).node.shape_env)
self.assertEqual(str(t2.size(1)), str(t1.size(1)))
def checkMetaProps(self, t1, t2):
    """Assert *t1* and *t2* have matching tensor metadata, incl. strides."""
    prims.utils.compare_tensor_meta(t1, t2, check_strides=True)
@skipIfCrossRef
def test_deepcopy(self):
with FakeTensorMode() as mode:
pass
mod = torch.nn.BatchNorm2d(10)
with torch._subclasses.fake_tensor.FakeCopyMode(mode):
mod_copied = copy.deepcopy(mod)
def check_copy(mod, mod_copied):
for name, param in itertools.chain(
mod.named_parameters(), mod.named_buffers()
):
param_copied = getattr(mod_copied, name)
self.checkMetaProps(param, param_copied)
self.assertTrue(isinstance(param_copied, FakeTensor))
self.assertEqual(
isinstance(param, torch.nn.Parameter),
isinstance(param_copied, torch.nn.Parameter),
)
self.assertEqual(param.requires_grad, param_copied.requires_grad)
check_copy(mod, mod_copied)
class ModuleNew(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = torch.rand([10, 2])
self.b = self.a
self.c = self.a[0]
mod = ModuleNew()
with torch._subclasses.fake_tensor.FakeCopyMode(mode):
mod_copied = copy.deepcopy(mod)
self.assertIs(mod_copied.a, mod_copied.b)
self.assertEqual(mod_copied.b.storage()._cdata, mod_copied.a.storage()._cdata)
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_new(self):
with FakeTensorMode():
a = torch.rand([16, 1])
self.checkType(a.new(10, 10), "cpu", [10, 10])
self.checkType(a.new([1, 2, 3, 4]), "cpu", [4])
b = torch.rand([4, 4], device="cuda")
self.checkType(b.new(device="cuda"), "cuda", [0])
self.checkType(a.new(torch.rand([1])), "cpu", [1])
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
def test_scalar_inputs(self):
with FakeTensorMode():
self.checkType(torch.div(3, 2), "cpu", [])
ten = torch.zeros(2, dtype=torch.int32) * 2.0
self.assertEqual(ten.dtype, torch.float)
self.checkType(ten, "cpu", [2])
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
def test_allow_meta(self):
def run_meta():
with FakeTensorMode():
x = torch.rand([4], device="meta")
return x + x
self.checkType(run_meta(), "meta", [4])
with patch.object(torch._functorch.config, "fake_tensor_allow_meta", False):
self.assertRaises(Exception, run_meta)
def test_embedding_bag_meta(self):
def f():
# This behavior was originally unintentional but we see people
# relying on it
embedding = torch.nn.EmbeddingBag(10, 3, mode="sum", device="meta")
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
offsets = torch.tensor([0, 4], dtype=torch.long)
return embedding(input, offsets)
real_out = f()
with FakeTensorMode():
fake_out = f()
for r, f in zip(real_out, fake_out):
self.assertEqual(r.size(), f.size())
self.assertEqual(r.device, f.device)
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
def test_mixed_real_and_fake_inputs(self):
class _TestPattern(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.bn = torch.nn.BatchNorm2d(1)
def forward(self, input):
running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
scale_factor = self.bn.weight / running_std
weight_shape = [1] * len(self.conv.weight.shape)
weight_shape[0] = -1
bias_shape = [1] * len(self.conv.weight.shape)
bias_shape[1] = -1
scaled_weight = self.conv.weight * scale_factor.reshape(weight_shape)
zero_bias = torch.zeros_like(self.conv.bias, dtype=input.dtype)
conv = self.conv._conv_forward(input, scaled_weight, zero_bias)
conv_orig = conv / scale_factor.reshape(bias_shape)
conv_orig = conv_orig + self.conv.bias.reshape(bias_shape)
conv = self.bn(conv_orig)
return conv
example_inputs = (torch.randn(1, 1, 3, 3),)
mod = _TestPattern()
with FakeTensorMode(allow_non_fake_inputs=True):
out = mod(torch.randn(1, 1, 3, 3))
self.checkType(out, "cpu", (1, 1, 3, 3))
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_aten_copy_multi_device(self):
with FakeTensorMode():
x1 = torch.rand(4, device="cpu")
x2 = torch.rand(4, device="cuda")
copy1 = torch.ops.aten.copy.default(x1, x2)
copy2 = torch.ops.aten.copy.default(x2, x1)
out = torch.empty(4, device="cpu")
torch.ops.aten.copy.out(x1, x2, out=out)
self.checkType(copy1, "cpu", (4,))
self.checkType(copy2, "cuda", (4,))
self.checkType(out, "cpu", (4,))
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_aten_index_multi_device(self):
with FakeTensorMode():
x1 = torch.rand(4, 4, device="cpu")
x2 = torch.rand(4, 4, device="cuda")
i1 = torch.tensor([0, 1], device="cuda")
i2 = torch.tensor([0, 1], device="cpu")
# NB: This one does not work: cuda indices not allowed on cpu
# tensor
# r1 = torch.ops.aten.index(x1, i1)
r2 = torch.ops.aten.index(x2, i2)
y1 = torch.rand(4, device="cpu")
y2 = torch.rand(4, device="cuda")
j1 = torch.tensor([2], device="cuda")
j2 = torch.tensor([2], device="cpu")
r3 = torch.ops.aten.index_put.default(x1, j1, y1)
r4 = torch.ops.aten.index_put.default(x2, j2, y2)
# self.checkType(r1, "cpu", ())
self.checkType(r2, "cuda", ())
self.checkType(r3, "cpu", (4, 4))
self.checkType(r4, "cuda", (4, 4))
@unittest.skipIf(
TEST_WITH_TORCHDYNAMO, "isinstance check for FakeTensor won't work with compile"
)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_aten_slice_scatter_multi_device(self):
with FakeTensorMode():
x1 = torch.rand(4, 4, device="cpu")
y1 = torch.rand(2, 4, device="cuda")
x2 = torch.rand(4, 4, device="cuda")
y2 = torch.rand(2, 4, device="cpu")
out = torch.empty(4, 4, device="cpu")
r1 = torch.ops.aten.slice_scatter.default(x1, y1, start=2)
r2 = torch.ops.aten.slice_scatter.default(x2, y2, start=2)
r3 = torch.ops.aten.slice_scatter.out(x1, y1, out=out, start=2)
self.checkType(r1, "cpu", (4, 4))
self.checkType(r2, "cuda", (4, 4))
self.checkType(r3, "cpu", (4, 4))
self.checkType(out, "cpu", (4, 4))
def test__adaptive_avg_pool2d_backward(self):
with FakeTensorMode():
grad_out = torch.rand(2, 3, 4, 4)
inp = torch.rand(2, 3, 4, 4).to(memory_format=torch.channels_last)
grad_in = torch.ops.aten._adaptive_avg_pool2d_backward(grad_out, inp)
self.assertTrue(
torch._prims_common.suggest_memory_format(grad_in)
== torch.channels_last
)
def test_export_numpy(self):
class MyNumpyModel(torch.nn.Module):
def forward(self, input):
input = input.numpy()
return input + np.random.randn(*input.shape)
with FakeTensorMode():
ep = torch.export.export(
MyNumpyModel(), args=(torch.randn(1000),), strict=True
)
self.assertTrue(isinstance(ep, torch.export.ExportedProgram))
def test_unsqueeze_copy(self):
shape_env = ShapeEnv()
t1 = torch.ones(2, 2, 768)
with FakeTensorMode(shape_env=shape_env) as fake_mode:
t = fake_mode.from_tensor(
t1,
symbolic_context=StatelessSymbolicContext(
dynamic_sizes=[
DimDynamic.DYNAMIC,
DimDynamic.STATIC,
DimDynamic.STATIC,
],
),
)
self.assertEqual(t.shape[0], torch.ops.aten.unsqueeze_copy(t, 1).shape[0])
def test_alias_call(self):
fwAD = torch.autograd.forward_ad
def f(x):
return 4312491 * x
with torch._subclasses.fake_tensor.FakeTensorMode():
with fwAD.dual_level():
x = torch.randn(3, device="cpu")
y = torch.ones_like(x)
dual = fwAD.make_dual(x, y)
r = f(dual)
self.assertIsInstance(r, FakeTensor)
self.assertEqual(r.size(), [3])
@parametrize("reverse", [False, True])
def test_scan(self, reverse):
    # scan over fake tensors (both scan directions) should produce fake
    # carry and output tensors rather than erroring out.
    def add(x, y):
        return x + y, x + y

    with torch._subclasses.fake_tensor.FakeTensorMode():
        xs = torch.randn((3, 5, 7), device="cpu")
        init = torch.randn((3, 7), device="cpu")
        carry_and_out = scan(add, init, xs, dim=1, reverse=reverse)
        self.assertIsInstance(carry_and_out[0], FakeTensor)
        self.assertIsInstance(carry_and_out[1], FakeTensor)
def test_fast_div_int_to_float(self):
mode = FakeTensorMode()
with mode:
x = torch.empty(2, 2, device="cpu", dtype=torch.int32)
y = torch.empty(2, 2, device="cpu", dtype=torch.int32)
from torch._subclasses.fake_impls import get_fast_op_impls
fast_div = get_fast_op_impls()[torch.ops.aten.div.Tensor]
z = fast_div(mode, x, y)
self.assertEqual(z.dtype, torch.float32)
def test_fast_div(self):
mode = FakeTensorMode()
with mode:
x = torch.empty(2, 2, device="cpu", dtype=torch.int32)
from torch._subclasses.fake_impls import get_fast_op_impls
fast_div = get_fast_op_impls()[torch.ops.aten.div.Tensor]
y = fast_div(mode, x, 2)
self.assertEqual(y.dtype, torch.float32)
def test_nanmean_out(self):
# Regression test to ensure we don't error out.
with torch._subclasses.fake_tensor.FakeTensorMode() as mode:
x = torch.randn(10)
out = torch.empty(())
torch.nanmean(x, out=out)
self.assertEqual(out.dtype, x.dtype)
def test_unbind_copy_out(self):
# Regression test to ensure we don't error out.
with torch._subclasses.fake_tensor.FakeTensorMode() as mode:
eye = torch.eye(3)
out = (torch.zeros(3), torch.zeros(3), torch.zeros(3))
torch.unbind_copy(eye, out=out)
self.assertEqual(out[0].dtype, eye.dtype)
self.assertEqual(out[1].dtype, eye.dtype)
self.assertEqual(out[2].dtype, eye.dtype)
# Expand @parametrize-decorated tests on FakeTensorTest into concrete methods.
instantiate_parametrized_tests(FakeTensorTest)
def make_propagate_real_tensors_cls(cls):
    """Register a 'PropagateRealTensors' variant of *cls* in module globals.

    The generated test class patches the functorch config flag
    ``fake_tensor_propagate_real_tensors`` to ``True``, honors per-test
    ``_expected_failure_propagate_real_tensors`` markers as expected failures,
    and is decorated with ``skipIfTorchDynamo``.
    """
    patched = make_test_cls_with_patches(
        cls,
        "PropagateRealTensors",
        "_propagate_real_tensors",
        (torch._functorch.config, "fake_tensor_propagate_real_tensors", True),
        xfail_prop="_expected_failure_propagate_real_tensors",
        decorator=skipIfTorchDynamo("propagate_real_tensors affects Dynamo"),
    )
    # Make the synthesized class look like it was defined here so test
    # discovery in this module picks it up.
    patched.__file__ = __file__
    patched.__module__ = __name__
    globals()[patched.__name__] = patched
# Register the real-tensor-propagation variant of FakeTensorTest.
make_propagate_real_tensors_cls(FakeTensorTest)
| FakeTensorTest |
python | huggingface__transformers | src/transformers/models/albert/modeling_albert.py | {
"start": 1636,
"end": 5675
} | class ____(nn.Module):
"""
Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config: AlbertConfig):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.register_buffer(
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
# NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0])
buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| AlbertEmbeddings |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 129515,
"end": 131726
} | class ____(Response):
"""
Response of projects.get_task_tags endpoint.
:param tags: The list of unique tag values
:type tags: Sequence[str]
:param system_tags: The list of unique system tag values. Returned only if
'include_system' is set to 'true' in the request
:type system_tags: Sequence[str]
"""
_service = "projects"
_action = "get_task_tags"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"system_tags": {
"description": "The list of unique system tag values. Returned only if 'include_system' is set to 'true' in the request",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "The list of unique tag values",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, **kwargs: Any
) -> None:
super(GetTaskTagsResponse, self).__init__(**kwargs)
self.tags = tags
self.system_tags = system_tags
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
| GetTaskTagsResponse |
python | doocs__leetcode | solution/2400-2499/2455.Average Value of Even Numbers That Are Divisible by Three/Solution.py | {
"start": 0,
"end": 221
} | class ____:
def averageValue(self, nums: List[int]) -> int:
s = n = 0
for x in nums:
if x % 6 == 0:
s += x
n += 1
return 0 if n == 0 else s // n
| Solution |
python | django__django | django/utils/dateformat.py | {
"start": 1485,
"end": 5935
} | class ____(Formatter):
def __init__(self, obj):
self.data = obj
self.timezone = None
if isinstance(obj, datetime):
# Timezone is only supported when formatting datetime objects, not
# date objects (timezone information not appropriate), or time
# objects (against established django policy).
if is_naive(obj):
timezone = get_default_timezone()
else:
timezone = obj.tzinfo
if not _datetime_ambiguous_or_imaginary(obj, timezone):
self.timezone = timezone
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _("p.m.")
return _("a.m.")
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _("PM")
return _("AM")
def e(self):
"""
Timezone name.
If timezone information is not available, return an empty string.
"""
if not self.timezone:
return ""
try:
if getattr(self.data, "tzinfo", None):
return self.data.tzname() or ""
except NotImplementedError:
pass
return ""
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
hour = self.data.hour % 12 or 12
minute = self.data.minute
return "%d:%02d" % (hour, minute) if minute else hour
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
return self.data.hour % 12 or 12
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return "%02d" % (self.data.hour % 12 or 12)
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return "%02d" % self.data.hour
def i(self):
"Minutes; i.e. '00' to '59'"
return "%02d" % self.data.minute
def O(self): # NOQA: E743, E741
"""
Difference to Greenwich time in hours; e.g. '+0200', '-0430'.
If timezone information is not available, return an empty string.
"""
if self.timezone is None:
return ""
offset = self.timezone.utcoffset(self.data)
seconds = offset.days * 86400 + offset.seconds
sign = "-" if seconds < 0 else "+"
seconds = abs(seconds)
return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left
off if they're zero and the strings 'midnight' and 'noon' if
appropriate. Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon',
'12:30 p.m.' Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _("midnight")
if self.data.minute == 0 and self.data.hour == 12:
return _("noon")
return "%s %s" % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return "%02d" % self.data.second
def T(self):
"""
Time zone of this machine; e.g. 'EST' or 'MDT'.
If timezone information is not available, return an empty string.
"""
if self.timezone is None:
return ""
return str(self.timezone.tzname(self.data))
def u(self):
"Microseconds; i.e. '000000' to '999999'"
return "%06d" % self.data.microsecond
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
If timezone information is not available, return an empty string.
"""
if self.timezone is None:
return ""
offset = self.timezone.utcoffset(self.data)
# `offset` is a datetime.timedelta. For negative values (to the west of
# UTC) only days can be negative (days=-1) and seconds are always
# positive.
# e.g.: UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
# Positive offsets have days=0
return offset.days * 86400 + offset.seconds
| TimeFormat |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/pex_builder/parse_workspace.py | {
"start": 104,
"end": 1220
} | class ____:
name: str
directory: str
build_folder: str
location_file: str
def get_locations(dagster_cloud_yaml_file) -> list[Location]:
"""Returns list of locations parsed from dagster_cloud.yaml."""
base_dir = os.path.abspath(os.path.dirname(dagster_cloud_yaml_file))
with open(dagster_cloud_yaml_file, encoding="utf-8") as yaml_file:
workspace_contents = yaml_file.read()
workspace_contents_yaml = yaml.safe_load(workspace_contents)
locations = []
for location in workspace_contents_yaml["locations"]:
location_dir = os.path.join(
base_dir, location.get("build", {"directory": "."}).get("directory")
)
locations.append(
Location(
name=location["location_name"],
directory=location_dir,
build_folder=location_dir,
location_file=os.path.abspath(dagster_cloud_yaml_file),
)
)
ui.print(f"Parsed {len(locations)} locations from {locations}")
return locations
| Location |
python | tox-dev__tox | src/tox/config/loader/toml/_replace.py | {
"start": 3949,
"end": 5952
} | class ____(ReplaceReference):
def __init__(self, conf: Config, loader: TomlLoader) -> None:
self.conf = conf
self.loader = loader
def __call__(self, value: str, conf_args: ConfigLoadArgs) -> str | None:
if match := _REFERENCE_PATTERN.search(value):
settings = match.groupdict()
exception: Exception | None = None
try:
for src in self._config_value_sources(settings["section"], conf_args.env_name):
try:
value = src.load(settings["key"], conf_args.chain)
except KeyError as exc: # if fails, keep trying maybe another source can satisfy # noqa: PERF203
exception = exc
else:
return stringify(value)[0]
except Exception as exc: # noqa: BLE001
exception = exc
if exception is not None:
if isinstance(exception, KeyError): # if the lookup failed replace - else keep
default = settings["default"]
if default is not None:
return default
raise exception
return value
def _config_value_sources(self, sec: str | None, current_env: str | None) -> Iterator[ConfigSet | RawLoader]:
if sec is None:
if current_env is not None: # pragma: no branch
yield self.conf.get_env(current_env)
yield self.conf.core
return
section: TomlSection = self.loader.section # type: ignore[assignment]
core_prefix = section.core_prefix()
env_prefix = section.env_prefix()
if sec.startswith(env_prefix):
env = sec[len(env_prefix) + len(section.SEP) :]
yield self.conf.get_env(env)
else:
yield RawLoader(self.loader, sec)
if sec == core_prefix:
yield self.conf.core # try via registered configs
| TomlReplaceLoader |
python | streamlit__streamlit | lib/streamlit/elements/layouts.py | {
"start": 1953,
"end": 47147
} | class ____:
@gather_metrics("container")
def container(
self,
*,
border: bool | None = None,
key: Key | None = None,
width: Width = "stretch",
height: Height = "content",
horizontal: bool = False,
horizontal_alignment: HorizontalAlignment = "left",
vertical_alignment: VerticalAlignment = "top",
gap: Gap | None = "small",
) -> DeltaGenerator:
"""Insert a multi-element container.
Inserts an invisible container into your app that can be used to hold
multiple elements. This allows you to, for example, insert multiple
elements into your app out of order.
To add elements to the returned container, you can use the ``with``
notation (preferred) or just call commands directly on the returned
object. See examples below.
Parameters
----------
border : bool or None
Whether to show a border around the container. If ``None`` (default), a
border is shown if the container is set to a fixed height and not
shown otherwise.
key : str or None
An optional string to give this container a stable identity.
Additionally, if ``key`` is provided, it will be used as CSS
class name prefixed with ``st-key-``.
width : "stretch", "content", or int
The width of the container. This can be one of the following:
- ``"stretch"`` (default): The width of the container matches the
width of the parent container.
- ``"content"``: The width of the container matches the width of
its content.
- An integer specifying the width in pixels: The container has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the container matches the width
of the parent container.
height : "content", "stretch", or int
The height of the container. This can be one of the following:
- ``"content"`` (default): The height of the container matches the
height of its content.
- ``"stretch"``: The height of the container matches the height of
its content or the height of the parent container, whichever is
larger. If the container is not in a parent container, the height
of the container matches the height of its content.
- An integer specifying the height in pixels: The container has a
fixed height. If the content is larger than the specified
height, scrolling is enabled.
.. note::
Use scrolling containers sparingly. If you use scrolling
containers, avoid heights that exceed 500 pixels. Otherwise,
the scroll surface of the container might cover the majority of
the screen on mobile devices, which makes it hard to scroll the
rest of the app.
horizontal : bool
Whether to use horizontal flexbox layout. If this is ``False``
(default), the container's elements are laid out vertically. If
this is ``True``, the container's elements are laid out
horizontally and will overflow to the next line if they don't fit
within the container's width.
horizontal_alignment : "left", "center", "right", or "distribute"
The horizontal alignment of the elements inside the container. This
can be one of the following:
- ``"left"`` (default): Elements are aligned to the left side of
the container.
- ``"center"``: Elements are horizontally centered inside the
container.
- ``"right"``: Elements are aligned to the right side of the
container.
- ``"distribute"``: Elements are distributed evenly in the
container. This increases the horizontal gap between elements to
fill the width of the container. A standalone element is aligned
to the left.
When ``horizontal`` is ``False``, ``"distribute"`` aligns the
elements the same as ``"left"``.
vertical_alignment : "top", "center", "bottom", or "distribute"
The vertical alignment of the elements inside the container. This
can be one of the following:
- ``"top"`` (default): Elements are aligned to the top of the
container.
- ``"center"``: Elements are vertically centered inside the
container.
- ``"bottom"``: Elements are aligned to the bottom of the
container.
- ``"distribute"``: Elements are distributed evenly in the
container. This increases the vertical gap between elements to
fill the height of the container. A standalone element is aligned
to the top.
When ``horizontal`` is ``True``, ``"distribute"`` aligns the
elements the same as ``"top"``.
gap : "small", "medium", "large", or None
The minimum gap size between the elements inside the container.
This can be one of the following:
- ``"small"`` (default): 1rem gap between the elements.
- ``"medium"``: 2rem gap between the elements.
- ``"large"``: 4rem gap between the elements.
- ``None``: No gap between the elements.
The rem unit is relative to the ``theme.baseFontSize``
configuration option.
The minimum gap applies to both the vertical and horizontal gaps
between the elements. Elements may have larger gaps in one
direction if you use a distributed horizontal alignment or fixed
height.
Examples
--------
**Example 1: Inserting elements using ``with`` notation**
You can use the ``with`` statement to insert any element into a
container.
>>> import streamlit as st
>>>
>>> with st.container():
... st.write("This is inside the container")
...
... # You can call any Streamlit command, including custom components:
... st.bar_chart(np.random.randn(50, 3))
>>>
>>> st.write("This is outside the container")
.. output ::
https://doc-container1.streamlit.app/
height: 520px
**Example 2: Inserting elements out of order**
When you create a container, its position in the app remains fixed and
you can add elements to it at any time. This allows you to insert
elements out of order in your app. You can also write to the container
by calling commands directly on the container object.
>>> import streamlit as st
>>>
>>> container = st.container(border=True)
>>> container.write("This is inside the container")
>>> st.write("This is outside the container")
>>>
>>> container.write("This is inside too")
.. output ::
https://doc-container2.streamlit.app/
height: 300px
**Example 3: Grid layout with columns and containers**
You can create a grid with a fixed number of elements per row by using
columns and containers.
>>> import streamlit as st
>>>
>>> row1 = st.columns(3)
>>> row2 = st.columns(3)
>>>
>>> for col in row1 + row2:
>>> tile = col.container(height=120)
>>> tile.title(":balloon:")
.. output ::
https://doc-container3.streamlit.app/
height: 350px
**Example 4: Vertically scrolling container**
You can create a vertically scrolling container by setting a fixed
height.
>>> import streamlit as st
>>>
>>> long_text = "Lorem ipsum. " * 1000
>>>
>>> with st.container(height=300):
>>> st.markdown(long_text)
.. output ::
https://doc-container4.streamlit.app/
height: 400px
**Example 5: Horizontal container**
You can create a row of widgets using a horizontal container. Use
``horizontal_alignment`` to specify the alignment of the elements.
>>> import streamlit as st
>>>
>>> flex = st.container(horizontal=True, horizontal_alignment="right")
>>>
>>> for card in range(3):
>>> flex.button(f"Button {card + 1}")
.. output ::
https://doc-container5.streamlit.app/
height: 250px
"""
key = to_key(key)
block_proto = BlockProto()
block_proto.allow_empty = False
block_proto.flex_container.border = border or False
block_proto.flex_container.gap_config.gap_size = get_gap_size(
gap, "st.container"
)
validate_horizontal_alignment(horizontal_alignment)
validate_vertical_alignment(vertical_alignment)
if horizontal:
block_proto.flex_container.wrap = True
block_proto.flex_container.direction = (
BlockProto.FlexContainer.Direction.HORIZONTAL
)
block_proto.flex_container.justify = get_justify(horizontal_alignment)
block_proto.flex_container.align = get_align(vertical_alignment)
else:
block_proto.flex_container.wrap = False
block_proto.flex_container.direction = (
BlockProto.FlexContainer.Direction.VERTICAL
)
block_proto.flex_container.justify = get_justify(vertical_alignment)
block_proto.flex_container.align = get_align(horizontal_alignment)
validate_width(width, allow_content=True)
block_proto.width_config.CopyFrom(get_width_config(width))
if isinstance(height, int) or border:
block_proto.allow_empty = True
if border is not None:
block_proto.flex_container.border = border
elif isinstance(height, int):
block_proto.flex_container.border = True
else:
block_proto.flex_container.border = False
validate_height(height, allow_content=True)
block_proto.height_config.CopyFrom(get_height_config(height))
if key:
# At the moment, the ID is only used for extracting the
# key on the frontend and setting it as CSS class.
# There are plans to use the ID for other container features
# in the future. This might require including more container
# parameters in the ID calculation.
block_proto.id = compute_and_register_element_id(
"container", user_key=key, dg=None, key_as_main_identity=False
)
return self.dg._block(block_proto)
@gather_metrics("columns")
def columns(
self,
spec: SpecType,
*,
gap: Gap | None = "small",
vertical_alignment: Literal["top", "center", "bottom"] = "top",
border: bool = False,
width: WidthWithoutContent = "stretch",
) -> list[DeltaGenerator]:
"""Insert containers laid out as side-by-side columns.
Inserts a number of multi-element containers laid out side-by-side and
returns a list of container objects.
To add elements to the returned containers, you can use the ``with`` notation
(preferred) or just call methods directly on the returned object. See
examples below.
.. note::
To follow best design practices and maintain a good appearance on
all screen sizes, don't nest columns more than once.
Parameters
----------
spec : int or Iterable of numbers
Controls the number and width of columns to insert. Can be one of:
- An integer that specifies the number of columns. All columns have equal
width in this case.
- An Iterable of numbers (int or float) that specify the relative width of
each column. E.g. ``[0.7, 0.3]`` creates two columns where the first
one takes up 70% of the available with and the second one takes up 30%.
Or ``[1, 2, 3]`` creates three columns where the second one is two times
the width of the first one, and the third one is three times that width.
gap : "small", "medium", "large", or None
The size of the gap between the columns. This can be one of the
following:
- ``"small"`` (default): 1rem gap between the columns.
- ``"medium"``: 2rem gap between the columns.
- ``"large"``: 4rem gap between the columns.
- ``None``: No gap between the columns.
The rem unit is relative to the ``theme.baseFontSize``
configuration option.
vertical_alignment : "top", "center", or "bottom"
The vertical alignment of the content inside the columns. The
default is ``"top"``.
border : bool
Whether to show a border around the column containers. If this is
``False`` (default), no border is shown. If this is ``True``, a
border is shown around each column.
width : "stretch" or int
The width of the column group. This can be one of the following:
- ``"stretch"`` (default): The width of the column group matches the
width of the parent container.
- An integer specifying the width in pixels: The column group has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the column group matches the
width of the parent container.
Returns
-------
list of containers
A list of container objects.
Examples
--------
**Example 1: Use context management**
You can use the ``with`` statement to insert any element into a column:
>>> import streamlit as st
>>>
>>> col1, col2, col3 = st.columns(3)
>>>
>>> with col1:
... st.header("A cat")
... st.image("https://static.streamlit.io/examples/cat.jpg")
>>>
>>> with col2:
... st.header("A dog")
... st.image("https://static.streamlit.io/examples/dog.jpg")
>>>
>>> with col3:
... st.header("An owl")
... st.image("https://static.streamlit.io/examples/owl.jpg")
.. output ::
https://doc-columns1.streamlit.app/
height: 620px
**Example 2: Use commands as container methods**
You can just call methods directly on the returned objects:
>>> import streamlit as st
>>> from numpy.random import default_rng as rng
>>>
>>> df = rng(0).standard_normal((10, 1))
>>> col1, col2 = st.columns([3, 1])
>>>
>>> col1.subheader("A wide column with a chart")
>>> col1.line_chart(df)
>>>
>>> col2.subheader("A narrow column with the data")
>>> col2.write(df)
.. output ::
https://doc-columns2.streamlit.app/
height: 550px
**Example 3: Align widgets**
Use ``vertical_alignment="bottom"`` to align widgets.
>>> import streamlit as st
>>>
>>> left, middle, right = st.columns(3, vertical_alignment="bottom")
>>>
>>> left.text_input("Write something")
>>> middle.button("Click me", use_container_width=True)
>>> right.checkbox("Check me")
.. output ::
https://doc-columns-bottom-widgets.streamlit.app/
height: 200px
**Example 4: Use vertical alignment to create grids**
Adjust vertical alignment to customize your grid layouts.
>>> import streamlit as st
>>>
>>> vertical_alignment = st.selectbox(
>>> "Vertical alignment", ["top", "center", "bottom"], index=2
>>> )
>>>
>>> left, middle, right = st.columns(3, vertical_alignment=vertical_alignment)
>>> left.image("https://static.streamlit.io/examples/cat.jpg")
>>> middle.image("https://static.streamlit.io/examples/dog.jpg")
>>> right.image("https://static.streamlit.io/examples/owl.jpg")
.. output ::
https://doc-columns-vertical-alignment.streamlit.app/
height: 600px
**Example 5: Add borders**
Add borders to your columns instead of nested containers for consistent
heights.
>>> import streamlit as st
>>>
>>> left, middle, right = st.columns(3, border=True)
>>>
>>> left.markdown("Lorem ipsum " * 10)
>>> middle.markdown("Lorem ipsum " * 5)
>>> right.markdown("Lorem ipsum ")
.. output ::
https://doc-columns-borders.streamlit.app/
height: 250px
"""
weights = spec
if isinstance(weights, int):
# If the user provided a single number, expand into equal weights.
# E.g. (1,) * 3 => (1, 1, 1)
# NOTE: A negative/zero spec will expand into an empty tuple.
weights = (1,) * weights
if len(weights) == 0 or any(weight <= 0 for weight in weights):
raise StreamlitInvalidColumnSpecError()
vertical_alignment_mapping: dict[
str, BlockProto.Column.VerticalAlignment.ValueType
] = {
"top": BlockProto.Column.VerticalAlignment.TOP,
"center": BlockProto.Column.VerticalAlignment.CENTER,
"bottom": BlockProto.Column.VerticalAlignment.BOTTOM,
}
if vertical_alignment not in vertical_alignment_mapping:
raise StreamlitInvalidVerticalAlignmentError(
vertical_alignment=vertical_alignment,
element_type="st.columns",
)
gap_size = get_gap_size(gap, "st.columns")
gap_config = GapConfig()
gap_config.gap_size = gap_size
def column_proto(normalized_weight: float) -> BlockProto:
col_proto = BlockProto()
col_proto.column.weight = normalized_weight
col_proto.column.gap_config.CopyFrom(gap_config)
col_proto.column.vertical_alignment = vertical_alignment_mapping[
vertical_alignment
]
col_proto.column.show_border = border
col_proto.allow_empty = True
return col_proto
block_proto = BlockProto()
block_proto.flex_container.direction = (
BlockProto.FlexContainer.Direction.HORIZONTAL
)
block_proto.flex_container.wrap = True
block_proto.flex_container.gap_config.CopyFrom(gap_config)
block_proto.flex_container.scale = 1
block_proto.flex_container.align = BlockProto.FlexContainer.Align.STRETCH
validate_width(width=width)
block_proto.width_config.CopyFrom(get_width_config(width=width))
row = self.dg._block(block_proto)
total_weight = sum(weights)
return [row._block(column_proto(w / total_weight)) for w in weights]
@gather_metrics("tabs")
def tabs(
self,
tabs: Sequence[str],
*,
width: WidthWithoutContent = "stretch",
default: str | None = None,
) -> Sequence[DeltaGenerator]:
r"""Insert containers separated into tabs.
Inserts a number of multi-element containers as tabs.
Tabs are a navigational element that allows users to easily
move between groups of related content.
To add elements to the returned containers, you can use the ``with`` notation
(preferred) or just call methods directly on the returned object. See
the examples below.
.. note::
All content within every tab is computed and sent to the frontend,
regardless of which tab is selected. Tabs do not currently support
conditional rendering. If you have a slow-loading tab, consider
using a widget like ``st.segmented_control`` to conditionally
render content instead.
Parameters
----------
tabs : list of str
Creates a tab for each string in the list. The first tab is selected
by default. The string is used as the name of the tab and can
optionally contain GitHub-flavored Markdown of the following types:
Bold, Italics, Strikethroughs, Inline Code, Links, and Images.
Images display like icons, with a max height equal to the font
height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
width : "stretch" or int
The width of the tab container. This can be one of the following:
- ``"stretch"`` (default): The width of the container matches the
width of the parent container.
- An integer specifying the width in pixels: The container has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the container matches the width
of the parent container.
default : str or None
The default tab to select. If this is ``None`` (default), the first
tab is selected. If this is a string, it must be one of the tab
labels. If two tabs have the same label as ``default``, the first
one is selected.
Returns
-------
list of containers
A list of container objects.
Examples
--------
*Example 1: Use context management*
You can use ``with`` notation to insert any element into a tab:
>>> import streamlit as st
>>>
>>> tab1, tab2, tab3 = st.tabs(["Cat", "Dog", "Owl"])
>>>
>>> with tab1:
... st.header("A cat")
... st.image("https://static.streamlit.io/examples/cat.jpg", width=200)
>>> with tab2:
... st.header("A dog")
... st.image("https://static.streamlit.io/examples/dog.jpg", width=200)
>>> with tab3:
... st.header("An owl")
... st.image("https://static.streamlit.io/examples/owl.jpg", width=200)
.. output ::
https://doc-tabs1.streamlit.app/
height: 620px
*Example 2: Call methods directly*
You can call methods directly on the returned objects:
>>> import streamlit as st
>>> from numpy.random import default_rng as rng
>>>
>>> df = rng(0).standard_normal((10, 1))
>>>
>>> tab1, tab2 = st.tabs(["📈 Chart", "🗃 Data"])
>>>
>>> tab1.subheader("A tab with a chart")
>>> tab1.line_chart(df)
>>>
>>> tab2.subheader("A tab with the data")
>>> tab2.write(df)
.. output ::
https://doc-tabs2.streamlit.app/
height: 700px
*Example 3: Set the default tab and style the tab labels*
Use the ``default`` parameter to set the default tab. You can also use
Markdown in the tab labels.
>>> import streamlit as st
>>>
>>> tab1, tab2, tab3 = st.tabs(
... [":cat: Cat", ":dog: Dog", ":rainbow[Owl]"], default=":rainbow[Owl]"
... )
>>>
>>> with tab1:
>>> st.header("A cat")
>>> st.image("https://static.streamlit.io/examples/cat.jpg", width=200)
>>> with tab2:
>>> st.header("A dog")
>>> st.image("https://static.streamlit.io/examples/dog.jpg", width=200)
>>> with tab3:
>>> st.header("An owl")
>>> st.image("https://static.streamlit.io/examples/owl.jpg", width=200)
.. output ::
https://doc-tabs3.streamlit.app/
height: 620px
"""
if not tabs:
raise StreamlitAPIException(
"The input argument to st.tabs must contain at least one tab label."
)
if default and default not in tabs:
raise StreamlitAPIException(
f"The default tab '{default}' is not in the list of tabs."
)
if any(not isinstance(tab, str) for tab in tabs):
raise StreamlitAPIException(
"The tabs input list to st.tabs is only allowed to contain strings."
)
def tab_proto(label: str) -> BlockProto:
tab_proto = BlockProto()
tab_proto.tab.label = label
tab_proto.allow_empty = True
return tab_proto
block_proto = BlockProto()
block_proto.tab_container.SetInParent()
validate_width(width)
block_proto.width_config.CopyFrom(get_width_config(width))
default_index = tabs.index(default) if default else 0
block_proto.tab_container.default_tab_index = default_index
tab_container = self.dg._block(block_proto)
return tuple(tab_container._block(tab_proto(tab)) for tab in tabs)
@gather_metrics("expander")
def expander(
self,
label: str,
expanded: bool = False,
*,
icon: str | None = None,
width: WidthWithoutContent = "stretch",
) -> DeltaGenerator:
r"""Insert a multi-element container that can be expanded/collapsed.
Inserts a container into your app that can be used to hold multiple elements
and can be expanded or collapsed by the user. When collapsed, all that is
visible is the provided label.
To add elements to the returned container, you can use the ``with`` notation
(preferred) or just call methods directly on the returned object. See
examples below.
.. note::
All content within the expander is computed and sent to the
frontend, even if the expander is closed.
To follow best design practices and maintain a good appearance on
all screen sizes, don't nest expanders.
Parameters
----------
label : str
A string to use as the header for the expander. The label can optionally
contain GitHub-flavored Markdown of the following types: Bold, Italics,
Strikethroughs, Inline Code, Links, and Images. Images display like
icons, with a max height equal to the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
expanded : bool
If True, initializes the expander in "expanded" state. Defaults to
False (collapsed).
icon : str, None
An optional emoji or icon to display next to the expander label. If ``icon``
is ``None`` (default), no icon is displayed. If ``icon`` is a
string, the following options are valid:
- A single-character emoji. For example, you can set ``icon="🚨"``
or ``icon="🔥"``. Emoji short codes are not supported.
- An icon from the Material Symbols library (rounded style) in the
format ``":material/icon_name:"`` where "icon_name" is the name
of the icon in snake case.
For example, ``icon=":material/thumb_up:"`` will display the
Thumb Up icon. Find additional icons in the `Material Symbols \
<https://fonts.google.com/icons?icon.set=Material+Symbols&icon.style=Rounded>`_
font library.
- ``"spinner"``: Displays a spinner as an icon.
width : "stretch" or int
The width of the expander container. This can be one of the following:
- ``"stretch"`` (default): The width of the container matches the
width of the parent container.
- An integer specifying the width in pixels: The container has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the container matches the width
of the parent container.
Examples
--------
You can use the ``with`` notation to insert any element into an expander
>>> import streamlit as st
>>>
>>> st.bar_chart({"data": [1, 5, 2, 6, 2, 1]})
>>>
>>> with st.expander("See explanation"):
... st.write('''
... The chart above shows some numbers I picked for you.
... I rolled actual dice for these, so they're *guaranteed* to
... be random.
... ''')
... st.image("https://static.streamlit.io/examples/dice.jpg")
.. output ::
https://doc-expander.streamlit.app/
height: 750px
Or you can just call methods directly on the returned objects:
>>> import streamlit as st
>>>
>>> st.bar_chart({"data": [1, 5, 2, 6, 2, 1]})
>>>
>>> expander = st.expander("See explanation")
>>> expander.write('''
... The chart above shows some numbers I picked for you.
... I rolled actual dice for these, so they're *guaranteed* to
... be random.
... ''')
>>> expander.image("https://static.streamlit.io/examples/dice.jpg")
.. output ::
https://doc-expander.streamlit.app/
height: 750px
"""
if label is None:
raise StreamlitAPIException("A label is required for an expander")
expandable_proto = BlockProto.Expandable()
expandable_proto.expanded = expanded
expandable_proto.label = label
if icon is not None:
expandable_proto.icon = validate_icon_or_emoji(icon)
block_proto = BlockProto()
block_proto.allow_empty = True
block_proto.expandable.CopyFrom(expandable_proto)
validate_width(width)
block_proto.width_config.CopyFrom(get_width_config(width))
return self.dg._block(block_proto=block_proto)
@gather_metrics("popover")
def popover(
self,
label: str,
*,
type: Literal["primary", "secondary", "tertiary"] = "secondary",
help: str | None = None,
icon: str | None = None,
disabled: bool = False,
use_container_width: bool | None = None,
width: Width = "content",
) -> DeltaGenerator:
r"""Insert a popover container.
Inserts a multi-element container as a popover. It consists of a button-like
element and a container that opens when the button is clicked.
Opening and closing the popover will not trigger a rerun. Interacting
with widgets inside of an open popover will rerun the app while keeping
the popover open. Clicking outside of the popover will close it.
To add elements to the returned container, you can use the "with"
notation (preferred) or just call methods directly on the returned object.
See examples below.
.. note::
To follow best design practices, don't nest popovers.
Parameters
----------
label : str
The label of the button that opens the popover container.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
help : str or None
A tooltip that gets displayed when the popover button is hovered
over. If this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
type : "primary", "secondary", or "tertiary"
An optional string that specifies the button type. This can be one
of the following:
- ``"primary"``: The button's background is the app's primary color
for additional emphasis.
- ``"secondary"`` (default): The button's background coordinates
with the app's background color for normal emphasis.
- ``"tertiary"``: The button is plain text without a border or
background for subtlety.
icon : str
An optional emoji or icon to display next to the button label. If ``icon``
is ``None`` (default), no icon is displayed. If ``icon`` is a
string, the following options are valid:
- A single-character emoji. For example, you can set ``icon="🚨"``
or ``icon="🔥"``. Emoji short codes are not supported.
- An icon from the Material Symbols library (rounded style) in the
format ``":material/icon_name:"`` where "icon_name" is the name
of the icon in snake case.
For example, ``icon=":material/thumb_up:"`` will display the
Thumb Up icon. Find additional icons in the `Material Symbols \
<https://fonts.google.com/icons?icon.set=Material+Symbols&icon.style=Rounded>`_
font library.
- ``"spinner"``: Displays a spinner as an icon.
disabled : bool
An optional boolean that disables the popover button if set to
``True``. The default is ``False``.
use_container_width : bool
Whether to expand the button's width to fill its parent container.
If ``use_container_width`` is ``False`` (default), Streamlit sizes
the button to fit its content. If ``use_container_width`` is
``True``, the width of the button matches its parent container.
In both cases, if the content of the button is wider than the
parent container, the content will line wrap.
The popover container's minimum width matches the width of its
button. The popover container may be wider than its button to fit
the container's content.
.. deprecated::
``use_container_width`` is deprecated and will be removed in a
future release. For ``use_container_width=True``, use
``width="stretch"``. For ``use_container_width=False``, use
``width="content"``.
width : int, "stretch", or "content"
The width of the button. This can be one of the following:
- ``"content"`` (default): The width of the button matches the
width of its content, but doesn't exceed the width of the parent
container.
- ``"stretch"``: The width of the button matches the width of the
parent container.
- An integer specifying the width in pixels: The button has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the button matches the width
of the parent container.
The popover container's minimum width matches the width of its
button. The popover container may be wider than its button to fit
the container's contents.
Examples
--------
You can use the ``with`` notation to insert any element into a popover:
>>> import streamlit as st
>>>
>>> with st.popover("Open popover"):
>>> st.markdown("Hello World 👋")
>>> name = st.text_input("What's your name?")
>>>
>>> st.write("Your name:", name)
.. output ::
https://doc-popover.streamlit.app/
height: 400px
Or you can just call methods directly on the returned objects:
>>> import streamlit as st
>>>
>>> popover = st.popover("Filter items")
>>> red = popover.checkbox("Show red items.", True)
>>> blue = popover.checkbox("Show blue items.", True)
>>>
>>> if red:
... st.write(":red[This is a red item.]")
>>> if blue:
... st.write(":blue[This is a blue item.]")
.. output ::
https://doc-popover2.streamlit.app/
height: 400px
"""
if label is None:
raise StreamlitAPIException("A label is required for a popover")
if use_container_width is not None:
width = "stretch" if use_container_width else "content"
# Checks whether the entered button type is one of the allowed options
if type not in ["primary", "secondary", "tertiary"]:
raise StreamlitAPIException(
'The type argument to st.popover must be "primary", "secondary", or "tertiary". '
f'\nThe argument passed was "{type}".'
)
popover_proto = BlockProto.Popover()
popover_proto.label = label
popover_proto.disabled = disabled
popover_proto.type = type
if help:
popover_proto.help = str(help)
if icon is not None:
popover_proto.icon = validate_icon_or_emoji(icon)
block_proto = BlockProto()
block_proto.allow_empty = True
block_proto.popover.CopyFrom(popover_proto)
validate_width(width, allow_content=True)
block_proto.width_config.CopyFrom(get_width_config(width))
return self.dg._block(block_proto=block_proto)
@gather_metrics("status")
def status(
self,
label: str,
*,
expanded: bool = False,
state: Literal["running", "complete", "error"] = "running",
width: WidthWithoutContent = "stretch",
) -> StatusContainer:
r"""Insert a status container to display output from long-running tasks.
Inserts a container into your app that is typically used to show the status and
details of a process or task. The container can hold multiple elements and can
be expanded or collapsed by the user similar to ``st.expander``.
When collapsed, all that is visible is the status icon and label.
The label, state, and expanded state can all be updated by calling ``.update()``
on the returned object. To add elements to the returned container, you can
use ``with`` notation (preferred) or just call methods directly on the returned
object.
By default, ``st.status()`` initializes in the "running" state. When called using
``with`` notation, it automatically updates to the "complete" state at the end
of the "with" block. See examples below for more details.
.. note::
All content within the status container is computed and sent to the
frontend, even if the status container is closed.
To follow best design practices and maintain a good appearance on
all screen sizes, don't nest status containers.
Parameters
----------
label : str
The initial label of the status container. The label can optionally
contain GitHub-flavored Markdown of the following types: Bold, Italics,
Strikethroughs, Inline Code, Links, and Images. Images display like
icons, with a max height equal to the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
expanded : bool
If True, initializes the status container in "expanded" state. Defaults to
False (collapsed).
state : "running", "complete", or "error"
The initial state of the status container which determines which icon is
shown:
- ``running`` (default): A spinner icon is shown.
- ``complete``: A checkmark icon is shown.
- ``error``: An error icon is shown.
width : "stretch" or int
The width of the status container. This can be one of the following:
- ``"stretch"`` (default): The width of the container matches the
width of the parent container.
- An integer specifying the width in pixels: The container has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the container matches the width
of the parent container.
Returns
-------
StatusContainer
A mutable status container that can hold multiple elements. The label, state,
and expanded state can be updated after creation via ``.update()``.
Examples
--------
You can use the ``with`` notation to insert any element into an status container:
>>> import time
>>> import streamlit as st
>>>
>>> with st.status("Downloading data..."):
... st.write("Searching for data...")
... time.sleep(2)
... st.write("Found URL.")
... time.sleep(1)
... st.write("Downloading data...")
... time.sleep(1)
>>>
>>> st.button("Rerun")
.. output ::
https://doc-status.streamlit.app/
height: 300px
You can also use ``.update()`` on the container to change the label, state,
or expanded state:
>>> import time
>>> import streamlit as st
>>>
>>> with st.status("Downloading data...", expanded=True) as status:
... st.write("Searching for data...")
... time.sleep(2)
... st.write("Found URL.")
... time.sleep(1)
... st.write("Downloading data...")
... time.sleep(1)
... status.update(
... label="Download complete!", state="complete", expanded=False
... )
>>>
>>> st.button("Rerun")
.. output ::
https://doc-status-update.streamlit.app/
height: 300px
"""
return get_dg_singleton_instance().status_container_cls._create(
self.dg, label, expanded=expanded, state=state, width=width
)
def _dialog(
self,
title: str,
*,
dismissible: bool = True,
width: Literal["small", "large", "medium"] = "small",
on_dismiss: Literal["ignore", "rerun"] | WidgetCallback = "ignore",
) -> Dialog:
"""Inserts the dialog container.
Marked as internal because it is used by the dialog_decorator and is not supposed to be used directly.
The dialog_decorator also has a more descriptive docstring since it is user-facing.
"""
return get_dg_singleton_instance().dialog_container_cls._create(
self.dg, title, dismissible=dismissible, width=width, on_dismiss=on_dismiss
)
    @property
    def dg(self) -> DeltaGenerator:
        """Get our DeltaGenerator."""
        # This mixin is always combined into a DeltaGenerator, so the cast is
        # safe; it only informs the type checker and has no runtime effect.
        return cast("DeltaGenerator", self)
| LayoutsMixin |
python | numba__numba | numba/tests/test_ufuncs.py | {
"start": 50985,
"end": 57541
} | class ____(TestCase):
"""Test code generation for the different loop types defined by ufunc.
This test relies on class variables to configure the test. Subclasses
of this class can just override some of these variables to check other
ufuncs in a different compilation context. The variables supported are:
_funcs: the ufuncs to test
_skip_types: letter types that force skipping the loop when testing
if present in the NumPy ufunc signature.
_supported_types: only test loops where all the types in the loop
signature are in this collection. If unset, all.
Note that both, _skip_types and _supported_types must be met for a loop
to be tested.
The NumPy ufunc signature has a form like 'ff->f' (for a binary ufunc
loop taking 2 floats and resulting in a float). In a NumPy ufunc object
you can get a list of supported signatures by accessing the attribute
'types'.
"""
_skip_types = 'OegG'
# Allowed deviation between Numpy and Numba results
_ulps = {('arccos', 'F'): 2,
('arcsin', 'D'): 4,
('arcsin', 'F'): 4,
('log10', 'D'): 5,
('tanh', 'F'): 2,
('cbrt', 'd'): 2,
('logaddexp2', 'd'): 2,
}
def _arg_for_type(self, a_letter_type, index=0):
"""return a suitable array argument for testing the letter type"""
# Note all possible arrays must have the same size, since they
# may be used as inputs to the same func.
if a_letter_type in 'bhilq':
# an integral
return np.array([1, 4, 0, -2], dtype=a_letter_type)
if a_letter_type in 'BHILQ':
return np.array([1, 2, 4, 0], dtype=a_letter_type)
elif a_letter_type in '?':
# a boolean
return np.array([True, False, False, True], dtype=a_letter_type)
elif a_letter_type[0] == 'm':
# timedelta64
if len(a_letter_type) == 1:
a_letter_type = 'm8[D]'
return np.array([2, -3, 'NaT', 0], dtype=a_letter_type)
elif a_letter_type[0] == 'M':
# datetime64
if len(a_letter_type) == 1:
a_letter_type = 'M8[D]'
return np.array(['Nat', 1, 25, 0], dtype=a_letter_type)
elif a_letter_type in 'fd':
# floating point
return np.array([1.5, -3.5, 0.0, float('nan')],
dtype=a_letter_type)
elif a_letter_type in 'FD':
# complex
if sys.platform != 'win32':
# Other platforms have better handling of negative zeros,
# test them
negzero = -(0.0 + 1.0j)
else:
negzero = 0.0 - 1.0j
return np.array([negzero, 1.5 + 1.5j, 1j * float('nan'), 0j],
dtype=a_letter_type)
else:
raise RuntimeError("type %r not understood" % (a_letter_type,))
def _check_loop(self, fn, ufunc, loop):
# the letter types for the args
letter_types = loop[:ufunc.nin] + loop[-ufunc.nout:]
# ignore the loops containing an object argument. They will always
# fail in no python mode. Usually the last loop in ufuncs is an all
# object fallback
supported_types = getattr(self, '_supported_types', [])
if (supported_types and
any(l not in supported_types for l in letter_types)):
return
skip_types = getattr(self, '_skip_types', [])
if any(l in skip_types for l in letter_types):
return
# if the test case requires some types to be present, skip loops
# not involving any of those types.
required_types = getattr(self, '_required_types', [])
if required_types and not any(l in letter_types
for l in required_types):
return
self._check_ufunc_with_dtypes(fn, ufunc, letter_types)
    def _check_ufunc_with_dtypes(self, fn, ufunc, dtypes):
        """Compile *fn* for the given dtype letters and compare its output
        against the pure-Python (NumPy) execution, within the per-loop ULP
        tolerances declared in ``_ulps``.
        """
        # Arrays created with datetime and timedelta types (e.g. with np.array)
        # will have units, so in order to ensure that the dtypes of arguments
        # match the dtypes in the signature, we add units to unitless datetime
        # and timedelta types. This corresponds with the addition of units in
        # _arg_for_type() above.
        dtypes_with_units = []
        for t in dtypes:
            if t in ('m', 'M'):
                t = t + '8[D]'
            dtypes_with_units.append(t)
        arg_dty = [np.dtype(t) for t in dtypes_with_units]
        arg_nbty = tuple([types.Array(from_dtype(t), 1, 'C') for t in arg_dty])
        cfunc = njit(arg_nbty)(fn)
        # Ensure a good mix of input values
        c_args = [self._arg_for_type(t, index=index).repeat(2)
                  for index, t in enumerate(dtypes)]
        for arr in c_args:
            self.random.shuffle(arr)
        # Keep independent copies so the compiled and interpreted runs cannot
        # observe each other's in-place writes.
        py_args = [a.copy() for a in c_args]
        cfunc(*c_args)
        fn(*py_args)
        # Check each array (including inputs, to ensure they weren't
        # mutated).
        for dtype, py_arg, c_arg in zip(arg_dty, py_args, c_args):
            py_arg, c_arg = self._fixup_results(dtype, py_arg, c_arg)
            typechar = c_arg.dtype.char
            # Default tolerance is 1 ULP unless _ulps overrides it.
            ulps = self._ulps.get((ufunc.__name__, typechar), 1)
            # Single precision for f/F, double for d/D, exact otherwise.
            prec = 'single' if typechar in 'fF' else 'exact'
            prec = 'double' if typechar in 'dD' else prec
            msg = '\n'.join(["ufunc '{0}' arrays differ ({1}):",
                             "args: {2}", "expected {3}", "got {4}"])
            msg = msg.format(ufunc.__name__, c_args, prec, py_arg, c_arg)
            self.assertPreciseEqual(py_arg, c_arg, prec=prec, msg=msg,
                                    ulps=ulps)
    def _fixup_results(self, dtype, py_arg, c_arg):
        # Hook for subclasses to normalize results before comparison;
        # the default implementation is the identity.
        return py_arg, c_arg
@classmethod
def _check_ufunc_loops(cls, ufunc):
for loop in ufunc.types:
cls._inject_test(ufunc, loop)
    @classmethod
    def _inject_test(cls, ufunc, loop):
        # Build a test method closed over this (ufunc, loop) pair and attach
        # it to the class under a descriptive name, e.g. 'test_add_ff_f'.
        def test_template(self):
            fn = _make_ufunc_usecase(ufunc)
            self._check_loop(fn, ufunc, loop)
        setattr(cls, "test_{0}_{1}".format(ufunc.__name__,
                                           loop.replace('->', '_')),
                test_template)
@classmethod
def autogenerate(cls):
for ufunc in cls._ufuncs:
cls._check_ufunc_loops(ufunc)
| _LoopTypesTester |
python | eth-brownie__brownie | brownie/network/contract.py | {
"start": 31794,
"end": 47719
} | class ____(_DeployedContractBase):
"""
Object to interact with a deployed contract outside of a project.
"""
    def __init__(
        self,
        address_or_alias: HexAddress | ContractName,
        *args: Any,
        owner: Optional[AccountsType] = None,
        **kwargs: Any,
    ) -> None:
        """
        Recreate a `Contract` object from the local database.

        The init method is used to access deployments that have already previously
        been stored locally. For new deployments use `from_abi` or `from_etherscan`.

        Arguments
        ---------
        address_or_alias : str
            Address or user-defined alias of the deployment.
        owner : Account, optional
            Contract owner. If set, transactions without a `from` field
            will be performed using this account.
        """
        address_or_alias = address_or_alias.strip()
        # Extra positional/keyword arguments indicate the pre-`from_abi`
        # constructor signature; delegate to the legacy path and warn.
        if args or kwargs:
            warnings.warn(
                "Initializing `Contract` in this manner is deprecated." " Use `from_abi` instead.",
                DeprecationWarning,
            )
            kwargs["owner"] = owner
            # Returning here is fine: _deprecated_init returns None, so this
            # does not violate the __init__ must-return-None rule.
            return self._deprecated_init(address_or_alias, *args, **kwargs)
        address = ""
        # First interpret the input as an address; on any failure fall back
        # to looking it up as an alias.
        try:
            address = _resolve_address(address_or_alias)
            build, sources = _get_deployment(address)
        except Exception:
            build, sources = _get_deployment(alias=address_or_alias)
        if build is not None:
            address = build["address"]
        if build is None or sources is None:
            # Not stored locally. Only fetch from a block explorer when we
            # have a resolvable address AND autofetch is configured.
            if (
                not address
                or not CONFIG.settings.get("autofetch_sources")
                or not CONFIG.active_network.get("explorer")
            ):
                if not address:
                    raise ValueError(f"Unknown alias: '{address_or_alias}'")
                else:
                    raise ValueError(f"Unknown contract address: '{address}'")
            contract = self.from_explorer(address, owner=owner, silent=True)
            build, sources = contract._build, contract._sources
            address = contract.address
        _ContractBase.__init__(self, None, build, sources)
        _DeployedContractBase.__init__(self, address, owner)
def _deprecated_init(
self,
name: ContractName,
address: Optional[HexAddress] = None,
abi: Optional[List[ABIElement]] = None,
manifest_uri: Optional[str] = None,
owner: Optional[AccountsType] = None,
) -> None:
if manifest_uri:
raise ValueError("ethPM functionality removed")
if not address:
raise TypeError("Address cannot be None unless creating object from manifest")
build = {"abi": abi, "contractName": name, "type": "contract"}
_ContractBase.__init__(self, None, build, {})
_DeployedContractBase.__init__(self, address, owner, None)
@classmethod
def from_abi(
cls,
name: ContractName,
address: HexAddress,
abi: List[ABIElement],
owner: Optional[AccountsType] = None,
persist: bool = True,
) -> "Contract":
"""
Create a new `Contract` object from an ABI.
Arguments
---------
name : str
Name of the contract.
address : str
Address where the contract is deployed.
abi : dict
Contract ABI, given as a dictionary.
owner : Account, optional
Contract owner. If set, transactions without a `from` field
will be performed using this account.
"""
address = _resolve_address(address)
build = {
"abi": abi,
"address": address,
"contractName": name,
"type": "contract",
# removeprefix is used for compatibility with both hexbytes<1 and >=1
"deployedBytecode": web3.eth.get_code(address).hex().removeprefix("0x"),
}
self = cls.__new__(cls)
_ContractBase.__init__(self, None, build, {})
_DeployedContractBase.__init__(self, address, owner, None)
if persist:
_add_deployment(self)
return self
    @classmethod
    def from_explorer(
        cls,
        address: HexAddress,
        as_proxy_for: Optional[str] = None,
        owner: Optional[AccountsType] = None,
        silent: bool = False,
        persist: bool = True,
    ) -> "Contract":
        """
        Create a new `Contract` object with source code queried from a block explorer.

        Arguments
        ---------
        address : str
            Address where the contract is deployed.
        as_proxy_for : str, optional
            Address of the implementation contract, if `address` is a proxy contract.
            The generated object will send transactions to `address`, but use the ABI
            and NatSpec of `as_proxy_for`. This field is only required when the
            block explorer API does not provide an implementation address.
        owner : Account, optional
            Contract owner. If set, transactions without a `from` field will be
            performed using this account.
        silent : bool, optional
            If True, suppress the warnings emitted for unverified sources,
            uninstallable compilers, and bytecode mismatches.
        persist : bool, optional
            Whether to store the deployment in the local database.
        """
        address = _resolve_address(address)
        data = _fetch_from_explorer(address, "getsourcecode", silent)
        # An empty "SourceCode" field means the contract is not verified.
        is_verified = bool(data["result"][0].get("SourceCode"))
        if is_verified:
            abi = ujson_loads(data["result"][0]["ABI"])
            name = data["result"][0]["ContractName"]
        else:
            # if the source is not available, try to fetch only the ABI
            try:
                data_abi = _fetch_from_explorer(address, "getabi", True)
            except ValueError as exc:
                _unverified_addresses.add(address)
                raise exc
            abi = ujson_loads(data_abi["result"].strip())
            name = "UnknownContractName"
            if not silent:
                warnings.warn(
                    f"{address}: Was able to fetch the ABI but not the source code. "
                    "Some functionality will not be available.",
                    BrownieCompilerWarning,
                )
        if as_proxy_for is None:
            # always check for an EIP1967 proxy - https://eips.ethereum.org/EIPS/eip-1967
            implementation_eip1967 = web3.eth.get_storage_at(
                address, int(web3.keccak(text="eip1967.proxy.implementation").hex(), 16) - 1
            )
            # always check for an EIP1822 proxy - https://eips.ethereum.org/EIPS/eip-1822
            implementation_eip1822 = web3.eth.get_storage_at(address, web3.keccak(text="PROXIABLE"))
            # A non-zero storage slot means the proxy pattern is in use; the
            # implementation address is the last 20 bytes of the slot value.
            if len(implementation_eip1967) > 0 and int(implementation_eip1967.hex(), 16):
                as_proxy_for = _resolve_address(implementation_eip1967[-20:])
            elif len(implementation_eip1822) > 0 and int(implementation_eip1822.hex(), 16):
                as_proxy_for = _resolve_address(implementation_eip1822[-20:])
            elif data["result"][0].get("Implementation"):
                # for other proxy patterns, we only check if etherscan indicates
                # the contract is a proxy. otherwise we could have a false positive
                # if there is an `implementation` method on a regular contract.
                try:
                    # first try to call `implementation` per EIP897
                    # https://eips.ethereum.org/EIPS/eip-897
                    contract = cls.from_abi(name, address, abi)
                    as_proxy_for = contract.implementation.call()
                except Exception:
                    # if that fails, fall back to the address provided by etherscan
                    as_proxy_for = _resolve_address(data["result"][0]["Implementation"])
        if as_proxy_for == address:
            as_proxy_for = None
        # if this is a proxy, fetch information for the implementation contract
        if as_proxy_for is not None:
            implementation_contract = Contract.from_explorer(as_proxy_for)
            abi = implementation_contract._build["abi"]
        if not is_verified:
            return cls.from_abi(name, address, abi, owner)
        compiler_str = data["result"][0]["CompilerVersion"]
        # Determine whether we can locally install the compiler that the
        # explorer reports; otherwise fall back to an ABI-only object.
        if compiler_str.startswith("vyper:"):
            try:
                version = to_vyper_version(compiler_str[6:])
                is_compilable = version in get_installable_vyper_versions()
            except Exception:
                is_compilable = False
        else:
            try:
                version = cls.get_solc_version(compiler_str, address)
                is_compilable = (
                    version >= Version("0.4.22")
                    and version
                    in solcx.get_installable_solc_versions() + solcx.get_installed_solc_versions()
                )
            except Exception:
                is_compilable = False
        if not is_compilable:
            if not silent:
                warnings.warn(
                    f"{address}: target compiler '{compiler_str}' cannot be installed or is not "
                    "supported by Brownie. Some debugging functionality will not be available.",
                    BrownieCompilerWarning,
                )
            return cls.from_abi(name, address, abi, owner)
        elif data["result"][0]["OptimizationUsed"] in ("true", "false"):
            # presumably a Blockscout-style response ("true"/"false" instead
            # of "1"/"0") - TODO confirm against a live Blockscout endpoint
            if not silent:
                warnings.warn(
                    "Blockscout explorer API has limited support by Brownie. "  # noqa
                    "Some debugging functionality will not be available.",
                    BrownieCompilerWarning,
                )
            return cls.from_abi(name, address, abi, owner)
        optimizer = {
            "enabled": bool(int(data["result"][0]["OptimizationUsed"])),
            "runs": int(data["result"][0]["Runs"]),
        }
        evm_version = data["result"][0].get("EVMVersion", "Default")
        if evm_version == "Default":
            evm_version = None
        # Normalize line endings before inspecting the source format.
        source_str = "\n".join(data["result"][0]["SourceCode"].splitlines())
        try:
            if source_str.startswith("{{"):
                # source was verified using compiler standard JSON
                # (double-braced payload - strip the outer brace pair)
                input_json = ujson_loads(source_str[1:-1])
                sources = {k: v["content"] for k, v in input_json["sources"].items()}
                evm_version = input_json["settings"].get("evmVersion", evm_version)
                remappings = input_json["settings"].get("remappings", [])
                compiler.set_solc_version(str(version))
                input_json.update(
                    compiler.generate_input_json(
                        sources, optimizer=optimizer, evm_version=evm_version, remappings=remappings
                    )
                )
                output_json = compiler.compile_from_input_json(input_json)
                build_json = compiler.generate_build_json(input_json, output_json)
            else:
                if source_str.startswith("{"):
                    # source was submitted as multiple files
                    sources = {k: v["content"] for k, v in ujson_loads(source_str).items()}
                else:
                    # source was submitted as a single file
                    if compiler_str.startswith("vyper"):
                        path_str = f"{name}.vy"
                    else:
                        path_str = f"{name}-flattened.sol"
                    sources = {path_str: source_str}
                build_json = compiler.compile_and_format(
                    sources,
                    solc_version=str(version),
                    vyper_version=str(version),
                    optimizer=optimizer,
                    evm_version=evm_version,
                )
        except Exception as e:
            # Compilation is best-effort: degrade to an ABI-only object
            # rather than failing entirely.
            if not silent:
                warnings.warn(
                    f"{address}: Compilation failed due to {type(e).__name__}. Falling back to ABI,"
                    " some functionality will not be available.",
                    BrownieCompilerWarning,
                )
            return cls.from_abi(name, address, abi, owner)
        build_json = build_json[name]
        if as_proxy_for is not None:
            build_json.update(abi=abi, natspec=implementation_contract._build.get("natspec"))
        # If the locally compiled bytecode differs from what is on-chain, the
        # pcMap would be misleading for debugging - drop it.
        if not _verify_deployed_code(
            address, build_json["deployedBytecode"], build_json["language"]
        ):
            if not silent:
                warnings.warn(
                    f"{address}: Locally compiled and on-chain bytecode do not match!",
                    BrownieCompilerWarning,
                )
            del build_json["pcMap"]
        self = cls.__new__(cls)
        _ContractBase.__init__(self, None, build_json, sources)
        _DeployedContractBase.__init__(self, address, owner)
        if persist:
            _add_deployment(self)
        return self
@classmethod
def get_solc_version(cls, compiler_str: str, address: str) -> Version:
"""
Return the solc compiler version either from the passed compiler string
or try to find the latest available patch semver compiler version.
Arguments
---------
compiler_str: str
The compiler string passed from the contract metadata.
address: str
The contract address to check for.
"""
version = Version(compiler_str.lstrip("v")).truncate()
compiler_config = _load_project_compiler_config(Path(os.getcwd()))
solc_config = compiler_config["solc"]
if "use_latest_patch" in solc_config:
use_latest_patch = solc_config["use_latest_patch"]
needs_patch_version = False
if isinstance(use_latest_patch, bool):
needs_patch_version = use_latest_patch
elif isinstance(use_latest_patch, list):
needs_patch_version = address in use_latest_patch
if needs_patch_version:
versions = [Version(str(i)) for i in solcx.get_installable_solc_versions()]
for v in filter(lambda x: x < version.next_minor(), versions):
if v > version:
version = v
return version
@classmethod
def remove_deployment(
cls,
address: Optional[ChecksumAddress] = None,
alias: Optional[ContractName] = None,
) -> Tuple[Optional[Dict], Optional[Dict]]:
"""
Removes this contract from the internal deployments db
with the passed address or alias.
Arguments
---------
address: str | None
An address to apply
alias: str | None
An alias to apply
"""
return _remove_deployment(address, alias)
def set_alias(self, alias: Optional[str], persist: bool = True) -> None:
"""
Apply a unique alias this object. The alias can be used to restore the
object in future sessions.
Arguments
---------
alias: str | None
An alias to apply. If `None`, any existing alias is removed.
"""
if "chainid" not in CONFIG.active_network:
raise ValueError("Cannot set aliases in a development environment")
if alias is not None:
if "." in alias or alias.lower().startswith("0x"):
raise ValueError("Invalid alias")
build, _ = _get_deployment(alias=alias)
if build is not None:
if build["address"] != self.address:
raise ValueError("Alias is already in use on another contract")
return
if persist:
_add_deployment(self, alias)
self._build["alias"] = alias
@property
def alias(self) -> Optional[str]:
return self._build.get("alias")
| Contract |
python | tensorflow__tensorflow | tensorflow/python/ops/nn_test.py | {
"start": 5652,
"end": 7196
} | class ____(test_lib.TestCase):
def _log_poisson_loss(self, x, z, compute_full_loss=False):
lpl = np.exp(x) - z * x
if compute_full_loss:
stirling_approx = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z)
lpl += np.ma.masked_array(stirling_approx, mask=(z <= 1)).filled(0.)
return lpl
def testLogPoissonLoss(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float32)
z_np = np.random.randint(0, 5, size=x_shape).astype(np.float32)
y_np = self._log_poisson_loss(x_np, z_np, compute_full_loss=False)
y_np_stirling = self._log_poisson_loss(x_np, z_np, compute_full_loss=True)
y_tf = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=False)
y_tf_stirling = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=True)
y_tf_np = self.evaluate(y_tf)
y_tf_np_stirling = self.evaluate(y_tf_stirling)
eps = 1e-3
self.assertAllClose(y_tf_np, y_np, eps)
self.assertAllClose(y_tf_np_stirling, y_np_stirling, eps)
def testGradient(self):
x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float64)
z_np = np.random.randint(0, 5, size=x_shape).astype(np.float64)
with self.cached_session():
x_tf = constant_op.constant(x_np)
# TODO(b/241834841): Test with `compute_full_loss` set as True
theoretical, numerical = gradient_checker_v2.compute_gradient(
nn_impl.log_poisson_loss, [z_np, x_tf])
self.assertAllClose(theoretical, numerical)
@test_util.run_all_in_graph_and_eager_modes
| LogPoissonLossTest |
python | django-crispy-forms__django-crispy-forms | crispy_forms/bootstrap.py | {
"start": 26404,
"end": 27918
} | class ____(Container):
"""
Accordion Group (pane) object. It wraps given fields inside an accordion
tab. It takes accordion tab name as first argument.
Tab object. It wraps fields in a div whose default class is "tab-pane" and
takes a name as first argument.
Attributes
----------
template : str
The default template which this Layout Object will be rendered
with.
css_class : str, optional
CSS classes to be applied to the ``<div>``. By default "".
Parameters
----------
name : str
The name of the container.
*fields : str, LayoutObject
Any number of fields as positional arguments to be rendered within
the container.
css_id : str, optional
A DOM id for the layout object which will be added to the ``<div>`` if
provided. By default None.
css_class : str, optional
Additional CSS classes to be applied in addition to those declared by
the class itself. By default None.
template : str, optional
Overrides the default template, if provided. By default None.
**kwargs : dict, optional
Additional attributes are passed to ``flatatt`` and converted into
key="value", pairs. These attributes are added to the ``<div>``.
Examples
--------
Example::
AccordionGroup("group name", "form_field_1", "form_field_2")
"""
template = "%s/accordion-group.html"
data_parent = "" # accordion parent div id.
| AccordionGroup |
python | Lightning-AI__lightning | tests/tests_pytorch/strategies/test_ddp_integration.py | {
"start": 10037,
"end": 12663
} | class ____(BoringModel):
def configure_optimizers(self):
return ZeroRedundancyOptimizer(self.layer.parameters(), optimizer_class=torch.optim.Adam, lr=0.1)
# ZeroRedundancyOptimizer internally calls `torch.load` with `weights_only` not set, triggering the FutureWarning
@pytest.mark.filterwarnings("ignore::FutureWarning")
@RunIf(min_cuda_gpus=2, skip_windows=True)
@pytest.mark.parametrize("strategy", [pytest.param("ddp", marks=RunIf(standalone=True)), "ddp_spawn"])
def test_ddp_strategy_checkpoint_zero_redundancy_optimizer(strategy, tmp_path):
"""Test to ensure that checkpoint is saved correctly when using zero redundancy optimizer."""
model = BoringZeroRedundancyOptimizerModel()
trainer = Trainer(default_root_dir=tmp_path, accelerator="gpu", devices=2, strategy=strategy, max_steps=1)
trainer.fit(model)
checkpoint_path = os.path.join(tmp_path, "model.pt")
# need to broadcast because tmp_path is different on each process
checkpoint_path = trainer.strategy.broadcast(checkpoint_path)
trainer.save_checkpoint(checkpoint_path)
saved_model = BoringModel.load_from_checkpoint(checkpoint_path)
# Assert model parameters are identical after loading
for trained_param, loaded_param in zip(model.parameters(), saved_model.parameters()):
assert torch.equal(trained_param.to("cpu"), loaded_param)
def test_configure_launcher_create_processes_externally():
class MyClusterEnvironment(ClusterEnvironment):
@property
def creates_processes_externally(self):
return True
@property
def main_address(self):
return ""
@property
def main_port(self):
return 8080
@staticmethod
def detect():
return True
def world_size(self):
return 1
def set_world_size(self):
pass
def global_rank(self):
return 0
def set_global_rank(self):
pass
def local_rank(self):
return 0
def node_rank(self):
return 0
ddp_strategy = DDPStrategy(cluster_environment=MyClusterEnvironment(), parallel_devices=[torch.device("cpu")])
assert ddp_strategy.launcher is None
ddp_strategy._configure_launcher()
assert isinstance(ddp_strategy.launcher, _SubprocessScriptLauncher)
ddp_strategy.launcher._call_children_scripts = Mock()
launch_fn = Mock()
ddp_strategy.launcher.launch(launch_fn)
ddp_strategy.launcher._call_children_scripts.assert_not_called()
launch_fn.assert_called_once()
| BoringZeroRedundancyOptimizerModel |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0087_relink_crons_to_compatible_issue_workflows.py | {
"start": 1996,
"end": 13585
} | class ____:
"""Represents a workflow with all its conditions and actions."""
workflow: Any
project_id: int
environment_id: int | None
frequency: int | None
when_conditions: tuple[ConditionData, ...] = field(default_factory=tuple)
action_groups: tuple[ActionGroupData, ...] = field(default_factory=tuple)
def is_compatible_for_monitor(self, monitor: Any) -> bool:
"""Check if all conditions are compatible with cron events for this monitor."""
for condition in self.when_conditions:
if not self._is_condition_compatible(condition, monitor):
return False
for group in self.action_groups:
for condition in group.conditions:
if not self._is_condition_compatible(condition, monitor):
return False
return True
def _is_condition_compatible(self, condition: ConditionData, monitor: Any) -> bool:
"""Check if a condition is compatible with cron events for this monitor."""
if condition.type == "assigned_to":
target_type = condition.comparison.get("target_type")
target_identifier = condition.comparison.get("target_identifier")
if target_type == "Team" and monitor.owner_team_id:
return target_identifier == monitor.owner_team_id
elif target_type == "Member" and monitor.owner_user_id:
return target_identifier == monitor.owner_user_id
elif target_type == "Unassigned":
# Unassigned is compatible if monitor has no owner
return monitor.owner_team_id is None and monitor.owner_user_id is None
else:
return False
if condition.type == "issue_category":
return condition.comparison.get("value") == CRON_GROUP_CATEGORY
return condition.type in ALLOWED_CONDITIONS
def get_hash(self) -> str:
"""Get a unique hash for deduplication."""
hash_data = {
"project_id": self.project_id,
"environment_id": self.environment_id,
"frequency": self.frequency,
"when_conditions": [
c.to_dict()
for c in sorted(
self.when_conditions,
key=lambda c: (c.type, json.dumps(c.comparison, sort_keys=True)),
)
],
"action_groups": [
{
"conditions": [
c.to_dict()
for c in sorted(
g.conditions,
key=lambda c: (c.type, json.dumps(c.comparison, sort_keys=True)),
)
],
"actions": [
a.to_dict()
for a in sorted(
g.actions, key=lambda a: (a.type, json.dumps(a.config, sort_keys=True))
)
],
}
for g in sorted(
self.action_groups, key=lambda g: json.dumps(g.to_dict(), sort_keys=True)
)
],
}
return json.dumps(hash_data, sort_keys=True)
def build_workflow_data(
workflow: Any,
rule_id: int,
rules_by_id: dict[int, Any],
when_conditions: list[Any],
action_groups_data: list[tuple[list[Any], list[Any]]],
) -> WorkflowData | None:
"""Build a WorkflowData object from a workflow and its related data."""
rule = rules_by_id.get(rule_id)
if not rule:
return None
when_condition_list = []
for condition in when_conditions:
when_condition_list.append(
ConditionData(
type=condition.type,
comparison=condition.comparison,
result=condition.condition_result,
)
)
action_group_list = []
for group_conditions, group_actions in action_groups_data:
conditions = tuple(
ConditionData(
type=c.type,
comparison=c.comparison,
result=c.condition_result,
)
for c in group_conditions
)
actions = tuple(
ActionData(
type=a.type,
config=a.config,
)
for a in group_actions
)
action_group_list.append(ActionGroupData(conditions=conditions, actions=actions))
return WorkflowData(
workflow=workflow,
project_id=rule.project_id,
environment_id=workflow.environment_id,
frequency=rule.data.get("frequency", 30),
when_conditions=tuple(when_condition_list),
action_groups=tuple(action_group_list),
)
def link_crons_to_compatible_issue_workflows(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
"""
Re-link cron detectors to compatible issue workflows after migration 0086.
This migration:
1. Filters issue workflows to only those with compatible conditions for cron events
2. Deduplicates workflows with identical conditions and actions
3. Links cron detectors to the unique, compatible workflows
4. For assigned_to conditions, only allows them if they match the monitor's owner
"""
Detector = apps.get_model("workflow_engine", "Detector")
DetectorWorkflow = apps.get_model("workflow_engine", "DetectorWorkflow")
Workflow = apps.get_model("workflow_engine", "Workflow")
Rule = apps.get_model("sentry", "Rule")
DataCondition = apps.get_model("workflow_engine", "DataCondition")
WorkflowDataConditionGroup = apps.get_model("workflow_engine", "WorkflowDataConditionGroup")
DataConditionGroupAction = apps.get_model("workflow_engine", "DataConditionGroupAction")
AlertRuleWorkflow = apps.get_model("workflow_engine", "AlertRuleWorkflow")
DataSourceDetector = apps.get_model("workflow_engine", "DataSourceDetector")
Monitor = apps.get_model("monitors", "Monitor")
cron_detectors_all = Detector.objects.filter(type="monitor_check_in_failure")
detectors_by_project = defaultdict(list)
for detector in cron_detectors_all:
detectors_by_project[detector.project_id].append(detector)
if not detectors_by_project:
logger.info("No cron detectors found, skipping migration")
return
data_source_detectors = DataSourceDetector.objects.filter(
detector__type="monitor_check_in_failure", data_source__type="cron_monitor"
).select_related("data_source")
detector_to_monitor_id = {}
for dsd in data_source_detectors:
detector_to_monitor_id[dsd.detector_id] = int(dsd.data_source.source_id)
monitors_by_id = {m.id: m for m in Monitor.objects.all()}
total_links_created = 0
total_projects_processed = 0
for project_id, cron_detectors in detectors_by_project.items():
project_rules = list(Rule.objects.filter(project_id=project_id, source=0))
if not project_rules:
continue
rule_ids = [r.id for r in project_rules]
rules_by_id = {r.id: r for r in project_rules}
issue_workflows = list(
Workflow.objects.filter(alertruleworkflow__rule_id__in=rule_ids)
.select_related("when_condition_group")
.prefetch_related(
Prefetch(
"when_condition_group__conditions",
queryset=DataCondition.objects.all(),
to_attr="prefetched_when_conditions",
),
Prefetch(
"workflowdataconditiongroup_set",
queryset=WorkflowDataConditionGroup.objects.select_related(
"condition_group"
).prefetch_related(
Prefetch(
"condition_group__conditions",
queryset=DataCondition.objects.all(),
to_attr="prefetched_conditions",
),
Prefetch(
"condition_group__dataconditiongroupaction_set",
queryset=DataConditionGroupAction.objects.select_related("action"),
to_attr="prefetched_actions",
),
),
to_attr="prefetched_action_groups",
),
Prefetch(
"alertruleworkflow_set",
queryset=AlertRuleWorkflow.objects.all(),
to_attr="prefetched_rule_workflows",
),
)
.distinct()
)
if not issue_workflows:
continue
workflow_data_list = []
for workflow in issue_workflows:
if not workflow.prefetched_rule_workflows:
continue
rule_workflow = workflow.prefetched_rule_workflows[0]
when_conditions = []
if workflow.when_condition_group:
when_conditions = workflow.when_condition_group.prefetched_when_conditions
action_groups_data = []
for wdcg in workflow.prefetched_action_groups:
group_conditions = wdcg.condition_group.prefetched_conditions
group_actions = [ga.action for ga in wdcg.condition_group.prefetched_actions]
action_groups_data.append((group_conditions, group_actions))
workflow_data = build_workflow_data(
workflow, rule_workflow.rule_id, rules_by_id, when_conditions, action_groups_data
)
if workflow_data:
workflow_data_list.append(workflow_data)
if not workflow_data_list:
continue
# Link cron detectors to compatible workflows in this project
project_links_created = 0
for detector in cron_detectors:
monitor_id = detector_to_monitor_id.get(detector.id)
if not monitor_id:
continue
monitor = monitors_by_id.get(monitor_id)
if not monitor:
continue
compatible_workflows = []
for wd in workflow_data_list:
if wd.is_compatible_for_monitor(monitor):
compatible_workflows.append(wd)
hash_to_workflow_data = {}
for wd in compatible_workflows:
workflow_hash = wd.get_hash()
if workflow_hash not in hash_to_workflow_data:
hash_to_workflow_data[workflow_hash] = wd
for wd in hash_to_workflow_data.values():
_, created = DetectorWorkflow.objects.get_or_create(
detector=detector,
workflow=wd.workflow,
)
if created:
project_links_created += 1
if project_links_created > 0:
logger.info(
"Processed project",
extra={
"project_id": project_id,
"links_created": project_links_created,
"total_workflows": len(workflow_data_list),
"cron_detectors": len(cron_detectors),
},
)
total_links_created += project_links_created
total_projects_processed += 1
logger.info(
"Migration complete",
extra={
"total_links_created": total_links_created,
"projects_processed": total_projects_processed,
},
)
| WorkflowData |
python | scrapy__scrapy | tests/test_extension_periodic_log.py | {
"start": 1785,
"end": 2127
} | class ____(PeriodicLog):
def set_a(self):
self.stats._stats = stats_dump_1
def set_b(self):
self.stats._stats = stats_dump_2
def extension(settings: dict[str, Any] | None = None) -> CustomPeriodicLog:
crawler = get_crawler(MetaSpider, settings)
return CustomPeriodicLog.from_crawler(crawler)
| CustomPeriodicLog |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/black/cases/class_blank_parentheses.py | {
"start": 423,
"end": 541
} | class ____ (
):
def func_for_testing(self, first, second):
sum = first + second
return sum
| NormalClass |
python | tensorflow__tensorflow | tensorflow/python/profiler/model_analyzer_test.py | {
"start": 1750,
"end": 31972
} | class ____(test.TestCase):
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def testDumpToFile(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = builder(builder.trainable_variables_parameter()).with_file_output(
outfile).build()
with session.Session(config=self._no_rewrite_session_config()) as sess:
_ = lib.BuildSmallModel()
model_analyzer.profile(sess.graph, options=opts)
with gfile.Open(outfile, 'r') as f:
self.assertEqual(
u'node name | # parameters\n'
'_TFProfRoot (--/451 params)\n'
' DW (3x3x3x6, 162/162 params)\n'
' DW2 (2x2x6x12, 288/288 params)\n'
' ScalarW (1, 1/1 params)\n', lib.CheckAndRemoveDoc(f.read()))
@test_util.run_v1_only('b/120545219')
def testSelectEverythingDetail(self):
ops.reset_default_graph()
dev = '/device:GPU:0' if test.is_gpu_available() else '/device:CPU:0'
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (
builder(builder.trainable_variables_parameter()).with_file_output(
outfile).with_accounted_types(['.*']).select([
'micros', 'bytes', 'params', 'float_ops', 'occurrence',
'device', 'op_types', 'input_shapes'
]).build())
with profile_context.ProfileContext(
test.get_temp_dir(), trace_steps=[], dump_steps=[]) as pctx:
with session.Session(
config=self._no_rewrite_session_config()) as sess, ops.device(dev):
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
pctx.trace_next_step()
pctx.dump_next_step()
_ = self.evaluate(x)
pctx.profiler.profile_name_scope(options=opts)
with gfile.Open(outfile, 'r') as f:
# pylint: disable=line-too-long
dump_str = lib.CheckAndRemoveDoc(f.read())
outputs = dump_str.split('\n')
self.assertEqual(
outputs[0],
'node name | # parameters | # float_ops | requested bytes | total execution time | accelerator execution time | cpu execution time | assigned devices | op types | op count (run|defined) | input shapes'
)
for o in outputs[1:]:
if o.find('Conv2D ') > 0:
metrics = o[o.find('(') + 1:o.find(')')].split(',')
# Make sure time is profiled.
gap = 1 if test.is_gpu_available() else 2
for i in range(3, 6, gap):
mat = re.search('(.*)(?:us|ms|sec)/(.*)(?:us|ms|sec)',
metrics[i])
self.assertGreater(float(mat.group(1)), 0.0)
self.assertGreater(float(mat.group(2)), 0.0)
# Make sure device is profiled.
if test.is_gpu_available():
self.assertTrue(metrics[6].find('gpu') > 0)
self.assertFalse(metrics[6].find('cpu') > 0)
else:
self.assertFalse(metrics[6].find('gpu') > 0)
self.assertTrue(metrics[6].find('cpu') > 0)
# Make sure float_ops is profiled.
mat = re.search('(.*)k/(.*)k flops', metrics[1].strip())
self.assertGreater(float(mat.group(1)), 0.0)
self.assertGreater(float(mat.group(2)), 0.0)
# Make sure op_count is profiled.
self.assertEqual(metrics[8].strip(), '1/1|1/1')
# Make sure input_shapes is profiled.
self.assertEqual(metrics[9].strip(), '0:2x6x6x3|1:3x3x3x6')
if o.find('DW (3x3x3x6') > 0:
metrics = o[o.find('(') + 1:o.find(')')].split(',')
mat = re.search('(.*)/(.*) params', metrics[1].strip())
self.assertGreater(float(mat.group(1)), 0.0)
self.assertGreater(float(mat.group(2)), 0.0)
# pylint: enable=line-too-long
# Test that profiler restored from profile file gives the same result.
gfile.Remove(outfile)
profile_file = os.path.join(test.get_temp_dir(), 'profile_1')
with lib.ProfilerFromFile(profile_file) as profiler:
profiler.profile_name_scope(options=opts)
with gfile.Open(outfile, 'r') as f:
self.assertEqual(dump_str, lib.CheckAndRemoveDoc(f.read()))
def testSelectEverything(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (
builder(builder.trainable_variables_parameter()).with_file_output(
outfile).with_accounted_types(['.*']).select([
'params', 'float_ops', 'occurrence', 'device', 'op_types',
'input_shapes'
]).build())
with session.Session(config=self._no_rewrite_session_config()
) as sess, ops.device('/device:CPU:0'):
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
model_analyzer.profile(sess.graph, run_meta, options=opts)
def testSimpleCodeView(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
# TODO(xpan): Test 'micros'. Since the execution time changes each run,
# it's a bit difficult to test it now.
opts = (
builder(builder.trainable_variables_parameter()).with_file_output(
outfile).with_accounted_types(['.*']).with_node_names(
show_name_regexes=['.*model_analyzer_testlib.*'
]).account_displayed_op_only(False).select([
'bytes', 'params', 'float_ops',
'num_hidden_ops', 'device', 'input_shapes'
]).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
model_analyzer.profile(sess.graph, run_meta, cmd='code', options=opts)
with gfile.Open(outfile, 'r') as f:
# pylint: disable=line-too-long
self.assertEqual(
'node name | requested bytes | # parameters | # float_ops | assigned devices | in',
lib.CheckAndRemoveDoc(f.read())[0:80])
# pylint: enable=line-too-long
@test_util.run_v1_only('b/120545219')
def testComplexCodeView(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (
builder(builder.trainable_variables_parameter()).with_file_output(
outfile).with_accounted_types(['.*']).with_node_names(
show_name_regexes=['.*model_analyzer_testlib.py.*'
]).account_displayed_op_only(False).select(
['params', 'float_ops']).build())
with profile_context.ProfileContext(
test.get_temp_dir(), trace_steps=[], dump_steps=[]) as pctx:
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
pctx.trace_next_step()
_ = self.evaluate(x)
tfprof_node = pctx.profiler.profile_python(options=opts)
# pylint: disable=line-too-long
with gfile.Open(outfile, 'r') as f:
lines = f.read().split('\n')
self.assertGreater(len(lines), 5)
result = '\n'.join(l[:min(len(l), 80)] for l in lines)
self.assertTrue(
compat.as_text(lib.CheckAndRemoveDoc(result)).startswith(
'node name | # parameters | # float_ops'))
self.assertLess(0, tfprof_node.total_exec_micros)
self.assertEqual(2844, tfprof_node.total_parameters)
#The graph is modified when MKL is enabled,total_float_ops will
#be different
if test_util.IsMklEnabled():
self.assertLess(101600, tfprof_node.total_float_ops)
else:
self.assertLess(145660, tfprof_node.total_float_ops)
self.assertEqual(10, len(tfprof_node.children))
self.assertEqual('_TFProfRoot', tfprof_node.name)
self.assertEqual('model_analyzer_testlib.py:59:BuildFullModel',
tfprof_node.children[0].name)
self.assertEqual(
'model_analyzer_testlib.py:59:BuildFullModel (gradient)',
tfprof_node.children[1].name)
self.assertEqual('model_analyzer_testlib.py:62:BuildFullModel',
tfprof_node.children[2].name)
self.assertEqual(
'model_analyzer_testlib.py:62:BuildFullModel (gradient)',
tfprof_node.children[3].name)
self.assertEqual('model_analyzer_testlib.py:63:BuildFullModel',
tfprof_node.children[4].name)
self.assertEqual('model_analyzer_testlib.py:63:BuildFullModel (gradient)',
tfprof_node.children[5].name)
self.assertEqual(
'model_analyzer_testlib.py:65:BuildFullModel',
tfprof_node.children[6].name)
self.assertEqual('model_analyzer_testlib.py:66:BuildFullModel',
tfprof_node.children[7].name)
# pylint: enable=line-too-long
def testCodeViewLeafGraphNode(self):
ops.reset_default_graph()
opts = (
builder(builder.trainable_variables_parameter()).with_empty_output()
.with_accounted_types(['.*']).account_displayed_op_only(False).select(
['bytes', 'params', 'float_ops', 'device']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
tfprof_node = model_analyzer.profile(
sess.graph, run_meta, cmd='code', options=opts)
leaf = tfprof_node
while leaf.children:
self.assertEqual(0, len(leaf.graph_nodes))
leaf = leaf.children[0]
self.assertEqual(1, len(leaf.graph_nodes))
def testTimeline(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'timeline')
opts = (
builder(builder.trainable_variables_parameter()).with_max_depth(100000)
.with_step(0).with_timeline_output(outfile).with_accounted_types(
['.*']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
_ = model_analyzer.profile(
sess.graph, run_meta, cmd='graph', options=opts)
with gfile.Open(outfile + '_0', 'r') as f:
# Test that a json file is created.
# TODO(xpan): tfprof Timeline isn't quite correct on Windows.
# Investigate why.
if os.name != 'nt':
self.assertLess(1000, len(f.read()))
else:
self.assertLess(1, len(f.read()))
def testOpView(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (
builder(builder.trainable_variables_parameter()).with_file_output(
outfile).with_accounted_types(
['.*']).with_min_occurrence(10).order_by('occurrence').select([
'params', 'micros', 'bytes', 'peak_bytes', 'residual_bytes',
'output_bytes', 'occurrence', 'input_shapes'
]).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
tfprof_node = model_analyzer.profile(
sess.graph, run_meta, cmd='op', options=opts)
with gfile.Open(outfile, 'r') as f:
# pylint: disable=line-too-long
self.assertEqual(
'nodename|requestedbytes|peakbytes|residualbytes|outputbytes|totalexecutiontime|acceleratorexecutiontime|cpuexecutiontime|#parameters|opoccurrence(run|defined)|inputshapes',
lib.CheckAndRemoveDoc(f.read()).replace('\t',
'').replace(' ', '')[0:170])
# pylint: enable=line-too-long
total_children = 0
last_occurrence = 1e32
input_shapes = 0
last_total_micros = tfprof_node.total_exec_micros
last_micros = tfprof_node.exec_micros
while tfprof_node.children:
for gnode in tfprof_node.graph_nodes:
input_shapes += len(gnode.input_shapes)
self.assertEqual(len(tfprof_node.children), 1)
tfprof_node = tfprof_node.children[0]
self.assertEqual(last_total_micros,
tfprof_node.total_exec_micros + last_micros)
last_total_micros = tfprof_node.total_exec_micros
last_micros = tfprof_node.exec_micros
total_children += 1
self.assertLessEqual(len(tfprof_node.graph_nodes), last_occurrence)
last_occurrence = len(tfprof_node.graph_nodes)
self.assertGreater(input_shapes, 0)
def testAdvisor(self):
ops.reset_default_graph()
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
advice_pb = model_analyzer.advise(sess.graph, run_meta)
self.assertTrue('AcceleratorUtilizationChecker' in advice_pb.checkers)
self.assertTrue('ExpensiveOperationChecker' in advice_pb.checkers)
self.assertTrue('OperationChecker' in advice_pb.checkers)
checker = advice_pb.checkers['AcceleratorUtilizationChecker']
if test.is_gpu_available():
self.assertGreater(len(checker.reports), 0)
else:
self.assertEqual(len(checker.reports), 0)
checker = advice_pb.checkers['ExpensiveOperationChecker']
self.assertGreater(len(checker.reports), 0)
def pprof_test_helper(self, attribute, should_fail=False):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), attribute + '_pprof.pb.gz')
opts = (
builder(builder.time_and_memory()).select([
attribute
]).with_max_depth(100000).with_node_names(
trim_name_regexes=['ops.py.*']).with_pprof_output(outfile).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
_ = model_analyzer.profile(sess.graph, run_meta, cmd='code', options=opts)
if should_fail:
self.assertFalse(gfile.Exists(outfile))
return
profile_pb = profile_pb2.Profile()
with gfile.Open(outfile, 'rb') as f:
with gzip.GzipFile(fileobj=io.BytesIO(f.read())) as gzipf:
profile_pb.ParseFromString(gzipf.read())
self.assertGreater(len(profile_pb.sample), 10)
self.assertGreater(len(profile_pb.location), 10)
self.assertGreater(len(profile_pb.function), 10)
self.assertGreater(len(profile_pb.string_table), 30)
has_rnn = False
for s in profile_pb.string_table:
if s.find('rnn') > 0:
has_rnn = True
self.assertFalse(s.startswith('ops.py'))
self.assertTrue(has_rnn)
def testPprof(self):
for attr in [
'micros', 'bytes', 'accelerator_micros', 'cpu_micros', 'params',
'float_ops'
]:
self.pprof_test_helper(attr)
for attr in ['op_types', 'device', 'input_shapes']:
self.pprof_test_helper(attr, True)
def testMinOption(self):
ops.reset_default_graph()
def check_min(nodes, mm=0, mam=0, mcm=0, mb=0, mpb=0, mrb=0, mob=0):
for n in nodes:
if mm > 0:
self.assertGreaterEqual(n.exec_micros, mm)
if mam > 0:
self.assertGreaterEqual(n.accelerator_exec_micros, mam)
if mcm > 0:
self.assertGreaterEqual(n.cpu_exec_micros, mcm)
if mb > 0:
self.assertGreaterEqual(n.requested_bytes, mb)
if mpb > 0:
self.assertGreaterEqual(n.peak_bytes, mpb)
if mrb > 0:
self.assertGreaterEqual(n.residual_bytes, mrb)
if mob > 0:
self.assertGreaterEqual(n.output_bytes, mob)
check_min(n.children, mm, mam, mcm, mb, mpb, mrb, mob)
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
min_val = random.randint(0, 10000)
opts = builder(builder.time_and_memory(
min_micros=min_val)).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mm=min_val)
opts = builder(builder.time_and_memory(
min_accelerator_micros=min_val)).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mam=min_val)
opts = builder(builder.time_and_memory(
min_cpu_micros=min_val)).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mcm=min_val)
opts = builder(builder.time_and_memory(
min_bytes=min_val)).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mb=min_val)
opts = builder(builder.time_and_memory(
min_peak_bytes=min_val)).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mpb=min_val)
opts = builder(builder.time_and_memory(
min_residual_bytes=min_val)).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mrb=min_val)
opts = builder(builder.time_and_memory(
min_output_bytes=min_val)).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mob=min_val)
def testSelectOption(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
def check_selection(selected, not_selected):
with gfile.Open(outfile, 'r') as f:
s = f.read()
for attr in selected:
self.assertTrue(s.find(attr) > 0, s)
for attr in not_selected:
self.assertFalse(s.find(attr) > 0, s)
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
self.evaluate(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
opts = builder(
builder.time_and_memory()).with_file_output(outfile).select(
['micros']).build()
_ = model_analyzer.profile(sess.graph, run_meta=run_meta, options=opts)
check_selection(['total execution time', 'accelerator execution time'],
['bytes'])
opts = builder(
builder.time_and_memory()).with_file_output(outfile).select(
['bytes']).build()
_ = model_analyzer.profile(sess.graph, run_meta=run_meta, options=opts)
check_selection(['requested bytes'],
['peak bytes', 'residual bytes', 'output bytes'])
opts = builder(
builder.time_and_memory()).with_file_output(outfile).select(
['peak_bytes', 'residual_bytes', 'output_bytes']).build()
_ = model_analyzer.profile(sess.graph, run_meta=run_meta, options=opts)
check_selection(['peak bytes', 'residual bytes', 'output bytes'],
['requested_bytes'])
def _trainLoop(self, train_op, train_steps, time_dir, time_step, memory_dir,
memory_step, profile_dir, dump_step):
with session.Session(config=self._no_rewrite_session_config()) as sess:
self.evaluate(variables.global_variables_initializer())
# start from 1 because variable_initializer took one step.
for i in range(1, train_steps + 1):
_ = self.evaluate(train_op)
if i in time_step:
ret = gfile.ListDirectory(time_dir)
self.assertEqual(len(ret), 1)
self.assertTrue(
gfile.Open(os.path.join(time_dir, ret[0]), 'r').read().find(
'execution time') > 0)
_ = [gfile.Remove(os.path.join(time_dir, x)) for x in ret]
else:
self.assertEqual(len(gfile.ListDirectory(time_dir)), 0)
if i in memory_step:
ret = gfile.ListDirectory(memory_dir)
self.assertEqual(len(ret), 1)
self.assertTrue(
gfile.Open(os.path.join(memory_dir, ret[0]), 'r').read().find(
'requested bytes') > 0)
_ = [gfile.Remove(os.path.join(memory_dir, x)) for x in ret]
else:
self.assertEqual(len(gfile.ListDirectory(memory_dir)), 0)
if i in dump_step:
ret = gfile.ListDirectory(profile_dir)
self.assertAllEqual(ret, ['profile_%d' % i])
_ = [gfile.Remove(os.path.join(profile_dir, x)) for x in ret]
else:
if i < dump_step[0]:
self.assertFalse(gfile.Exists(profile_dir))
else:
self.assertEqual(len(gfile.ListDirectory(profile_dir)), 0)
@test_util.run_v1_only('b/120545219')
def testAutoProfiling(self):
ops.reset_default_graph()
time_dir = os.path.join(test.get_temp_dir(), 'time')
memory_dir = os.path.join(test.get_temp_dir(), 'memory')
profile_dir = os.path.join(test.get_temp_dir(), 'dir/dir2/profile')
# TODO(xpan): Should we create parent directory for them?
gfile.MkDir(time_dir)
gfile.MkDir(memory_dir)
time_opts = (
builder(builder.time_and_memory()).with_file_output(
os.path.join(time_dir, 'profile')).select(['micros']).build())
memory_opts = (
builder(builder.time_and_memory()).with_file_output(
os.path.join(memory_dir, 'profile')).select(['bytes']).build())
time_steps = [2, 3]
memory_steps = [1, 3]
dump_steps = [3, 4]
x = lib.BuildSmallModel()
with profile_context.ProfileContext(
profile_dir, trace_steps=[1, 2, 3], dump_steps=[3, 4]) as pctx:
pctx.add_auto_profiling('scope', time_opts, time_steps)
pctx.add_auto_profiling('scope', memory_opts, memory_steps)
self._trainLoop(x, 10, time_dir, time_steps, memory_dir, memory_steps,
profile_dir, dump_steps)
@test_util.run_v1_only('b/120545219')
def testOOM(self):
if not test.is_gpu_available():
return
ops.reset_default_graph()
with ops.device('/device:GPU:0'):
a = random_ops.random_normal([1, 10000, 20000], name='test_random1')
b = random_ops.random_normal([30000, 10000, 1], name='test_random2')
c = a * b
try:
with session.Session(config=self._no_rewrite_session_config()) as sess:
sess.run(
c,
options=config_pb2.RunOptions(
report_tensor_allocations_upon_oom=True))
except Exception as e: # pylint: disable=broad-except
exception_str = '%s' % e
# This trace reports allocations for to random tensor.
self.assertTrue('OOM when allocating tensor with shape[30000,10000,20000]'
in exception_str)
mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',
exception_str)
self.assertGreater(float(mat.group(1)), 0.0)
mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',
exception_str)
self.assertGreater(float(mat.group(1)), 0.0)
@test_util.run_v1_only('b/120545219')
def testDistributedOOM(self):
if not test.is_gpu_available():
return
ops.reset_default_graph()
workers, _ = test_util.create_local_cluster(2, 0)
with ops.device('/job:worker/replica:0/task:0/gpu:0'):
a = random_ops.random_normal([1, 10000, 20000], name='test_random1')
with ops.device('/job:worker/replica:0/task:1/gpu:0'):
b = random_ops.random_normal([30000, 10000, 1], name='test_random2')
c = a * b
try:
with session.Session(workers[1].target) as sess:
sess.run(
c,
options=config_pb2.RunOptions(
report_tensor_allocations_upon_oom=True))
except Exception as e: # pylint: disable=broad-except
exception_str = '%s' % e
# test_random2 is reported because it's allocated in worker 1.
self.assertTrue('Current usage from device: '
'/job:worker/replica:0/task:1/device:GPU:0, '
'allocator: GPU_0_bfc' in exception_str)
mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',
exception_str)
self.assertGreater(float(mat.group(1)), 0.0)
# test_random1 is not reported because it's allocated in worker 0.
mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',
exception_str)
self.assertTrue(mat is None)
@test_util.run_v1_only('b/120545219')
def testTrackPersistentBytes(self):
ops.reset_default_graph()
a = array_ops.constant(np.ones((100, 100)))
b = array_ops.constant(np.ones((100, 100)))
c = a * b
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.min_graph_nodes = -1
with session.Session(config=config) as sess:
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess.run(c, options=run_options, run_metadata=run_metadata)
options = option_builder.ProfileOptionBuilder.time_and_memory()
options['min_bytes'] = 0
options['select'] = ('bytes', 'peak_bytes', 'output_bytes',
'residual_bytes')
ret = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
run_metadata = config_pb2.RunMetadata()
sess.run(c, options=run_options, run_metadata=run_metadata)
ret2 = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
n = lib.SearchTFProfNode(ret, 'mul')
n2 = lib.SearchTFProfNode(ret2, 'mul')
self.assertGreater(n.peak_bytes, 0)
self.assertGreater(n.output_bytes, 0)
self.assertGreater(n.residual_bytes, 0)
self.assertEqual(n.peak_bytes, n2.peak_bytes)
self.assertEqual(n.output_bytes, n2.output_bytes)
self.assertEqual(n.residual_bytes, n2.residual_bytes)
@test_util.run_v1_only('b/120545219')
def testTraceLoopBytes(self):
if not test.is_gpu_available():
return
ops.reset_default_graph()
steps = 100
with ops.device('/gpu:0'):
x = array_ops.ones((100, 100), dtype=dtypes.float32)
n = array_ops.constant(steps, dtype=dtypes.int32)
x1 = array_ops.ones((100, 100))
x *= x1
def loop_body(i, x):
x *= x
return i + 1, x
_, y = while_loop.while_loop(lambda i, x: i < n, loop_body,
[array_ops.constant(0), x])
grad = gradients.gradients(y, [x1])
with session.Session(config=self._no_rewrite_session_config()) as sess:
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess.run(grad, options=run_options, run_metadata=run_metadata)
options = option_builder.ProfileOptionBuilder.time_and_memory()
options['min_bytes'] = 0
options['min_micros'] = 0
options['select'] = ('bytes', 'peak_bytes', 'output_bytes',
'residual_bytes')
options['output'] = 'none'
ret_pb = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
self.assertGreater(ret_pb.total_requested_bytes, 1000000)
if __name__ == '__main__':
test.main()
| PrintModelAnalysisTest |
python | jazzband__django-polymorphic | example/pexp/models.py | {
"start": 1233,
"end": 1409
} | class ____(ProxyBase):
class Meta:
proxy = True
def __unicode__(self):
return f"<ProxyB: {self.title}>"
# Internals for management command tests
| ProxyB |
python | davidhalter__jedi | test/static_analysis/star_arguments.py | {
"start": 2140,
"end": 2237
} | class ____(): pass
#! 12 type-error-star-star
simple(1, **A())
#! 11 type-error-star
simple(1, *1)
| A |
python | pydantic__pydantic | pydantic/types.py | {
"start": 104314,
"end": 105431
} | class ____(_fields.PydanticMetadata, BaseMetadata):
"""A `FailFast` annotation can be used to specify that validation should stop at the first error.
This can be useful when you want to validate a large amount of data and you only need to know if it's valid or not.
You might want to enable this setting if you want to validate your data faster (basically, if you use this,
validation will be more performant with the caveat that you get less information).
```python
from typing import Annotated
from pydantic import BaseModel, FailFast, ValidationError
class Model(BaseModel):
x: Annotated[list[int], FailFast()]
# This will raise a single error for the first invalid value and stop validation
try:
obj = Model(x=[1, 2, 'a', 4, 5, 'b', 7, 8, 9, 'c'])
except ValidationError as e:
print(e)
'''
1 validation error for Model
x.2
Input should be a valid integer, unable to parse string as an integer [type=int_parsing, input_value='a', input_type=str]
'''
```
"""
fail_fast: bool = True
| FailFast |
python | PrefectHQ__prefect | src/integrations/prefect-aws/tests/observers/test_ecs_observer.py | {
"start": 43344,
"end": 44066
} | class ____:
@patch("prefect_aws.observers.ecs.ecs_observer")
async def test_start_and_stop_observer(self, mock_observer):
mock_observer.run = AsyncMock(
side_effect=lambda started_event: started_event.set()
)
await start_observer()
mock_observer.run.assert_called_once()
await start_observer()
# Shouldn't be called again
mock_observer.run.assert_called_once()
await stop_observer()
async def test_stop_observer_not_running(self):
# Shouldn't raise
await stop_observer()
async def async_generator_from_list(items: list) -> AsyncGenerator[Any, None]:
for item in items:
yield item
| TestObserverManagement |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py | {
"start": 958,
"end": 2672
} | class ____(Definition):
__slots__ = ('loc', 'operation', 'name', 'variable_definitions', 'directives', 'selection_set',)
_fields = ('operation', 'name', 'variable_definitions', 'directives', 'selection_set',)
def __init__(self, operation, selection_set, name=None, variable_definitions=None, directives=None, loc=None):
self.loc = loc
self.operation = operation
self.name = name
self.variable_definitions = variable_definitions
self.directives = directives
self.selection_set = selection_set
def __eq__(self, other):
return (
self is other or (
isinstance(other, OperationDefinition) and
# self.loc == other.loc and
self.operation == other.operation and
self.name == other.name and
self.variable_definitions == other.variable_definitions and
self.directives == other.directives and
self.selection_set == other.selection_set
)
)
def __repr__(self):
return ('OperationDefinition('
'operation={self.operation!r}'
', name={self.name!r}'
', variable_definitions={self.variable_definitions!r}'
', directives={self.directives!r}'
', selection_set={self.selection_set!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.operation,
self.selection_set,
self.name,
self.variable_definitions,
self.directives,
self.loc
)
def __hash__(self):
return id(self)
| OperationDefinition |
python | walkccc__LeetCode | solutions/3189. Minimum Moves to Get a Peaceful Board/3189.py | {
"start": 0,
"end": 347
} | class ____:
def minMoves(self, rooks: list[list[int]]) -> int:
n = len(rooks)
sortedByRow = sorted(rooks, key=lambda x: x[0])
sortedByCol = sorted(rooks, key=lambda x: x[1])
return (sum(abs(i - row) for (i, _), row in zip(sortedByRow, range(n))) +
sum(abs(j - col) for (_, j), col in zip(sortedByCol, range(n))))
| Solution |
python | PyCQA__pylint | tests/functional/a/access/access_member_before_definition.py | {
"start": 329,
"end": 758
} | class ____:
A = 23
B = A
def __getattr__(self, attr):
try:
return self.__repo
except AttributeError:
self.__repo = attr
return attr
def catchme(self, attr):
"""no AttributeError caught"""
try:
return self._repo # [access-member-before-definition]
except ValueError:
self._repo = attr
return attr
| Bbbb |
python | tiangolo__fastapi | docs_src/request_form_models/tutorial002.py | {
"start": 84,
"end": 267
} | class ____(BaseModel):
username: str
password: str
model_config = {"extra": "forbid"}
@app.post("/login/")
async def login(data: FormData = Form()):
return data
| FormData |
python | Lightning-AI__lightning | src/lightning/pytorch/_graveyard/hpu.py | {
"start": 1216,
"end": 1440
} | class ____:
def __init__(self, *_: Any, **__: Any) -> None:
raise NotImplementedError(
"The `SingleHPUStrategy` class has been removed. Please contact developer@lightning.ai"
)
| SingleHPUStrategy |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/execute_in_process_result.py | {
"start": 661,
"end": 5868
} | class ____(ExecutionResult):
"""Result object returned by in-process testing APIs.
Users should not instantiate this object directly. Used for retrieving run success, events, and outputs from execution methods that return this object.
This object is returned by:
- :py:meth:`dagster.GraphDefinition.execute_in_process`
- :py:meth:`dagster.JobDefinition.execute_in_process`
- :py:meth:`dagster.materialize_to_memory`
- :py:meth:`dagster.materialize`
"""
_handle: NodeHandle
_event_list: Sequence[DagsterEvent]
_dagster_run: DagsterRun
_output_capture: Mapping[StepOutputHandle, Any]
_job_def: JobDefinition
def __init__(
self,
event_list: Sequence[DagsterEvent],
dagster_run: DagsterRun,
output_capture: Optional[Mapping[StepOutputHandle, Any]],
job_def: JobDefinition,
):
self._job_def = job_def
self._event_list = event_list
self._dagster_run = dagster_run
self._output_capture = check.opt_mapping_param(
output_capture, "output_capture", key_type=StepOutputHandle
)
@public
@property
def job_def(self) -> JobDefinition:
"""JobDefinition: The job definition that was executed."""
return self._job_def
@public
@property
def dagster_run(self) -> DagsterRun:
"""DagsterRun: The Dagster run that was executed."""
return self._dagster_run
@public
@property
def all_events(self) -> Sequence[DagsterEvent]:
"""List[DagsterEvent]: All dagster events emitted during execution."""
return self._event_list
@public
@property
def run_id(self) -> str:
"""str: The run ID of the executed :py:class:`DagsterRun`."""
return self.dagster_run.run_id
def _get_output_for_handle(self, handle: NodeHandle, output_name: str) -> Any:
mapped_outputs = {}
step_key = str(handle)
output_found = False
for step_output_handle, value in self._output_capture.items():
# For the mapped output case, where step keys are in the format
# "step_key[upstream_mapped_output_name]" within the step output handle.
if (
step_output_handle.step_key.startswith(f"{step_key}[")
and step_output_handle.output_name == output_name
):
output_found = True
key_start = step_output_handle.step_key.find("[")
key_end = step_output_handle.step_key.find("]")
upstream_mapped_output_name = step_output_handle.step_key[key_start + 1 : key_end]
mapped_outputs[upstream_mapped_output_name] = value
# For all other cases, search for exact match.
elif (
step_key == step_output_handle.step_key
and step_output_handle.output_name == output_name
):
output_found = True
if not step_output_handle.mapping_key:
return self._output_capture[step_output_handle]
mapped_outputs[step_output_handle.mapping_key] = value
if not output_found:
raise DagsterInvariantViolationError(
f"No outputs found for output '{output_name}' from node '{handle}'."
)
return mapped_outputs
@public
def output_for_node(self, node_str: str, output_name: str = DEFAULT_OUTPUT) -> Any:
"""Retrieves output value with a particular name from the in-process run of the job.
Args:
node_str (str): Name of the op/graph whose output should be retrieved. If the intended
graph/op is nested within another graph, the syntax is `outer_graph.inner_node`.
output_name (Optional[str]): Name of the output on the op/graph to retrieve. Defaults to
`result`, the default output name in dagster.
Returns:
Any: The value of the retrieved output.
"""
return super().output_for_node(node_str, output_name=output_name)
@public
def asset_value(self, asset_key: CoercibleToAssetKey) -> Any:
"""Retrieves the value of an asset that was materialized during the execution of the job.
Args:
asset_key (CoercibleToAssetKey): The key of the asset to retrieve.
Returns:
Any: The value of the retrieved asset.
"""
node_output_handle = self._job_def.asset_layer.get_op_output_handle(
AssetKey.from_coercible(asset_key)
)
return self.output_for_node(
node_str=str(node_output_handle.node_handle), output_name=node_output_handle.output_name
)
@public
def output_value(self, output_name: str = DEFAULT_OUTPUT) -> Any:
"""Retrieves output of top-level job, if an output is returned.
Args:
output_name (Optional[str]): The name of the output to retrieve. Defaults to `result`,
the default output name in dagster.
Returns:
Any: The value of the retrieved output.
"""
return super().output_value(output_name=output_name)
| ExecuteInProcessResult |
python | ansible__ansible | test/integration/targets/jinja_plugins/filter_plugins/bad_filter.py | {
"start": 180,
"end": 261
} | class ____:
def filters(self):
raise TypeError('bad_filter')
| FilterModule |
python | dask__distributed | distributed/shuffle/tests/test_shuffle.py | {
"start": 96395,
"end": 96825
} | class ____(ShuffleSchedulerPlugin):
def __init__(self, scheduler):
super().__init__(scheduler)
self.counts = defaultdict(int)
def get(self, *args, **kwargs):
self.counts["get"] += 1
return super().get(*args, **kwargs)
def get_or_create(self, *args, **kwargs):
self.counts["get_or_create"] += 1
return super().get_or_create(*args, **kwargs)
| RequestCountingSchedulerPlugin |
python | chroma-core__chroma | chromadb/utils/embedding_functions/chroma_cloud_splade_embedding_function.py | {
"start": 481,
"end": 5552
} | class ____(SparseEmbeddingFunction[Documents]):
def __init__(
self,
api_key_env_var: str = "CHROMA_API_KEY",
model: ChromaCloudSpladeEmbeddingModel = ChromaCloudSpladeEmbeddingModel.SPLADE_PP_EN_V1,
):
"""
Initialize the ChromaCloudSpladeEmbeddingFunction.
Args:
api_key_env_var (str, optional): Environment variable name that contains your API key.
Defaults to "CHROMA_API_KEY".
"""
try:
import httpx
except ImportError:
raise ValueError(
"The httpx python package is not installed. Please install it with `pip install httpx`"
)
self.api_key_env_var = api_key_env_var
self.api_key = os.getenv(self.api_key_env_var)
if not self.api_key:
raise ValueError(
f"API key not found in environment variable {self.api_key_env_var}"
)
self.model = model
self._api_url = "https://embed.trychroma.com/embed_sparse"
self._session = httpx.Client()
self._session.headers.update(
{
"x-chroma-token": self.api_key,
"x-chroma-embedding-model": self.model.value,
}
)
def __del__(self) -> None:
"""
Cleanup the HTTP client session when the object is destroyed.
"""
if hasattr(self, "_session"):
self._session.close()
def close(self) -> None:
"""
Explicitly close the HTTP client session.
Call this method when you're done using the embedding function.
"""
if hasattr(self, "_session"):
self._session.close()
def __call__(self, input: Documents) -> SparseVectors:
"""
Generate embeddings for the given documents.
Args:
input (Documents): The documents to generate embeddings for.
"""
if not input:
return []
payload: Dict[str, Union[str, Documents]] = {
"texts": list(input),
"task": "",
"target": "",
}
try:
import httpx
response = self._session.post(self._api_url, json=payload, timeout=60)
response.raise_for_status()
json_response = response.json()
return self._parse_response(json_response)
except httpx.HTTPStatusError as e:
raise RuntimeError(
f"Failed to get embeddings from Chroma Cloud API: HTTP {e.response.status_code} - {e.response.text}"
)
except httpx.TimeoutException:
raise RuntimeError("Request to Chroma Cloud API timed out after 60 seconds")
except httpx.HTTPError as e:
raise RuntimeError(f"Failed to get embeddings from Chroma Cloud API: {e}")
except Exception as e:
raise RuntimeError(f"Unexpected error calling Chroma Cloud API: {e}")
def _parse_response(self, response: Any) -> SparseVectors:
"""
Parse the response from the Chroma Cloud Sparse Embedding API.
"""
raw_embeddings = response["embeddings"]
# Normalize each sparse vector (sort indices and validate)
normalized_vectors: SparseVectors = []
for emb in raw_embeddings:
# Handle both dict format and SparseVector format
if isinstance(emb, dict):
indices = emb.get("indices", [])
values = emb.get("values", [])
else:
# Already a SparseVector, extract its data
indices = emb.indices
values = emb.values
normalized_vectors.append(
normalize_sparse_vector(indices=indices, values=values)
)
return normalized_vectors
@staticmethod
def name() -> str:
return "chroma-cloud-splade"
@staticmethod
def build_from_config(
config: Dict[str, Any]
) -> "SparseEmbeddingFunction[Documents]":
api_key_env_var = config.get("api_key_env_var")
model = config.get("model")
if model is None:
raise ValueError("model must be provided in config")
if not api_key_env_var:
raise ValueError("api_key_env_var must be provided in config")
return ChromaCloudSpladeEmbeddingFunction(
api_key_env_var=api_key_env_var,
model=ChromaCloudSpladeEmbeddingModel(model),
)
def get_config(self) -> Dict[str, Any]:
return {"api_key_env_var": self.api_key_env_var, "model": self.model.value}
def validate_config_update(
self, old_config: Dict[str, Any], new_config: Dict[str, Any]
) -> None:
if "model" in new_config:
raise ValueError(
"model cannot be changed after the embedding function has been initialized"
)
@staticmethod
def validate_config(config: Dict[str, Any]) -> None:
validate_config_schema(config, "chroma-cloud-splade") | ChromaCloudSpladeEmbeddingFunction |
python | pypa__virtualenv | src/virtualenv/create/via_global_ref/builtin/cpython/mac_os.py | {
"start": 611,
"end": 2194
} | class ____(CPython, ABC):
@classmethod
def can_describe(cls, interpreter):
return is_mac_os_framework(interpreter) and super().can_describe(interpreter)
def create(self):
super().create()
# change the install_name of the copied python executables
target = self.desired_mach_o_image_path()
current = self.current_mach_o_image_path()
for src in self._sources:
if isinstance(src, ExePathRefToDest) and (src.must == RefMust.COPY or not self.symlinks):
exes = [self.bin_dir / src.base]
if not self.symlinks:
exes.extend(self.bin_dir / a for a in src.aliases)
for exe in exes:
fix_mach_o(str(exe), current, target, self.interpreter.max_size)
@classmethod
def _executables(cls, interpreter):
for _, targets, must, when in super()._executables(interpreter):
# Make sure we use the embedded interpreter inside the framework, even if sys.executable points to the
# stub executable in ${sys.prefix}/bin.
# See http://groups.google.com/group/python-virtualenv/browse_thread/thread/17cab2f85da75951
fixed_host_exe = Path(interpreter.prefix) / "Resources" / "Python.app" / "Contents" / "MacOS" / "Python"
yield fixed_host_exe, targets, must, when
@abstractmethod
def current_mach_o_image_path(self):
raise NotImplementedError
@abstractmethod
def desired_mach_o_image_path(self):
raise NotImplementedError
| CPythonmacOsFramework |
python | ray-project__ray | rllib/models/torch/mingpt.py | {
"start": 4067,
"end": 7829
} | class ____(nn.Module):
"""an unassuming Transformer block"""
def __init__(self, config: GPTConfig):
super().__init__()
self.ln_1 = nn.LayerNorm(config.n_embed)
self.attn = CausalSelfAttention(config)
self.ln_2 = nn.LayerNorm(config.n_embed)
self.mlp = nn.ModuleDict(
dict(
c_fc=nn.Linear(config.n_embed, 4 * config.n_embed),
c_proj=nn.Linear(4 * config.n_embed, config.n_embed),
act=NewGELU(),
dropout=nn.Dropout(config.resid_pdrop),
)
)
def forward(self, x, attention_masks=None):
# Multi-head attention sub-layer.
x_att, att = self.attn(self.ln_1(x), attention_masks=attention_masks)
# Residual of multi-head attention sub-layer.
x = x + x_att
# Position-wise FFN sub-layer: fc + activation + fc + dropout
x_ffn = self.mlp.dropout(self.mlp.c_proj(self.mlp.act(self.mlp.c_fc(x))))
# Residual of position-wise FFN sub-layer.
x = x + x_ffn
return x, att
@Deprecated(error=False)
def configure_gpt_optimizer(
model: nn.Module,
learning_rate: float,
weight_decay: float,
betas: Tuple[float, float] = (0.9, 0.95),
**kwargs,
) -> torch.optim.Optimizer:
"""
This long function is unfortunately doing something very simple and is
being very defensive: We are separating out all parameters of the model
into two buckets: those that will experience weight decay for regularization
and those that won't (biases, and layernorm/embedding weights). We are then
returning the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience
# regularizing weight decay
decay = set()
no_decay = set()
whitelist_w_modules = (torch.nn.Linear,)
blacklist_w_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
fpn = "%s.%s" % (mn, pn) if mn else pn # full param name
# random note: because named_modules and named_parameters are
# recursive we will see the same tensors p many many times. but
# doing it this way allows us to know which parent module any
# tensor p belongs to...
if pn.endswith("bias"):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith("weight") and isinstance(m, whitelist_w_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith("weight") and isinstance(m, blacklist_w_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# validate that we considered every parameter
param_dict = dict(model.named_parameters())
inter_params = decay & no_decay
union_params = decay | no_decay
assert (
len(inter_params) == 0
), f"parameters {str(inter_params)} made it into both decay/no_decay sets!"
assert len(param_dict.keys() - union_params) == 0, (
f"parameters {str(param_dict.keys() - union_params)} were not "
f"separated into either decay/no_decay set!"
)
# create the pytorch optimizer object
optim_groups = [
{
"params": [param_dict[pn] for pn in sorted(decay)],
"weight_decay": weight_decay,
},
{
"params": [param_dict[pn] for pn in sorted(no_decay)],
"weight_decay": 0.0,
},
]
optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **kwargs)
return optimizer
@Deprecated(error=False)
| Block |
python | lepture__authlib | tests/django/test_oauth2/models.py | {
"start": 3364,
"end": 4044
} | class ____(Model, AuthorizationCodeMixin):
user = ForeignKey(User, on_delete=CASCADE)
client_id = CharField(max_length=48, db_index=True)
code = CharField(max_length=120, unique=True, null=False)
redirect_uri = TextField(default="", null=True)
response_type = TextField(default="")
scope = TextField(default="", null=True)
auth_time = IntegerField(null=False, default=now_timestamp)
def is_expired(self):
return self.auth_time + 300 < time.time()
def get_redirect_uri(self):
return self.redirect_uri
def get_scope(self):
return self.scope or ""
def get_auth_time(self):
return self.auth_time
| OAuth2Code |
python | PrefectHQ__prefect | tests/client/api/test_flow_runs.py | {
"start": 222,
"end": 4971
} | class ____:
@pytest.fixture
async def flow_runs(self, flow, work_queue_1, session):
flow_2 = await models.flows.create_flow(
session=session,
flow=actions.FlowCreate(name="another-test"),
)
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=actions.FlowRunCreate(flow_id=flow.id, name="fr1", tags=["red"]),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=actions.FlowRunCreate(flow_id=flow.id, name="fr2", tags=["blue"]),
)
flow_run_3 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow_2.id,
name="fr3",
tags=["blue", "red"],
work_queue_id=work_queue_1.id,
),
)
await session.commit()
return [flow_run_1, flow_run_2, flow_run_3]
@pytest.fixture
async def parent_flow_run(self, flow, session):
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
flow_version="1.0",
state=schemas.states.Pending(),
),
)
await session.commit()
return flow_run
@pytest.fixture
async def child_runs(
self,
flow,
parent_flow_run,
session,
):
children = []
for i in range(5):
dummy_task = await models.task_runs.create_task_run(
session=session,
task_run=schemas.core.TaskRun(
flow_run_id=parent_flow_run.id,
name=f"dummy-{i}",
task_key=f"dummy-{i}",
dynamic_key=f"dummy-{i}",
),
)
children.append(
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
flow_version="1.0",
state=schemas.states.Pending(),
parent_task_run_id=dummy_task.id,
),
)
)
return children
@pytest.fixture
async def grandchild_runs(self, flow, child_runs, session):
grandchildren = []
for child in child_runs:
for i in range(3):
dummy_task = await models.task_runs.create_task_run(
session=session,
task_run=schemas.core.TaskRun(
flow_run_id=child.id,
name=f"dummy-{i}",
task_key=f"dummy-{i}",
dynamic_key=f"dummy-{i}",
),
)
grandchildren.append(
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
flow_version="1.0",
state=schemas.states.Pending(),
parent_task_run_id=dummy_task.id,
),
)
)
return grandchildren
async def test_read_subflow_runs(
self,
prefect_client,
parent_flow_run,
child_runs,
# included to make sure we're only going 1 level deep
grandchild_runs,
# included to make sure we're not bringing in extra flow runs
flow_runs,
):
"""We should be able to find all subflow runs of a given flow run."""
subflow_filter = filters.FlowRunFilter(
parent_flow_run_id=filters.FlowRunFilterParentFlowRunId(
any_=[parent_flow_run.id]
)
)
response = await prefect_client.read_flow_runs(flow_run_filter=subflow_filter)
assert len(response) == len(child_runs)
returned = {run.id for run in response}
expected = {run.id for run in child_runs}
assert returned == expected
async def test_read_subflow_runs_non_existant(
self,
prefect_client,
):
"""With a UUID that isn't of a flow run, an empty list should be returned."""
subflow_filter = filters.FlowRunFilter(
parent_flow_run_id=filters.FlowRunFilterParentFlowRunId(any_=[uuid4()])
)
response = await prefect_client.read_flow_runs(flow_run_filter=subflow_filter)
assert len(response) == 0
| TestReadFlowRuns |
python | astropy__astropy | astropy/coordinates/builtin_frames/baseradec.py | {
"start": 1457,
"end": 2017
} | class ____(BaseCoordinateFrame):
"""
A base class that defines default representation info for frames that
represent longitude and latitude as Right Ascension and Declination
following typical "equatorial" conventions.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "ra"),
RepresentationMapping("lat", "dec"),
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
| BaseRADecFrame |
python | PyCQA__pylint | tests/functional/a/arguments.py | {
"start": 6907,
"end": 8341
} | class ____:
def _pick_fruit(fruit):
def _print_selection(self):
print(f"Selected: {fruit}!")
return _print_selection
pick_apple = _pick_fruit("apple")
pick_pear = _pick_fruit("pear")
picker = FruitPicker()
picker.pick_apple()
picker.pick_pear()
def name1(apple, /, **kwargs):
"""
Positional-only parameter with `**kwargs`.
Calling this function with the `apple` keyword should not emit
`redundant-keyword-arg` since it is added to `**kwargs`.
>>> name1("Red apple", apple="Green apple")
"Red apple"
{"apple": "Green apple"}
"""
print(apple)
print(kwargs)
name1("Red apple", apple="Green apple")
def name2(apple, /, banana, **kwargs):
"""
Positional-only parameter with positional-or-keyword parameter and `**kwargs`.
"""
# `banana` is redundant
# +1:[redundant-keyword-arg]
name2("Red apple", "Yellow banana", apple="Green apple", banana="Green banana")
# Test `no-value-for-parameter` in the context of positional-only parameters
def name3(param1, /, **kwargs): ...
def name4(param1, /, param2, **kwargs): ...
def name5(param1=True, /, **kwargs): ...
def name6(param1, **kwargs): ...
name3(param1=43) # [no-value-for-parameter]
name3(43)
name4(1, param2=False)
name5()
name6(param1=43)
# https://github.com/pylint-dev/pylint/issues/9036
# No value for argument 'string' in staticmethod call (no-value-for-parameter)
| FruitPicker |
python | crytic__slither | slither/core/declarations/solidity_variables.py | {
"start": 4206,
"end": 5343
} | class ____(SourceMapping):
def __init__(self, name: str) -> None:
super().__init__()
self._check_name(name)
self._name = name
# dev function, will be removed once the code is stable
def _check_name(self, name: str) -> None:
assert name in SOLIDITY_VARIABLES or name.endswith(("_slot", "_offset"))
@property
def state_variable(self) -> str:
if self._name.endswith("_slot"):
return self._name[:-5]
if self._name.endswith("_offset"):
return self._name[:-7]
to_log = f"Incorrect YUL parsing. {self} is not a solidity variable that can be seen as a state variable"
raise SlitherException(to_log)
@property
def name(self) -> str:
return self._name
@property
def type(self) -> ElementaryType:
return ElementaryType(SOLIDITY_VARIABLES[self.name])
def __str__(self) -> str:
return self._name
def __eq__(self, other: Any) -> bool:
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self) -> int:
return hash(self.name)
| SolidityVariable |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams5.py | {
"start": 380,
"end": 503
} | class ____[T]:
...
# This should generate an error because variadic type params don't
# support bound expressions.
| ClassE |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/diag_op_test.py | {
"start": 22603,
"end": 31133
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testSquare(self):
with self.session():
v = np.array([1.0, 2.0, 3.0])
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]])
mat_set_diag = np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0],
[1.0, 1.0, 3.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag, self.evaluate(output))
# Diagonal bands.
for align in alignment_list:
_, tests = square_cases(align)
for diags, (vecs, banded_mat) in tests.items():
mask = banded_mat[0] == 0
input_mat = np.random.randint(10, size=mask.shape)
solution = input_mat * mask + banded_mat[0]
output = array_ops.matrix_set_diag(
input_mat, vecs[0], k=diags, align=align)
self.assertEqual(output.get_shape(), solution.shape)
self.assertAllEqual(output, solution)
@test_util.run_deprecated_v1
def testRectangular(self):
with self.session():
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((2, 3), output.get_shape())
self.assertAllEqual(expected, self.evaluate(output))
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 2), output.get_shape())
self.assertAllEqual(expected, self.evaluate(output))
# Diagonal bands.
for align in alignment_list:
for _, tests in [tall_cases(align), fat_cases(align)]:
for diags, (vecs, banded_mat) in tests.items():
mask = banded_mat[0] == 0
input_mat = np.random.randint(10, size=mask.shape)
solution = input_mat * mask + banded_mat[0]
output = array_ops.matrix_set_diag(
input_mat, vecs[0], k=diags, align=align)
self.assertEqual(output.get_shape(), solution.shape)
self.assertAllEqual(output, solution)
def _testSquareBatch(self, dtype):
with self.cached_session():
v_batch = np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]]).astype(dtype)
mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0],
[2.0, 0.0, 6.0]]]).astype(dtype)
mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, 0.0, 0.0],
[1.0, 0.0, -3.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0],
[2.0, 0.0, -6.0]]]).astype(dtype)
output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))
# Diagonal bands.
for align in alignment_list:
_, tests = square_cases(align)
for diags, (vecs, banded_mat) in tests.items():
mask = banded_mat == 0
input_mat = np.random.randint(10, size=mask.shape).astype(dtype)
solution = (input_mat * mask + banded_mat).astype(dtype)
output = array_ops.matrix_set_diag(
input_mat, vecs.astype(dtype), k=diags, align=align)
self.assertEqual(output.get_shape(), solution.shape)
self.assertAllEqual(output, solution)
@test_util.run_deprecated_v1
def testSquareBatch(self):
self._testSquareBatch(dtypes_lib.bfloat16.as_numpy_dtype)
self._testSquareBatch(np.float32)
self._testSquareBatch(np.float64)
self._testSquareBatch(np.int32)
self._testSquareBatch(np.int64)
self._testSquareBatch(np.bool_)
@test_util.run_deprecated_v1
def testRectangularBatch(self):
with self.session():
v_batch = np.array([[-1.0, -2.0], [-4.0, -5.0]])
mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]])
mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, -2.0, 0.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]])
output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 2, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))
# Diagonal bands.
for align in alignment_list:
for _, tests in [tall_cases(align), fat_cases(align)]:
for diags, pair in tests.items():
vecs, banded_mat = pair
mask = banded_mat == 0
input_mat = np.random.randint(10, size=mask.shape)
solution = input_mat * mask + banded_mat
output = array_ops.matrix_set_diag(
input_mat, vecs, k=diags, align=align)
self.assertEqual(output.get_shape(), solution.shape)
self.assertAllEqual(output, solution)
@test_util.run_deprecated_v1
def testInvalidShape(self):
with self.assertRaisesRegex(ValueError, "must be at least rank 2"):
array_ops.matrix_set_diag(0, [0])
with self.assertRaisesRegex(ValueError, "must be at least rank 1"):
array_ops.matrix_set_diag([[0]], 0)
@test_util.run_deprecated_v1
def testInvalidShapeAtEval(self):
with self.session():
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
with self.assertRaisesOpError("diagonal must be at least 1-dim"):
array_ops.matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0})
d = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError(
"first dimensions of diagonal don't match"):
array_ops.matrix_set_diag(v, d).eval(feed_dict={
v: np.zeros((2, 3, 3)),
d: np.ones((2, 4))
})
def _testGrad(self, input_shape, diag_shape, diags, align):
with self.session():
x = constant_op.constant(
np.random.rand(*input_shape), dtype=dtypes_lib.float32)
x_diag = constant_op.constant(
np.random.rand(*diag_shape), dtype=dtypes_lib.float32)
y = array_ops.matrix_set_diag(x, x_diag, k=diags, align=align)
error_x = gradient_checker.compute_gradient_error(x,
x.get_shape().as_list(),
y,
y.get_shape().as_list())
self.assertLess(error_x, 1e-4)
error_x_diag = gradient_checker.compute_gradient_error(
x_diag,
x_diag.get_shape().as_list(), y,
y.get_shape().as_list())
self.assertLess(error_x_diag, 1e-4)
@test_util.run_deprecated_v1
def testGrad(self):
input_shapes = [(3, 4, 4), (3, 3, 4), (3, 4, 3), (7, 4, 8, 8)]
diag_bands = [(0, 0)]
diag_bands.append((-1, 1))
for input_shape, diags, align in itertools.product(input_shapes, diag_bands,
alignment_list):
lower_diag_index, upper_diag_index = diags
num_diags = upper_diag_index - lower_diag_index + 1
num_diags_dim = () if num_diags == 1 else (num_diags,)
diag_shape = input_shape[:-2] + num_diags_dim + (min(input_shape[-2:]),)
self._testGrad(input_shape, diag_shape, diags, align)
@test_util.run_deprecated_v1
def testGradWithNoShapeInformation(self):
with self.session() as sess:
v = array_ops.placeholder(dtype=dtypes_lib.float32)
mat = array_ops.placeholder(dtype=dtypes_lib.float32)
grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)
output = array_ops.matrix_set_diag(mat, v)
grads = gradients_impl.gradients(output, [mat, v], grad_ys=grad_input)
grad_input_val = np.random.rand(3, 3).astype(np.float32)
grad_vals = sess.run(
grads,
feed_dict={
v: 2 * np.ones(3),
mat: np.ones((3, 3)),
grad_input: grad_input_val
})
self.assertAllEqual(np.diag(grad_input_val), grad_vals[1])
self.assertAllEqual(grad_input_val - np.diag(np.diag(grad_input_val)),
grad_vals[0])
| MatrixSetDiagTest |
python | jazzband__django-polymorphic | example/pexp/models.py | {
"start": 1969,
"end": 2048
} | class ____(NormalModelB):
field3 = models.CharField(max_length=10)
| NormalModelC |
python | joke2k__faker | tests/providers/test_color.py | {
"start": 14281,
"end": 14633
} | class ____:
"""Test de_CH color provider methods"""
def test_color_name(self, faker, num_samples):
for _ in range(num_samples):
color_name = faker.color_name()
assert isinstance(color_name, str)
assert color_name in DeChColorProvider.all_colors.keys()
assert "ß" not in color_name
| TestDeCh |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 3660,
"end": 4470
} | class ____(Operation):
def call(self, x):
return backend.nn.softplus(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.softplus", "keras.ops.nn.softplus"])
def softplus(x):
"""Softplus activation function.
It is defined as `f(x) = log(exp(x) + 1)`, where `log` is the natural
logarithm and `exp` is the exponential function.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-0.555, 0.0, 0.555])
>>> keras.ops.softplus(x)
array([0.45366603, 0.6931472, 1.008666], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Softplus().symbolic_call(x)
return backend.nn.softplus(x)
| Softplus |
python | getsentry__sentry | src/sentry/identity/bitbucket/provider.py | {
"start": 372,
"end": 603
} | class ____(Provider):
key = IntegrationProviderSlug.BITBUCKET.value
name = "Bitbucket"
def get_pipeline_views(self) -> list[PipelineView[IdentityPipeline]]:
return [BitbucketLoginView()]
| BitbucketIdentityProvider |
python | pytorch__pytorch | torch/testing/_internal/distributed/fake_pg.py | {
"start": 119,
"end": 1126
} | class ____(dist.Store):
"""
A fake store is a fake Key-Value store simply for initialization usage
the of fake process group, one can either use FakeStore or HashStore.
"""
def _create_fake_pg(common_opts, backend_opts):
"""
A fake process group (not related to FakeTensor) is a process group which
doesn't actually do any communication, it just hallucinates some
communication. You can run a single rank with a fake process group
without needing multiple processes (simulates per-rank behavior)
NOTE: This is not a real process group, and it would produce wrong results
for every collective. It should be used as a convenient tool when playing
with distributed but don't care about the actual data.
"""
return FakeProcessGroup._create_internal(
common_opts.group_rank, common_opts.group_size, backend_opts
)
dist.Backend.register_backend(
"fake", _create_fake_pg, extended_api=True, devices=["cpu", "cuda", "hpu", "xpu"]
)
| FakeStore |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 332240,
"end": 333158
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateDiscussion"""
__schema__ = github_schema
__field_names__ = ("discussion_id", "title", "body", "category_id", "client_mutation_id")
discussion_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="discussionId")
"""The Node ID of the discussion to update."""
title = sgqlc.types.Field(String, graphql_name="title")
"""The new discussion title."""
body = sgqlc.types.Field(String, graphql_name="body")
"""The new contents of the discussion body."""
category_id = sgqlc.types.Field(ID, graphql_name="categoryId")
"""The Node ID of a discussion category within the same repository to
change this discussion to.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateDiscussionInput |
python | getsentry__sentry | src/sentry/snuba/metrics/fields/base.py | {
"start": 30827,
"end": 31386
} | class ____(DerivedMetricExpressionDefinition, MetricExpressionBase, ABC):
def _raise_entity_validation_exception(self, func_name: str) -> None:
raise DerivedMetricParseException(
f"Method `{func_name}` can only be called on instance of "
f"{self.__class__.__name__} "
f"{get_public_name_from_mri(self.metric_mri)} with a `projects` attribute."
)
def __str__(self) -> str:
return self.metric_mri
def get_meta_type(self) -> str | None:
return self.meta_type
| DerivedMetricExpression |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_spans_performance.py | {
"start": 16913,
"end": 21031
} | class ____:
project_id: int
event_id: str
def query_suspect_span_groups(
snuba_params: SnubaParams,
fields: list[str],
query: str | None,
span_ops: list[str] | None,
exclude_span_ops: list[str] | None,
span_groups: list[str] | None,
direction: str,
orderby: str,
limit: int,
offset: int,
min_exclusive_time: float | None = None,
max_exclusive_time: float | None = None,
) -> list[SuspectSpan]:
suspect_span_columns = SPAN_PERFORMANCE_COLUMNS[orderby]
selected_columns: list[str] = [
column
for column in suspect_span_columns.suspect_op_group_columns + fields
if not is_equation(column)
] + [
"array_join(spans_op)",
"array_join(spans_group)",
# want a single event id to fetch from nodestore for the span description
"any(id)",
]
equations: list[str] = [
strip_equation(column)
for column in suspect_span_columns.suspect_op_group_columns + fields
if is_equation(column)
]
builder = DiscoverQueryBuilder(
dataset=Dataset.Discover,
params={},
snuba_params=snuba_params,
selected_columns=selected_columns,
equations=equations,
query=query,
orderby=[direction + column for column in suspect_span_columns.suspect_op_group_sort],
limit=limit,
offset=offset,
config=QueryBuilderConfig(
auto_aggregations=True,
use_aggregate_conditions=True,
functions_acl=["array_join", "sumArray", "percentileArray", "maxArray"],
),
)
extra_conditions = []
if span_ops:
extra_conditions.append(
Condition(
builder.resolve_function("array_join(spans_op)"),
Op.IN,
Function("tuple", span_ops),
)
)
if exclude_span_ops:
extra_conditions.append(
Condition(
builder.resolve_function("array_join(spans_op)"),
Op.NOT_IN,
Function("tuple", exclude_span_ops),
)
)
if span_groups:
extra_conditions.append(
Condition(
builder.resolve_function("array_join(spans_group)"),
Op.IN,
Function("tuple", span_groups),
)
)
if min_exclusive_time is not None:
extra_conditions.append(
Condition(
builder.resolve_function("array_join(spans_exclusive_time)"),
Op.GT,
min_exclusive_time,
)
)
if max_exclusive_time is not None:
extra_conditions.append(
Condition(
builder.resolve_function("array_join(spans_exclusive_time)"),
Op.LT,
max_exclusive_time,
)
)
if extra_conditions:
builder.add_conditions(extra_conditions)
snql_query = builder.get_snql_query()
results = raw_snql_query(snql_query, "api.organization-events-spans-performance-suspects")
return [
SuspectSpan(
op=suspect["array_join_spans_op"],
group=suspect["array_join_spans_group"],
description=get_span_description(
EventID(snuba_params.project_ids[0], suspect["any_id"]),
span_op=suspect["array_join_spans_op"],
span_group=suspect["array_join_spans_group"],
),
frequency=suspect.get("count_unique_id"),
count=suspect.get("count"),
avg_occurrences=suspect.get("equation[0]"),
sum_exclusive_time=suspect.get("sumArray_spans_exclusive_time"),
p50_exclusive_time=suspect.get("percentileArray_spans_exclusive_time_0_50"),
p75_exclusive_time=suspect.get("percentileArray_spans_exclusive_time_0_75"),
p95_exclusive_time=suspect.get("percentileArray_spans_exclusive_time_0_95"),
p99_exclusive_time=suspect.get("percentileArray_spans_exclusive_time_0_99"),
)
for suspect in results["data"]
]
| EventID |
python | walkccc__LeetCode | solutions/253. Meeting Rooms II/253.py | {
"start": 0,
"end": 368
} | class ____:
def minMeetingRooms(self, intervals: list[list[int]]) -> int:
minHeap = [] # Store the end times of each room.
for start, end in sorted(intervals):
# There's no overlap, so we can reuse the same room.
if minHeap and start >= minHeap[0]:
heapq.heappop(minHeap)
heapq.heappush(minHeap, end)
return len(minHeap)
| Solution |
python | django__django | tests/serializers/models/base.py | {
"start": 1360,
"end": 1521
} | class ____(models.Model):
name = models.CharField(max_length=255)
category = models.ForeignKey(Category, models.CASCADE)
objects = TopicManager()
| Topic |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops.py | {
"start": 16104,
"end": 211194
} | class ____:
"""Use Python2/Python3 division delegation to implement divide for tensors."""
def __init__(self, x, name):
"""Construct DivideDelegateWithName.
Args:
x: Tensor to use as left operand in operator overloads
name: The name that is preferred for the op created.
"""
self.x = x
self.name = name
def __truediv__(self, y):
return _truediv_python3(self.x, y, self.name)
def __floordiv__(self, y):
return floordiv(self.x, y, self.name)
def __div__(self, y):
return _div_python2(self.x, y, self.name)
@tf_export("math.divide", "divide")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def divide(x, y, name=None):
"""Computes Python style division of `x` by `y`.
For example:
>>> x = tf.constant([16, 12, 11])
>>> y = tf.constant([4, 6, 2])
>>> tf.divide(x,y)
<tf.Tensor: shape=(3,), dtype=float64,
numpy=array([4. , 2. , 5.5])>
Args:
x: A `Tensor`
y: A `Tensor`
name: A name for the operation (optional).
Returns:
A `Tensor` with same shape as input
"""
if name is not None:
# Cannot use tensors operator overload, because it has no way to track
# override names. Use a dummy class to track the runtime division behavior
return DivideDelegateWithName(x, name) / y
else:
# We do conversion here to make sure at least x is a tensor.
if not tensor_util.is_tf_type(x):
dtype = y.dtype.base_dtype if tensor_util.is_tf_type(y) else None
x = ops.convert_to_tensor(x, dtype=dtype)
return x / y
@tf_export("math.multiply", "multiply")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def multiply(x, y, name=None):
"""Returns an element-wise x * y.
For example:
>>> x = tf.constant(([1, 2, 3, 4]))
>>> tf.math.multiply(x, x)
<tf.Tensor: shape=(4,), dtype=..., numpy=array([ 1, 4, 9, 16], dtype=int32)>
Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can also
pass in non-`Tensor` arguments:
>>> tf.math.multiply(7,6)
<tf.Tensor: shape=(), dtype=int32, numpy=42>
If `x.shape` is not the same as `y.shape`, they will be broadcast to a
compatible shape. (More about broadcasting
[here](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).)
For example:
>>> x = tf.ones([1, 2]);
>>> y = tf.ones([2, 1]);
>>> x * y # Taking advantage of operator overriding
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[1., 1.],
[1., 1.]], dtype=float32)>
The reduction version of this elementwise operation is `tf.math.reduce_prod`
Args:
x: A Tensor. Must be one of the following types: `bfloat16`,
`half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,
`int16`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
Raises:
* InvalidArgumentError: When `x` and `y` have incompatible shapes or types.
"""
return gen_math_ops.mul(x, y, name)
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
"2016-12-30",
"`tf.mul(x, y)` is deprecated; use `tf.math.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
return gen_math_ops.mul(x, y, name)
if gen_math_ops.mul.__doc__ is not None:
_mul.__doc__ = gen_math_ops.mul.__doc__ + (
"" if _mul.__doc__ is None else _mul.__doc__
)
@tf_export("math.subtract", "subtract")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def subtract(x, y, name=None):
return gen_math_ops.sub(x, y, name)
subtract.__doc__ = gen_math_ops.sub.__doc__
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
"2016-12-30",
"`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
return gen_math_ops.sub(x, y, name)
if gen_math_ops.sub.__doc__ is not None:
_sub.__doc__ = gen_math_ops.sub.__doc__ + (
"" if _sub.__doc__ is None else _sub.__doc__
)
negative = gen_math_ops.neg
# pylint: disable=g-docstring-has-escape
@deprecation.deprecated(
"2016-12-30",
"`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
"""Computes numerical negative value element-wise.
I.e., \\(y = -x\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
return negative(x, name)
# pylint: enable=g-docstring-has-escape
@tf_export(v1=["math.scalar_mul", "scalar_mul"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def scalar_mul(scalar, x, name=None):
"""Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
This is a special case of `tf.math.multiply`, where the first value must be a
`scalar`. Unlike the general form of `tf.math.multiply`, this is operation is
guaranteed to be efficient for `tf.IndexedSlices`.
>>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3])
>>> with tf.GradientTape() as g:
... g.watch(x)
... y = tf.gather(x, [1, 2]) # IndexedSlices
... z = tf.math.scalar_mul(10.0, y)
Args:
scalar: A 0-D scalar `Tensor`. Must have known shape.
x: A `Tensor` or `IndexedSlices` to be scaled.
name: A name for the operation (optional).
Returns:
`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
ValueError: if scalar is not a 0-D `scalar`.
"""
base_dtype = dtypes.as_dtype(x.dtype).base_dtype
scalar = ops.convert_to_tensor(
scalar, dtype=base_dtype, name="scalar")
shape = scalar.get_shape()
if shape.ndims == 0:
if isinstance(x, indexed_slices.IndexedSlices):
return indexed_slices.IndexedSlices(
gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
else:
return gen_math_ops.mul(scalar, x, name)
else:
raise ValueError(
f"The input scalar must be a 0-D value. Received shape {shape}.")
@tf_export("math.softplus", "nn.softplus", v1=["math.softplus", "nn.softplus"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def softplus(features, name=None):
"""Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`.
`softplus` is a smooth approximation of `relu`. Like `relu`, `softplus` always
takes on positive values.
<img style="width:100%" src="https://www.tensorflow.org/images/softplus.png">
Example:
>>> import tensorflow as tf
>>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy()
array([0.6931472, 1.3132616], dtype=float32)
Args:
features: `Tensor`
name: Optional: name to associate with this operation.
Returns:
`Tensor`
"""
return gen_nn_ops.softplus(features, name)
@tf_export("math.scalar_mul", "scalar_mul", v1=[])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@_set_doc(scalar_mul.__doc__)
def scalar_mul_v2(scalar, x, name=None):
with ops.name_scope(name, "scalar_mul", [x]) as name:
return scalar_mul(scalar, x, name)
@tf_export("math.pow", "pow")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def pow(x, y, name=None): # pylint: disable=redefined-builtin
r"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
corresponding elements in `x` and `y`. For example:
```python
x = tf.constant([[2, 2], [3, 3]])
y = tf.constant([[8, 16], [2, 3]])
tf.pow(x, y) # [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
"""
with ops.name_scope(name, "Pow", [x]) as name:
return gen_math_ops._pow(x, y, name=name)
# pylint: disable=redefined-builtin,redefined-outer-name
@tf_export("dtypes.complex", "complex")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def complex(real, imag, name=None):
r"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```python
real = tf.constant([2.25, 3.25])
imag = tf.constant([4.75, 5.75])
tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
imag: A `Tensor`. Must have the same type as `real`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64` or `complex128`.
Raises:
TypeError: Real and imag must be correct types
"""
real = ops.convert_to_tensor(real, name="real")
imag = ops.convert_to_tensor(imag, name="imag")
with ops.name_scope(name, "Complex", [real, imag]) as name:
input_types = (real.dtype, imag.dtype)
if input_types == (dtypes.float64, dtypes.float64):
Tout = dtypes.complex128
elif input_types == (dtypes.float32, dtypes.float32):
Tout = dtypes.complex64
else:
raise TypeError(
f"The `real` and `imag` components have incorrect types: "
f"{real.dtype.name} {imag.dtype.name}. They must be consistent, and "
f"one of {[dtypes.float32, dtypes.float64]}")
return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
@tf_export("math.sign", "sign")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def sign(x, name=None):
r"""Returns an element-wise indication of the sign of a number.
`y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`.
For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`.
Example usage:
>>> # real number
>>> tf.math.sign([0., 2., -3.])
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([ 0., 1., -1.], dtype=float32)>
>>> # complex number
>>> tf.math.sign([1 + 1j, 0 + 0j])
<tf.Tensor: shape=(2,), dtype=complex128,
numpy=array([0.70710678+0.70710678j, 0. +0.j ])>
Args:
x: A Tensor. Must be one of the following types: bfloat16, half, float32,
float64, int32, int64, complex64, complex128.
name: A name for the operation (optional).
Returns:
A Tensor. Has the same type as x.
If x is a SparseTensor, returns SparseTensor(x.indices,
tf.math.sign(x.values, ...), x.dense_shape).
"""
x = ops.convert_to_tensor(x)
if x.dtype.is_complex:
return gen_math_ops.div_no_nan(
x,
cast(
gen_math_ops.complex_abs(
x,
Tout=dtypes.float32
if x.dtype == dtypes.complex64 else dtypes.float64),
dtype=x.dtype),
name=name)
return gen_math_ops.sign(x, name=name)
@tf_export("math.real", v1=["math.real", "real"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("real")
def real(input, name=None):
r"""Returns the real part of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the real part of each element in `input` considered as a complex number.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.math.real(x) # [-2.25, 3.25]
```
If `input` is already real, it is returned unchanged.
Args:
input: A `Tensor`. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Real", [input]) as name:
input = ops.convert_to_tensor(input, name="input")
if input.dtype.is_complex:
real_dtype = input.dtype.real_dtype
return gen_math_ops.real(input, Tout=real_dtype, name=name)
elif input.dtype.is_numeric:
return input
else:
raise TypeError(
"input must be a numeric tensor, but got tensor with dtype {}".format(
input.dtype
)
)
@tf_export("math.imag", v1=["math.imag", "imag"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("imag")
def imag(input, name=None):
r"""Returns the imaginary part of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the imaginary part of each element in `input` considered as a complex
number. If `input` is real, a tensor of all zeros is returned.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.math.imag(x) # [4.75, 5.75]
```
Args:
input: A `Tensor`. Must be one of the following types: `float`, `double`,
`complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Imag", [input]) as name:
input = ops.convert_to_tensor(input, name="input")
if input.dtype.is_complex:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
else:
return array_ops.zeros_like(input)
@tf_export("math.angle", v1=["math.angle", "angle"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("angle")
def angle(input, name=None):
r"""Returns the element-wise argument of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the argument of each element in `input` considered as a complex number.
The elements in `input` are considered to be complex numbers of the form
\\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
If `input` is real then *b* is zero by definition.
The argument returned by this function is of the form \\(atan2(b, a)\\).
If `input` is real, a tensor of all zeros is returned.
For example:
```
input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64)
tf.math.angle(input).numpy()
# ==> array([2.0131705, 1.056345 ], dtype=float32)
```
Args:
input: A `Tensor`. Must be one of the following types: `float`, `double`,
`complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Angle", [input]) as name:
input = ops.convert_to_tensor(input, name="input")
if input.dtype.is_complex:
return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
else:
return array_ops.where(input < 0, np.pi * array_ops.ones_like(input),
array_ops.zeros_like(input))
# pylint: enable=redefined-outer-name,redefined-builtin
@tf_export("math.round", "round")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def round(x, name=None):  # pylint: disable=redefined-builtin
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Uses banker's rounding (rounds half to even). If you want to round
  according to the current system rounding mode use tf::cint.

  For example:

  ```python
  x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
  tf.round(x)  # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  # Integer tensors are already integral; hand them back untouched.
  if x.dtype.is_integer:
    return x
  return gen_math_ops.round(x, name=name)
# TODO(mdan): Include a full_type argument to replace dtype.
# TODO(mdan): Include a full_type argument to replace dtype.
@tf_export("cast", "dtypes.cast")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.

  For example:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  Notice `tf.cast` has an alias `tf.dtypes.cast`:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.dtypes.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  The operation supports data types (for `x` and `dtype`) of
  `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
  `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
  In case of casting from complex types (`complex64`, `complex128`) to real
  types, only the real part of `x` is returned. In case of casting from real
  types to complex types (`complex64`, `complex128`), the imaginary part of the
  returned value is set to `0`. The handling of complex types here matches the
  behavior of numpy.

  Note casting nan and inf values to integral types has undefined behavior.

  Note this operation can lead to a loss of precision when converting native
  Python `float` and `complex` variables to `tf.float64` or `tf.complex128`
  tensors, since the input is first converted to the `float32` data type and
  then widened. It is recommended to use `tf.convert_to_tensor` instead of
  `tf.cast` for any non-tensor inputs.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could
      be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
      `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
      `bfloat16`.
    dtype: The destination type. The list of supported dtypes is the same as
      `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
    same type as `dtype`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  # Fast path: a Tensor (or resource variable) already of the target dtype is
  # returned as-is, without creating a name scope or a new op.
  if (
      isinstance(x, tensor_lib.Tensor) or _pywrap_utils.IsResourceVariable(x)
  ) and base_type == x.dtype:
    return x
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Sparse: cast only the stored values; indices/shape are unchanged.
      values_cast = cast(x.values, base_type, name=name)
      x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
    elif isinstance(x, indexed_slices.IndexedSlices):
      # IndexedSlices: likewise, cast only the values component.
      values_cast = cast(x.values, base_type, name=name)
      x = indexed_slices.IndexedSlices(values_cast, x.indices, x.dense_shape)
    else:
      # TODO(josh11b): If x is not already a Tensor, we could return
      # ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
      # allows some conversions that cast() can't do, e.g. casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype.is_complex and base_type.is_floating:
        # Complex -> real silently drops the imaginary part; warn the user.
        logging.warn(
            f"You are casting an input of type {x.dtype.name} to an "
            f"incompatible dtype {base_type.name}. This will "
            "discard the imaginary part and may not be what you "
            "intended."
        )
      if x.dtype != base_type:
        x = gen_math_ops.cast(x, base_type, name=name)
    return x
@tf_export("dtypes.saturate_cast", "saturate_cast")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without overflow. If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast. See `tf.cast` for more
  details.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    in_dtype = value.dtype
    if in_dtype.is_complex:
      if dtype.is_complex:
        # Clamp real and imag components separately, if required.
        real_in_dtype = in_dtype.real_dtype
        real_out_dtype = dtype.real_dtype
        if (
            real_in_dtype.min < real_out_dtype.min
            or real_in_dtype.max > real_out_dtype.max
        ):
          # Clip both components at once by clamping against complex bounds
          # whose real and imaginary parts are the output-range limits.
          value = gen_math_ops._clip_by_value(
              value,
              ops.convert_to_tensor(
                  builtins.complex(real_out_dtype.min, real_out_dtype.min),
                  dtype=in_dtype),
              ops.convert_to_tensor(
                  builtins.complex(real_out_dtype.max, real_out_dtype.max),
                  dtype=in_dtype),
              name="clamp")
        return cast(value, dtype, name=name)
      else:
        # Extract real component and fall through to clamp+cast.
        value = real(value)
        logging.warn("Casting complex to real discards imaginary part.")
        in_dtype = in_dtype.real_dtype
    # in_dtype is real, but out_dtype could be complex.
    out_real_dtype = dtype.real_dtype
    # TODO: b/288437118 - unconditionally apply `clip_by_value` to fix `inf`
    # behavior.
    if (
        forward_compat.forward_compatible(2024, 11, 1)
        or in_dtype.min < out_real_dtype.min
        or in_dtype.max > out_real_dtype.max
    ):
      # The output min/max may not actually be representable in the
      # in_dtype (e.g. casting float32 to uint32). This can lead to undefined
      # behavior when trying to cast a value outside the valid range of the
      # target type. We work around this by nudging the min/max to fall within
      # the valid output range. The catch is that we may actually saturate
      # to a value less than the true saturation limit, but this is the best we
      # can do in order to avoid UB without introducing a separate SaturateCast
      # op.
      np_dtype = in_dtype.as_numpy_dtype
      # We promote types *before* comparison in order to not lose precision.
      # The Try/Except block is mostly to work around bfloat16 types which are
      # not numpy dtypes.
      try:
        promoted_type = np.promote_types(
            np_dtype, out_real_dtype.as_numpy_dtype
        )
      except TypeError:
        # On newer numpy versions this is DTypePromotionError.
        # Fall back to just floats. This should be sufficient in most cases
        # since we only expect to hit this error in cases of bloat16.
        promoted_type = float
      # Lower clamp bound: if rounding into np_dtype pushed it below the true
      # output minimum, nudge one ULP toward zero so the bound is safe.
      min_limit = np_dtype(np.maximum(in_dtype.min, out_real_dtype.min))
      promoted = np.array([min_limit, out_real_dtype.min], dtype=promoted_type)
      if promoted[0] < promoted[1]:
        min_limit = np.nextafter(min_limit, np_dtype(0), dtype=np_dtype)
      # Upper clamp bound: symmetric treatment for the maximum.
      max_limit = np_dtype(np.minimum(float(in_dtype.max),
                                      float(out_real_dtype.max)))
      promoted = np.array([max_limit, out_real_dtype.max], dtype=promoted_type)
      if promoted[0] > promoted[1]:
        max_limit = np.nextafter(max_limit, np_dtype(0), dtype=np_dtype)
      value = gen_math_ops._clip_by_value(
          value,
          ops.convert_to_tensor(min_limit, dtype=in_dtype),
          ops.convert_to_tensor(max_limit, dtype=in_dtype),
          name="clamp",
      )
    return cast(value, dtype, name=name)
@tf_export(v1=["to_float"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.float32)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_float(tf.constant(3.14, dtype=tf.double))
  <tf.Tensor: shape=(), dtype=float32, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.double), tf.float32)
  <tf.Tensor: shape=(), dtype=float32, numpy=3.14>

  @end_compatibility
  """
  # Deprecated thin alias: delegate to the general cast().
  return cast(x, dtypes.float32, name=name)
@tf_export(v1=["to_double"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.double)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_double(tf.constant(3.14, dtype=tf.float32))
  <tf.Tensor: shape=(), dtype=float64, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.double)
  <tf.Tensor: shape=(), dtype=float64, numpy=3.14>

  @end_compatibility
  """
  # Deprecated thin alias: delegate to the general cast().
  return cast(x, dtypes.float64, name=name)
@tf_export(v1=["to_int32"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int32)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_int32(tf.constant(1, dtype=tf.int64))
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int64), tf.int32)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  @end_compatibility
  """
  # Deprecated thin alias: delegate to the general cast().
  return cast(x, dtypes.int32, name=name)
@tf_export(v1=["to_int64"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int64)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_int64(tf.constant(1, dtype=tf.int32))
  <tf.Tensor: shape=(), dtype=int64, numpy=1>

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int32), tf.int64)
  <tf.Tensor: shape=(), dtype=int64, numpy=1>

  @end_compatibility
  """
  # Deprecated thin alias: delegate to the general cast().
  return cast(x, dtypes.int64, name=name)
@tf_export(v1=["to_bfloat16"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.bfloat16)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_bfloat16(tf.constant(3.14, dtype=tf.float32))
  <tf.Tensor: shape=(), dtype=bfloat16, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.bfloat16)
  <tf.Tensor: shape=(), dtype=bfloat16, numpy=3.14>

  @end_compatibility
  """
  # Deprecated thin alias: delegate to the general cast().
  return cast(x, dtypes.bfloat16, name=name)
@tf_export(v1=["to_complex64"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_complex64(x, name="ToComplex64"):
  """Casts a tensor to type `complex64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex64`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex64)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_complex64(tf.constant(1. + 2.j, dtype=tf.complex128))
  <tf.Tensor: shape=(), dtype=complex64, numpy=(1+2j)>

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex128), tf.complex64)
  <tf.Tensor: shape=(), dtype=complex64, numpy=(1+2j)>

  @end_compatibility
  """
  # Deprecated thin alias: delegate to the general cast().
  return cast(x, dtypes.complex64, name=name)
@tf_export(v1=["to_complex128"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_complex128(x, name="ToComplex128"):
  """Casts a tensor to type `complex128`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex128`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex128`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex128)`. There are no further issues with eager
  execution or tf.function.

  Before:

  >>> tf.compat.v1.to_complex128(tf.constant(1. + 2.j, dtype=tf.complex64))
  <tf.Tensor: shape=(), dtype=complex128, numpy=(1+2j)>

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex64), tf.complex128)
  <tf.Tensor: shape=(), dtype=complex128, numpy=(1+2j)>

  @end_compatibility
  """
  # Deprecated thin alias: delegate to the general cast().
  return cast(x, dtypes.complex128, name=name)
# Conversion table for __truediv__. None entries mean no conversion required.
# 8/16-bit integers are widened to float32; 32/64-bit integers to float64
# (matching NumPy's true-division promotion). Floating and complex dtypes
# divide as-is.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.uint32: dtypes.float64,
    dtypes.int32: dtypes.float64,
    dtypes.uint64: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.bfloat16: None,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}
def _truediv_python3(x, y, name=None):
  """Implements Python 3 `/` (true division) semantics for tensors.

  Integer inputs are first cast to a floating dtype according to
  `_TRUEDIV_TABLE`; floating/complex inputs are divided unchanged.

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes, or if their dtype is not
      supported for true division.
  """
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError as e:
      # Chain the original KeyError for easier debugging; this also matches
      # the error-handling style used by `div_no_nan` below.
      raise TypeError(
          f"Invalid dtype {x_dtype!r} in __truediv__. Expected one "
          f"of {{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}."
      ) from e
    if dtype is not None:
      # Promote integers to the floating dtype prescribed by the table.
      x = cast(x, dtype)
      y = cast(y, dtype)
    return gen_math_ops.real_div(x, y, name=name)
def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics.

  Used for Tensor.__div__.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    # Python 2 `/`: integer operands floor-divide, everything else divides
    # exactly.
    if not (x_dtype.is_floating or x_dtype.is_complex):
      return gen_math_ops.floor_div(x, y, name=name)
    return gen_math_ops.real_div(x, y, name=name)
@tf_export("math.truediv", "truediv")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics where all integer
  arguments are cast to floating types first. If you want integer
  division that rounds down, use `x // y` or `tf.math.floordiv`.

  `x` and `y` must have the same numeric type. If the inputs are floating
  point, the output will have the same type. If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
  and `int64` (matching the behavior of Numpy).

  Example:

  >>> # Division with integer tensors (returns float)
  >>> x1 = tf.constant([10, 20, 30], dtype=tf.int32)
  >>> y1 = tf.constant([2, 4, 5], dtype=tf.int32)
  >>> result1 = tf.math.truediv(x1, y1)
  <tf.Tensor: shape=(3,), dtype=float64, numpy=array([5., 5., 6.])>

  >>> # Division with different shaped tensors (broadcasting)
  >>> x2 = tf.constant([[10, 20], [30, 40]], dtype=tf.float64)
  >>> y2 = tf.constant([2, 5], dtype=tf.float64)
  >>> result2 = tf.math.truediv(x2, y2)
  <tf.Tensor: shape=(2, 2),dtype=float64,numpy= array([[ 5.,  4.],[15.,  8.]])>

  # Handling potential division by zero (returns inf)
  >>> x3 = tf.constant(5, dtype=tf.float32)
  >>> y3 = tf.constant(0, dtype=tf.float32)
  >>> result3 = tf.math.truediv(x3, y3)
  <tf.Tensor: shape=(), dtype=float32, numpy=inf>

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  # Public wrapper: the actual conversion/division logic lives in
  # _truediv_python3.
  return _truediv_python3(x, y, name)
@tf_export(v1=["div"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(
    date=None,
    instructions="Deprecated in favor of operator or tf.math.divide.")
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  @compatibility(TF2)
  This function is deprecated in TF2. Prefer using the Tensor division operator,
  `tf.divide`, or `tf.math.divide`, which obey the Python 3 division operator
  semantics.
  @end_compatibility

  This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`
  and `y` are both integers then the result will be an integer. This is in
  contrast to Python 3, where division with `/` is always a float while division
  with `//` is always an integer.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  # Deprecated public wrapper around the Python-2-semantics helper.
  return _div_python2(x, y, name)
@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("div_no_nan")
def div_no_nan(x, y, name=None):
  """Computes a safe divide which returns 0 if `y` (denominator) is zero.

  For example:

  >>> tf.constant(3.0) / 0.0
  <tf.Tensor: shape=(), dtype=float32, numpy=inf>
  >>> tf.math.divide_no_nan(3.0, 0.0)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>

  Note that 0 is returned if `y` is 0 even if `x` is nonfinite:

  >>> tf.math.divide_no_nan(np.nan, 0.0)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>

  Args:
    x: A `Tensor` of a floating or integer dtype.
    y: A `Tensor` with the same dtype as `x` and a compatible shape.
    name: A name for the operation (optional).

  Returns:
    The element-wise quotient as in `tf.math.divide(x, y)`,
    except that division by zero produces `0.0`, not `nan`.
  """
  with ops.name_scope(name, "div_no_nan", [x, y]) as name:
    if not tensor_util.is_tf_type(x) and tensor_util.is_tf_type(y):
      # Treat this case specially like divide() does above.
      # When only `y` is already a tensor, let `y`'s dtype drive the
      # conversion of `x` so a plain Python number doesn't force a dtype.
      y = ops.convert_to_tensor(y, name="y")
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
    else:
      x = ops.convert_to_tensor(x, name="x")
      y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    try:
      # Reuse the true-division promotion table: integers divide in floating
      # point.
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError as e:
      raise TypeError(
          f"Invalid dtype {x_dtype!r} in tf.math.divide_no_nan. Expected one "
          f"of {{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}."
      ) from e
    if dtype is not None:
      x = cast(x, dtype)
      y = cast(y, dtype)
    return gen_math_ops.div_no_nan(x, y, name=name)
@tf_export("math.multiply_no_nan")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def multiply_no_nan(x, y, name=None):
  """Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite.

  Note this is noncommutative: if y is NaN or infinite and x is 0, the result
  will be NaN.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    y: A `Tensor` whose dtype is compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise value of the x times y.
  """
  with ops.name_scope(name, "multiply_no_nan", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # `y` is converted with x's dtype, so a mismatch below can only come from
    # `y` already being a tensor of a different dtype.
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}")
    return gen_math_ops.mul_no_nan(x, y, name=name)
def mod(x, y, name=None):
  r"""Returns element-wise remainder of division.

  This follows Python semantics in that the
  result here is consistent with a flooring divide. E.g.
  `floor(x / y) * y + floormod(x, y) = x`, regardless of the signs of x and y.

  *NOTE*: `math.floormod` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`,
      `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `bfloat16`, `half`,
      `float32`, `float64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "mod", [x, y]) as name:
    # Flooring modulo (sign follows the divisor), matching Python's `%`.
    return gen_math_ops.floor_mod(x, y, name=name)
@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding toward the most negative integer.

  Mathematically, this is equivalent to floor(x / y). For example:
    floor(8.4 / 4.0) = floor(2.1) = 2.0
    floor(-8.4 / 4.0) = floor(-2.1) = -3.0
  This is equivalent to the '//' operator in Python 3.0 and above.

  Note: `x` and `y` must have the same type, and the result will have the same
  type as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded toward -infinity.

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    # Thin wrapper: the kernel implements the flooring semantics.
    return gen_math_ops.floor_div(x, y, name=name)
# Module-level aliases for the generated division/modulo kernels, exposed
# without additional Python-side wrapping.
realdiv = gen_math_ops.real_div
truncatediv = gen_math_ops.truncate_div
floor_div = gen_math_ops.floor_div
truncatemod = gen_math_ops.truncate_mod
floormod = gen_math_ops.floor_mod
@tf_export("__operators__.add", v1=[])
@dispatch.add_dispatch_support
def _add_dispatch(x, y, name=None):
  """The operation invoked by the `Tensor.__add__` operator.

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__add__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    appear in TensorFlow's generated documentation.

  Args:
    x: The left-hand side of the `+` operator.
    y: The right-hand side of the `+` operator.
    name: an optional name for the operation.

  Returns:
    The result of the elementwise `+` operation.
  """
  if ops.is_auto_dtype_conversion_enabled():
    # With auto dtype conversion on, `add` handles all promotion itself.
    return add(x, y, name=name)
  if not isinstance(y, tensor_lib.Tensor) and not isinstance(
      y, sparse_tensor.SparseTensor):
    # Convert plain Python values using x's dtype as a hint.
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
  if x.dtype == dtypes.string:
    # String tensors use the legacy Add kernel (AddV2 does not support
    # strings).
    return gen_math_ops.add(x, y, name=name)
  else:
    return gen_math_ops.add_v2(x, y, name=name)
def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if not isinstance(y, sparse_tensor.SparseTensor):
    # Dense * Dense.
    return multiply(x, y, name=name)
  # Dense * Sparse: multiply only the stored values of `y` against `x`,
  # keeping `y`'s sparsity structure.
  new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                   y.dense_shape, x, name)
  return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("logical_xor")
def logical_xor(x, y, name="LogicalXor"):
  """Logical XOR function.

  x ^ y = (x | y) & ~(x & y)

  Requires that `x` and `y` have the same shape or have
  [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  shapes. For example, `x` and `y` can be:

  - Two single elements of type `bool`
  - One `tf.Tensor` of type `bool` and one single `bool`, where the result will
    be calculated by applying logical XOR with the single element to each
    element in the larger Tensor.
  - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
    the result will be the element-wise logical XOR of the two input tensors.

  Usage:

  >>> a = tf.constant([True])
  >>> b = tf.constant([False])
  >>> tf.math.logical_xor(a, b)
  <tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>

  >>> c = tf.constant([True])
  >>> x = tf.constant([False, True, True, False])
  >>> tf.math.logical_xor(c, x)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([ True, False, False,  True])>

  >>> y = tf.constant([False, False, True, True])
  >>> z = tf.constant([False, True, False, True])
  >>> tf.math.logical_xor(y, z)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>

  Args:
    x: A `tf.Tensor` type bool.
    y: A `tf.Tensor` of type bool.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.
  """
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  # Composed from the identity x ^ y = (x | y) & ~(x & y).
  either = gen_math_ops.logical_or(x, y)
  both = gen_math_ops.logical_and(x, y)
  return gen_math_ops.logical_and(
      either, gen_math_ops.logical_not(both), name=name)
def and_(x, y, name=None):
  """Implements `&`: logical AND for bools, bitwise AND otherwise."""
  if x.dtype == dtypes.bool:
    return gen_math_ops.logical_and(x, y, name)
  return gen_bitwise_ops.bitwise_and(x, y)
def or_(x, y, name=None):
  """Implements `|`: logical OR for bools, bitwise OR otherwise."""
  if x.dtype == dtypes.bool:
    return gen_math_ops.logical_or(x, y, name)
  return gen_bitwise_ops.bitwise_or(x, y)
def xor_(x, y, name=None):
  """Implements `^`: logical XOR for bools, bitwise XOR otherwise."""
  if x.dtype == dtypes.bool:
    return logical_xor(x, y, name)
  return gen_bitwise_ops.bitwise_xor(x, y)
def invert_(x, name=None):
  """Implements `~`: logical NOT for bools, bitwise inversion otherwise."""
  if x.dtype == dtypes.bool:
    return gen_math_ops.logical_not(x, name=name)
  return gen_bitwise_ops.invert(x, name=name)
@tf_export("math.equal", "equal")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def equal(x, y, name=None):
  """Returns the truth value of (x == y) element-wise.

  Performs a [broadcast](
  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
  arguments and then an element-wise equality comparison, returning a Tensor of
  boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  False])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>

  Args:
    x: A `tf.Tensor`.
    y: A `tf.Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
  """
  # Uses the kernel's default (strict) shape checking; contrast with
  # tensor_equals below, which passes incompatible_shape_error=False.
  return gen_math_ops.equal(x, y, name=name)
@tf_export("math.not_equal", "not_equal")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def not_equal(x, y, name=None):
  """Returns the truth value of (x != y) element-wise.

  Performs a [broadcast](
  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
  arguments and then an element-wise inequality comparison, returning a Tensor
  of boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  False])>

  Args:
    x: A `tf.Tensor`.
    y: A `tf.Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
  """
  # Uses the kernel's default (strict) shape checking; contrast with
  # tensor_not_equals below, which passes incompatible_shape_error=False.
  return gen_math_ops.not_equal(x, y, name=name)
@tf_export("__operators__.eq", v1=[])
@dispatch.add_dispatch_support
def tensor_equals(self, other):
  """The operation invoked by the `Tensor.__eq__` operator.

  Compares two tensors element-wise for equality if they are
  broadcast-compatible; or returns False if they are not broadcast-compatible.
  (Note that this behavior differs from `tf.math.equal`, which raises an
  exception if the two tensors are not broadcast-compatible.)

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__eq__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    appear in TensorFlow's generated documentation.

  Args:
    self: The left-hand side of the `==` operator.
    other: The right-hand side of the `==` operator.

  Returns:
    The result of the elementwise `==` operation, or `False` if the arguments
    are not broadcast-compatible.
  """
  if other is None:
    return False
  g = getattr(self, "graph", None)
  # Elementwise comparison only when tensor equality is enabled and we are
  # eager (or inside a function being built); legacy graph mode keeps the
  # historical identity semantics.
  if (
      tensor_lib.Tensor._USE_EQUALITY
      and ops.executing_eagerly_outside_functions()
      and (g is None or g.building_function)
  ):
    self, other = override_binary_operator.maybe_promote_tensors(self, other)
    # incompatible_shape_error=False makes mismatched shapes yield False
    # instead of raising.
    return gen_math_ops.equal(self, other, incompatible_shape_error=False)
  else:
    # In legacy graph mode, tensor equality is object equality
    return self is other
@tf_export("__operators__.ne", v1=[])
@dispatch.add_dispatch_support
def tensor_not_equals(self, other):
  """The operation invoked by the `Tensor.__ne__` operator.

  Compares two tensors element-wise for inequality if they are
  broadcast-compatible; or returns True if they are not broadcast-compatible.
  (Note that this behavior differs from `tf.math.not_equal`, which raises an
  exception if the two tensors are not broadcast-compatible.)

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__ne__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    appear in TensorFlow's generated documentation.

  Args:
    self: The left-hand side of the `!=` operator.
    other: The right-hand side of the `!=` operator.

  Returns:
    The result of the elementwise `!=` operation, or `True` if the arguments
    are not broadcast-compatible.
  """
  if other is None:
    return True
  # NOTE(review): unlike tensor_equals, this branch does not additionally
  # check `graph.building_function` — confirm whether the asymmetry is
  # intentional.
  if (
      tensor_lib.Tensor._USE_EQUALITY
      and ops.executing_eagerly_outside_functions()
  ):
    self, other = override_binary_operator.maybe_promote_tensors(self, other)
    # incompatible_shape_error=False makes mismatched shapes yield True
    # instead of raising.
    return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
  else:
    # In legacy graph mode, tensor equality is object equality
    return self is not other
@tf_export("range")
@dispatch.add_dispatch_support
def range(start, limit=None, delta=1, dtype=None, name="range"):  # pylint: disable=redefined-builtin
  """Creates a sequence of numbers.

  Creates a sequence of numbers that begins at `start` and extends by
  increments of `delta` up to but not including `limit`.

  The dtype of the resulting tensor is inferred from the inputs unless
  it is provided explicitly.

  Like the Python builtin `range`, `start` defaults to 0, so that
  `range(n) = range(0, n)`.

  For example:

  >>> start = 3
  >>> limit = 18
  >>> delta = 3
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([ 3,  6,  9, 12, 15], dtype=int32)>

  >>> start = 3
  >>> limit = 1
  >>> delta = -0.5
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(4,), dtype=float32,
  numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>

  >>> limit = 5
  >>> tf.range(limit)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([0, 1, 2, 3, 4], dtype=int32)>

  Args:
    start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit`
      is not None; otherwise, acts as range limit and first entry defaults to 0.
    limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,
      defaults to the value of `start` while the first entry of the range
      defaults to 0.
    delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to
      1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    An 1-D `Tensor` of type `dtype`.

  @compatibility(numpy)
  Equivalent to np.arange
  @end_compatibility
  """
  # Single-argument form: range(n) == range(0, n).
  if limit is None:
    start, limit = 0, start
  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    if not isinstance(start, tensor_lib.Tensor):
      start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    if not isinstance(limit, tensor_lib.Tensor):
      limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    if not isinstance(delta, tensor_lib.Tensor):
      delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")

    # infer dtype if not explicitly provided
    if dtype is None:
      # Widest argument dtype wins, per this fixed promotion order.
      dtype_hierarchy = [
          dtypes.int32,
          dtypes.int64,
          dtypes.float16,
          dtypes.bfloat16,
          dtypes.float32,
          dtypes.float64,
      ]
      # NOTE(review): `assert` is stripped under `python -O`; unsupported
      # dtypes would then surface later as a ValueError from `.index`.
      assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
      inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
                           key=dtype_hierarchy.index)
    else:
      inferred_dtype = dtype
    # Always try to perform a cast even when start/limit/delta are already
    # tensors. This will resolve the case where start/limit/delta's original's
    # dtype is different from provided dtype.
    start = cast(start, inferred_dtype)
    limit = cast(limit, inferred_dtype)
    delta = cast(delta, inferred_dtype)
    return gen_math_ops._range(start, limit, delta, name=name)
def _range_tensor_conversion_function(value, dtype=None, name=None,
                                      as_ref=False):
  """Tensor-conversion hook: turn a builtin `range` into a 1-D tensor."""
  del as_ref  # unused: a freshly created tensor is never a reference
  start, stop, step = value.start, value.stop, value.step
  return range(start, stop, step, dtype=dtype, name=name)
# Let `ops.convert_to_tensor` accept Python builtin `range` objects by
# routing them through the conversion hook above.
tensor_conversion_registry.register_tensor_conversion_function(
    builtins.range, _range_tensor_conversion_function)
# Reduction operations
def _ReductionDims(x, axis):  # pylint: disable=invalid-name
  """Returns range(0, rank(x)) if axis is None."""
  # Explicit axis: pass it through untouched.
  if axis is not None:
    return axis
  # `getattr` with a default swallows AttributeError the same way the
  # previous try/except did, covering objects without `.shape` or `.rank`.
  x_rank = getattr(getattr(x, "shape", None), "rank", None)
  # Fast path: avoid creating Rank and Range ops if ndims is known.
  if x_rank:
    return constant_op.constant(np.arange(x_rank, dtype=np.int32))
  # Otherwise, we rely on Range and Rank to do the right thing at run-time.
  return range(0, array_ops.rank(x))
def _has_fully_defined_shape(tensor):
  """Returns true if tensor has a fully defined shape."""
  # Eager tensors always carry a concrete, fully-known shape.
  if isinstance(tensor, ops.EagerTensor):
    return True
  return tensor.shape.is_fully_defined()
def _may_reduce_to_scalar(keepdims, axis, output):
  """Set a reduction's output shape to be a scalar if we are certain."""
  # Reducing over all axes without keepdims must produce a scalar; record
  # that statically when the shape isn't already fully defined.
  collapses_to_scalar = (not _has_fully_defined_shape(output)
                         and not keepdims
                         and axis is None)
  if collapses_to_scalar:
    output.set_shape(())
  return output
@tf_export(v1=["math.reduce_sum", "reduce_sum"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_sum_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the sum of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.add` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> # x has a shape of (2, 3) (two rows and three columns):
  >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
  >>> x.numpy()
  array([[1, 1, 1],
         [1, 1, 1]], dtype=int32)
  >>> # sum all the elements
  >>> # 1 + 1 + 1 + 1 + 1+ 1 = 6
  >>> tf.reduce_sum(x).numpy().item()
  6
  >>> # reduce along the first dimension
  >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
  >>> tf.reduce_sum(x, 0).numpy()
  array([2, 2, 2], dtype=int32)
  >>> # reduce along the second dimension
  >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
  >>> tf.reduce_sum(x, 1).numpy()
  array([3, 3], dtype=int32)
  >>> # keep the original dimensions
  >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
  array([[3],
         [3]], dtype=int32)
  >>> # reduce along both dimensions
  >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
  >>> # or, equivalently, reduce along rows, then reduce the resultant array
  >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
  >>> # 2 + 2 + 2 = 6
  >>> tf.reduce_sum(x, [0, 1]).numpy().item()
  6

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
  int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  """
  # Resolve each deprecated alias against its replacement (raises if both
  # are supplied), then delegate to the v2 implementation.
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_sum(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_sum", "reduce_sum", v1=[])
@dispatch.add_dispatch_support
def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the sum of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.add` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> # x has a shape of (2, 3) (two rows and three columns):
  >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
  >>> x.numpy()
  array([[1, 1, 1],
         [1, 1, 1]], dtype=int32)
  >>> # sum all the elements
  >>> # 1 + 1 + 1 + 1 + 1+ 1 = 6
  >>> tf.reduce_sum(x).numpy().item()
  6
  >>> # reduce along the first dimension
  >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
  >>> tf.reduce_sum(x, 0).numpy()
  array([2, 2, 2], dtype=int32)
  >>> # reduce along the second dimension
  >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
  >>> tf.reduce_sum(x, 1).numpy()
  array([3, 3], dtype=int32)
  >>> # keep the original dimensions
  >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
  array([[3],
         [3]], dtype=int32)
  >>> # reduce along both dimensions
  >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
  >>> # or, equivalently, reduce along rows, then reduce the resultant array
  >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
  >>> # 2 + 2 + 2 = 6
  >>> tf.reduce_sum(x, [0, 1]).numpy().item()
  6

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
  int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  """
  # Resolve `axis=None` to the full dimension range up front so the helper
  # can feed it straight to the underlying Sum op.
  return reduce_sum_with_dims(input_tensor, axis, keepdims, name,
                              _ReductionDims(input_tensor, axis))
def reduce_sum_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
  """Sums `input_tensor` over pre-resolved reduction `dims`.

  `dims` is the already-computed reduction-dimension tensor (see
  `_ReductionDims`); `axis` is only used to decide whether the result can be
  statically marked as a scalar.
  """
  keepdims = bool(keepdims) if keepdims is not None else False
  summed = gen_math_ops._sum(input_tensor, dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, summed)
@tf_export("math.reduce_euclidean_norm")
@dispatch.add_dispatch_support
def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the Euclidean norm of elements across dimensions of a tensor.

  Reduces `input_tensor` over the dimensions listed in `axis` (which must be
  unique). Each reduced dimension drops out of the result unless `keepdims`
  is true, in which case it is kept with length 1. With `axis=None` every
  dimension is reduced and a single-element tensor is returned.

  For example:

  ```python
  x = tf.constant([[1, 2, 3], [1, 1, 1]]) # x.dtype is tf.int32
  tf.math.reduce_euclidean_norm(x) # returns 4 as dtype is tf.int32
  y = tf.constant([[1, 2, 3], [1, 1, 1]], dtype = tf.float32)
  tf.math.reduce_euclidean_norm(y) # returns 4.1231055 which is sqrt(17)
  tf.math.reduce_euclidean_norm(y, 0) # [sqrt(2), sqrt(5), sqrt(10)]
  tf.math.reduce_euclidean_norm(y, 1) # [sqrt(14), sqrt(3)]
  tf.math.reduce_euclidean_norm(y, 1, keepdims=True) # [[sqrt(14)], [sqrt(3)]]
  tf.math.reduce_euclidean_norm(y, [0, 1]) # sqrt(17)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.
  """
  keepdims = bool(keepdims)
  dims = _ReductionDims(input_tensor, axis)
  norm = gen_math_ops.euclidean_norm(input_tensor, dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, norm)
@tf_export(v1=["math.count_nonzero", "count_nonzero"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
@deprecation.deprecated_args(
    None, "reduction_indices is deprecated, use axis instead",
    "reduction_indices")
def count_nonzero(input_tensor=None,
                  axis=None,
                  keepdims=None,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None,
                  input=None):  # pylint: disable=redefined-builtin
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check. Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against zero-length empty string `""`. Any
  string with a size greater than zero is already considered as nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x) # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or
      `string`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.
    input: Overrides input_tensor. For compatibility.

  Returns:
    The reduced tensor (number of nonzero values).
  """
  # Resolve each deprecated alias against its replacement (raises if both
  # are supplied), then delegate to the v2 implementation.
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  input_tensor = deprecation.deprecated_argument_lookup("input", input,
                                                        "input_tensor",
                                                        input_tensor)
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)
@tf_export("math.count_nonzero", v1=[])
@dispatch.add_dispatch_support
def count_nonzero_v2(
    input,  # pylint: disable=redefined-builtin
    axis=None,
    keepdims=None,
    dtype=dtypes.int64,
    name=None):
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input` over the dimensions listed in `axis`. Each reduced
  dimension drops out of the result unless `keepdims` is true, in which case
  it is kept with length 1. With no `axis` entries, every dimension is
  reduced and a single-element tensor is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check. Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against zero-length empty string `""`. Any
  string with a size greater than zero is already considered as nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x) # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input: The tensor to reduce. Should be of numeric type, `bool`, or `string`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input), rank(input))`.
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor (number of nonzero values).
  """
  if keepdims is None:
    keepdims = False
  with ops.name_scope(name, "count_nonzero", [input]):
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype == dtypes.bool:
      # Boolean input already encodes the "is nonzero" predicate directly.
      predicate = input
    else:
      # A scalar of 'zero' is enough as `not_equal` will broadcast.
      zero = array_ops.zeros([], dtype=input.dtype)
      predicate = gen_math_ops.not_equal(input, zero)
    # Sum the predicate in int64 (the int64 reduction runs on GPU), then
    # cast to the caller-requested output dtype.
    counts = reduce_sum(
        cast(predicate, dtypes.int64), axis=axis, keepdims=keepdims)
    return cast(counts, dtype=dtype)
@tf_export(v1=["math.reduce_mean", "reduce_mean"])
@dispatch.add_dispatch_support
def reduce_mean_v1(input_tensor,
                   axis=None,
                   keepdims=None,
                   name=None,
                   reduction_indices=None,
                   keep_dims=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis` by computing the
  mean of elements across the dimensions in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that `np.mean` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
  for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>

  @end_compatibility
  """
  # Resolve each deprecated alias against its replacement (raises if both
  # are supplied), then delegate to the v2 implementation.
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_mean(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_mean", "reduce_mean", v1=[])
@dispatch.add_dispatch_support
def reduce_mean(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis` by computing the
  mean of elements across the dimensions in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that `np.mean` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
  for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>

  @end_compatibility
  """
  # Normalize keepdims (None behaves like False) before handing off to the
  # underlying Mean op.
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops.mean(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))
@tf_export("math.reduce_variance")
@dispatch.add_dispatch_support
def reduce_variance(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the variance of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_variance(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.25>
  >>> tf.math.reduce_variance(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], ...)>
  >>> tf.math.reduce_variance(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.25, 0.25], ...)>

  Args:
    input_tensor: The tensor to reduce. Should have real or complex type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name scope for the associated operations (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor. Note,  for
    `complex64` or `complex128` input, the returned `Tensor` will be of type
    `float32` or `float64`, respectively.

  @compatibility(numpy)
  Equivalent to np.var

  Please note `np.var` has a `dtype` parameter that could be used to specify the
  output type. By default this is `dtype=float64`. On the other hand,
  `tf.math.reduce_variance` has aggressive type inference from `input_tensor`.
  @end_compatibility
  """
  name = name if name else "reduce_variance"
  with ops.name_scope(name):
    input_tensor = ops.convert_to_tensor(input_tensor)
    # Means are computed with keepdims=True so they broadcast against the
    # (unreduced) input when forming the deviations below.
    means = reduce_mean(input_tensor, axis=axis, keepdims=True)
    if means.dtype.is_integer:
      raise TypeError(f"Input must be either real or complex. "
                      f"Received integer type {means.dtype}.")
    diff = input_tensor - means
    if diff.dtype.is_complex:
      # For complex values we need to take the absolute value before squaring.
      # This is achieved by multiplying with the conjugate.
      real_dtype = diff.dtype.real_dtype
      squared_deviations = gen_math_ops.real(
          gen_math_ops.mul(gen_math_ops.conj(diff), diff), Tout=real_dtype)
    else:
      squared_deviations = gen_math_ops.square(diff)
    # Variance is the mean of the squared deviations; this second reduction
    # honors the caller's keepdims.
    return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)
@tf_export("math.reduce_std")
@dispatch.add_dispatch_support
def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the standard deviation of elements across dimensions of a tensor.

  Reduces `input_tensor` over the dimensions listed in `axis` (which must be
  unique). Each reduced dimension drops out of the result unless `keepdims`
  is true, in which case it is kept with length 1. With `axis=None` every
  dimension is reduced and a single-element tensor is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_std(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.118034>
  >>> tf.math.reduce_std(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>
  >>> tf.math.reduce_std(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.5], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have real or complex type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name scope for the associated operations (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor. Note,  for
    `complex64` or `complex128` input, the returned `Tensor` will be of type
    `float32` or `float64`, respectively.

  @compatibility(numpy)
  Equivalent to np.std

  Please note `np.std` has a `dtype` parameter that could be used to specify the
  output type. By default this is `dtype=float64`. On the other hand,
  `tf.math.reduce_std` has aggressive type inference from `input_tensor`.
  @end_compatibility
  """
  # Standard deviation is simply the square root of the variance.
  with ops.name_scope(name or "reduce_std"):
    input_tensor = ops.convert_to_tensor(input_tensor)
    return gen_math_ops.sqrt(
        reduce_variance(input_tensor, axis=axis, keepdims=keepdims))
@tf_export("math.reduce_prod", "reduce_prod", v1=[])
@dispatch.add_dispatch_support
def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
  """Computes `tf.math.multiply` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.multiply` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_prod(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=24.>
  >>> tf.math.reduce_prod(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
  >>> tf.math.reduce_prod(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
  dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  # Normalize keepdims (None behaves like False) before handing off to the
  # underlying Prod op.
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops.prod(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))
@tf_export(v1=["math.reduce_prod", "reduce_prod"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_prod_v1(input_tensor,
                   axis=None,
                   keepdims=None,
                   name=None,
                   reduction_indices=None,
                   keep_dims=None):
  """Computes `tf.math.multiply` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.multiply` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_prod(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=24.>
  >>> tf.math.reduce_prod(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
  >>> tf.math.reduce_prod(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
  dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  # Resolve each deprecated alias against its replacement (raises if both
  # are supplied), then delegate to the v2 implementation.
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_prod(input_tensor, axis, keepdims, name)
@tf_export(v1=["math.reduce_min", "reduce_min"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_min_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the `tf.math.minimum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.minimum` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  Usage example:

  >>> x = tf.constant([5, 1, 2, 4])
  >>> tf.reduce_min(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>
  >>> x = tf.constant([-5, -1, -2, -4])
  >>> tf.reduce_min(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=-5>
  >>> x = tf.constant([4, float('nan')])
  >>> tf.reduce_min(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=nan>
  >>> x = tf.constant([float('nan'), float('nan')])
  >>> tf.reduce_min(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=nan>
  >>> x = tf.constant([float('-inf'), float('inf')])
  >>> tf.reduce_min(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=-inf>

  See the numpy docs for `np.amin` and `np.nanmin` behavior.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  """
  # Resolve each deprecated alias against its replacement (raises if both
  # are supplied), then delegate to the v2 implementation.
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_min(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_min", "reduce_min", v1=[])
@dispatch.add_dispatch_support
def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the `tf.math.minimum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.minimum` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> a = tf.constant([
  ...   [[1, 2], [3, 4]],
  ...   [[1, 2], [3, 4]]
  ... ])
  >>> tf.reduce_min(a)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  Choosing a specific axis returns minimum element in the given axis:

  >>> b = tf.constant([[1, 2, 3], [4, 5, 6]])
  >>> tf.reduce_min(b, axis=0)
  <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 2, 3], dtype=int32)>
  >>> tf.reduce_min(b, axis=1)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 4], dtype=int32)>

  Setting `keepdims` to `True` retains the dimension of `input_tensor`:

  >>> tf.reduce_min(a, keepdims=True)
  <tf.Tensor: shape=(1, 1, 1), dtype=int32, numpy=array([[[1]]], dtype=int32)>
  >>> tf.math.reduce_min(a, axis=0, keepdims=True)
  <tf.Tensor: shape=(1, 2, 2), dtype=int32, numpy=
  array([[[1, 2],
          [3, 4]]], dtype=int32)>

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.min
  @end_compatibility
  """
  # Normalize keepdims (None behaves like False) before handing off to the
  # underlying Min op.
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._min(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))
@tf_export(v1=["math.reduce_max", "reduce_max"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_max_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes `tf.math.maximum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.maximum` op.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the reduced
  dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  See the numpy docs for `np.amax` and `np.nanmax` behavior.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  """
  # Resolve each deprecated alias against its modern spelling, then forward
  # to the v2 implementation.
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  return reduce_max(input_tensor, axis=axis, keepdims=keepdims, name=name)
@tf_export("math.reduce_max", "reduce_max", v1=[])
@dispatch.add_dispatch_support
def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
  """Computes `tf.math.maximum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.maximum` op.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the reduced
  dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  Usage example:

  >>> x = tf.constant([5, 1, 2, 4])
  >>> tf.reduce_max(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=5>

  See the numpy docs for `np.amax` and `np.nanmax` behavior.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  # Pre-compute the reduction dims once and delegate to the shared helper.
  dims = _ReductionDims(input_tensor, axis)
  return reduce_max_with_dims(
      input_tensor, axis=axis, keepdims=keepdims, name=name, dims=dims)
def reduce_max_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
  """Common `reduce_max` implementation taking pre-computed reduction dims.

  `dims` is the tensor produced by `_ReductionDims(input_tensor, axis)`, as
  passed by `reduce_max`.
  """
  # Normalize keepdims to a plain bool (None counts as False).
  keepdims = bool(keepdims) if keepdims is not None else False
  maximum = gen_math_ops._max(  # pylint: disable=protected-access
      input_tensor, dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, maximum)
@tf_export(v1=["math.reduce_all", "reduce_all"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_all_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes `tf.math.logical_and` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.logical_and` op.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the reduced
  dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  # Resolve each deprecated alias against its modern spelling, then forward
  # to the v2 implementation.
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  return reduce_all(input_tensor, axis=axis, keepdims=keepdims, name=name)
@tf_export("math.reduce_all", "reduce_all", v1=[])
@dispatch.add_dispatch_support
def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
  """Computes `tf.math.logical_and` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.logical_and` op.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the reduced
  dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[True, True], [False, False]])
  >>> tf.math.reduce_all(x)
  <tf.Tensor: shape=(), dtype=bool, numpy=False>

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  # Normalize keepdims to a plain bool (None counts as False).
  keepdims = bool(keepdims) if keepdims is not None else False
  reduction_dims = _ReductionDims(input_tensor, axis)
  conjunction = gen_math_ops._all(  # pylint: disable=protected-access
      input_tensor, reduction_dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, conjunction)
@tf_export(v1=["math.reduce_any", "reduce_any"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_any_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes `tf.math.logical_or` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.logical_or` op.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the reduced
  dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  # Resolve each deprecated alias against its modern spelling, then forward
  # to the v2 implementation.
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  return reduce_any(input_tensor, axis=axis, keepdims=keepdims, name=name)
@tf_export("math.reduce_any", "reduce_any", v1=[])
@dispatch.add_dispatch_support
def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
  """Computes `tf.math.logical_or` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.logical_or` op.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the reduced
  dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[True, True], [False, False]])
  >>> tf.reduce_any(x)
  <tf.Tensor: shape=(), dtype=bool, numpy=True>

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  # Normalize keepdims to a plain bool (None counts as False).
  keepdims = bool(keepdims) if keepdims is not None else False
  reduction_dims = _ReductionDims(input_tensor, axis)
  disjunction = gen_math_ops._any(  # pylint: disable=protected-access
      input_tensor, reduction_dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, disjunction)
@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_logsumexp_v1(input_tensor,
                        axis=None,
                        keepdims=None,
                        name=None,
                        reduction_indices=None,
                        keep_dims=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the reduced
  dimensions are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a tensor with a
  single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It
  avoids overflows caused by taking the exp of large inputs and underflows
  caused by taking the log of small inputs.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  """
  # Resolve each deprecated alias against its modern spelling, then forward
  # to the v2 implementation.
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  return reduce_logsumexp(input_tensor, axis=axis, keepdims=keepdims,
                          name=name)
@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[])
@dispatch.add_dispatch_support
def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the reduced
  dimensions are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a tensor with a
  single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It
  avoids overflows caused by taking the exp of large inputs and underflows
  caused by taking the log of small inputs.

  For example:

  ```python
  x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
  tf.reduce_logsumexp(x)  # log(6)
  tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1])  # log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
    # Shift the inputs by the per-slice maximum so exp() cannot overflow.
    # Non-finite maxima are replaced by 0 so the shift stays well-defined,
    # and stop_gradient keeps the shift itself out of the backward pass.
    slice_max = reduce_max(input_tensor, axis=axis, keepdims=True)
    shift = array_ops.stop_gradient(
        gen_math_ops.select_v2(
            gen_math_ops.is_finite(slice_max), slice_max, 0))
    summed = reduce_sum(
        exp(subtract(input_tensor, shift)), axis=axis, keepdims=keepdims)
    result = gen_math_ops.log(summed)
    if not keepdims:
      # Align the (keepdims=True) shift with the reduced result shape before
      # adding it back.
      shift = array_ops.reshape(shift, gen_array_ops.shape(result))
    result = add(result, shift, name=name)
    return _may_reduce_to_scalar(keepdims, axis, result)
@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("trace")
def trace(x, name=None):
  """Compute the trace of a tensor `x`.

  `trace(x)` returns the sum along the main diagonal of each inner-most
  matrix in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then
  output is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where

  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`

  For example:

  ```python
  x = tf.constant([[1, 2], [3, 4]])
  tf.linalg.trace(x)  # 5
  ```

  Args:
    x: tensor.
    name: A name for the operation (optional).

  Returns:
    The trace of input tensor.
  """
  with ops.name_scope(name, "Trace", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # Extract each inner-most matrix's main diagonal, then sum it.
    main_diagonals = array_ops.matrix_diag_part(x)
    return reduce_sum(main_diagonals, [-1], name=name)
@tf_export("linalg.matmul", "matmul")
@dispatch.add_dispatch_support
def matmul(
    a,
    b,
    transpose_a=False,
    transpose_b=False,
    adjoint_a=False,
    adjoint_b=False,
    a_is_sparse=False,
    b_is_sparse=False,
    output_type=None,
    grad_a=False,
    grad_b=False,
    name=None,
):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
  The inputs must, following any transpositions, be tensors of rank >= 2
  where the inner 2 dimensions specify valid matrix multiplication dimensions,
  and any further outer dimensions specify matching batch size.
  Both matrices must be of the same type. The supported types are:
  `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`,
  `complex64`, `complex128`.
  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flag to `True`. These are `False`
  by default.
  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
  This optimization is only available for plain matrices (rank-2 tensors) with
  datatypes `bfloat16` or `float32`.
  A simple 2-D tensor matrix multiplication:
  >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
  >>> a  # 2-D tensor
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[1, 2, 3],
         [4, 5, 6]], dtype=int32)>
  >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
  >>> b  # 2-D tensor
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
  array([[ 7,  8],
         [ 9, 10],
         [11, 12]], dtype=int32)>
  >>> c = tf.matmul(a, b)
  >>> c  # `a` * `b`
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[ 58,  64],
         [139, 154]], dtype=int32)>
  A batch matrix multiplication with batch shape [2]:
  >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
  >>> a  # 3-D tensor
  <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=
  array([[[ 1,  2,  3],
          [ 4,  5,  6]],
         [[ 7,  8,  9],
          [10, 11, 12]]], dtype=int32)>
  >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])
  >>> b  # 3-D tensor
  <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
  array([[[13, 14],
          [15, 16],
          [17, 18]],
         [[19, 20],
          [21, 22],
          [23, 24]]], dtype=int32)>
  >>> c = tf.matmul(a, b)
  >>> c  # `a` * `b`
  <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
  array([[[ 94, 100],
          [229, 244]],
         [[508, 532],
          [697, 730]]], dtype=int32)>
  Since python >= 3.5 the @ operator is supported
  (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,
  it simply calls the `tf.matmul()` function, so the following lines are
  equivalent:
  >>> d = a @ b @ [[10], [11]]
  >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])
  Args:
    a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,
      `complex64`, `complex128` and rank > 1.
    b: `tf.Tensor` with same type and rank as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    adjoint_b: If `True`, `b` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this
      **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
      that assume most values in `a` are zero. See
      `tf.sparse.sparse_dense_matmul` for some support for
      `tf.sparse.SparseTensor` multiplication.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this
      **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
      that assume most values in `b` are zero. See
      `tf.sparse.sparse_dense_matmul` for some support for
      `tf.sparse.SparseTensor` multiplication.
    output_type: The output datatype if needed. Defaults to None in which case
      the output_type is the same as input type. Currently only works when input
      tensors are type (u)int8 and output_type can be int32.
    grad_a: Set it to `True` to hint that Tensor `a` is for the backward pass.
    grad_b: Set it to `True` to hint that Tensor `b` is for the backward pass.
    name: Name for the operation (optional).
  Returns:
    A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix
    is the product of the corresponding matrices in `a` and `b`, e.g. if all
    transpose or adjoint attributes are `False`:
    `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,
    for all indices `i`, `j`.
    Note: This is matrix product, not element-wise product.
  Raises:
    ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and
      `adjoint_b` are both set to `True`.
    TypeError: If output_type is specified but the types of `a`, `b` and
      `output_type` is not (u)int8, (u)int8 and int32.
  """
  with ops.name_scope(name, "MatMul", [a, b]) as name:
    # `transpose_*` and `adjoint_*` are mutually exclusive per operand.
    if transpose_a and adjoint_a:
      raise ValueError(
          f"Only one of `transpose_a` and `adjoint_a` can be True. "
          f"Received `transpose_a`={transpose_a}, "
          f"`adjoint_a`={adjoint_a}.")
    if transpose_b and adjoint_b:
      raise ValueError(
          f"Only one of `transpose_b` and `adjoint_b` can be True. "
          f"Received `transpose_b`={transpose_b}, "
          f"`adjoint_b`={adjoint_b}.")
    if context.executing_eagerly():
      # In eager mode, skip conversion for values that are already usable
      # as-is to avoid the convert_to_tensor overhead.
      if not (
          isinstance(a, ops.EagerTensor) or _pywrap_utils.IsResourceVariable(a)
      ):
        a = ops.convert_to_tensor(a, name="a")
      # NOTE(review): this condition is not symmetric with the one for `a`
      # above (no parentheses around the `or`): a resource variable `b` IS
      # converted here, while a resource variable `a` is not. Confirm whether
      # that asymmetry is intentional before touching it.
      if not isinstance(b, ops.EagerTensor) or _pywrap_utils.IsResourceVariable(
          b):
        b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
    else:
      a = ops.convert_to_tensor(a, name="a")
      b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
    # TODO(apassos) remove _shape_tuple here when it is not needed.
    a_shape = a._shape_tuple()  # pylint: disable=protected-access
    b_shape = b._shape_tuple()  # pylint: disable=protected-access
    # An unknown static shape (None) is treated conservatively as possibly
    # batched (rank > 2).
    output_may_have_non_empty_batch_shape = (
        (a_shape is None or len(a_shape) > 2) or
        (b_shape is None or len(b_shape) > 2))
    # TODO(b/178749687): remove this boolean and all related branches once the
    # bridges are ready.
    # batch_matmul_v3 is for when input type is different from output type.
    use_batch_matmul_v3 = False
    if output_type and (output_type != a.dtype or output_type != b.dtype):
      use_batch_matmul_v3 = True
    # Batched (or possibly-batched) dense inputs go through BatchMatMul.
    if (not a_is_sparse and
        not b_is_sparse) and output_may_have_non_empty_batch_shape:
      # BatchMatmul does not support transpose, so we conjugate the matrix and
      # use adjoint instead. Conj() is a noop for real matrices.
      if transpose_a:
        a = conj(a)
        adjoint_a = True
      if transpose_b:
        b = conj(b)
        adjoint_b = True
      if use_batch_matmul_v3:
        return gen_math_ops.batch_mat_mul_v3(
            a,
            b,
            adj_x=adjoint_a,
            adj_y=adjoint_b,
            Tout=output_type,
            grad_x=grad_a,
            grad_y=grad_b,
            name=name,
        )
      else:
        return gen_math_ops.batch_mat_mul_v2(
            a,
            b,
            adj_x=adjoint_a,
            adj_y=adjoint_b,
            grad_x=grad_a,
            grad_y=grad_b,
            name=name,
        )
    # Neither matmul nor sparse_matmul support adjoint, so we conjugate
    # the matrix and use transpose instead. Conj() is a noop for real
    # matrices.
    if adjoint_a:
      a = conj(a)
      transpose_a = True
    if adjoint_b:
      b = conj(b)
      transpose_b = True
    # Decide whether the legacy sparse_matmul kernel applies: either the
    # caller hinted sparsity with supported dtypes, or we hit a
    # mixed-precision bfloat16 case that mat_mul cannot handle.
    use_sparse_matmul = False
    if a_is_sparse or b_is_sparse:
      sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
      use_sparse_matmul = (
          a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
    if (((a.dtype == dtypes.bfloat16 and
          b.dtype not in (dtypes.int8, dtypes.uint8)) or
         (b.dtype == dtypes.bfloat16 and
          a.dtype not in (dtypes.int8, dtypes.uint8))) and a.dtype != b.dtype):
      # matmul currently doesn't handle mixed-precision inputs other than
      # fp16 * int8 which is supported in BatchMatMulV3.
      use_sparse_matmul = True
    if use_sparse_matmul:
      ret = sparse_matmul(
          a,
          b,
          transpose_a=transpose_a,
          transpose_b=transpose_b,
          a_is_sparse=a_is_sparse,
          b_is_sparse=b_is_sparse,
          name=name)
      # sparse_matmul always returns float32, even with
      # bfloat16 inputs. This prevents us from configuring bfloat16 training.
      # casting to bfloat16 also matches non-sparse matmul behavior better.
      if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
        ret = cast(ret, dtypes.bfloat16)
      return ret
    else:
      if use_batch_matmul_v3:
        # BatchMatMulV3 only exposes adjoint flags; fold the (already
        # conjugation-adjusted) transpose flags into them.
        adjoint_a = adjoint_a or transpose_a
        adjoint_b = adjoint_b or transpose_b
        return gen_math_ops.batch_mat_mul_v3(
            a,
            b,
            adj_x=adjoint_a,
            adj_y=adjoint_b,
            Tout=output_type,
            grad_x=grad_a,
            grad_y=grad_b,
            name=name,
        )
      else:
        return gen_math_ops.mat_mul(
            a,
            b,
            transpose_a=transpose_a,
            transpose_b=transpose_b,
            grad_a=grad_a,
            grad_b=grad_b,
            name=name,
        )
@tf_export("linalg.matvec")
@dispatch.add_dispatch_support
def matvec(a,
           b,
           transpose_a=False,
           adjoint_a=False,
           a_is_sparse=False,
           b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by vector `b`, producing `a` * `b`.

  The matrix `a` must, following any transpositions, be a tensor of rank >= 2,
  with `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` able to broadcast
  with `shape(b)[:-1]`.

  Both `a` and `b` must be of the same type. The supported types are:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.

  Matrix `a` can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flag to `True`. These are
  `False` by default.

  If one or both of the inputs contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
  This optimization is only available for plain matrices/vectors (rank-2/1
  tensors) with datatypes `bfloat16` or `float32`.

  For example:

  ```python
  # 2-D tensor `a`: [[1, 2, 3], [4, 5, 6]]
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
  # 1-D tensor `b`: [7, 9, 11]
  b = tf.constant([7, 9, 11], shape=[3])
  # `a` * `b`: [58, 139]
  c = tf.linalg.matvec(a, b)
  ```

  Args:
    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
      `complex128` and rank > 1.
    b: `Tensor` with same type as `a` and compatible dimensions.
    transpose_a: If `True`, `a` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a` and `b` where each inner-most vector is
    the product of the corresponding matrices in `a` and vectors in `b`, e.g.
    if all transpose or adjoint attributes are `False`:

    `output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i.

  Raises:
    ValueError: If transpose_a and adjoint_a are both set to True.
  """
  with ops.name_scope(name, "MatVec", [a, b]) as name:
    # Treat the vector as an n x 1 matrix, multiply, then drop the trailing
    # singleton dimension from the result.
    b_as_matrix = array_ops.expand_dims(b, axis=-1)
    product = matmul(
        a,
        b_as_matrix,
        transpose_a=transpose_a,
        adjoint_a=adjoint_a,
        a_is_sparse=a_is_sparse,
        b_is_sparse=b_is_sparse)
    return array_ops.squeeze(product, axis=-1)
# TODO(b/178650720): Also support numpy-style type promotion in freestanding TF
# functions (e.g. tf.add).
def matmul_wrapper(a, b, name=None):
  """Dispatches `a` * `b` to `Tensor._matmul` under NumPy-style promotion."""
  if not ops.is_numpy_style_type_promotion():
    return matmul(a, b, name=name)
  return a._matmul(b)  # pylint: disable=protected-access
# Expose matmul's full public documentation on the promotion-aware wrapper.
matmul_wrapper.__doc__ = matmul.__doc__
# `sparse_matmul` survives only as a deprecated v1 endpoint wrapping the
# generated sparse_mat_mul op; `tf.linalg.matmul` is the replacement.
sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")(
    gen_math_ops.sparse_mat_mul)
tf_export(v1=["sparse_matmul"])(sparse_matmul)
@dispatch.add_dispatch_support
def _as_indexed_slices(x, optimize=True):
  """Convert 'x' to IndexedSlices.

  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.
    optimize: if true, attempt to optimize the conversion of 'x'.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if isinstance(x, indexed_slices.IndexedSlices):
    # Already in the target representation.
    return x
  if not isinstance(x, tensor_lib.Tensor):
    raise TypeError(f"Not a Tensor or IndexedSlices: {type(x)}.")
  x_shape = array_ops.shape_internal(x, optimize=optimize)
  # NOTE(review): `range` here presumably resolves to this module's TF range
  # op (shadowing the Python builtin) — confirm before refactoring.
  return indexed_slices.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
  """Convert all elements of 'inputs' to IndexedSlices.

  Additionally, homogenize the types of all the indices to
  either int32 or int64.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.
    optimize: if true, attempt to optimize the conversion of each input.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(f"Expected a list or tuple, not {type(inputs)}.")
  outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
  num_int32 = sum(1 for o in outputs if o.indices.dtype == dtypes.int32)
  if num_int32 == 0 or num_int32 == len(outputs):
    # Index dtypes are already homogeneous; nothing to do.
    return outputs
  # Mixed index dtypes: widen every int32 index tensor to int64.
  return [
      indexed_slices.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
                                   o.dense_shape)
      if o.indices.dtype == dtypes.int32 else o for o in outputs
  ]
@tf_export("math.add", "add")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def add(x, y, name=None):
  """Returns x + y element-wise.

  Example usages below.

  Add a scalar and a list:

  >>> x = [1, 2, 3, 4, 5]
  >>> y = 1
  >>> tf.add(x, y)
  <tf.Tensor: shape=(5,), dtype=int32, numpy=array([2, 3, 4, 5, 6],
  dtype=int32)>

  Note that binary `+` operator can be used instead:

  >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5])
  >>> y = tf.convert_to_tensor(1)
  >>> x + y
  <tf.Tensor: shape=(5,), dtype=int32, numpy=array([2, 3, 4, 5, 6],
  dtype=int32)>

  Add a tensor and a list of same shape:

  >>> x = [1, 2, 3, 4, 5]
  >>> y = tf.constant([1, 2, 3, 4, 5])
  >>> tf.add(x, y)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([ 2,  4,  6,  8, 10], dtype=int32)>

  **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a
  non-tensor, the non-tensor input will adopt (or get casted to) the data type
  of the tensor input. This can potentially cause unwanted overflow or underflow
  conversion.

  For example,

  >>> x = tf.constant([1, 2], dtype=tf.int8)
  >>> y = [2**7 + 1, 2**7 + 2]
  >>> tf.add(x, y)
  <tf.Tensor: shape=(2,), dtype=int8, numpy=array([-126, -124], dtype=int8)>

  When adding two input values of different shapes, `Add` follows NumPy
  broadcasting rules. The two input array shapes are compared element-wise.
  Starting with the trailing dimensions, the two dimensions either have to be
  equal or one of them needs to be `1`.

  For example,

  >>> x = np.ones(6).reshape(1, 2, 1, 3)
  >>> y = np.ones(6).reshape(2, 1, 3, 1)
  >>> tf.add(x, y).shape.as_list()
  [2, 2, 3, 3]

  Another example with two arrays of different dimension.

  >>> x = np.ones([1, 2, 1, 4])
  >>> y = np.ones([3, 4])
  >>> tf.add(x, y).shape.as_list()
  [1, 2, 3, 4]

  The reduction version of this elementwise operation is `tf.math.reduce_sum`

  Args:
    x: A `tf.Tensor`. Must be one of the following types: bfloat16, half,
      float16, float32, float64, uint8, uint16, uint32, uint64, int8, int16,
      int32, int64, complex64, complex128, string.
    y: A `tf.Tensor`. Must have the same type as x.
    name: A name for the operation (optional)

  Returns:
    A `tf.Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Add", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # `dtype_hint` (rather than `dtype`) lets `y` keep its own dtype when it is
    # already a tensor; a plain Python value adopts x's dtype (see Warning).
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
    if x.dtype == dtypes.string:
      # String concatenation is only implemented by the original Add kernel.
      return gen_math_ops.add(x, y, name=name)
    else:
      # Numeric dtypes dispatch to AddV2.
      return gen_math_ops.add_v2(x, y, name=name)
@tf_export("math.add_n", "add_n")
@dispatch.add_dispatch_support(iterable_parameters=["inputs"])
def add_n(inputs, name=None):
  """Returns the element-wise sum of a list of tensors.

  All inputs in the list must have the same shape. This op does not
  [broadcast](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)
  its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator)
  instead.

  For example:

  >>> a = tf.constant([[3, 5], [4, 8]])
  >>> b = tf.constant([[1, 6], [2, 9]])
  >>> tf.math.add_n([a, b, a]).numpy()
  array([[ 7, 16],
         [10, 25]], dtype=int32)

  See Also:

  * `tf.reduce_sum(inputs, axis=0)` - This performs the same mathematical
    operation, but `tf.add_n` may be more efficient because it sums the
    tensors directly. `reduce_sum` on the other hand calls
    `tf.convert_to_tensor` on the list of tensors, unnecessarily stacking them
    into a single tensor before summing.

  Args:
    inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the
      same shape and type. `tf.IndexedSlices` objects will be converted into
      dense tensors prior to adding.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of the same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  error_message = ("Inputs must be an iterable of at least one "
                   "Tensor/IndexedSlices with the same dtype and shape.")
  if not inputs or not isinstance(inputs, collections_abc.Iterable):
    raise ValueError(error_message)
  inputs = indexed_slices.convert_n_to_tensor_or_indexed_slices(inputs)
  for value in inputs:
    if not isinstance(value,
                      (tensor_lib.Tensor, indexed_slices.IndexedSlices)):
      raise ValueError(error_message)
  if len(inputs) > 1:
    return gen_math_ops.add_n(inputs, name=name)
  # Single input: just pass it through, densifying IndexedSlices.
  result = inputs[0]
  if isinstance(result, indexed_slices.IndexedSlices):
    result = ops.convert_to_tensor(result)
  if name:
    return array_ops.identity(result, name=name)
  return result
@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"])
@dispatch.add_dispatch_support
@deprecation.deprecated(None, "Use `tf.math.add_n` Instead")
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  For example:

  >>> a = tf.constant([[1, 2], [3, 4]])
  >>> b = tf.constant([[5, 0], [0, 6]])
  >>> tf.math.accumulate_n([a, b, a]).numpy()
  array([[ 7, 4],
         [ 6, 14]], dtype=int32)

  >>> # Explicitly pass shape and type
  >>> tf.math.accumulate_n(
  ...     [a, b, a], shape=[2, 2], tensor_dtype=tf.int32).numpy()
  array([[ 7,  4],
         [ 6, 14]], dtype=int32)

  Note: The input must be a list or tuple. This function does not handle
  `IndexedSlices`

  See Also:

  * `tf.reduce_sum(inputs, axis=0)` - This performs the same mathematical
    operation, but `tf.add_n` may be more efficient because it sums the
    tensors directly. `reduce_sum` on the other hand calls
    `tf.convert_to_tensor` on the list of tensors, unnecessarily stacking them
    into a single tensor before summing.
  * `tf.add_n` - This is another python wrapper for the same Op. It has
    nearly identical functionality.

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Expected shape of elements of `inputs` (optional). Also controls the
      output shape of this op, which may affect type inference in other ops. A
      value of `None` means "infer the input shape from the shapes in `inputs`".
    tensor_dtype: Expected data type of `inputs` (optional). A value of `None`
      means "infer the input dtype from `inputs[0]`".
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """

  def _input_error():
    return ValueError("inputs must be a list of at least one Tensor with the "
                      "same dtype and shape")

  if not inputs or not isinstance(inputs, (list, tuple)):
    raise _input_error()
  inputs = indexed_slices.convert_n_to_tensor_or_indexed_slices(inputs)
  # IndexedSlices are not supported: every element must be a dense Tensor
  # of the same dtype.
  if not all(isinstance(x, tensor_lib.Tensor) for x in inputs):
    raise _input_error()
  if not all(x.dtype == inputs[0].dtype for x in inputs):
    raise _input_error()
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
  # Validate that all input shapes are compatible with each other (and with
  # `shape`, if given); merge_with raises on mismatch.
  for input_tensor in inputs:
    if isinstance(input_tensor, tensor_lib.Tensor):
      shape = shape.merge_with(input_tensor.get_shape())

  # tensor_dtype is for safety only; operator's output type computed in C++
  if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
    raise TypeError(
        f"The `tensor_dtype` argument is {tensor_dtype}, but `input` is of "
        f"type {inputs[0].dtype}. These must be equal. Try casting the input "
        f"to the desired type.")

  if len(inputs) == 1 and name is None:
    return inputs[0]
  elif len(inputs) == 1 and name is not None:
    return array_ops.identity(inputs[0], name=name)
  return add_n(inputs, name=name)
@ops.RegisterGradient("AccumulateNV2")
def _accumulate_n_grad(op, grad):
  """Same as gradient for AddN. Copies the gradient to all inputs."""
  # Not broadcasting.
  return [grad for _ in op.inputs]
@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def sigmoid(x, name=None):
  r"""Computes sigmoid of `x` element-wise.

  Formula for calculating $\mathrm{sigmoid}(x) = y = 1 / (1 + \exp(-x))$.

  For $x \in (-\infty, \infty)$, $\mathrm{sigmoid}(x) \in (0, 1)$.

  Example Usage:

  If a positive number is large, then its sigmoid will approach to 1 since the
  formula will be `y = <large_num> / (1 + <large_num>)`

  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
  >>> tf.math.sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32,
  numpy=array([0.5, 0.7310586, 1.0, 1.0], dtype=float32)>

  If a negative number is large, its sigmoid will approach to 0 since the
  formula will be `y = 1 / (1 + <large_num>)`

  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
  >>> tf.math.sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32, numpy=
  array([0.0000000e+00, 1.9287499e-22, 2.6894143e-01, 0.5],
        dtype=float32)>

  Args:
    x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.

  Usage Example:

  >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32)
  >>> tf.sigmoid(x)
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([0. , 0.5, 1. ], dtype=float32)>

  @compatibility(scipy)
  Equivalent to scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as scope:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.sigmoid(tensor, name=scope)
@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("log_sigmoid")
def log_sigmoid(x, name=None):
  """Computes log sigmoid of `x` element-wise.

  Specifically, `y = log(1 / (1 + exp(-x)))`.  For numerical stability,
  we use `y = -tf.nn.softplus(-x)`.

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.

  Usage Example:

  If a positive number is large, then its log_sigmoid will approach to 0 since
  the formula will be `y = log( <large_num> / (1 + <large_num>) )` which
  approximates to `log (1)` which is 0.

  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
  >>> tf.math.log_sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32, numpy=
  array([-6.9314718e-01, -3.1326169e-01, -1.9287499e-22, -0.0000000e+00],
        dtype=float32)>

  If a negative number is large, its log_sigmoid will approach to the number
  itself since the formula will be `y = log( 1 / (1 + <large_num>) )` which is
  `log (1) - log ( (1 + <large_num>) )` which approximates to `- <large_num>`
  that is the number itself.

  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
  >>> tf.math.log_sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32, numpy=
  array([-100.       ,  -50.       ,   -1.3132616,   -0.6931472],
        dtype=float32)>
  """
  with ops.name_scope(name, "LogSigmoid", [x]) as scope:
    tensor = ops.convert_to_tensor(x, name="x")
    # log(sigmoid(x)) == -softplus(-x), which is numerically stable.
    return gen_math_ops.neg(gen_nn_ops.softplus(-tensor), name=scope)  # pylint: disable=invalid-unary-operand-type
@tf_export("math.cumsum", "cumsum")
@dispatch.add_dispatch_support
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:
  For example:

  >>> # tf.cumsum([a, b, c])   # [a, a + b, a + b + c]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([ 2,  6, 12, 20], dtype=int32)>

  >>> # using varying `axis` values
  >>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]])
  >>> tf.cumsum(y, axis=0)
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[ 2,  4,  6,  8],
         [ 3,  7, 11, 15]], dtype=int32)>
  >>> tf.cumsum(y, axis=1)
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[ 2,  6, 12, 20],
         [ 1,  4,  9, 16]], dtype=int32)>

  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
  instead:

  >>> # tf.cumsum([a, b, c], exclusive=True)  => [0, a, a + b]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x, exclusive=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([ 0,  2,  6, 12], dtype=int32)>

  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
  opposite direction:

  >>> # tf.cumsum([a, b, c], reverse=True)  # [a + b + c, b + c, c]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x, reverse=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([20, 18, 14,  8], dtype=int32)>

  This is more efficient than using separate `tf.reverse` ops.
  The `reverse` and `exclusive` kwargs can also be combined:

  >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True)  # [b + c, c, 0]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x, exclusive=True, reverse=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([18, 14,  8,  0], dtype=int32)>

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as scope:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        tensor, axis, exclusive=exclusive, reverse=reverse, name=scope)
@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("cumprod")
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumprod, which means that the
  first element of the input is identical to the first element of the output:

  ```python
  tf.math.cumprod([a, b, c])  # [a, a * b, a * b * c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
  performed
  instead:

  ```python
  tf.math.cumprod([a, b, c], exclusive=True)  # [1, a, a * b]
  ```

  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
  opposite direction:

  ```python
  tf.math.cumprod([a, b, c], reverse=True)  # [a * b * c, b * c, c]
  ```

  This is more efficient than using separate `tf.reverse` ops.
  The `reverse` and `exclusive` kwargs can also be combined:

  ```python
  tf.math.cumprod([a, b, c], exclusive=True, reverse=True)  # [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as scope:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        tensor, axis, exclusive=exclusive, reverse=reverse, name=scope)
@tf_export("math.cumulative_logsumexp", v1=["math.cumulative_logsumexp"])
@dispatch.add_dispatch_support
def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative log-sum-exp of the tensor `x` along the `axis`.

  By default, this operation performs an inclusive cumulative log-sum-exp, which
  means that the first element of the input is identical to the first element of
  the output.

  This operation is significantly more numerically stable than the equivalent
  Tensorflow operation `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, although
  it computes the same result given infinite numerical precision. However, note
  that in some cases, it may be less stable than `tf.math.reduce_logsumexp`
  for a given element, as it applies the "log-sum-exp trick" in a different
  way.

  More precisely, where `tf.math.reduce_logsumexp` uses the following trick:

  ```
  log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)
  ```

  it cannot be directly used here as there is no fast way of applying it
  to each prefix `x[:i]`. Instead, this function implements a prefix
  scan using pairwise log-add-exp, which is a commutative and associative
  (up to floating point precision) operator:

  ```
  log_add_exp(x, y) = log(exp(x) + exp(y))
                    = log(1 + exp(min(x, y) - max(x, y))) + max(x, y)
  ```

  However, reducing using the above operator leads to a different computation
  tree (logs are taken repeatedly instead of only at the end), and the maximum
  is only computed pairwise instead of over the entire prefix. In general, this
  leads to a different and slightly less precise computation.

  Args:
    x: A `Tensor`. Must be one of the following types: `float16`, `float32`,
      `float64`.
    axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the
      range `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumulative log-sum-exp.
    reverse: If `True`, performs the cumulative log-sum-exp in the reverse
      direction.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same shape and type as `x`.
  """
  with ops.name_scope(name, "CumulativeLogsumexp", [x]) as scope:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumulative_logsumexp(
        tensor, axis, exclusive=exclusive, reverse=reverse, name=scope)
@tf_export("math.conj", v1=["math.conj", "conj"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("conj")
def conj(x, name=None):
  r"""Returns the complex conjugate of a complex number.

  Given a tensor `x` of complex numbers, this operation returns a tensor of
  complex numbers that are the complex conjugate of each element in `x`. The
  complex numbers in `x` must be of the form \\(a + bj\\), where `a` is the
  real part and `b` is the imaginary part.

  The complex conjugate returned by this operation is of the form \\(a - bj\\).

  For example:

  >>> x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=complex128,
  numpy=array([-2.25-4.75j,  3.25-5.75j])>

  If `x` is real, it is returned unchanged.

  For example:

  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([-2.25,  3.25], dtype=float32)>

  Args:
    x: `Tensor` to conjugate.  Must have numeric or variant type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).

  Raises:
    TypeError: If `x` is not a numeric tensor.

  @compatibility(numpy)
  Equivalent to numpy.conj.
  @end_compatibility
  """
  # Fast path: a real-valued tensor is its own conjugate, so skip building
  # any op (and any name scope) entirely.
  if isinstance(x, tensor_lib.Tensor) and (x.dtype.is_floating or
                                           x.dtype.is_integer):
    return x
  with ops.name_scope(name, "Conj", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_complex or x.dtype == dtypes.variant:
      return gen_math_ops.conj(x, name=name)
    if x.dtype.is_floating or x.dtype.is_integer:
      return x
    raise TypeError(
        f"Expected numeric or variant tensor, got dtype {x.dtype!r}.")
def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keepdims were set to True.
  """
  # TODO(allenl): Refactor `reduced_shape` to take the tensor corresponding to
  # `input_shape` rather than `tf.shape` of it. Then we can check if the shape
  # is fully defined here, which may be faster executing eagerly than running
  # `tf.shape` and then fetching its constant value.
  # Fast path: if both the shape and the axes are statically known, compute
  # the reduced shape with NumPy and return it as a constant.
  constant_input_shape = tensor_util.constant_value(input_shape)
  if constant_input_shape is not None:
    constant_axes = tensor_util.constant_value(axes)
    if constant_axes is not None:
      constant_axes = np.array(constant_axes, dtype=np.int32)
      constant_input_shape = np.array(constant_input_shape, dtype=np.int32)
      # Reduced axes keep a dimension of size 1 (keepdims semantics).
      constant_input_shape[constant_axes] = 1
      return constant_input_shape

  # Dynamic path. Example values for an input of shape [2, 3, 5, 7] reduced
  # over axes [1, 2] are shown in the trailing comments below.
  axes = ops.convert_to_tensor(axes)  # [1, 2]
  input_rank = array_ops.size(input_shape, out_type=axes.dtype)  # 4
  # Normalize negative axis indices into [0, input_rank).
  axes = (axes + input_rank) % input_rank
  axes_shape = array_ops.shape(axes)  # [2]
  # dynamic_stitch scatters `input_shape` over all positions, then overwrites
  # the reduced positions (listed in `axes`) with ones.
  return gen_data_flow_ops.dynamic_stitch(  # [2, 1, 1, 7]
      [
          range(input_rank),  # [0, 1, 2, 3]
          axes
      ],  # [1, 2]
      [
          input_shape,  # [2, 3, 5, 7]
          array_ops.ones(axes_shape, dtype=input_shape.dtype),
      ],
  )  # [1, 1]
def _unsorted_segment_N(data, segment_ids, num_segments):
  """Helper function for unsorted_segment_mean/_sqrtN.

  Computes the number of segment entries with 0-entries set to 1 to allow
  division by N.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`. The
      values must be in the range `[0, num_segments)`. The values are always
      validated to be in range on CPU, never validated on TPU/GPU.
    num_segments: An integer scalar `Tensor`. The number of distinct segment
      IDs.

  Returns:
    A `Tensor` with the number of segment entries with 0-entries set to 1.
  """
  num_segments = ops.convert_to_tensor(num_segments)
  # bincount doesn't support negative indices so we use unsorted_segment_sum
  ones = array_ops.ones(
      array_ops.shape_internal(segment_ids), dtype=data.dtype)
  counts = gen_math_ops.unsorted_segment_sum(ones, segment_ids, num_segments)
  # Append a size-1 axis for every non-reduced dimension of `data` so the
  # counts broadcast against the per-segment sums.
  extra_rank = array_ops.rank(data) - array_ops.rank(segment_ids)
  broadcastable_shape = array_ops.concat(
      [num_segments[array_ops.newaxis],
       array_ops.ones([extra_rank], dtype=num_segments.dtype)],
      axis=0)
  counts = array_ops.reshape(counts, broadcastable_shape)
  # Empty segments get a count of 1 to keep the later division well-defined.
  return gen_math_ops.maximum(counts, 1)
@tf_export(
    "math.unsorted_segment_mean",
    v1=["math.unsorted_segment_mean", "unsorted_segment_mean"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_mean")
def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
  r"""Computes the mean along segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  Instead of computing the sum over segments, it computes the mean of all
  entries belonging to a segment such that:

  \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
  `j...` such that `segment_ids[j...] == i` with \\N_i\\ being the number of
  occurrences of id \\i\\.

  If there is no entry for a given segment ID `i`, it outputs 0.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.

  Caution: On CPU, values in `segment_ids` are always validated to be less than
  `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
  does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices
  result in safe but unspecified behavior, which may include ignoring
  out-of-bound indices or outputting a tensor with a 0 stored in the first
  dimension of its shape if `num_segments` is 0.

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
      The values must be less than `num_segments`.
      The values are always validated to be in range on CPU,
      never validated on GPU.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has same shape as data, except for the first `segment_ids.rank`
    dimensions, which are replaced with a single dimension which has size
   `num_segments`.
  """
  with ops.name_scope(name, "UnsortedSegmentMean"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    # Mean = per-segment sum divided by per-segment count (empty segments
    # contribute a count of 1, so they stay 0).
    total = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    counts = _unsorted_segment_N(data, segment_ids, num_segments)
    return total / counts
@tf_export(
    "math.unsorted_segment_sqrt_n",
    v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
  r"""Computes the sum along segments of a tensor divided by the sqrt(N).

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  Additionally to computing the sum over segments, it divides the results by
  sqrt(N).

  \\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over
  tuples `j...` such that `segment_ids[j...] == i` with \\N_i\\ being the
  number of occurrences of id \\i\\.

  If there is no entry for a given segment ID `i`, it outputs 0.

  Note that this op only supports floating point and complex dtypes,
  due to tf.sqrt only supporting these types.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.

  Caution: On CPU, values in `segment_ids` are always validated to be less than
  `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
  does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices
  result in safe but unspecified behavior, which may include ignoring
  out-of-bound indices or outputting a tensor with a 0 stored in the first
  dimension of its shape if `num_segments` is 0.

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
      The values must be in the range `[0, num_segments)`.
      The values are always validated to be in range on CPU,
      never validated on GPU.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has same shape as data, except for the first `segment_ids.rank`
    dimensions, which are replaced with a single dimension which has size
   `num_segments`.
  """
  with ops.name_scope(name, "UnsortedSegmentSqrtN"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    # Per-segment sum scaled by 1/sqrt(count); empty segments count as 1.
    total = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    counts = _unsorted_segment_N(data, segment_ids, num_segments)
    return total / gen_math_ops.sqrt(counts)
@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"])
@deprecation.deprecated_endpoints("sparse_segment_sum")
def sparse_segment_sum(
    data,
    indices,
    segment_ids,
    name=None,
    num_segments=None,
    sparse_gradient=False,
):
  r"""Computes the sum along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
  first dimension, selecting a subset of dimension 0, specified by `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]

  # Select two rows, two segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]

  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]

  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]

  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (`IndexedSlices`) instead of
      dense (`Tensor`). The sparse gradient will contain one non-zero row for
      each unique index in `indices`.

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Without an explicit segment count, the op infers the output size from
  # the largest segment id.
  if num_segments is None:
    return gen_math_ops.sparse_segment_sum(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        sparse_gradient=sparse_gradient,
        name=name,
    )
  # An explicit `num_segments` requires the WithNumSegments variant so that
  # missing trailing segment ids still produce zero rows.
  return gen_math_ops.sparse_segment_sum_with_num_segments(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      num_segments=num_segments,
      sparse_gradient=sparse_gradient,
      name=name,
  )
@tf_export("sparse.sampled_addmm", v1=[])
def sampled_addmm(
    indices,
    values,
    dense_shape,
    mat1,
    mat2,
    beta=1.0,
    alpha=1.0,
    output_type=dtypes.float32,
):
  """Performs the sampled matrix multiplication of two dense matrices.

  Multiplies matrix `mat1` by matrix `mat2` at the locations defined by
  `indices`. The product is scaled and added to `values`,
  producing `alpha` * (`mat1` @ `mat2`) * spy(`indices`) + `beta` * `values`.

  The function `spy(indices)` is the sparsity pattern matrix derived from
  `indices`.

  The `mat1` and `mat2` inputs must be tensors of rank >= 2 where the inner 2
  dimensions specify valid matrix multiplication dimensions, and any further
  dimensions specify matching batch size.

  The `indices`, `values`, and `dense_shape` inputs make up the components of a
  `SparseTensor` which defines the sparsity pattern of the output. The sparsity
  pattern has values of 1 at the positions defined by the `SparseTensor`, and 0
  elsewhere.

  The `alpha` and `beta` inputs are the scaling factors.

  The supported types for `values`, `mat1`, and `mat2` are:
  `bfloat16`, `float16`, `float32`, `float64`.

  A simple 2-D tensor operation:

  >>> indices = tf.constant([0, 0, 1, 1], shape=[2, 2])
  >>> indices
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[0, 0],
         [1, 1]], dtype=int32)>
  >>> values = tf.constant([0.5, 0.3])
  >>> values
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.3], dtype=float32)>
  >>> dense_shape = tf.constant([2, 2])
  >>> dense_shape
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>
  >>> mat1 = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=tf.float32)
  >>> mat1
  <tf.Tensor: shape=(2, 3), dtype=float32, numpy=
  array([[1., 2., 3.],
         [4., 5., 6.]], dtype=float32)>
  >>> mat2 = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2], dtype=tf.float32)
  >>> mat2
  <tf.Tensor: shape=(3, 2), dtype=float32, numpy=
  array([[ 7.,  8.],
         [ 9., 10.],
         [11., 12.]], dtype=float32)>
  >>> tf.sparse.sampled_addmm(indices, values, dense_shape, mat1, mat2,
  ... alpha=0.75, beta=0.25)
  (<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[0, 0],
         [1, 1]], dtype=int32)>, <tf.Tensor: shape=(2,), dtype=float32, numpy=
  array([ 43.625, 115.575], dtype=float32)>,
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>)

  A batch operation:

  >>> indices = tf.constant([0, 1, 1, 0, 0, 0, 1, 0], shape=[2, 2, 2])
  >>> indices
  <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
  array([[[0, 1],
          [1, 0]],
         [[0, 0],
          [1, 0]]], dtype=int32)>
  >>> values = tf.constant([3, 5, 2, 7], shape=[2, 2], dtype=tf.float32)
  >>> values
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[3., 5.],
         [2., 7.]], dtype=float32)>
  >>> dense_shape = tf.constant([2, 2])
  >>> dense_shape
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>
  >>> mat1 = tf.constant(np.arange(1, 13), shape=[2, 2, 3], dtype=tf.float32)
  >>> mat1
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 1.,  2.,  3.],
          [ 4.,  5.,  6.]],
         [[ 7.,  8.,  9.],
          [10., 11., 12.]]], dtype=float32)>
  >>> mat2 = tf.constant(np.arange(13, 25), shape=[2, 3, 2], dtype=tf.float32)
  >>> mat2
  <tf.Tensor: shape=(2, 3, 2), dtype=float32, numpy=
  array([[[13., 14.],
          [15., 16.],
          [17., 18.]],
         [[19., 20.],
          [21., 22.],
          [23., 24.]]], dtype=float32)>
  >>> tf.sparse.sampled_addmm(indices, values, dense_shape, mat1, mat2,
  ... alpha=0.75, beta=0.25)
  (<tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
  array([[[0, 1],
          [1, 0]],
         [[0, 0],
          [1, 0]]], dtype=int32)>, <tf.Tensor: shape=(2, 2), dtype=float32,
  numpy=array([[ 75.75, 173.  ],
         [381.5 , 524.5 ]], dtype=float32)>,
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>)

  Args:
    indices: `tf.Tensor` containing coordinates for the rows and columns to be
      multiplied. Must have rank > 1.
    values: `tf.Tensor` containing the values to be scaled and added to the
      sampled dot product.
    dense_shape: `tf.Tensor` defining the dense shape of the output.
    mat1: `tf.Tensor` to be multiplied. Must have rank > 1.
    mat2: `tf.Tensor` to be multiplied. Must have rank > 1.
    beta: Number to be multiplied with `values`. Defaults to 1.0.
    alpha: Number to be multiplied with the sampled dot product of `mat1` and
      `mat2`. Defaults to 1.0.
    output_type: The output datatype if needed. Defaults to float32.

  Returns:
    A tuple representing the `SparseTensor` components of the result of the
    operation.

  Raises:
    ValueError: If `dense_shape` does not match the shape of the product.
  """
  indices = ops.convert_to_tensor(indices)
  values = ops.convert_to_tensor(values, dtype=output_type)
  dense_shape = ops.convert_to_tensor(dense_shape, dtype=dtypes.int32)
  mat1 = ops.convert_to_tensor(mat1, dtype=output_type)
  mat2 = ops.convert_to_tensor(mat2, dtype=output_type)
  # The inner 2-D product of mat1 @ mat2 has shape [dense_rows, dense_cols];
  # `dense_shape` must match it.
  mat1_shape = tensor_util.constant_value(array_ops.shape(mat1))
  mat2_shape = tensor_util.constant_value(array_ops.shape(mat2))
  dense_rows = mat1_shape[-2]
  dense_cols = mat2_shape[-1]
  output_shape = array_ops_stack.stack([dense_rows, dense_cols])
  condition = reduce_all(equal(dense_shape, output_shape))
  # Use dense_shape to validate input matrix shapes.
  if context.executing_eagerly():
    # Eager mode: the comparison result is concrete, so raise immediately.
    if not condition:
      raise ValueError(
          f"Dense shape: {dense_shape} does not match "
          f"output shape: {output_shape}"
      )
  else:  # not context.executing_eagerly()
    # Graph mode: raise at trace time when both shapes are statically known;
    # otherwise fall back to a runtime Assert op on `condition`.
    dense_shape_static = tensor_util.constant_value(dense_shape)
    output_shape_static = tensor_util.constant_value(output_shape)
    if dense_shape_static is not None and output_shape_static is not None:
      condition_static = np.all(
          np.equal(dense_shape_static, output_shape_static)
      )
      if not condition_static:
        raise ValueError(
            f"Dense shape: {dense_shape} does not match "
            f"output shape: {output_shape}"
        )
    data = [
        "Dense shape: ",
        dense_shape,
        " does not match output shape: ",
        output_shape,
    ]
    gen_logging_ops._assert(condition, data, None, name="Assert")

  # Extract row and column indices.
  # Each index is [batch..., row, col]; split it into the coordinates needed
  # to gather rows of mat1 ([batch..., row]) and columns of mat2
  # ([batch..., col]).
  batch_indices = indices[..., :-2]
  row_indices = indices[..., :-1]
  col_indices = array_ops.concat([batch_indices, indices[..., -1:]], axis=-1)

  # Calculate batch dimensions.
  rank = tensor_util.constant_value(array_ops.rank(mat1))
  batch_dims = rank - 2

  # Extract rows and columns.
  # Columns of mat2 become rows of its transpose, so a single gather_nd
  # pattern serves both operands.
  rows = array_ops.gather_nd(mat1, row_indices, batch_dims=batch_dims)
  cols = array_ops.gather_nd(
      array_ops.matrix_transpose(mat2), col_indices, batch_dims=batch_dims
  )

  # Calculate dot product for the extracted rows and columns.
  dot = reduce_sum(rows * cols, axis=-1)
  return (indices, dot * alpha + values * beta, dense_shape)
@tf_export("sparse.segment_sum", v1=[])
def sparse_segment_sum_v2(
    data,
    indices,
    segment_ids,
    num_segments=None,
    name=None,
    sparse_gradient=False,
):
  r"""Computes the sum along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
  first dimension, selecting a subset of dimension 0, specified by `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]

  # Select two rows, two segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]

  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]

  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]

  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (`IndexedSlices`) instead of
      dense (`Tensor`). The sparse gradient will contain one non-zero row for
      each unique index in `indices`.

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # The TF2 endpoint only reorders keyword arguments (`num_segments` comes
  # before `name`); the computation is delegated unchanged to the v1 function.
  return sparse_segment_sum(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      name=name,
      num_segments=num_segments,
      sparse_gradient=sparse_gradient,
  )
@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
@deprecation.deprecated_endpoints("sparse_segment_mean")
def sparse_segment_mean(
    data,
    indices,
    segment_ids,
    name=None,
    num_segments=None,
    sparse_gradient=False,
):
  r"""Computes the mean along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (`IndexedSlices`) instead of
      dense (`Tensor`). The sparse gradient will contain one non-zero row for
      each unique index in `indices`.

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Both kernels share these arguments; the `*_with_num_segments` variant
  # additionally takes `num_segments` to fix the output size when ids are
  # missing.
  common_kwargs = dict(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      name=name,
      sparse_gradient=sparse_gradient,
  )
  if num_segments is None:
    return gen_math_ops.sparse_segment_mean(**common_kwargs)
  return gen_math_ops.sparse_segment_mean_with_num_segments(
      num_segments=num_segments, **common_kwargs
  )
@tf_export("sparse.segment_mean", v1=[])
def sparse_segment_mean_v2(
    data,
    indices,
    segment_ids,
    num_segments=None,
    name=None,
    sparse_gradient=False,
):
  r"""Computes the mean along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (`IndexedSlices`) instead of
      dense (`Tensor`). The sparse gradient will contain one non-zero row for
      each unique index in `indices`.

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # TF2 endpoint: identical computation, reordered keyword arguments.
  return sparse_segment_mean(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      name=name,
      num_segments=num_segments,
      sparse_gradient=sparse_gradient,
  )
@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
def sparse_segment_sqrt_n(
    data,
    indices,
    segment_ids,
    name=None,
    num_segments=None,
    sparse_gradient=False,
):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).

  `N` is the size of the segment being reduced.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (IndexedSlices) instead of dense
      (Tensor).

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Both kernels share these arguments; the `*_with_num_segments` variant
  # additionally takes `num_segments` to fix the output size when ids are
  # missing.
  common_kwargs = dict(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      name=name,
      sparse_gradient=sparse_gradient,
  )
  if num_segments is None:
    return gen_math_ops.sparse_segment_sqrt_n(**common_kwargs)
  return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
      num_segments=num_segments, **common_kwargs
  )
@tf_export("sparse.segment_sqrt_n", v1=[])
def sparse_segment_sqrt_n_v2(
    data,
    indices,
    segment_ids,
    num_segments=None,
    name=None,
    sparse_gradient=False,
):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.sparse.segment_mean`, but instead of dividing by the size of the
  segment, `N`, divide by `sqrt(N)` instead.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (`IndexedSlices`) instead of
      dense (`Tensor`). The sparse gradient will contain one non-zero row for
      each unique index in `indices`.

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # TF2 endpoint: identical computation, reordered keyword arguments.
  return sparse_segment_sqrt_n(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      name=name,
      num_segments=num_segments,
      sparse_gradient=sparse_gradient,
  )
@tf_export("tensordot", "linalg.tensordot")
@dispatch.add_dispatch_support
def tensordot(a, b, axes, name=None):
  r"""Tensor contraction of a and b along specified axes and outer product.

  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `axes`.

  This operation corresponds to `numpy.tensordot(a, b, axes)`.

  Example 1: When `a` and `b` are matrices (order 2), the case `axes=1`
  is equivalent to matrix multiplication.

  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.

  Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives
  the outer product, a tensor of order 4.

  Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
  tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:

  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).

  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.

  For example:

  ```python
  import numpy as np
  import tensorflow as tf

  a = np.arange(60).reshape(3,4,5)
  b = np.arange(24).reshape(4,3,2)
  c = tf.tensordot(a,b, axes=([1,0],[0,1]))
  c
  <tf.Tensor: shape=(5, 2), dtype=int64, numpy=
  array([[4400, 4730],
         [4532, 4874],
         [4664, 5018],
         [4796, 5162],
         [4928, 5306]])>

  # Another example
  d = tf.random.uniform((3,4,5))
  e = tf.random.uniform((5,3,2))
  f = tf.tensordot(d,e, axes=([2,0],[0,1]))
  f
  <tf.Tensor: shape=(4, 2), dtype=float32, numpy=
  array([[4.8271146, 4.493    ],
         [5.8537536, 5.492961 ],
         [5.2579894, 5.2020206],
         [3.5817177, 4.2104754]], dtype=float32)>
  ```

  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
      If axes is a scalar, sum over the last N axes of a and the first N axes of
      b in order. If axes is a list or `Tensor` the first and second row contain
      the set of unique integers specifying axes along which the contraction is
      computed, for `a` and `b`, respectively. The number of axes for `a` and
      `b` must be equal. If `axes=0`, computes the outer product between `a` and
      `b`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `a`.

  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """

  # Implementation strategy: transpose/reshape each operand into a rank-2
  # tensor (free dims x contracted dims), do a single matmul, then reshape the
  # result back to the concatenated free dimensions.
  def _tensordot_reshape(a, axes, flipped=False):
    """Helper method to perform transpose and reshape for contraction op.

    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
    tensor and performs the correct transpose and reshape operation for a given
    set of indices. It returns the reshaped tensor as well as a list of indices
    necessary to reshape the tensor again after matrix multiplication.

    Args:
      a: `Tensor`.
      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
        `a`.
      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
        assumes that `a` is the second argument in the contraction operation.

    Returns:
      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
      either a list of integers or an `int32` `Tensor`, depending on whether
      the shape of a is fully specified, and free_dims_static is either a list
      of integers and None values, or None, representing the inferred
      static shape of the free dimensions
    """
    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
      # Fully static shape: everything can be computed in Python at graph
      # construction time; transpose/reshape are skipped when already no-ops.
      shape_a = a.get_shape().as_list()
      # Normalize negative axis indices to their non-negative equivalents.
      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
      free = [i for i in builtins.range(len(shape_a)) if i not in axes]
      free_dims = [shape_a[i] for i in free]
      prod_free = int(np.prod([shape_a[i] for i in free]))
      prod_axes = int(np.prod([shape_a[i] for i in axes]))
      # The second matmul operand is "flipped": contracted dims come first so
      # that matmul contracts over the shared inner dimension.
      perm = list(axes) + free if flipped else free + list(axes)
      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
      if (perm != np.arange(len(shape_a))).any():
        a_trans = array_ops.transpose(a, perm)
      else:
        a_trans = a
      if a_trans.get_shape().as_list() != new_shape:
        reshaped_a = array_ops.reshape(a_trans, new_shape)
      else:
        reshaped_a = a_trans
      return reshaped_a, free_dims, free_dims
    else:
      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
        # Known rank but partially unknown dimensions: the axis bookkeeping is
        # static, the dimension sizes come from the runtime shape.
        shape_a = a.get_shape().as_list()
        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
        free = [i for i in builtins.range(len(shape_a)) if i not in axes]
        axes_dims = [shape_a[i] for i in axes]
        free_dims = [shape_a[i] for i in free]
        free_dims_static = free_dims
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
        shape_a = array_ops.shape(a)
      else:
        # Fully dynamic: rank and axes are only known at run time.
        free_dims_static = None
        shape_a = array_ops.shape(a)
        rank_a = array_ops.rank(a)
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        axes = array_ops.where(axes >= 0, axes, axes + rank_a)
        # `range` here is this module's tf range (returns a Tensor), not
        # builtins.range; list_diff yields the non-contracted axes.
        free, _ = gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32)
      free_dims = array_ops.gather(shape_a, free)
      axes_dims = array_ops.gather(shape_a, axes)
      prod_free_dims = reduce_prod(free_dims)
      prod_axes_dims = reduce_prod(axes_dims)
      if flipped:
        perm = array_ops.concat([axes, free], 0)
        new_shape = array_ops_stack.stack([prod_axes_dims, prod_free_dims])
      else:
        perm = array_ops.concat([free, axes], 0)
        new_shape = array_ops_stack.stack([prod_free_dims, prod_axes_dims])
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims_static

  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    if isinstance(axes, compat.integral_types):
      # Scalar `axes=N`: contract the last N axes of `a` with the first N of
      # `b`.
      if axes < 0:
        raise ValueError(f"`axes` must be at least 0. Received: {axes}.")
      if a_shape.ndims is not None:
        if axes > a_shape.ndims:
          raise ValueError(f"`axes` must not be larger than the number of "
                           f"dimensions of tensor {a}. Received {axes}, vs "
                           f"tensor dimensions {a_shape.ndims}.")
        return (list(builtins.range(a_shape.ndims - axes,
                                    a_shape.ndims)), list(builtins.range(axes)))
      else:
        # Rank unknown: build the axis ranges as tensors (module-level tf
        # `range`, not builtins.range).
        rank = array_ops.rank(a)
        return (range(rank - axes, rank,
                      dtype=dtypes.int32), range(axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      # Pair form: axes[0] applies to `a`, axes[1] to `b`.
      if len(axes) != 2:
        raise ValueError(
            f"`axes` must be an integer or have length 2. Received {axes}.")
      a_axes = axes[0]
      b_axes = axes[1]
      if isinstance(a_axes, compat.integral_types) and \
          isinstance(b_axes, compat.integral_types):
        a_axes = [a_axes]
        b_axes = [b_axes]
      if len(a_axes) != len(b_axes):
        raise ValueError(f"Different number of contraction axes `a` and `b`, "
                         f"{len(a_axes)} != {len(b_axes)}.")
      return a_axes, b_axes
    else:
      # Anything else (e.g. a [2, k] Tensor): split into per-operand rows.
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]

  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
        b, b_axes, True)
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      # Both free-dimension lists are static; reshape (or skip it if matmul
      # already produced the target shape).
      if (ab_matmul.get_shape().is_fully_defined() and
          ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims):
        return ab_matmul
      else:
        return array_ops.reshape(
            ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      # At least one free-dimension spec is dynamic: reshape with a runtime
      # shape, then attach whatever static shape information is available.
      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
      product = array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
      if a_free_dims_static is not None and b_free_dims_static is not None:
        product.set_shape(a_free_dims_static + b_free_dims_static)
      return product
@tf_export("math.polyval")
@dispatch.add_dispatch_support
def polyval(coeffs, x, name=None):
  r"""Computes the elementwise value of a polynomial.

  If `x` is a tensor and `coeffs` is a list n + 1 tensors,
  this function returns the value of the n-th order polynomial

  `p(x) = coeffs[n-1] + coeffs[n-2] * x + ... + coeffs[0] * x**(n-1)`

  evaluated using Horner's method, i.e.

  ```python
  p(x) = coeffs[n-1] + x * (coeffs[n-2] + ... + x * (coeffs[1] + x * coeffs[0]))
  ```

  Usage Example:

  >>> coefficients = [1.0, 2.5, -4.2]
  >>> x = 5.0
  >>> y = tf.math.polyval(coefficients, x)
  >>> y
  <tf.Tensor: shape=(), dtype=float32, numpy=33.3>

  Usage Example:

  >>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  `tf.math.polyval` can also be used in polynomial regression. Taking
  advantage of this function can facilitate writing a polynomial equation
  as compared to explicitly writing it out, especially for higher degree
  polynomials.

  >>> x = tf.constant(3)
  >>> theta1 = tf.Variable(2)
  >>> theta2 = tf.Variable(1)
  >>> theta3 = tf.Variable(0)
  >>> tf.math.polyval([theta1, theta2, theta3], x)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  Args:
    coeffs: A list of `Tensor` representing the coefficients of the polynomial.
    x: A `Tensor` representing the variable of the polynomial.
    name: A name for the operation (optional).

  Returns:
    A `tensor` of the shape as the expression p(x) with usual broadcasting
    rules for element-wise addition and multiplication applied.

  @compatibility(numpy)
  Equivalent to numpy.polyval.
  @end_compatibility
  """
  if not isinstance(coeffs, list):
    raise ValueError(
        f"Argument coeffs must be list type. Received type {type(coeffs)}.")

  with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # An empty coefficient list denotes the zero polynomial.
    if not coeffs:
      return array_ops.zeros_like(x, name=name)
    coeffs = [
        ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
        for index, coeff in enumerate(coeffs)
    ]
    # Horner's method: fold coefficients from the highest order downwards so
    # each step is a single multiply-add.
    result = coeffs[0]
    for coeff in coeffs[1:]:
      result = coeff + result * x
    return result
@tf_export("math.reciprocal_no_nan")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def reciprocal_no_nan(x, name=None):
  """Performs a safe reciprocal operation, element wise.

  If a particular element is zero, the reciprocal for that element is
  also set to zero.

  For example:

  ```python
  x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)
  tf.math.reciprocal_no_nan(x)  # [ 0.5, 2, 0.0, 1.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64` `complex64` or
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.

  Raises:
    TypeError: x must be of a valid dtype.
  """
  with ops.name_scope(name, "reciprocal_no_nan", [x]) as scope:
    x = ops.convert_to_tensor(x, name="x")
    # div_no_nan(1, x) returns 0 wherever x == 0 rather than inf/nan.
    numerator = constant_op.constant(1, dtype=x.dtype.base_dtype, name="one")
    return gen_math_ops.div_no_nan(numerator, x, name=scope)
@tf_export("math.xdivy")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def xdivy(x, y, name=None):
  """Computes `x / y`.

  Given `x` and `y`, computes `x / y`. This function safely returns
  zero when `x = 0`, no matter what the value of `y` is.

  Example:

  >>> tf.math.xdivy(1., 2.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>
  >>> tf.math.xdivy(0., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
  >>> tf.math.xdivy(0., 0.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
  >>> tf.math.xdivy(1., 0.)
  <tf.Tensor: shape=(), dtype=float32, numpy=inf>

  Args:
    x: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
      `complex128`
    y: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
      `complex128`
    name: A name for the operation (optional).

  Returns:
    `x / y`.
  """
  # Thin wrapper: the zero-safe division itself is implemented by the kernel.
  with ops.name_scope(name, "xdivy", [x]):
    result = gen_math_ops.xdivy(x, y)
  return result
@tf_export("math.xlog1py")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def xlog1py(x, y, name=None):
  r"""Compute x * log1p(y).

  Given `x` and `y`, compute `x * log1p(y)`. This function safely returns
  zero when `x = 0`, no matter what the value of `y` is.

  Example:

  >>> tf.math.xlog1py(0., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>
  >>> tf.math.xlog1py(1., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.6931472>
  >>> tf.math.xlog1py(2., 2.)
  <tf.Tensor: shape=(), dtype=float32, numpy=2.1972246>
  >>> tf.math.xlog1py(0., -1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>

  Args:
    x: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
      `complex128`
    y: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
      `complex128`
    name: A name for the operation (optional).

  Returns:
    `x * log1p(y)`.

  @compatibility(scipy)
  Equivalent to scipy.special.xlog1py
  @end_compatibility
  """
  # Thin wrapper: the x == 0 short-circuit is implemented by the kernel.
  with ops.name_scope(name, "xlog1py", [x]):
    result = gen_math_ops.xlog1py(x, y)
  return result
@tf_export("math.erfinv")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def erfinv(x, name=None):
  """Compute inverse error function.

  Given `x`, compute the inverse error function of `x`. This function
  is the inverse of `tf.math.erf`.

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    Inverse error function of `x`.
  """
  # Thin wrapper around the generated kernel.
  with ops.name_scope(name, "erfinv", [x]):
    result = gen_math_ops.erfinv(x)
  return result
@tf_export("math.ndtri")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def ndtri(x, name=None):
  """Compute quantile of Standard Normal.

  Given a probability `x`, returns the value `z` such that the standard
  normal CDF evaluated at `z` equals `x` — i.e. the inverse of the standard
  normal cumulative distribution function (scipy's `ndtri`).

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    The standard normal quantile (inverse CDF) of `x`.
  """
  # Fix: the previous docstring claimed this returns the "inverse error
  # function of x" (copy-paste from `erfinv`); the kernel computes ndtri.
  with ops.name_scope(name, "ndtri", [x]):
    return gen_math_ops.ndtri(x)
@tf_export("math.erfcinv")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def erfcinv(x, name=None):
  """Computes the inverse of complementary error function.

  Given `x`, compute the inverse complementary error function of `x`.
  This function is the inverse of `tf.math.erfc`, and is defined on
  `[0, 2]`.

  >>> tf.math.erfcinv([0., 0.2, 1., 1.5, 2.])
  <tf.Tensor: shape=(5,), dtype=float32, numpy=
  array([       inf,  0.9061935, -0.       , -0.4769363,       -inf],
        dtype=float32)>

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    Inverse complementary error function of `x`.

  @compatibility(numpy)
  Equivalent to scipy.special.erfcinv
  @end_compatibility
  """
  with ops.name_scope(name, "erfcinv", [x]):
    # NOTE(review): the tensor name "start" looks like a copy/paste from
    # another op; it is kept as-is so existing graph node names do not change.
    x = ops.convert_to_tensor(x, name="start")
    # Identity used: erfcinv(x) = -ndtri(x / 2) / sqrt(2).
    half_x = 0.5 * x
    return -ndtri(half_x) * np.sqrt(0.5)
@tf_export("math.ceil", v1=["math.ceil", "ceil"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("ceil")
def ceil(x, name=None):
  """Return the ceiling of the input, element-wise.

  For example:

  >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
  <tf.Tensor: shape=(7,), dtype=float32,
  numpy=array([-1., -1., -0., 1., 2., 2., 2.], dtype=float32)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.ceil
  @end_compatibility
  """
  # Fix: removed the stray "`int32`" the docstring appended after the dtype
  # list; the Ceil kernel is defined only for floating-point dtypes.
  return gen_math_ops.ceil(x, name)
@tf_export("math.sqrt", "sqrt")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def sqrt(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes element-wise square root of the input tensor.

  Note: This operation does not support integer types.

  >>> x = tf.constant([[4.0], [16.0]])
  >>> tf.sqrt(x)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
  array([[2.],
         [4.]], dtype=float32)>
  >>> y = tf.constant([[-4.0], [16.0]])
  >>> tf.sqrt(y)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
  array([[nan],
         [ 4.]], dtype=float32)>
  >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)
  >>> tf.sqrt(z)
  <tf.Tensor: shape=(2, 1), dtype=complex128, numpy=
  array([[0.0+1.j],
         [4.0+0.j]])>

  Note: In order to support complex type, please provide an input tensor
  of `complex64` or `complex128`.

  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of same size, type and sparsity as `x`.
  """
  # Thin wrapper around the generated kernel.
  return gen_math_ops.sqrt(x, name=name)
# pylint: disable=g-docstring-has-escape
@tf_export("math.exp", "exp")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def exp(x, name=None):
  r"""Computes exponential of x element-wise. \\(y = e^x\\).

  This function computes the exponential of the input tensor element-wise.
  i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor.
  \\(e\\) denotes Euler's number and is approximately equal to 2.718281.
  Output is positive for any real input.

  >>> x = tf.constant(2.0)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=7.389056>

  >>> x = tf.constant([2.0, 8.0])
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([ 7.389056, 2980.958 ], dtype=float32)>

  For complex numbers, the exponential value is calculated as
  $$
  e^{x+iy} = {e^x} {e^{iy}} = {e^x} ({\cos (y) + i \sin (y)})
  $$

  For `1+1j` the value would be computed as:
  $$
  e^1 (\cos (1) + i \sin (1)) = 2.7182817 \times (0.5403023+0.84147096j)
  $$

  >>> x = tf.constant(1 + 1j)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=complex128,
  numpy=(1.4686939399158851+2.2873552871788423j)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.exp
  @end_compatibility
  """
  # Thin wrapper around the generated kernel.
  return gen_math_ops.exp(x, name=name)


# pylint: enable=g-docstring-has-escape
@tf_export("math.sobol_sample")
@dispatch.add_dispatch_support
def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):
  """Generates points from the Sobol sequence.

  Creates a Sobol sequence with `num_results` samples. Each sample has dimension
  `dim`. Skips the first `skip` samples.

  Args:
    dim: Positive scalar `Tensor` representing each sample's dimension.
    num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol
      points to return in the output.
    skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of
      initial points of the Sobol sequence to skip. Default value is 0.
    dtype: (Optional) The `tf.Dtype` of the sample. One of: `tf.float32` or
      `tf.float64`. Defaults to `tf.float32`.
    name: (Optional) Python `str` name prefixed to ops created by this function.

  Returns:
    `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
  """
  # The kernel does all the work; this wrapper only provides the name scope.
  with ops.name_scope(name, "sobol", [dim, num_results, skip]):
    return gen_math_ops.sobol_sample(
        dim=dim, num_results=num_results, skip=skip, dtype=dtype)
@tf_export("math.rsqrt", v1=["math.rsqrt", "rsqrt"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("rsqrt")
def rsqrt(x, name=None):
  """Computes reciprocal of square root of x element-wise.

  For example:

  >>> x = tf.constant([2., 0., -2.])
  >>> tf.math.rsqrt(x)
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([0.707, inf, nan], dtype=float32)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.
  """
  # Thin wrapper around the generated kernel.
  return gen_math_ops.rsqrt(x, name=name)
@tf_export("math.acos", "acos")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def acos(x, name=None):
  """Computes acos of x element-wise.

  Provided an input tensor, the `tf.math.acos` operation
  returns the inverse cosine of each element of the tensor.
  If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`.

  Input range is `[-1, 1]` and the output has a range of `[0, pi]`.

  For example:

  >>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32)
  >>> tf.math.acos(x)
  <tf.Tensor: shape=(6,), dtype=float32,
  numpy= array([0. , 2.0943952, nan, 1.3694383, 1.5707964, nan],
  dtype=float32)>

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as x.
  """
  # Thin wrapper around the generated kernel.
  return gen_math_ops.acos(x, name=name)
@tf_export("math.floor", "floor")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def floor(x, name=None):
  """Returns element-wise largest integer not greater than x.

  Both input range is `(-inf, inf)` and the
  output range consists of all integer values.

  For example:

  >>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")])
  >>> tf.floor(x).numpy()
  array([ 1., -2., 5., -3., 0., inf], dtype=float32)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as x.
  """
  # Thin wrapper around the generated kernel.
  return gen_math_ops.floor(x, name=name)
# Register elementwise ops that don't have Python wrappers.
# Registration order mirrors the original one-call-per-line form; only the
# statement shape changed (tuples + loops instead of repeated calls).

# Binary elementwise ops.
for _elementwise_op in (
    gen_bitwise_ops.bitwise_and,
    gen_bitwise_ops.bitwise_or,
    gen_bitwise_ops.bitwise_xor,
    gen_bitwise_ops.left_shift,
    gen_bitwise_ops.right_shift,
):
  dispatch.register_binary_elementwise_api(_elementwise_op)
dispatch.register_unary_elementwise_api(gen_bitwise_ops.invert)
for _elementwise_op in (
    gen_math_ops.atan2,
    gen_math_ops.floor_div,
    gen_math_ops.floor_mod,
    gen_math_ops.greater,
    gen_math_ops.greater_equal,
    gen_math_ops.less,
    gen_math_ops.less_equal,
    gen_math_ops.logical_and,
    gen_math_ops.logical_or,
    gen_math_ops.maximum,
    gen_math_ops.minimum,
    gen_math_ops.real_div,
    gen_math_ops.squared_difference,
    gen_math_ops.truncate_div,
    gen_math_ops.truncate_mod,
    gen_math_ops.xlogy,
    gen_math_ops.zeta,
):
  dispatch.register_binary_elementwise_api(_elementwise_op)

# Unary elementwise ops.
for _elementwise_op in (
    gen_math_ops.acosh,
    gen_math_ops.asin,
    gen_math_ops.asinh,
    gen_math_ops.atan,
    gen_math_ops.atanh,
    gen_math_ops.cos,
    gen_math_ops.cosh,
    gen_math_ops.digamma,
    gen_math_ops.erf,
    gen_math_ops.erfc,
    gen_math_ops.expm1,
    gen_math_ops.is_finite,
    gen_math_ops.is_inf,
    gen_math_ops.is_nan,
    gen_math_ops.lgamma,
    gen_math_ops.log,
    gen_math_ops.log1p,
    gen_math_ops.logical_not,
    gen_math_ops.neg,
    gen_math_ops.next_after,
    gen_math_ops.reciprocal,
    gen_math_ops.rint,
    gen_math_ops.sin,
    gen_math_ops.sinh,
    gen_math_ops.square,
    gen_math_ops.tan,
    gen_math_ops.tanh,
):
  dispatch.register_unary_elementwise_api(_elementwise_op)
del _elementwise_op  # Keep the module namespace clean.
| DivideDelegateWithName |
python | huggingface__transformers | src/transformers/models/rembert/modeling_rembert.py | {
"start": 1579,
"end": 3855
} | class ____(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.input_embedding_size, padding_idx=config.pad_token_id
)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.input_embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.input_embedding_size)
self.LayerNorm = nn.LayerNorm(config.input_embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
past_key_values_length: int = 0,
) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->RemBert
| RemBertEmbeddings |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/conjecture/test_provider.py | {
"start": 18949,
"end": 21413
} | class ____(ExhaustibleProvider):
scope = "verified"
@pytest.mark.parametrize("provider", [ExhaustibleProvider, UnsoundVerifierProvider])
def test_notes_incorrect_verification(provider):
msg = "backend='p' claimed to verify this test passes - please send them a bug report!"
with temp_register_backend("p", provider):
@given(st.integers())
@settings(backend="p", database=None, max_examples=100)
def test_function(x):
assert x >= 0 # True from this backend, false in general!
with pytest.raises(AssertionError) as ctx:
test_function()
assert (msg in ctx.value.__notes__) == (provider is UnsoundVerifierProvider)
def test_invalid_provider_kw():
with pytest.raises(InvalidArgument, match="got an instance instead"):
ConjectureData(
random=None,
provider=TrivialProvider(None),
provider_kw={"one": "two"},
)
def test_available_providers_deprecation():
with pytest.warns(errors.HypothesisDeprecationWarning):
from hypothesis.internal.conjecture.data import AVAILABLE_PROVIDERS # noqa
with pytest.raises(ImportError):
from hypothesis.internal.conjecture.data import does_not_exist # noqa
@pytest.mark.parametrize("backend", AVAILABLE_PROVIDERS.keys())
@pytest.mark.parametrize(
"strategy", [st.integers(), st.text(), st.floats(), st.booleans(), st.binary()]
)
def test_can_generate_from_all_available_providers(backend, strategy):
# note: database=InMemoryExampleDatabase() is for compatibility with HypoFuzz
# here.
@given(strategy)
@settings(backend=backend, database=InMemoryExampleDatabase())
def f(x):
raise ValueError
with (
pytest.raises(ValueError),
(
pytest.warns(
HypothesisWarning, match="/dev/urandom is not available on windows"
)
if backend == "hypothesis-urandom" and WINDOWS
else nullcontext()
),
):
f()
def test_saves_on_fatal_error_with_backend():
with temp_register_backend("trivial", TrivialProvider):
db = InMemoryExampleDatabase()
@given(st.integers())
@settings(backend="trivial", database=db)
def test_function(n):
raise BaseException("marker")
with pytest.raises(BaseException, match="marker"):
test_function()
assert len(db.data) == 1
| UnsoundVerifierProvider |
python | django__django | tests/model_fields/models.py | {
"start": 6681,
"end": 6980
} | class ____(models.Model):
ip = models.GenericIPAddressField(null=True, protocol="ipv4")
###############################################################################
# These models aren't used in any test, just here to ensure they validate
# successfully.
# See ticket #16570.
| GenericIPAddress |
python | neetcode-gh__leetcode | python/2554-maximum-number-of-integers-to-choose-from-a-range-i.py | {
"start": 0,
"end": 454
} | class ____:
def maxCount(self, banned: List[int], n: int, maxSum: int) -> int:
nums = {x:1 for x in range(1, n + 1)} # hashmap for storing the required elements
for i in banned:
if nums.get(i):
del nums[i]
sum = 0
count = 0
for i in nums:
sum += i
if sum <= maxSum:
count += 1
else:
break
return count
| Solution |
python | huggingface__transformers | src/transformers/models/edgetam_video/modeling_edgetam_video.py | {
"start": 30387,
"end": 31718
} | class ____(nn.Module):
def __init__(self, config: EdgeTamVideoConfig):
super().__init__()
hidden_size = config.memory_encoder_hidden_size
output_channels = config.memory_encoder_output_channels
self.mask_downsampler = EdgeTamVideoMaskDownSampler(config)
self.feature_projection = nn.Conv2d(hidden_size, hidden_size, kernel_size=1)
self.memory_fuser = EdgeTamVideoMemoryFuser(config)
self.position_encoding = EdgeTamVideoPositionEmbeddingSine(num_pos_feats=output_channels // 2, normalize=True)
self.projection = nn.Conv2d(hidden_size, output_channels, kernel_size=1)
def forward(
self,
vision_features: torch.Tensor,
masks: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
## Process masks
masks = self.mask_downsampler(masks)
## Fuse pixel_features and downsampled masks
vision_features = self.feature_projection(vision_features)
vision_features = vision_features + masks
vision_features = self.memory_fuser(vision_features)
vision_features = self.projection(vision_features)
vision_pos_enc = self.position_encoding(vision_features.shape, vision_features.device, vision_features.dtype)
return vision_features, vision_pos_enc
| EdgeTamVideoMemoryEncoder |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_point_within_geo_region.py | {
"start": 1024,
"end": 3098
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
# Please see {some doc} for information on how to choose an id string for your Metric.
condition_metric_name = "column_values.point_within_geo_region"
condition_value_keys = ("country_iso_a3", "polygon_points")
world = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
# This method defines the business logic for evaluating your metric when using a PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, country_iso_a3, polygon_points, **kwargs):
# Check if the parameter are None
if polygon_points is not None:
polygon = Polygon(polygon_points)
elif country_iso_a3 is not None:
country_shapes = cls.world[["geometry", "iso_a3"]]
country_shapes = country_shapes[country_shapes["iso_a3"] == country_iso_a3]
country_shapes.reset_index(drop=True, inplace=True)
if country_shapes.empty:
raise Exception("This ISO country code is not supported.") # noqa: TRY002, TRY003
polygon = country_shapes["geometry"][0]
else:
raise Exception("Specify country_iso_a3 or polygon_points") # noqa: TRY002, TRY003
points = geopandas.GeoSeries(column.apply(Point))
return points.within(polygon)
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# return column.in_([3])
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# return column.isin([3])
# This class defines the Expectation itself
# The main business logic for calculation lives here.
| ColumnValuesPointWithinGeoRegion |
python | hynek__structlog | tests/processors/test_processors.py | {
"start": 1727,
"end": 2426
} | class ____:
def test_decodes(self):
"""
Byte strings get decoded (as UTF-8 by default).
"""
ud = UnicodeDecoder()
assert {"foo": "b\xe4r"} == ud(None, None, {"foo": b"b\xc3\xa4r"})
def test_passes_arguments(self):
"""
Encoding options are passed into the encoding call.
"""
ud = UnicodeDecoder("utf-8", "ignore")
assert {"foo": ""} == ud(None, None, {"foo": b"\xa1\xa4"})
def test_bytes_nop(self):
"""
If the value is already unicode, don't do anything.
"""
ud = UnicodeDecoder()
assert {"foo": "b\u2013r"} == ud(None, None, {"foo": "b\u2013r"})
| TestUnicodeDecoder |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_tool_selection.py | {
"start": 20531,
"end": 20859
} | class ____:
"""Test edge cases and error handling."""
def test_empty_tools_list_raises_error(self) -> None:
"""Test that empty tools list raises an error in schema creation."""
with pytest.raises(AssertionError, match="tools must be non-empty"):
_create_tool_selection_response([])
| TestEdgeCases |
python | getsentry__sentry | tests/sentry/db/postgres/schema/safe_migrations/integration/test_migrations.py | {
"start": 14105,
"end": 14355
} | class ____(BaseSafeMigrationTest):
app = "good_flow_delete_field_pending_with_not_null_m2m_app"
migrate_from = "0001"
migrate_to = "0002"
def test(self) -> None:
self.run_migration()
| DeletionFieldGoodDeletePendingWithNotNullM2M |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/pipes/client.py | {
"start": 5225,
"end": 6357
} | class ____(ABC):
@abstractmethod
@contextmanager
def inject_context(self, context_data: "PipesContextData") -> Iterator[PipesParams]:
"""A `@contextmanager` that injects context data into the external process.
This method should write the context data to a location accessible to the external
process. It should yield parameters that the external process can use to locate and load the
context data.
Args:
context_data (PipesContextData): The context data to inject.
Yields:
PipesParams: A JSON-serializable dict of parameters to be used used by the external
process to locate and load the injected context data.
"""
@abstractmethod
def no_messages_debug_text(self) -> str:
"""A message to be displayed when no messages are received from the external process to aid with debugging.
Example: "Attempted to inject context using a magic portal. Expected PipesMagicPortalContextLoader to be
explicitly passed to open_dagster_pipes in the external process."
"""
@public
| PipesContextInjector |
python | doocs__leetcode | solution/1900-1999/1913.Maximum Product Difference Between Two Pairs/Solution.py | {
"start": 0,
"end": 151
} | class ____:
def maxProductDifference(self, nums: List[int]) -> int:
nums.sort()
return nums[-1] * nums[-2] - nums[0] * nums[1]
| Solution |
python | cython__cython | Cython/Compiler/StringEncoding.py | {
"start": 677,
"end": 1632
} | class ____:
"""Assemble a byte string or char value.
"""
def __init__(self, target_encoding):
self.chars = []
self.target_encoding = target_encoding
def append(self, characters):
if isinstance(characters, str):
characters = characters.encode(self.target_encoding)
assert isinstance(characters, bytes), str(type(characters))
self.chars.append(characters)
def append_charval(self, char_number):
self.chars.append( chr(char_number).encode('ISO-8859-1') )
def append_uescape(self, char_number, escape_string):
self.append(escape_string)
def getstring(self):
# this *must* return a byte string!
return bytes_literal(b''.join(self.chars), self.target_encoding)
def getchar(self):
# this *must* return a byte string!
return self.getstring()
def getstrings(self):
return (self.getstring(), None)
| BytesLiteralBuilder |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/mock_communicator.py | {
"start": 849,
"end": 3931
} | class ____(Communicator):
def __init__(
self,
discrete_action=False,
visual_inputs=0,
num_agents=3,
brain_name="RealFakeBrain",
vec_obs_size=3,
):
"""
Python side of the grpc communication. Python is the client and Unity the server
"""
super().__init__()
self.is_discrete = discrete_action
self.steps = 0
self.visual_inputs = visual_inputs
self.has_been_closed = False
self.num_agents = num_agents
self.brain_name = brain_name
self.vec_obs_size = vec_obs_size
def initialize(
self, inputs: UnityInputProto, poll_callback: Optional[PollCallback] = None
) -> UnityOutputProto:
if self.is_discrete:
action_spec = ActionSpecProto(
num_discrete_actions=2, discrete_branch_sizes=[3, 2]
)
else:
action_spec = ActionSpecProto(num_continuous_actions=2)
bp = BrainParametersProto(
brain_name=self.brain_name, is_training=True, action_spec=action_spec
)
rl_init = UnityRLInitializationOutputProto(
name="RealFakeAcademy",
communication_version=UnityEnvironment.API_VERSION,
package_version="mock_package_version",
log_path="",
brain_parameters=[bp],
)
output = UnityRLOutputProto(agentInfos=self._get_agent_infos())
return UnityOutputProto(rl_initialization_output=rl_init, rl_output=output)
def _get_agent_infos(self):
dict_agent_info = {}
list_agent_info = []
vector_obs = [1, 2, 3]
observations = [
ObservationProto(
compressed_data=None,
shape=[30, 40, 3],
compression_type=COMPRESSION_TYPE_PNG,
)
for _ in range(self.visual_inputs)
]
vector_obs_proto = ObservationProto(
float_data=ObservationProto.FloatData(data=vector_obs),
shape=[len(vector_obs)],
compression_type=COMPRESSION_TYPE_NONE,
)
observations.append(vector_obs_proto)
for i in range(self.num_agents):
list_agent_info.append(
AgentInfoProto(
reward=1,
done=(i == 2),
max_step_reached=False,
id=i,
observations=observations,
)
)
dict_agent_info["RealFakeBrain"] = UnityRLOutputProto.ListAgentInfoProto(
value=list_agent_info
)
return dict_agent_info
def exchange(
self, inputs: UnityInputProto, poll_callback: Optional[PollCallback] = None
) -> UnityOutputProto:
result = UnityRLOutputProto(agentInfos=self._get_agent_infos())
return UnityOutputProto(rl_output=result)
def close(self):
"""
Sends a shutdown signal to the unity environment, and closes the grpc connection.
"""
self.has_been_closed = True
| MockCommunicator |
python | huggingface__transformers | src/transformers/models/fsmt/modeling_fsmt.py | {
"start": 19672,
"end": 26099
} | class ____(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DecoderLayer`]
Args:
config: FSMTConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: FSMTConfig):
super().__init__()
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = nn.Embedding(config.tgt_vocab_size, config.d_model, self.padding_idx)
embed_dim = self.embed_tokens.embedding_dim
self.embed_positions = SinusoidalPositionalEmbedding(
config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx
)
self.layers = nn.ModuleList([DecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]) # type: list[DecoderLayer]
self.output_projection = nn.Linear(config.d_model, config.tgt_vocab_size, bias=False)
def forward(
self,
input_ids: torch.Tensor,
encoder_hidden_states: torch.Tensor,
encoder_padding_mask: torch.Tensor,
decoder_padding_mask: torch.Tensor,
decoder_causal_mask: torch.Tensor,
inputs_embeds: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
):
"""
Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al.,
EMNLP 2019).
Args:
input_ids (`torch.LongTensor` of shape `(batch, tgt_len)`):
previous decoder outputs for teacher forcing
encoder_hidden_states: output from the encoder, used for
encoder-side attention
encoder_padding_mask: for ignoring pad tokens
past_key_values (dict or None): dictionary used for storing state during generation
Returns:
BaseModelOutputWithPast or tuple:
- the decoder's features of shape *(batch, tgt_len, embed_dim)*
- the cache
- hidden states
- attentions
"""
# check attention mask and invert
if encoder_padding_mask is not None:
encoder_padding_mask = invert_mask(encoder_padding_mask)
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
# embed positions
positions = self.embed_positions(input_ids)
if use_cache:
input_ids = input_ids[:, -1:]
positions = positions[:, -1:] # happens after we embed them
x = self.embed_tokens(input_ids) * self.embed_scale
elif inputs_embeds is not None:
# We assume zeros hidden states correspond to padding tokens
# and create `position_ids` where inputs_embeds[:, :, 0] == 0
position_ids = inputs_embeds[:, :, 0].masked_fill(
inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx
)
positions = self.embed_positions(position_ids)
x = inputs_embeds * self.embed_scale
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
x += positions
x = nn.functional.dropout(x, p=self.dropout, training=self.training)
# Convert to FSMT output format: (BS, seq_len, model_dim) -> (seq_len, BS, model_dim)
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attns = () if output_attentions else None
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
if output_hidden_states:
x = x.transpose(0, 1)
all_hidden_states += (x,)
x = x.transpose(0, 1)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
x, layer_self_attn, layer_cross_attn = decoder_layer(
x,
encoder_hidden_states,
encoder_attn_mask=encoder_padding_mask,
decoder_padding_mask=decoder_padding_mask,
layer_state=past_key_values,
causal_mask=decoder_causal_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
if output_attentions:
all_self_attns += (layer_self_attn,)
all_cross_attns += (layer_cross_attn,)
# add hidden states from the last decoder layer
if output_hidden_states:
x = x.transpose(0, 1)
all_hidden_states += (x,)
x = x.transpose(0, 1)
# Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
x = self.output_projection(x)
if not return_dict:
return tuple(
v for v in [x, past_key_values, all_hidden_states, all_self_attns, all_cross_attns] if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=x,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attns,
)
def _reorder_buffer(attn_cache, new_order):
for k, input_buffer_k in attn_cache.items():
if input_buffer_k is not None:
attn_cache[k] = input_buffer_k.index_select(0, new_order)
return attn_cache
| FSMTDecoder |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/utils/config.py | {
"start": 11348,
"end": 14620
} | class ____:
"""
Configuration for statistics-based query planning.
These options can be configured via environment variables
with the prefix ``CUDF_POLARS__EXECUTOR__STATS_PLANNING__``.
Parameters
----------
use_io_partitioning
Whether to use estimated file-size statistics to calculate
the ideal input-partition count for IO operations.
This option currently applies to Parquet data only.
Default is True.
use_reduction_planning
Whether to use estimated column statistics to calculate
the output-partition count for reduction operations
like `Distinct`, `GroupBy`, and `Select(unique)`.
Default is False.
use_join_heuristics
Whether to use join heuristics to estimate row-count
and unique-count statistics. Default is True.
These statistics may only be collected when they are
actually needed for query planning and when row-count
statistics are available for the underlying datasource
(e.g. Parquet and in-memory LazyFrame data).
use_sampling
Whether to sample real data to estimate unique-value
statistics. Default is True.
These statistics may only be collected when they are
actually needed for query planning, and when the
underlying datasource supports sampling (e.g. Parquet
and in-memory LazyFrame data).
default_selectivity
The default selectivity of a predicate.
Default is 0.8.
"""
_env_prefix = "CUDF_POLARS__EXECUTOR__STATS_PLANNING"
use_io_partitioning: bool = dataclasses.field(
default_factory=_make_default_factory(
f"{_env_prefix}__USE_IO_PARTITIONING", _bool_converter, default=True
)
)
use_reduction_planning: bool = dataclasses.field(
default_factory=_make_default_factory(
f"{_env_prefix}__USE_REDUCTION_PLANNING", _bool_converter, default=False
)
)
use_join_heuristics: bool = dataclasses.field(
default_factory=_make_default_factory(
f"{_env_prefix}__USE_JOIN_HEURISTICS", _bool_converter, default=True
)
)
use_sampling: bool = dataclasses.field(
default_factory=_make_default_factory(
f"{_env_prefix}__USE_SAMPLING", _bool_converter, default=True
)
)
default_selectivity: float = dataclasses.field(
default_factory=_make_default_factory(
f"{_env_prefix}__DEFAULT_SELECTIVITY", float, default=0.8
)
)
def __post_init__(self) -> None: # noqa: D105
if not isinstance(self.use_io_partitioning, bool):
raise TypeError("use_io_partitioning must be a bool")
if not isinstance(self.use_reduction_planning, bool):
raise TypeError("use_reduction_planning must be a bool")
if not isinstance(self.use_join_heuristics, bool):
raise TypeError("use_join_heuristics must be a bool")
if not isinstance(self.use_sampling, bool):
raise TypeError("use_sampling must be a bool")
if not isinstance(self.default_selectivity, float):
raise TypeError("default_selectivity must be a float")
@dataclasses.dataclass(frozen=True, eq=True)
| StatsPlanningOptions |
python | kubernetes-client__python | kubernetes/client/models/v1_storage_os_persistent_volume_source.py | {
"start": 383,
"end": 8636
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'fs_type': 'str',
'read_only': 'bool',
'secret_ref': 'V1ObjectReference',
'volume_name': 'str',
'volume_namespace': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'read_only': 'readOnly',
'secret_ref': 'secretRef',
'volume_name': 'volumeName',
'volume_namespace': 'volumeNamespace'
}
def __init__(self, fs_type=None, read_only=None, secret_ref=None, volume_name=None, volume_namespace=None, local_vars_configuration=None): # noqa: E501
"""V1StorageOSPersistentVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._fs_type = None
self._read_only = None
self._secret_ref = None
self._volume_name = None
self._volume_namespace = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
if read_only is not None:
self.read_only = read_only
if secret_ref is not None:
self.secret_ref = secret_ref
if volume_name is not None:
self.volume_name = volume_name
if volume_namespace is not None:
self.volume_namespace = volume_namespace
@property
def fs_type(self):
"""Gets the fs_type of this V1StorageOSPersistentVolumeSource. # noqa: E501
fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:return: The fs_type of this V1StorageOSPersistentVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1StorageOSPersistentVolumeSource.
fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:param fs_type: The fs_type of this V1StorageOSPersistentVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def read_only(self):
"""Gets the read_only of this V1StorageOSPersistentVolumeSource. # noqa: E501
readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
:return: The read_only of this V1StorageOSPersistentVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1StorageOSPersistentVolumeSource.
readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
:param read_only: The read_only of this V1StorageOSPersistentVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def secret_ref(self):
"""Gets the secret_ref of this V1StorageOSPersistentVolumeSource. # noqa: E501
:return: The secret_ref of this V1StorageOSPersistentVolumeSource. # noqa: E501
:rtype: V1ObjectReference
"""
return self._secret_ref
@secret_ref.setter
def secret_ref(self, secret_ref):
"""Sets the secret_ref of this V1StorageOSPersistentVolumeSource.
:param secret_ref: The secret_ref of this V1StorageOSPersistentVolumeSource. # noqa: E501
:type: V1ObjectReference
"""
self._secret_ref = secret_ref
@property
def volume_name(self):
"""Gets the volume_name of this V1StorageOSPersistentVolumeSource. # noqa: E501
volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. # noqa: E501
:return: The volume_name of this V1StorageOSPersistentVolumeSource. # noqa: E501
:rtype: str
"""
return self._volume_name
@volume_name.setter
def volume_name(self, volume_name):
"""Sets the volume_name of this V1StorageOSPersistentVolumeSource.
volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. # noqa: E501
:param volume_name: The volume_name of this V1StorageOSPersistentVolumeSource. # noqa: E501
:type: str
"""
self._volume_name = volume_name
@property
def volume_namespace(self):
"""Gets the volume_namespace of this V1StorageOSPersistentVolumeSource. # noqa: E501
volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. # noqa: E501
:return: The volume_namespace of this V1StorageOSPersistentVolumeSource. # noqa: E501
:rtype: str
"""
return self._volume_namespace
@volume_namespace.setter
def volume_namespace(self, volume_namespace):
"""Sets the volume_namespace of this V1StorageOSPersistentVolumeSource.
volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. # noqa: E501
:param volume_namespace: The volume_namespace of this V1StorageOSPersistentVolumeSource. # noqa: E501
:type: str
"""
self._volume_namespace = volume_namespace
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1StorageOSPersistentVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1StorageOSPersistentVolumeSource):
return True
return self.to_dict() != other.to_dict()
| V1StorageOSPersistentVolumeSource |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_container.py | {
"start": 11751,
"end": 13643
} | class ____:
def test_valid(self) -> None:
prop0 = bcpc.Tuple()
assert prop0.is_valid(())
prop1 = bcpc.Tuple(Int)
assert prop1.is_valid((0,))
prop2 = bcpc.Tuple(Int, Int)
assert prop2.is_valid((0, 0))
prop = bcpc.Tuple(Int, String, bcpc.List(Int))
assert prop.is_valid((1, "", [1, 2, 3]))
def test_invalid(self) -> None:
prop0 = bcpc.Tuple()
assert not prop0.is_valid((0,))
prop1 = bcpc.Tuple(Int)
assert not prop1.is_valid(())
assert not prop1.is_valid((0, 0))
prop2 = bcpc.Tuple(Int, Int)
assert not prop2.is_valid(())
assert not prop2.is_valid((0,))
prop = bcpc.Tuple(Int, String, bcpc.List(Int))
assert not prop.is_valid(None)
assert not prop.is_valid(False)
assert not prop.is_valid(True)
assert not prop.is_valid(0)
assert not prop.is_valid(1)
assert not prop.is_valid(0.0)
assert not prop.is_valid(1.0)
assert not prop.is_valid(1.0+1.0j)
assert not prop.is_valid("")
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(np.array([1,2,3]))
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
assert not prop.is_valid((1.0, "", [1, 2, 3]))
assert not prop.is_valid((1, True, [1, 2, 3]))
assert not prop.is_valid((1, "", (1, 2, 3)))
assert not prop.is_valid((1, "", [1, 2, "xyz"]))
def test_has_ref(self) -> None:
prop = bcpc.Tuple(Int, Int)
assert not prop.has_ref
prop = bcpc.Tuple(Int, Instance(_TestModel))
assert prop.has_ref
def test_str(self) -> None:
prop = bcpc.Tuple(Int, Int)
assert str(prop) == "Tuple(Int, Int)"
| Test_Tuple |
python | doocs__leetcode | solution/0600-0699/0606.Construct String from Binary Tree/Solution.py | {
"start": 192,
"end": 623
} | class ____:
def tree2str(self, root: Optional[TreeNode]) -> str:
def dfs(root):
if root is None:
return ''
if root.left is None and root.right is None:
return str(root.val)
if root.right is None:
return f'{root.val}({dfs(root.left)})'
return f'{root.val}({dfs(root.left)})({dfs(root.right)})'
return dfs(root)
| Solution |
python | django__django | tests/gis_tests/test_geoip2.py | {
"start": 654,
"end": 7736
} | class ____(SimpleTestCase):
fqdn = "sky.uk"
ipv4_str = "2.125.160.216"
ipv6_str = "::ffff:027d:a0d8"
ipv4_addr = ipaddress.ip_address(ipv4_str)
ipv6_addr = ipaddress.ip_address(ipv6_str)
query_values = (fqdn, ipv4_str, ipv6_str, ipv4_addr, ipv6_addr)
expected_city = {
"accuracy_radius": 100,
"city": "Boxford",
"continent_code": "EU",
"continent_name": "Europe",
"country_code": "GB",
"country_name": "United Kingdom",
"is_in_european_union": False,
"latitude": 51.75,
"longitude": -1.25,
"metro_code": None,
"postal_code": "OX1",
"region_code": "ENG",
"region_name": "England",
"time_zone": "Europe/London",
# Kept for backward compatibility.
"dma_code": None,
"region": "ENG",
}
expected_country = {
"continent_code": "EU",
"continent_name": "Europe",
"country_code": "GB",
"country_name": "United Kingdom",
"is_in_european_union": False,
}
@classmethod
def setUpClass(cls):
# Avoid referencing __file__ at module level.
cls.enterClassContext(override_settings(GEOIP_PATH=build_geoip_path()))
# Always mock host lookup to avoid test breakage if DNS changes.
cls.enterClassContext(
mock.patch("socket.gethostbyname", return_value=cls.ipv4_str)
)
super().setUpClass()
def test_init(self):
# Everything inferred from GeoIP path.
g1 = GeoIP2()
# Path passed explicitly.
g2 = GeoIP2(settings.GEOIP_PATH, GeoIP2.MODE_AUTO)
# Path provided as a string.
g3 = GeoIP2(str(settings.GEOIP_PATH))
# Only passing in the location of one database.
g4 = GeoIP2(settings.GEOIP_PATH / settings.GEOIP_CITY, country="")
g5 = GeoIP2(settings.GEOIP_PATH / settings.GEOIP_COUNTRY, city="")
for g in (g1, g2, g3, g4, g5):
self.assertTrue(g._reader)
# Improper parameters.
bad_params = (23, "foo", 15.23)
for bad in bad_params:
with self.assertRaises(GeoIP2Exception):
GeoIP2(cache=bad)
if isinstance(bad, str):
e = GeoIP2Exception
else:
e = TypeError
with self.assertRaises(e):
GeoIP2(bad, GeoIP2.MODE_AUTO)
def test_no_database_file(self):
invalid_path = pathlib.Path(__file__).parent.joinpath("data/invalid").resolve()
msg = "Path must be a valid database or directory containing databases."
with self.assertRaisesMessage(GeoIP2Exception, msg):
GeoIP2(invalid_path)
def test_bad_query(self):
g = GeoIP2(city="<invalid>")
functions = (g.city, g.geos, g.lat_lon, g.lon_lat)
msg = "Invalid GeoIP city data file: "
for function in functions:
with self.subTest(function=function.__qualname__):
with self.assertRaisesMessage(GeoIP2Exception, msg):
function("example.com")
functions += (g.country, g.country_code, g.country_name)
values = (123, 123.45, b"", (), [], {}, set(), frozenset(), GeoIP2)
msg = (
"GeoIP query must be a string or instance of IPv4Address or IPv6Address, "
"not type"
)
for function, value in itertools.product(functions, values):
with self.subTest(function=function.__qualname__, type=type(value)):
with self.assertRaisesMessage(TypeError, msg):
function(value)
def test_country(self):
g = GeoIP2(city="<invalid>")
self.assertIs(g.is_city, False)
self.assertIs(g.is_country, True)
for query in self.query_values:
with self.subTest(query=query):
self.assertEqual(g.country(query), self.expected_country)
self.assertEqual(
g.country_code(query), self.expected_country["country_code"]
)
self.assertEqual(
g.country_name(query), self.expected_country["country_name"]
)
def test_country_using_city_database(self):
g = GeoIP2(country="<invalid>")
self.assertIs(g.is_city, True)
self.assertIs(g.is_country, False)
for query in self.query_values:
with self.subTest(query=query):
self.assertEqual(g.country(query), self.expected_country)
self.assertEqual(
g.country_code(query), self.expected_country["country_code"]
)
self.assertEqual(
g.country_name(query), self.expected_country["country_name"]
)
def test_city(self):
g = GeoIP2(country="<invalid>")
self.assertIs(g.is_city, True)
self.assertIs(g.is_country, False)
for query in self.query_values:
with self.subTest(query=query):
self.assertEqual(g.city(query), self.expected_city)
geom = g.geos(query)
self.assertIsInstance(geom, GEOSGeometry)
self.assertEqual(geom.srid, 4326)
expected_lat = self.expected_city["latitude"]
expected_lon = self.expected_city["longitude"]
self.assertEqual(geom.tuple, (expected_lon, expected_lat))
self.assertEqual(g.lat_lon(query), (expected_lat, expected_lon))
self.assertEqual(g.lon_lat(query), (expected_lon, expected_lat))
# Country queries should still work.
self.assertEqual(g.country(query), self.expected_country)
self.assertEqual(
g.country_code(query), self.expected_country["country_code"]
)
self.assertEqual(
g.country_name(query), self.expected_country["country_name"]
)
def test_not_found(self):
g1 = GeoIP2(city="<invalid>")
g2 = GeoIP2(country="<invalid>")
for function, query in itertools.product(
(g1.country, g2.city), ("127.0.0.1", "::1")
):
with self.subTest(function=function.__qualname__, query=query):
msg = f"The address {query} is not in the database."
with self.assertRaisesMessage(geoip2.errors.AddressNotFoundError, msg):
function(query)
def test_del(self):
g = GeoIP2()
reader = g._reader
self.assertIs(reader._db_reader.closed, False)
del g
self.assertIs(reader._db_reader.closed, True)
def test_repr(self):
g = GeoIP2()
m = g._metadata
version = f"{m.binary_format_major_version}.{m.binary_format_minor_version}"
self.assertEqual(repr(g), f"<GeoIP2 [v{version}] _path='{g._path}'>")
@skipUnless(HAS_GEOIP2, "GeoIP2 is required.")
@override_settings(
GEOIP_CITY="GeoIP2-City-Test.mmdb",
GEOIP_COUNTRY="GeoIP2-Country-Test.mmdb",
)
| GeoLite2Test |
python | great-expectations__great_expectations | great_expectations/metrics/batch/batch_column_types.py | {
"start": 220,
"end": 279
} | class ____(BaseModel):
name: str
type: Any
| ColumnType |
python | donnemartin__system-design-primer | solutions/system_design/query_cache/query_cache_snippets.py | {
"start": 1071,
"end": 2610
} | class ____(object):
def __init__(self, MAX_SIZE):
self.MAX_SIZE = MAX_SIZE
self.size = 0
self.lookup = {}
self.linked_list = LinkedList()
def get(self, query):
"""Get the stored query result from the cache.
Accessing a node updates its position to the front of the LRU list.
"""
node = self.lookup[query]
if node is None:
return None
self.linked_list.move_to_front(node)
return node.results
def set(self, results, query):
"""Set the result for the given query key in the cache.
When updating an entry, updates its position to the front of the LRU list.
If the entry is new and the cache is at capacity, removes the oldest entry
before the new entry is added.
"""
node = self.map[query]
if node is not None:
# Key exists in cache, update the value
node.results = results
self.linked_list.move_to_front(node)
else:
# Key does not exist in cache
if self.size == self.MAX_SIZE:
# Remove the oldest entry from the linked list and lookup
self.lookup.pop(self.linked_list.tail.query, None)
self.linked_list.remove_from_tail()
else:
self.size += 1
# Add the new key and value
new_node = Node(query, results)
self.linked_list.append_to_front(new_node)
self.lookup[query] = new_node
| Cache |
python | tiangolo__fastapi | tests/test_additional_responses_custom_validationerror.py | {
"start": 178,
"end": 261
} | class ____(JSONResponse):
media_type = "application/vnd.api+json"
| JsonApiResponse |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 23999,
"end": 24707
} | class ____:
class B:
def foo():
bar(
"[{}]: xxx_xxxxxxxxxx(xxxxx={}, xxxx={}, xxxxx={}"
" xxxx_xxxx_xxxxxxxxxx={}, xxxx={})".format(
xxxx._xxxxxxxxxxxxxx, xxxxx, xxxx, xxxx_xxxx_xxxxxxxxxx, xxxxxxx
),
varX,
varY,
varZ,
)
def foo(xxxx):
for xxx_xxxx, _xxx_xxx, _xxx_xxxxx, xxx_xxxx in xxxx:
for xxx in xxx_xxxx:
assert ("x" in xxx) or (xxx in xxx_xxx_xxxxx), (
"{0} xxxxxxx xx {1}, xxx {1} xx xxx xx xxxx xx xxx xxxx: xxx xxxx {2}"
.format(xxx_xxxx, xxx, xxxxxx.xxxxxxx(xxx_xxx_xxxxx))
)
| A |
python | getsentry__sentry | tests/sentry_plugins/bitbucket/endpoints/test_webhooks.py | {
"start": 395,
"end": 1895
} | class ____(APITestCase):
def test_get(self) -> None:
project = self.project # force creation
url = f"/plugins/bitbucket/organizations/{project.organization.id}/webhook/"
response = self.client.get(url)
assert response.status_code == 405
def test_unregistered_event(self) -> None:
project = self.project # force creation
url = f"/plugins/bitbucket/organizations/{project.organization.id}/webhook/"
response = self.client.post(
path=url,
data=PUSH_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_EVENT_KEY="UnregisteredEvent",
REMOTE_ADDR=BITBUCKET_IP,
)
assert response.status_code == 204
response = self.client.post(
path=url,
data=PUSH_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_EVENT_KEY="UnregisteredEvent",
REMOTE_ADDR=BITBUCKET_IP_IN_RANGE,
)
assert response.status_code == 204
def test_invalid_signature_ip(self) -> None:
project = self.project # force creation
url = f"/plugins/bitbucket/organizations/{project.organization.id}/webhook/"
response = self.client.post(
path=url,
data=PUSH_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_EVENT_KEY="repo:push",
REMOTE_ADDR=BAD_IP,
)
assert response.status_code == 401
| WebhookTest |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-dappier/tests/test_tools_dappier_real_time_search.py | {
"start": 609,
"end": 2601
} | class ____:
def test_init_without_api_key_raises_value_error(self, monkeypatch):
monkeypatch.delenv("DAPPIER_API_KEY", raising=False)
dappier_client = MagicMock()
with patch("dappier.Dappier", return_value=dappier_client):
with pytest.raises(ValueError) as excinfo:
DappierRealTimeSearchToolSpec()
assert "API key is required" in str(excinfo.value)
def test_search_real_time_data_returns_response_message(self, tool, dappier_client):
response = MagicMock()
response.message = "Real-time data result"
dappier_client.search_real_time_data.return_value = response
result = tool.search_real_time_data("test query")
assert result == "Real-time data result"
dappier_client.search_real_time_data.assert_called_once_with(
query="test query", ai_model_id="am_01j0rzq4tvfscrgzwac7jv1p4c"
)
def test_search_stock_market_data_returns_response_message(
self, tool, dappier_client
):
response = MagicMock()
response.message = "Stock market data result"
dappier_client.search_real_time_data.return_value = response
result = tool.search_stock_market_data("stock query")
assert result == "Stock market data result"
dappier_client.search_real_time_data.assert_called_once_with(
query="stock query", ai_model_id="am_01j749h8pbf7ns8r1bq9s2evrh"
)
def test_search_real_time_data_no_response(self, tool, dappier_client):
dappier_client.search_real_time_data.return_value = None
result = tool.search_real_time_data("test query")
assert result == "No real-time data found."
def test_search_stock_market_data_no_response(self, tool, dappier_client):
dappier_client.search_real_time_data.return_value = None
result = tool.search_stock_market_data("stock query")
assert result == "No stock market data found."
| TestDappierRealTimeSearchTool |
python | langchain-ai__langchain | libs/core/tests/unit_tests/fake/callbacks.py | {
"start": 6848,
"end": 9683
} | class ____(AsyncCallbackHandler, BaseFakeCallbackHandlerMixin):
"""Fake async callback handler for testing."""
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
@override
async def on_retry(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retry_common()
@override
async def on_llm_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_start_common()
@override
async def on_llm_new_token(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_new_token_common()
@override
async def on_llm_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_end_common()
@override
async def on_llm_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_error_common(*args, **kwargs)
@override
async def on_chain_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_start_common()
@override
async def on_chain_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_end_common()
@override
async def on_chain_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_error_common()
@override
async def on_tool_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_start_common()
@override
async def on_tool_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_end_common()
@override
async def on_tool_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_error_common()
@override
async def on_agent_action(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_agent_action_common()
@override
async def on_agent_finish(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_agent_finish_common()
@override
async def on_text(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_text_common()
# Overriding since BaseModel has __deepcopy__ method as well
def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler": # type: ignore[override]
return self
| FakeAsyncCallbackHandler |
python | walkccc__LeetCode | solutions/1409. Queries on a Permutation With Key/1409.py | {
"start": 0,
"end": 421
} | class ____:
def __init__(self, n: int):
self.sums = [0] * (n + 1)
def add(self, i: int, delta: int) -> None:
while i < len(self.sums):
self.sums[i] += delta
i += FenwickTree.lowbit(i)
def get(self, i: int) -> int:
summ = 0
while i > 0:
summ += self.sums[i]
i -= FenwickTree.lowbit(i)
return summ
@staticmethod
def lowbit(i: int) -> int:
return i & -i
| FenwickTree |
python | dagster-io__dagster | scripts/gen_airbyte_classes.py | {
"start": 2845,
"end": 3928
} | class ____(SchemaType):
def __init__(self, schema_type_str: str, const_value: Optional[Any] = None):
if schema_type_str in TYPE_MAPPING:
self.type_str = TYPE_MAPPING[schema_type_str]
else:
self.type_str = schema_type_str
self._const_value = const_value
def __str__(self):
return self.type_str
@property
def const_value(self):
return self._const_value
def annotation(
self, scope: Optional[str] = None, quote: bool = False, hide_default: bool = False
):
if self.type_str in CHECK_MAPPING:
return self.type_str
scope = f"{scope}." if scope else ""
if quote:
return f'"{scope}{self.type_str}"'
return f"{scope}{self.type_str}"
def get_check(self, name: str, scope: Optional[str] = None):
if self.type_str in CHECK_MAPPING:
return CHECK_MAPPING[self.type_str].format(name, name)
scope = f"{scope}." if scope else ""
return f"check.inst_param({name}, '{name}', {scope}{self.type_str})"
| RawType |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_change_char_type_that_unsafe_app/migrations/0001_initial.py | {
"start": 153,
"end": 647
} | class ____(CheckedMigration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="TestTable",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("field", models.CharField(max_length=120)),
],
),
]
| Migration |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.