language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | gevent__gevent | src/greentest/3.12/test_weakref.py | {
"start": 67616,
"end": 67936
} | class ____(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
| WeakValueDictionaryTestCase |
python | doocs__leetcode | solution/0600-0699/0609.Find Duplicate File in System/Solution.py | {
"start": 0,
"end": 384
} | class ____:
def findDuplicate(self, paths: List[str]) -> List[List[str]]:
d = defaultdict(list)
for p in paths:
ps = p.split()
for f in ps[1:]:
i = f.find('(')
name, content = f[:i], f[i + 1 : -1]
d[content].append(ps[0] + '/' + name)
return [v for v in d.values() if len(v) > 1]
| Solution |
python | pandas-dev__pandas | pandas/tests/indexes/multi/test_formats.py | {
"start": 1002,
"end": 8312
} | class ____:
def test_unicode_repr_issues(self):
levels = [Index(["a/\u03c3", "b/\u03c3", "c/\u03c3"]), Index([0, 1])]
codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, codes=codes)
repr(index.levels)
repr(index.get_level_values(1))
def test_repr_max_seq_items_equal_to_n(self, idx):
# display.max_seq_items == n
with pd.option_context("display.max_seq_items", 6):
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
('bar', 'one'),
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'])"""
assert result == expected
def test_repr(self, idx):
result = idx[:1].__repr__()
expected = """\
MultiIndex([('foo', 'one')],
names=['first', 'second'])"""
assert result == expected
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
('bar', 'one'),
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'])"""
assert result == expected
with pd.option_context("display.max_seq_items", 5):
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
...
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'], length=6)"""
assert result == expected
# display.max_seq_items == 1
with pd.option_context("display.max_seq_items", 1):
result = idx.__repr__()
expected = """\
MultiIndex([...
('qux', 'two')],
names=['first', ...], length=6)"""
assert result == expected
def test_rjust(self):
n = 1000
ci = pd.CategoricalIndex(list("a" * n) + (["abc"] * n))
dti = pd.date_range("2000-01-01", freq="s", periods=n * 2)
mi = MultiIndex.from_arrays([ci, ci.codes + 9, dti], names=["a", "b", "dti"])
result = mi[:1].__repr__()
expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00')],
names=['a', 'b', 'dti'])"""
assert result == expected
result = mi[::500].__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00'),
( 'a', 9, '2000-01-01 00:08:20'),
('abc', 10, '2000-01-01 00:16:40'),
('abc', 10, '2000-01-01 00:25:00')],
names=['a', 'b', 'dti'])"""
assert result == expected
result = mi.__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00'),
( 'a', 9, '2000-01-01 00:00:01'),
( 'a', 9, '2000-01-01 00:00:02'),
( 'a', 9, '2000-01-01 00:00:03'),
( 'a', 9, '2000-01-01 00:00:04'),
( 'a', 9, '2000-01-01 00:00:05'),
( 'a', 9, '2000-01-01 00:00:06'),
( 'a', 9, '2000-01-01 00:00:07'),
( 'a', 9, '2000-01-01 00:00:08'),
( 'a', 9, '2000-01-01 00:00:09'),
...
('abc', 10, '2000-01-01 00:33:10'),
('abc', 10, '2000-01-01 00:33:11'),
('abc', 10, '2000-01-01 00:33:12'),
('abc', 10, '2000-01-01 00:33:13'),
('abc', 10, '2000-01-01 00:33:14'),
('abc', 10, '2000-01-01 00:33:15'),
('abc', 10, '2000-01-01 00:33:16'),
('abc', 10, '2000-01-01 00:33:17'),
('abc', 10, '2000-01-01 00:33:18'),
('abc', 10, '2000-01-01 00:33:19')],
names=['a', 'b', 'dti'], length=2000)"""
assert result == expected
def test_tuple_width(self):
n = 1000
ci = pd.CategoricalIndex(list("a" * n) + (["abc"] * n))
dti = pd.date_range("2000-01-01", freq="s", periods=n * 2)
levels = [ci, ci.codes + 9, dti, dti, dti]
names = ["a", "b", "dti_1", "dti_2", "dti_3"]
mi = MultiIndex.from_arrays(levels, names=names)
result = mi[:1].__repr__()
expected = """MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])""" # noqa: E501
assert result == expected
result = mi[:10].__repr__()
expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
('a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
('a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
('a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
('a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
('a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
('a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])"""
assert result == expected
result = mi.__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
( 'a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
( 'a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
( 'a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
( 'a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
( 'a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
( 'a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
( 'a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
( 'a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
( 'a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...),
...
('abc', 10, '2000-01-01 00:33:10', '2000-01-01 00:33:10', ...),
('abc', 10, '2000-01-01 00:33:11', '2000-01-01 00:33:11', ...),
('abc', 10, '2000-01-01 00:33:12', '2000-01-01 00:33:12', ...),
('abc', 10, '2000-01-01 00:33:13', '2000-01-01 00:33:13', ...),
('abc', 10, '2000-01-01 00:33:14', '2000-01-01 00:33:14', ...),
('abc', 10, '2000-01-01 00:33:15', '2000-01-01 00:33:15', ...),
('abc', 10, '2000-01-01 00:33:16', '2000-01-01 00:33:16', ...),
('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...),
('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...),
('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)"""
assert result == expected
def test_multiindex_long_element(self):
# Non-regression test towards GH#52960
data = MultiIndex.from_tuples([("c" * 62,)])
expected = (
"MultiIndex([('cccccccccccccccccccccccccccccccccccccccc"
"cccccccccccccccccccccc',)],\n )"
)
assert str(data) == expected
| TestRepr |
python | readthedocs__readthedocs.org | readthedocs/integrations/models.py | {
"start": 14023,
"end": 14368
} | class ____(Integration):
integration_type_id = Integration.BITBUCKET_WEBHOOK
has_sync = True
class Meta:
proxy = True
@property
def can_sync(self):
try:
return all((k in self.provider_data) for k in ["uuid", "url"])
except (ValueError, TypeError):
return False
| BitbucketWebhook |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/streaming/_types.py | {
"start": 527,
"end": 680
} | class ____(BaseModel):
type: Literal["text"]
text: str
"""The text delta"""
snapshot: str
"""The entire accumulated text"""
| TextEvent |
python | walkccc__LeetCode | solutions/138. Copy List with Random Pointer/138.py | {
"start": 0,
"end": 350
} | class ____:
def copyRandomList(self, head: 'Node') -> 'Node':
if not head:
return None
if head in self.map:
return self.map[head]
newNode = Node(head.val)
self.map[head] = newNode
newNode.next = self.copyRandomList(head.next)
newNode.random = self.copyRandomList(head.random)
return newNode
map = {}
| Solution |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/classes.py | {
"start": 626,
"end": 728
} | class ____(Quux):
pass
Alias = Foo
#: docstring
OtherAlias = Bar
#: docstring
IntAlias = int
| Corge |
python | pytorch__pytorch | test/dynamo/test_guard_serialization.py | {
"start": 1936,
"end": 2145
} | class ____(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return func(*args, **kwargs)
| GlobalTorchFunctionMode |
python | google__pytype | pytype/abstract/_typing.py | {
"start": 17877,
"end": 17978
} | class ____(_TypeVariableInstance):
"""An instance of a ParamSpec type parameter."""
| ParamSpecInstance |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 71201,
"end": 71477
} | class ____(_PrintableStructure):
_fields_ = [
('isGridLicenseSupported', c_int),
('licensableFeaturesCount', c_uint),
('gridLicensableFeatures', c_nvmlGridLicensableFeature_t * NVML_GRID_LICENSE_FEATURE_MAX_COUNT),
]
| c_nvmlGridLicensableFeatures_t |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/assignment6.py | {
"start": 230,
"end": 585
} | class ____:
# This should not generate an error because
# the RHS of the assignment refers to a different
# "a", declared in an outer scope.
a = a
# Same with "b" here.
(b, a) = (b, 3)
# Same with "c" here.
[c] = [c]
# This should generate an error because "d" is
# not declared in the outer scope.
e = d
| MyClass |
python | pypa__pipenv | pipenv/patched/pip/_internal/cli/spinners.py | {
"start": 466,
"end": 2289
} | class ____(SpinnerInterface):
def __init__(
self,
message: str,
file: Optional[IO[str]] = None,
spin_chars: str = "-\\|/",
# Empirically, 8 updates/second looks nice
min_update_interval_seconds: float = 0.125,
):
self._message = message
if file is None:
file = sys.stdout
self._file = file
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._finished = False
self._spin_cycle = itertools.cycle(spin_chars)
self._file.write(" " * get_indentation() + self._message + " ... ")
self._width = 0
def _write(self, status: str) -> None:
assert not self._finished
# Erase what we wrote before by backspacing to the beginning, writing
# spaces to overwrite the old text, and then backspacing again
backup = "\b" * self._width
self._file.write(backup + " " * self._width + backup)
# Now we have a blank slate to add our status
self._file.write(status)
self._width = len(status)
self._file.flush()
self._rate_limiter.reset()
def spin(self) -> None:
if self._finished:
return
if not self._rate_limiter.ready():
return
self._write(next(self._spin_cycle))
def finish(self, final_status: str) -> None:
if self._finished:
return
self._write(final_status)
self._file.write("\n")
self._file.flush()
self._finished = True
# Used for dumb terminals, non-interactive installs (no tty), etc.
# We still print updates occasionally (once every 60 seconds by default) to
# act as a keep-alive for systems like Travis-CI that take lack-of-output as
# an indication that a task has frozen.
| InteractiveSpinner |
python | huggingface__transformers | src/transformers/models/aria/modular_aria.py | {
"start": 47800,
"end": 48326
} | class ____(LlamaMLP):
"""
Shared Expert MLP for shared experts.
Unlike routed experts, shared experts process all tokens without routing.
This class reconfigures the intermediate size in comparison to the LlamaMLP.
Args:
config (`AriaTextConfig`): Configuration object for the Aria language model.
"""
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts
| AriaSharedExpertsMLP |
python | openai__openai-python | src/openai/types/realtime/realtime_response_create_mcp_tool_param.py | {
"start": 2328,
"end": 4415
} | class ____(TypedDict, total=False):
server_label: Required[str]
"""A label for this MCP server, used to identify it in tool calls."""
type: Required[Literal["mcp"]]
"""The type of the MCP tool. Always `mcp`."""
allowed_tools: Optional[AllowedTools]
"""List of allowed tool names or a filter object."""
authorization: str
"""
An OAuth access token that can be used with a remote MCP server, either with a
custom MCP server URL or a service connector. Your application must handle the
OAuth authorization flow and provide the token here.
"""
connector_id: Literal[
"connector_dropbox",
"connector_gmail",
"connector_googlecalendar",
"connector_googledrive",
"connector_microsoftteams",
"connector_outlookcalendar",
"connector_outlookemail",
"connector_sharepoint",
]
"""Identifier for service connectors, like those available in ChatGPT.
One of `server_url` or `connector_id` must be provided. Learn more about service
connectors
[here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
Currently supported `connector_id` values are:
- Dropbox: `connector_dropbox`
- Gmail: `connector_gmail`
- Google Calendar: `connector_googlecalendar`
- Google Drive: `connector_googledrive`
- Microsoft Teams: `connector_microsoftteams`
- Outlook Calendar: `connector_outlookcalendar`
- Outlook Email: `connector_outlookemail`
- SharePoint: `connector_sharepoint`
"""
headers: Optional[Dict[str, str]]
"""Optional HTTP headers to send to the MCP server.
Use for authentication or other purposes.
"""
require_approval: Optional[RequireApproval]
"""Specify which of the MCP server's tools require approval."""
server_description: str
"""Optional description of the MCP server, used to provide more context."""
server_url: str
"""The URL for the MCP server.
One of `server_url` or `connector_id` must be provided.
"""
| RealtimeResponseCreateMcpToolParam |
python | getsentry__sentry | src/sentry/testutils/pytest/kafka.py | {
"start": 499,
"end": 6874
} | class ____:
def __init__(self, request, settings):
self.test_name = request.node.name
kafka_config = {}
for key, val in settings.KAFKA_CLUSTERS["default"]["common"].items():
kafka_config[key] = val
self.admin_client = AdminClient(kafka_config)
def delete_topic(self, topic_name):
try:
futures_dict = self.admin_client.delete_topics([topic_name])
self._sync_wait_on_result(futures_dict)
except Exception:
_log.warning("Could not delete topic %s", topic_name)
def _sync_wait_on_result(self, futures_dict):
"""
Synchronously waits on all futures returned by the admin_client api.
:param futures_dict: the api returns a dict of futures that can be awaited
"""
# just wait on all futures returned by the async operations of the admin_client
for f in futures_dict.values():
f.result(5) # wait up to 5 seconds for the admin operation to finish
@pytest.fixture
def kafka_admin(request):
"""
A fixture representing a simple wrapper over the admin interface
:param request: the pytest request
:return: a Kafka admin wrapper
"""
def inner(settings):
return _KafkaAdminWrapper(request, settings)
return inner
@pytest.fixture(scope="session")
def scope_consumers():
"""
Sets up an object to keep track of the scope consumers ( consumers that will only
be created once per test session).
"""
all_consumers: MutableMapping[str, Consumer | None] = {
# Relay is configured to use this topic for all ingest messages. See
# `templates/config.yml`.
"ingest-events": None,
"outcomes": None,
}
yield all_consumers
for consumer_name, consumer in all_consumers.items():
if consumer is not None:
try:
# stop the consumer
consumer.signal_shutdown()
consumer.run()
except: # noqa:
_log.warning("Failed to cleanup consumer %s", consumer_name)
@pytest.fixture(scope="function")
def session_ingest_consumer(scope_consumers, kafka_admin, task_runner):
"""
Returns a factory for a session ingest consumer.
Note/Warning: Once an ingest consumer is created it will be reused by all tests in the session.
The ingest consumer is created the first time with the provided settings and then reused.
If you don't want this behaviour DO NOT USE this fixture (create a fixture, similar with this one,
that returns a new consumer at each invocation rather then reusing it)
:return: a function factory that creates a consumer at first invocation and returns the cached consumer afterwards.
"""
def ingest_consumer(settings):
from sentry.consumers import get_stream_processor
from sentry.utils.batching_kafka_consumer import create_topics
# Relay is configured to use this topic for all ingest messages. See
# `template/config.yml`.
cluster_name = "default"
topic_event_name = "ingest-events"
if scope_consumers[topic_event_name] is not None:
# reuse whatever was already created (will ignore the settings)
return scope_consumers[topic_event_name]
# first time the consumer is requested, create it using settings
admin = kafka_admin(settings)
admin.delete_topic(topic_event_name)
create_topics(cluster_name, [topic_event_name])
# simulate the event ingestion task
group_id = "test-consumer"
consumer = get_stream_processor(
"ingest-attachments",
consumer_args=["--max-batch-size=1", "--max-batch-time-ms=10000", "--processes=1"],
topic=topic_event_name,
cluster=cluster_name,
group_id=group_id,
auto_offset_reset="earliest",
strict_offset_reset=False,
)
scope_consumers[topic_event_name] = consumer
return consumer
return ingest_consumer
@pytest.fixture(scope="function")
def wait_for_ingest_consumer(session_ingest_consumer, task_runner):
"""
Returns a function that can be used to create a wait loop for the ingest consumer
The ingest consumer will be called in a loop followed by a query to the supplied
predicate. If the predicate returns a non None value the wait will be ended and
the waiter will return whatever the predicate returned.
If the max_time passes the waiter will be terminated and the waiter will return None
Note: The reason there we return a factory and not directly the waiter is that we
need to configure the consumer with the test settings (settings are typically available
in the test) so a test would typically first create the waiter and the use it to wait for
the required condition:
waiter = wait_for_ingest_consumer( test_settings_derived_from_the_project_settings)
result = waiter( my_predicate, SOME_TIMEOUT)
assert result == expected_result
"""
def factory(settings, **kwargs):
consumer = session_ingest_consumer(settings, **kwargs)
def waiter(exit_predicate, max_time=MAX_SECONDS_WAITING_FOR_EVENT):
"""
Implements a wait loop for the ingest consumer
:param exit_predicate: A Callable[(),Any] that will be called in a loop after each call
to the KafkaConsumer _run_once()
:param max_time: maximum time in seconds to wait
:return: the first non None result returned by the exit predicate or None if the
max time has expired without the exit predicate returning a non None value
"""
start_wait = time.time()
with task_runner():
while time.time() - start_wait < max_time:
consumer._run_once()
# check if the condition is satisfied
val = exit_predicate()
if val is not None:
return val # we got what we were waiting for stop looping
_log.warning(
"Ingest consumer waiter timed-out after %d seconds", time.time() - start_wait
)
return None # timeout without any success
return waiter
return factory
| _KafkaAdminWrapper |
python | Pylons__pyramid | tests/test_traversal.py | {
"start": 45978,
"end": 46767
} | class ____:
application_url = (
'http://example.com:5432' # app_url never ends with slash
)
matchdict = None
matched_route = None
def __init__(self, environ=None, path_info=text_('/'), toraise=None):
if environ is None:
environ = {}
self.environ = environ
self._set_path_info(path_info)
self.toraise = toraise
def _get_path_info(self):
if self.toraise:
raise self.toraise
return self._path_info
def _set_path_info(self, v):
self._path_info = v
path_info = property(_get_path_info, _set_path_info)
def _makeRequest(environ=None):
from pyramid.registry import Registry
request = DummyRequest()
request.registry = Registry()
return request
| DummyRequest |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/diff_test/package.py | {
"start": 228,
"end": 1000
} | class ____(AutotoolsPackage):
"""zlib replacement with optimizations for next generation systems."""
homepage = "https://github.com/zlib-ng/zlib-ng"
url = "https://github.com/zlib-ng/zlib-ng/archive/2.0.0.tar.gz"
git = "https://github.com/zlib-ng/zlib-ng.git"
license("Zlib")
version("2.1.6", tag="2.1.6", commit="74253725f884e2424a0dd8ae3f69896d5377f325")
version("2.1.5", sha256="3f6576971397b379d4205ae5451ff5a68edf6c103b2f03c4188ed7075fbb5f04")
version("2.1.4", sha256="a0293475e6a44a3f6c045229fe50f69dc0eebc62a42405a51f19d46a5541e77a")
version("2.0.7", sha256="6c0853bb27738b811f2b4d4af095323c3d5ce36ceed6b50e5f773204fb8f7200")
version("2.0.0", sha256="86993903527d9b12fc543335c19c1d33a93797b3d4d37648b5addae83679ecd8")
| DiffTest |
python | pypa__hatch | src/hatch/utils/platform.py | {
"start": 641,
"end": 11033
} | class ____:
def __init__(self, display_func: Callable = print) -> None:
self.__display_func = display_func
# Lazily loaded constants
self.__default_shell: str | None = None
self.__format_file_uri: Callable[[str], str] | None = None
self.__join_command_args: Callable[[list[str]], str] | None = None
self.__name: str | None = None
self.__display_name: str | None = None
self.__home: Path | None = None
# Whether or not an interactive status is being displayed
self.displaying_status = False
self.__modules = LazilyLoadedModules()
@property
def modules(self) -> LazilyLoadedModules:
"""
Accessor for lazily loading modules that either take multiple milliseconds to import
(like `shutil` and `subprocess`) or are not used on all platforms (like `shlex`).
"""
return self.__modules
def format_for_subprocess(self, command: str | list[str], *, shell: bool) -> str | list[str]:
"""
Format the given command in a cross-platform manner for immediate consumption by subprocess utilities.
"""
if self.windows:
# Manually locate executables on Windows to avoid multiple cases in which `shell=True` is required:
#
# - If the `PATH` environment variable has been modified, see:
# https://github.com/python/cpython/issues/52803
# - Executables that do not have the extension `.exe`, see:
# https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessw
if not shell and not isinstance(command, str):
executable = command[0]
new_command = [self.modules.shutil.which(executable) or executable]
new_command.extend(command[1:])
return new_command
elif not shell and isinstance(command, str):
return self.modules.shlex.split(command)
return command
@staticmethod
def exit_with_code(code: str | int | None) -> None:
sys.exit(code)
def _run_command_integrated(
self, command: str | list[str], *, shell: bool = False, **kwargs: Any
) -> CompletedProcess:
with self.capture_process(command, shell=shell, **kwargs) as process:
for line in self.stream_process_output(process):
self.__display_func(line, end="")
stdout, stderr = process.communicate()
return self.modules.subprocess.CompletedProcess(process.args, process.poll(), stdout, stderr)
def run_command(self, command: str | list[str], *, shell: bool = False, **kwargs: Any) -> CompletedProcess:
"""
Equivalent to the standard library's
[subprocess.run](https://docs.python.org/3/library/subprocess.html#subprocess.run),
with the command first being
[properly formatted](utilities.md#hatch.utils.platform.Platform.format_for_subprocess).
"""
if self.displaying_status and not kwargs.get("capture_output"):
return self._run_command_integrated(command, shell=shell, **kwargs)
self.populate_default_popen_kwargs(kwargs, shell=shell)
return self.modules.subprocess.run(self.format_for_subprocess(command, shell=shell), shell=shell, **kwargs)
def check_command(self, command: str | list[str], *, shell: bool = False, **kwargs: Any) -> CompletedProcess:
"""
Equivalent to [run_command](utilities.md#hatch.utils.platform.Platform.run_command),
but non-zero exit codes will gracefully end program execution.
"""
process = self.run_command(command, shell=shell, **kwargs)
if process.returncode:
self.exit_with_code(process.returncode)
return process
def check_command_output(self, command: str | list[str], *, shell: bool = False, **kwargs: Any) -> str:
"""
Equivalent to the output from the process returned by
[capture_process](utilities.md#hatch.utils.platform.Platform.capture_process),
but non-zero exit codes will gracefully end program execution.
"""
kwargs.setdefault("stdout", self.modules.subprocess.PIPE)
kwargs.setdefault("stderr", self.modules.subprocess.STDOUT)
self.populate_default_popen_kwargs(kwargs, shell=shell)
process = self.modules.subprocess.run(self.format_for_subprocess(command, shell=shell), shell=shell, **kwargs)
if process.returncode:
# Callers might not want to merge both streams so try stderr first
self.__display_func((process.stderr or process.stdout).decode("utf-8"))
self.exit_with_code(process.returncode)
return process.stdout.decode("utf-8")
def capture_process(self, command: str | list[str], *, shell: bool = False, **kwargs: Any) -> Popen:
"""
Equivalent to the standard library's
[subprocess.Popen](https://docs.python.org/3/library/subprocess.html#subprocess.Popen),
with all output captured by `stdout` and the command first being
[properly formatted](utilities.md#hatch.utils.platform.Platform.format_for_subprocess).
"""
self.populate_default_popen_kwargs(kwargs, shell=shell)
return self.modules.subprocess.Popen(
self.format_for_subprocess(command, shell=shell),
shell=shell,
stdout=self.modules.subprocess.PIPE,
stderr=self.modules.subprocess.STDOUT,
**kwargs,
)
def populate_default_popen_kwargs(self, kwargs: dict[str, Any], *, shell: bool) -> None:
# https://support.apple.com/en-us/HT204899
# https://en.wikipedia.org/wiki/System_Integrity_Protection
if (
"executable" not in kwargs
and self.macos
and shell
and any(env_var.startswith(("DYLD_", "LD_")) for env_var in os.environ)
):
default_paths = os.environ.get("PATH", os.defpath).split(os.pathsep)
unprotected_paths = []
for path in default_paths:
normalized_path = os.path.normpath(path)
if not normalized_path.startswith((
"/System",
"/usr",
"/bin",
"/sbin",
"/var",
)) or normalized_path.startswith("/usr/local"):
unprotected_paths.append(path)
search_path = os.pathsep.join(unprotected_paths)
for exe_name in ("sh", "bash", "zsh", "fish"):
executable = self.modules.shutil.which(exe_name, path=search_path)
if executable:
kwargs["executable"] = executable
break
@staticmethod
def stream_process_output(process: Popen) -> Iterable[str]:
# To avoid blocking never use a pipe's file descriptor iterator. See https://bugs.python.org/issue3907
for line in iter(process.stdout.readline, b""): # type: ignore[union-attr]
yield line.decode("utf-8")
@property
def default_shell(self) -> str:
"""
Returns the default shell of the system.
On Windows systems first try the `SHELL` environment variable, if present, followed by
the `COMSPEC` environment variable, defaulting to `cmd`. On all other platforms only
the `SHELL` environment variable will be used, defaulting to `bash`.
"""
if self.__default_shell is None:
if self.windows:
self.__default_shell = cast(str, os.environ.get("SHELL", os.environ.get("COMSPEC", "cmd")))
else:
self.__default_shell = cast(str, os.environ.get("SHELL", "bash"))
return self.__default_shell
@property
def join_command_args(self) -> Callable[[list[str]], str]:
if self.__join_command_args is None:
if self.windows:
self.__join_command_args = self.modules.subprocess.list2cmdline
else:
self.__join_command_args = self.modules.shlex.join
return self.__join_command_args
@property
def format_file_uri(self) -> Callable[[str], str]:
if self.__format_file_uri is None:
if self.windows:
self.__format_file_uri = lambda p: f"file:///{p}".replace("\\", "/")
else:
self.__format_file_uri = lambda p: f"file://{p}"
return self.__format_file_uri
@property
def windows(self) -> bool:
"""
Indicates whether Hatch is running on Windows.
"""
return self.name == "windows"
@property
def macos(self) -> bool:
"""
Indicates whether Hatch is running on macOS.
"""
return self.name == "macos"
@property
def linux(self) -> bool:
"""
Indicates whether Hatch is running on neither Windows nor macOS.
"""
return not (self.windows or self.macos)
def exit_with_command(self, command: list[str]) -> None:
"""
Run the given command and exit with its exit code. On non-Windows systems, this uses the standard library's
[os.execvp](https://docs.python.org/3/library/os.html#os.execvp).
"""
if self.windows:
process = self.run_command(command)
self.exit_with_code(process.returncode)
else:
os.execvp(command[0], command) # noqa: S606
@property
def name(self) -> str:
"""
One of the following:
- `linux`
- `windows`
- `macos`
"""
if self.__name is None:
self.__name = get_platform_name()
return self.__name
@property
def display_name(self) -> str:
"""
One of the following:
- `Linux`
- `Windows`
- `macOS`
"""
if self.__display_name is None:
self.__display_name = "macOS" if self.macos else self.name.capitalize()
return self.__display_name
@property
def home(self) -> Path:
"""
The user's home directory as a path-like object.
"""
if self.__home is None:
from hatch.utils.fs import Path
self.__home = Path(os.path.expanduser("~"))
return self.__home
| Platform |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramInference2.py | {
"start": 295,
"end": 377
} | class ____(Generic[T]):
def method1(self, a: T, b: list[T]) -> None: ...
| Parent1 |
python | scipy__scipy | scipy/optimize/tests/test_differentiable_functions.py | {
"start": 1185,
"end": 16653
} | class ____(TestCase):
def test_finite_difference_grad(self):
ex = ExScalarFunction()
nfev = 0
ngev = 0
x0 = [1.0, 0.0]
analit = ScalarFunction(ex.fun, x0, (), ex.grad,
ex.hess, None, (-np.inf, np.inf))
nfev += 1
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev, nfev)
assert_array_equal(ex.ngev, ngev)
assert_array_equal(analit.ngev, nfev)
approx = ScalarFunction(ex.fun, x0, (), '2-point',
ex.hess, None, (-np.inf, np.inf))
nfev += 3
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_equal(analit.f, approx.f)
assert_array_almost_equal(analit.g, approx.g)
x = [10, 0.3]
f_analit = analit.fun(x)
g_analit = analit.grad(x)
nfev += 1
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
f_approx = approx.fun(x)
g_approx = approx.grad(x)
nfev += 3
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_almost_equal(f_analit, f_approx)
assert_array_almost_equal(g_analit, g_approx)
x = [2.0, 1.0]
g_analit = analit.grad(x)
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
g_approx = approx.grad(x)
nfev += 3
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_almost_equal(g_analit, g_approx)
x = [2.5, 0.3]
f_analit = analit.fun(x)
g_analit = analit.grad(x)
nfev += 1
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
f_approx = approx.fun(x)
g_approx = approx.grad(x)
nfev += 3
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_almost_equal(f_analit, f_approx)
assert_array_almost_equal(g_analit, g_approx)
x = [2, 0.3]
f_analit = analit.fun(x)
g_analit = analit.grad(x)
nfev += 1
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
f_approx = approx.fun(x)
g_approx = approx.grad(x)
nfev += 3
ngev += 1
assert_array_equal(ex.nfev, nfev)
assert_array_equal(analit.nfev+approx.nfev, nfev)
assert_array_equal(analit.ngev+approx.ngev, ngev)
assert_array_almost_equal(f_analit, f_approx)
assert_array_almost_equal(g_analit, g_approx)
@pytest.mark.fail_slow(5.0)
def test_workers(self):
x0 = np.array([2.0, 0.3])
ex = ExScalarFunction()
ex2 = ExScalarFunction()
with MapWrapper(2) as mapper:
approx = ScalarFunction(ex.fun, x0, (), '2-point',
ex.hess, None, (-np.inf, np.inf),
workers=mapper)
approx_series = ScalarFunction(ex2.fun, x0, (), '2-point',
ex2.hess, None, (-np.inf, np.inf),
)
assert_allclose(approx.grad(x0), ex.grad(x0))
assert_allclose(approx_series.grad(x0), ex.grad(x0))
assert_allclose(approx_series.hess(x0), ex.hess(x0))
assert_allclose(approx.hess(x0), ex.hess(x0))
assert_equal(approx.nfev, approx_series.nfev)
assert_equal(approx_series.nfev, ex2.nfev)
assert_equal(approx.ngev, approx_series.ngev)
assert_equal(approx.nhev, approx_series.nhev)
assert_equal(approx_series.nhev, ex2.nhev)
ex = ExScalarFunction()
ex2 = ExScalarFunction()
approx = ScalarFunction(ex.fun, x0, (), '3-point',
ex.hess, None, (-np.inf, np.inf),
workers=mapper)
approx_series = ScalarFunction(ex2.fun, x0, (), '3-point',
ex2.hess, None, (-np.inf, np.inf),
)
assert_allclose(approx.grad(x0), ex.grad(x0))
assert_allclose(approx_series.grad(x0), ex.grad(x0))
assert_allclose(approx_series.hess(x0), ex.hess(x0))
assert_allclose(approx.hess(x0), ex.hess(x0))
assert_equal(approx.nfev, approx_series.nfev)
assert_equal(approx_series.nfev, ex2.nfev)
assert_equal(approx.ngev, approx_series.ngev)
assert_equal(approx.nhev, approx_series.nhev)
assert_equal(approx_series.nhev, ex2.nhev)
ex = ExScalarFunction()
ex2 = ExScalarFunction()
x1 = np.array([3.0, 4.0])
approx = ScalarFunction(ex.fun, x0, (), ex.grad,
'3-point', None, (-np.inf, np.inf),
workers=mapper)
approx_series = ScalarFunction(ex2.fun, x0, (), ex2.grad,
'3-point', None, (-np.inf, np.inf),
)
assert_allclose(approx.grad(x1), ex.grad(x1))
assert_allclose(approx_series.grad(x1), ex.grad(x1))
approx_series.hess(x1)
approx.hess(x1)
assert_equal(approx.nfev, approx_series.nfev)
assert_equal(approx_series.nfev, ex2.nfev)
assert_equal(approx.ngev, approx_series.ngev)
assert_equal(approx_series.ngev, ex2.ngev)
assert_equal(approx.nhev, approx_series.nhev)
assert_equal(approx_series.nhev, ex2.nhev)
def test_fun_and_grad(self):
ex = ExScalarFunction()
def fg_allclose(x, y):
assert_allclose(x[0], y[0])
assert_allclose(x[1], y[1])
# with analytic gradient
x0 = [2.0, 0.3]
analit = ScalarFunction(ex.fun, x0, (), ex.grad,
ex.hess, None, (-np.inf, np.inf))
fg = ex.fun(x0), ex.grad(x0)
fg_allclose(analit.fun_and_grad(x0), fg)
assert analit.ngev == 1
x0[1] = 1.
fg = ex.fun(x0), ex.grad(x0)
fg_allclose(analit.fun_and_grad(x0), fg)
# with finite difference gradient
x0 = [2.0, 0.3]
sf = ScalarFunction(ex.fun, x0, (), '3-point',
ex.hess, None, (-np.inf, np.inf))
assert sf.ngev == 1
fg = ex.fun(x0), ex.grad(x0)
fg_allclose(sf.fun_and_grad(x0), fg)
assert sf.ngev == 1
x0[1] = 1.
fg = ex.fun(x0), ex.grad(x0)
fg_allclose(sf.fun_and_grad(x0), fg)
    def test_finite_difference_hess_linear_operator(self):
        """A '2-point' FD Hessian built from an analytic gradient is exposed
        as a ``LinearOperator`` and must agree with the analytic Hessian,
        while the fun/grad/hess evaluation counters stay consistent.
        """
        ex = ExScalarFunction()
        # Expected running totals of evaluations performed on `ex` by the
        # two wrappers combined.
        nfev = 0
        ngev = 0
        nhev = 0

        x0 = [1.0, 0.0]
        # Reference wrapper: analytic fun, grad and hess. Construction
        # evaluates each of them once.
        analit = ScalarFunction(ex.fun, x0, (), ex.grad,
                                ex.hess, None, (-np.inf, np.inf))
        nfev += 1
        ngev += 1
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev, nhev)

        # Wrapper whose Hessian is a '2-point' FD approximation of the
        # analytic gradient; its H attribute must be a LinearOperator.
        approx = ScalarFunction(ex.fun, x0, (), ex.grad,
                                '2-point', None, (-np.inf, np.inf))
        assert_(isinstance(approx.H, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_equal(analit.f, approx.f)
            assert_array_almost_equal(analit.g, approx.g)
            assert_array_almost_equal(analit.H.dot(v), approx.H.dot(v))
        # Constructing `approx` cost one extra fun eval and four grad evals
        # (its own gradient plus, presumably, the FD Hessian stencil —
        # confirm the 1+3 split). The dot products above evaluate nothing,
        # and no analytic hess was called, so nhev is unchanged.
        nfev += 1
        ngev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        # Hessian at a new point, without an explicit grad call first.
        x = [2.0, 1.0]
        H_analit = analit.hess(x)
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        H_approx = approx.hess(x)
        assert_(isinstance(H_approx, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
        # The FD Hessian at the new point costs four gradient evaluations
        # and no extra fun evals.
        ngev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        # Same check at a second fresh point.
        x = [2.1, 1.2]
        H_analit = analit.hess(x)
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        H_approx = approx.hess(x)
        assert_(isinstance(H_approx, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
        ngev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        # Now evaluate the gradient explicitly before the Hessian. The
        # analytic side adds one grad eval; the approx side still totals
        # four grad evals for grad+hess combined (the FD Hessian can
        # presumably reuse the fresh gradient — confirm).
        x = [2.5, 0.3]
        _ = analit.grad(x)
        H_analit = analit.hess(x)
        ngev += 1
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        _ = approx.grad(x)
        H_approx = approx.hess(x)
        assert_(isinstance(H_approx, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
        ngev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        # Repeat the grad-then-hess pattern at one more point.
        x = [5.2, 2.3]
        _ = analit.grad(x)
        H_analit = analit.hess(x)
        ngev += 1
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        _ = approx.grad(x)
        H_approx = approx.hess(x)
        assert_(isinstance(H_approx, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
        ngev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
def test_x_storage_overlap(self):
# Scalar_Function should not store references to arrays, it should
# store copies - this checks that updating an array in-place causes
# Scalar_Function.x to be updated.
def f(x):
return np.sum(np.asarray(x) ** 2)
x = np.array([1., 2., 3.])
sf = ScalarFunction(f, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf))
assert x is not sf.x
assert_equal(sf.fun(x), 14.0)
assert x is not sf.x
x[0] = 0.
f1 = sf.fun(x)
assert_equal(f1, 13.0)
x[0] = 1
f2 = sf.fun(x)
assert_equal(f2, 14.0)
assert x is not sf.x
# now test with a HessianUpdate strategy specified
hess = BFGS()
x = np.array([1., 2., 3.])
sf = ScalarFunction(f, x, (), '3-point', hess, None, (-np.inf, np.inf))
assert x is not sf.x
assert_equal(sf.fun(x), 14.0)
assert x is not sf.x
x[0] = 0.
f1 = sf.fun(x)
assert_equal(f1, 13.0)
x[0] = 1
f2 = sf.fun(x)
assert_equal(f2, 14.0)
assert x is not sf.x
# gh13740 x is changed in user function
def ff(x):
x *= x # overwrite x
return np.sum(x)
x = np.array([1., 2., 3.])
sf = ScalarFunction(
ff, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf)
)
assert x is not sf.x
assert_equal(sf.fun(x), 14.0)
assert_equal(sf.x, np.array([1., 2., 3.]))
assert x is not sf.x
def test_lowest_x(self):
# ScalarFunction should remember the lowest func(x) visited.
x0 = np.array([2, 3, 4])
sf = ScalarFunction(rosen, x0, (), rosen_der, rosen_hess,
None, None)
sf.fun([1, 1, 1])
sf.fun(x0)
sf.fun([1.01, 1, 1.0])
sf.grad([1.01, 1, 1.0])
assert_equal(sf._lowest_f, 0.0)
assert_equal(sf._lowest_x, [1.0, 1.0, 1.0])
sf = ScalarFunction(rosen, x0, (), '2-point', rosen_hess,
None, (-np.inf, np.inf))
sf.fun([1, 1, 1])
sf.fun(x0)
sf.fun([1.01, 1, 1.0])
sf.grad([1.01, 1, 1.0])
assert_equal(sf._lowest_f, 0.0)
assert_equal(sf._lowest_x, [1.0, 1.0, 1.0])
def test_float_size(self):
x0 = np.array([2, 3, 4]).astype(np.float32)
# check that ScalarFunction/approx_derivative always send the correct
# float width
def rosen_(x):
assert x.dtype == np.float32
return rosen(x)
sf = ScalarFunction(rosen_, x0, (), '2-point', rosen_hess,
None, (-np.inf, np.inf))
res = sf.fun(x0)
assert res.dtype == np.float32
| TestScalarFunction |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 280850,
"end": 281181
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("DeploymentReviewer", graphql_name="node")
| DeploymentReviewerEdge |
python | huggingface__transformers | tests/models/gemma/test_tokenization_gemma.py | {
"start": 894,
"end": 3063
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "hf-internal-testing/dummy-gemma"
tokenizer_class = GemmaTokenizer
integration_expected_tokens = ['This', '▁is', '▁a', '▁test', '▁😊', '\n', 'I', '▁was', '▁born', '▁in', '▁', '9', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '\n', '生活的', '真', '谛', '是', '\n', 'Hi', '▁▁', 'Hello', '\n', 'Hi', '▁▁▁', 'Hello', '\n\n', '▁', '\n', '▁▁', '\n', '▁Hello', '\n', '<', 's', '>', '\n', 'hi', '<', 's', '>', 'there', '\n', 'The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁encoded', ':', '▁Hello', '.', '\n', 'But', '▁i', 'rd', '▁and', '▁ปี', '▁▁▁', 'ird', '▁▁▁', 'ด', '\n', 'Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_token_ids = [1596, 603, 476, 2121, 44416, 108, 235285, 729, 7565, 575, 235248, 235315, 235284, 235276, 235276, 235276, 235269, 578, 736, 603, 40751, 235335, 235265, 108, 122182, 235710, 245467, 235427, 108, 2151, 139, 4521, 108, 2151, 140, 4521, 109, 235248, 108, 139, 108, 25957, 108, 235322, 235256, 235313, 108, 544, 235322, 235256, 235313, 11048, 108, 651, 2412, 2067, 1412, 614, 10338, 49748, 235292, 25957, 235265, 108, 1860, 496, 1924, 578, 73208, 140, 5650, 140, 235732, 108, 6750, 1368, 708, 692, 3900] # fmt: skip
expected_tokens_from_ids = ['This', '▁is', '▁a', '▁test', '▁😊', '\n', 'I', '▁was', '▁born', '▁in', '▁', '9', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '\n', '生活的', '真', '谛', '是', '\n', 'Hi', '▁▁', 'Hello', '\n', 'Hi', '▁▁▁', 'Hello', '\n\n', '▁', '\n', '▁▁', '\n', '▁Hello', '\n', '<', 's', '>', '\n', 'hi', '<', 's', '>', 'there', '\n', 'The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁encoded', ':', '▁Hello', '.', '\n', 'But', '▁i', 'rd', '▁and', '▁ปี', '▁▁▁', 'ird', '▁▁▁', 'ด', '\n', 'Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_decoded_text = "This is a test 😊\nI was born in 92000, and this is falsé.\n生活的真谛是\nHi Hello\nHi Hello\n\n \n \n Hello\n<s>\nhi<s>there\nThe following string should be properly encoded: Hello.\nBut ird and ปี ird ด\nHey how are you doing"
| GemmaTokenizationTest |
python | cython__cython | Cython/Compiler/Tests/TestFlowControl.py | {
"start": 356,
"end": 397
} | class ____:
type = FakeType()
| FakeEntry |
python | scipy__scipy | benchmarks/benchmarks/spatial.py | {
"start": 17515,
"end": 17945
} | class ____(Benchmark):
params = [10, 100, 1000]
param_names = ['num_points']
def setup(self, num_points):
rng = np.random.default_rng(123)
self.points1 = rng.random((num_points, 3))
self.points2 = rng.random((num_points, 3))
def time_directed_hausdorff(self, num_points):
# time directed_hausdorff code in 3 D
distance.directed_hausdorff(self.points1, self.points2)
| Hausdorff |
python | wandb__wandb | wandb/sdk/data_types/base_types/json_metadata.py | {
"start": 476,
"end": 1553
} | class ____(Media):
"""JSONMetadata is a type for encoding arbitrary metadata as files."""
def __init__(self, val: dict) -> None:
super().__init__()
self.validate(val)
self._val = val
ext = "." + self.type_name() + ".json"
tmp_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + ext)
with codecs.open(tmp_path, "w", encoding="utf-8") as fp:
util.json_dump_uncompressed(self._val, fp)
self._set_file(tmp_path, is_tmp=True, extension=ext)
@classmethod
def get_media_subdir(cls: Type["JSONMetadata"]) -> str:
return os.path.join("media", "metadata", cls.type_name())
def to_json(self, run_or_artifact: Union["LocalRun", "Artifact"]) -> dict:
json_dict = super().to_json(run_or_artifact)
json_dict["_type"] = self.type_name()
return json_dict
# These methods should be overridden in the child class
@classmethod
def type_name(cls) -> str:
return "metadata"
def validate(self, val: dict) -> bool:
return True
| JSONMetadata |
python | django__django | tests/auth_tests/test_auth_backends.py | {
"start": 41197,
"end": 48854
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.user1 = User.objects.create_user("test", "test@example.com", "test")
def setUp(self):
self.sensitive_password = "mypassword"
@override_settings(
AUTHENTICATION_BACKENDS=["auth_tests.test_auth_backends.TypeErrorBackend"]
)
def test_type_error_raised(self):
"""A TypeError within a backend is propagated properly (#18171)."""
with self.assertRaises(TypeError):
authenticate(username="test", password="test")
@override_settings(
AUTHENTICATION_BACKENDS=["auth_tests.test_auth_backends.TypeErrorBackend"]
)
def test_authenticate_sensitive_variables(self):
try:
authenticate(username="testusername", password=self.sensitive_password)
except TypeError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get("/"), *exc_info)
self.assertNotContains(response, self.sensitive_password, status_code=500)
self.assertContains(response, "TypeErrorBackend", status_code=500)
self.assertContains(
response,
'<tr><td>credentials</td><td class="code">'
"<pre>'********************'</pre></td></tr>",
html=True,
status_code=500,
)
@override_settings(
AUTHENTICATION_BACKENDS=["auth_tests.test_auth_backends.TypeErrorBackend"]
)
async def test_aauthenticate_sensitive_variables(self):
try:
await aauthenticate(
username="testusername", password=self.sensitive_password
)
except TypeError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get("/"), *exc_info)
self.assertNotContains(response, self.sensitive_password, status_code=500)
self.assertContains(response, "TypeErrorBackend", status_code=500)
self.assertContains(
response,
'<tr><td>credentials</td><td class="code">'
"<pre>'********************'</pre></td></tr>",
html=True,
status_code=500,
)
def test_clean_credentials_sensitive_variables(self):
try:
# Passing in a list to cause an exception
_clean_credentials([1, self.sensitive_password])
except TypeError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get("/"), *exc_info)
self.assertNotContains(response, self.sensitive_password, status_code=500)
self.assertContains(
response,
'<tr><td>credentials</td><td class="code">'
"<pre>'********************'</pre></td></tr>",
html=True,
status_code=500,
)
@override_settings(
ROOT_URLCONF="django.contrib.auth.urls",
AUTHENTICATION_BACKENDS=["auth_tests.test_auth_backends.TypeErrorBackend"],
)
def test_login_process_sensitive_variables(self):
try:
self.client.post(
reverse("login"),
dict(username="testusername", password=self.sensitive_password),
)
except TypeError:
exc_info = sys.exc_info()
rf = RequestFactory()
with patch("django.views.debug.ExceptionReporter", FilteredExceptionReporter):
response = technical_500_response(rf.get("/"), *exc_info)
self.assertNotContains(response, self.sensitive_password, status_code=500)
self.assertContains(response, "TypeErrorBackend", status_code=500)
# AuthenticationForm.clean().
self.assertContains(
response,
'<tr><td>password</td><td class="code">'
"<pre>'********************'</pre></td></tr>",
html=True,
status_code=500,
)
def test_setpasswordform_validate_passwords_sensitive_variables(self):
password_form = SetPasswordForm(AnonymousUser())
password_form.cleaned_data = {
"password1": self.sensitive_password,
"password2": self.sensitive_password + "2",
}
try:
password_form.validate_passwords()
except ValueError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get("/"), *exc_info)
self.assertNotContains(response, self.sensitive_password, status_code=500)
self.assertNotContains(response, self.sensitive_password + "2", status_code=500)
self.assertContains(
response,
'<tr><td>password1</td><td class="code">'
"<pre>'********************'</pre></td></tr>",
html=True,
status_code=500,
)
self.assertContains(
response,
'<tr><td>password2</td><td class="code">'
"<pre>'********************'</pre></td></tr>",
html=True,
status_code=500,
)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{"NAME": __name__ + ".TypeErrorValidator"},
]
)
def test_setpasswordform_validate_password_for_user_sensitive_variables(self):
password_form = SetPasswordForm(AnonymousUser())
password_form.cleaned_data = {"password2": self.sensitive_password}
try:
password_form.validate_password_for_user(AnonymousUser())
except TypeError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get("/"), *exc_info)
self.assertNotContains(response, self.sensitive_password, status_code=500)
self.assertContains(
response,
'<tr><td>password</td><td class="code">'
"<pre>'********************'</pre></td></tr>",
html=True,
status_code=500,
)
def test_passwordchangeform_clean_old_password_sensitive_variables(self):
password_form = PasswordChangeForm(User())
password_form.cleaned_data = {"old_password": self.sensitive_password}
password_form.error_messages = None
try:
password_form.clean_old_password()
except TypeError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get("/"), *exc_info)
self.assertNotContains(response, self.sensitive_password, status_code=500)
self.assertContains(
response,
'<tr><td>old_password</td><td class="code">'
"<pre>'********************'</pre></td></tr>",
html=True,
status_code=500,
)
@override_settings(
AUTHENTICATION_BACKENDS=(
"auth_tests.test_auth_backends.SkippedBackend",
"django.contrib.auth.backends.ModelBackend",
)
)
def test_skips_backends_without_arguments(self):
"""
A backend (SkippedBackend) is ignored if it doesn't accept the
credentials as arguments.
"""
self.assertEqual(authenticate(username="test", password="test"), self.user1)
@override_settings(
AUTHENTICATION_BACKENDS=(
"auth_tests.test_auth_backends.SkippedBackendWithDecoratedMethod",
"django.contrib.auth.backends.ModelBackend",
)
)
def test_skips_backends_with_decorated_method(self):
self.assertEqual(authenticate(username="test", password="test"), self.user1)
| AuthenticateTests |
python | PyCQA__pydocstyle | src/pydocstyle/parser.py | {
"start": 11640,
"end": 11890
} | class ____(Value):
_fields = 'kind value start end source'.split()
def __init__(self, *args):
super().__init__(*args)
self.kind = TokenKind(self.kind)
def __str__(self):
return f"{self.kind!r} ({self.value})"
| Token |
python | pypa__pipenv | pipenv/patched/pip/_internal/commands/wheel.py | {
"start": 887,
"end": 6496
} | class ____(RequirementCommand):
"""
Build Wheel archives for your requirements and dependencies.
Wheel is a built-package format, and offers the advantage of not
recompiling your software during every install. For more details, see the
wheel docs: https://wheel.readthedocs.io/en/latest/
'pip wheel' uses the build system interface as described here:
https://pip.pypa.io/en/stable/reference/build-system/
"""
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
def add_options(self) -> None:
self.cmd_opts.add_option(
"-w",
"--wheel-dir",
dest="wheel_dir",
metavar="dir",
default=os.curdir,
help=(
"Build wheels into <dir>, where the default is the "
"current working directory."
),
)
self.cmd_opts.add_option(cmdoptions.no_binary())
self.cmd_opts.add_option(cmdoptions.only_binary())
self.cmd_opts.add_option(cmdoptions.prefer_binary())
self.cmd_opts.add_option(cmdoptions.no_build_isolation())
self.cmd_opts.add_option(cmdoptions.use_pep517())
self.cmd_opts.add_option(cmdoptions.no_use_pep517())
self.cmd_opts.add_option(cmdoptions.check_build_deps())
self.cmd_opts.add_option(cmdoptions.constraints())
self.cmd_opts.add_option(cmdoptions.editable())
self.cmd_opts.add_option(cmdoptions.requirements())
self.cmd_opts.add_option(cmdoptions.src())
self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
self.cmd_opts.add_option(cmdoptions.no_deps())
self.cmd_opts.add_option(cmdoptions.progress_bar())
self.cmd_opts.add_option(
"--no-verify",
dest="no_verify",
action="store_true",
default=False,
help="Don't verify if built wheel is valid.",
)
self.cmd_opts.add_option(cmdoptions.config_settings())
self.cmd_opts.add_option(cmdoptions.build_options())
self.cmd_opts.add_option(cmdoptions.global_options())
self.cmd_opts.add_option(
"--pre",
action="store_true",
default=False,
help=(
"Include pre-release and development versions. By default, "
"pip only finds stable versions."
),
)
self.cmd_opts.add_option(cmdoptions.require_hashes())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, self.cmd_opts)
@with_cleanup
def run(self, options: Values, args: List[str]) -> int:
session = self.get_default_session(options)
finder = self._build_package_finder(options, session)
options.wheel_dir = normalize_path(options.wheel_dir)
ensure_dir(options.wheel_dir)
build_tracker = self.enter_context(get_build_tracker())
directory = TempDirectory(
delete=not options.no_clean,
kind="wheel",
globally_managed=True,
)
reqs = self.get_requirements(args, options, finder, session)
check_legacy_setup_py_options(options, reqs)
wheel_cache = WheelCache(options.cache_dir)
preparer = self.make_requirement_preparer(
temp_build_dir=directory,
options=options,
build_tracker=build_tracker,
session=session,
finder=finder,
download_dir=options.wheel_dir,
use_user_site=False,
verbosity=self.verbosity,
)
resolver = self.make_resolver(
preparer=preparer,
finder=finder,
options=options,
wheel_cache=wheel_cache,
ignore_requires_python=options.ignore_requires_python,
use_pep517=options.use_pep517,
)
self.trace_basic_info(finder)
requirement_set = resolver.resolve(reqs, check_supported_wheels=True)
reqs_to_build: List[InstallRequirement] = []
for req in requirement_set.requirements.values():
if req.is_wheel:
preparer.save_linked_requirement(req)
else:
reqs_to_build.append(req)
preparer.prepare_linked_requirements_more(requirement_set.requirements.values())
# build wheels
build_successes, build_failures = build(
reqs_to_build,
wheel_cache=wheel_cache,
verify=(not options.no_verify),
build_options=options.build_options or [],
global_options=options.global_options or [],
)
for req in build_successes:
assert req.link and req.link.is_wheel
assert req.local_file_path
# copy from cache to target directory
try:
shutil.copy(req.local_file_path, options.wheel_dir)
except OSError as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name,
e,
)
build_failures.append(req)
if len(build_failures) != 0:
raise CommandError("Failed to build one or more wheels")
return SUCCESS
| WheelCommand |
python | walkccc__LeetCode | solutions/2347. Best Poker Hand/2347.py | {
"start": 0,
"end": 323
} | class ____:
def bestHand(self, ranks: list[int], suits: list[str]) -> str:
if all(suit == suits[0] for suit in suits):
return 'Flush'
match max(Counter(ranks).values()):
case 5 | 4 | 3:
return 'Three of a Kind'
case 2:
return 'Pair'
case _:
return 'High Card'
| Solution |
python | getsentry__sentry | src/sentry/utils/cursors.py | {
"start": 2560,
"end": 9259
} | class ____(Sequence[T]):
def __init__(
self,
results: Sequence[T],
next: Cursor,
prev: Cursor,
hits: int | None = None,
max_hits: int | None = None,
):
self.results = results
self.next = next
self.prev = prev
self.hits = hits
self.max_hits = max_hits
def __len__(self) -> int:
return len(self.results)
def __iter__(self) -> Iterator[T]:
return iter(self.results)
def __repr__(self) -> str:
return f"<{type(self).__name__}: results={len(self.results)}>"
def __getitem__(self, key: Any) -> Any:
return self.results[key]
def _build_next_values(
cursor: Cursor, results: Sequence[T], key: KeyCallable, limit: int, is_desc: bool
) -> tuple[CursorValue, int, bool]:
value = cursor.value
offset = cursor.offset
is_prev = cursor.is_prev
num_results = len(results)
if not value and num_results:
value = key(results[0])
# Next cursor for a prev-cursor simply starts from that prev cursors value
# without an offset.
if is_prev:
return (value, 0, True)
# No results means no more next
if not num_results:
return (value, offset, False)
# Are there more results than whats on the current page?
has_next = num_results > limit
# Determine what our next cursor is by ensuring we have a unique offset
next_value = key(results[-1])
# value has not changed, page forward by adjusting the offset
if next_value == value:
next_offset = offset + limit
return next_value, next_offset, has_next
# We have an absolute value to page from. If any of the items in
# the current result set come *after* or *before* (depending on the
# is_desc flag) we will want to increment the offset to account for
# moving past them.
#
# This is required to account for loss of precision in the key value.
next_offset = 0
result_iter = reversed(results)
# If we have more results the last item in the results should be
# skipped, as we know we want to start from that item and do not
# need to offset from it.
if has_next:
next(result_iter)
for result in result_iter:
result_value = key(result)
is_larger = result_value >= next_value # type: ignore[operator]
is_smaller = result_value <= next_value # type: ignore[operator]
if (is_desc and is_smaller) or (not is_desc and is_larger):
next_offset += 1
else:
break
return next_value, next_offset, has_next
def _build_prev_values(
cursor: Cursor, results: Sequence[T], key: KeyCallable, limit: int, is_desc: bool
) -> tuple[CursorValue, int, bool]:
value = cursor.value
offset = cursor.offset
is_prev = cursor.is_prev
num_results = len(results)
if is_prev:
has_prev = num_results > limit
else:
# It's likely that there's a previous page if they passed us either
# offset values
has_prev = bool(value or offset)
# If the cursor contains previous results, the first item is the item that
# indicates if we have more items later, and is *not* the first item in the
# list, that should be used for the value.
first_prev_index = 1 if is_prev and has_prev else 0
# If we're paging back we need to calculate the key from the first result
# with for_prev=True to ensure rounding of the key is correct.See
# sentry.api.paginator.BasePaginator.get_item_key
prev_value = key(results[first_prev_index], for_prev=True) if results else 0
# Prev only has an offset if the cursor we were dealing with was a
# previous cursor. Otherwise we'd be taking the offset while moving forward.
prev_offset = offset if is_prev else 0
if not (is_prev and num_results):
return prev_value, prev_offset, has_prev
# Value has not changed, page back by adjusting the offset
if prev_value == value:
prev_offset = offset + limit
return prev_value, prev_offset, has_prev
# Just as in the next cursor builder, we may need to add an offset
# if any of the results at the beginning are *before* or *after*
# (depending on the is_desc flag).
#
# This is required to account for loss of precision in the key value.
prev_offset = 0
result_iter = iter(results)
# If we know there are more previous results, we need to move past
# the item indicating that more items exist.
if has_prev:
next(result_iter)
# Always move past the first item, this is the prev_value item and will
# already be offset in the next query.
next(result_iter)
for result in result_iter:
result_value = key(result, for_prev=True)
is_larger = result_value >= prev_value # type: ignore[operator]
is_smaller = result_value <= prev_value # type: ignore[operator]
# Note that the checks are reversed here as a prev query has
# it's ordering reversed.
if (is_desc and is_larger) or (not is_desc and is_smaller):
prev_offset += 1
else:
break
return prev_value, prev_offset, has_prev
def build_cursor(
results: Sequence[T],
key: KeyCallable,
limit: int = 100,
is_desc: bool = False,
cursor: Cursor | None = None,
hits: int | None = None,
max_hits: int | None = None,
on_results: OnResultCallable[T] | None = None,
) -> CursorResult[T | Any]:
if cursor is None:
cursor = Cursor(0, 0, 0)
# Compute values for next cursor
next_value, next_offset, has_next = _build_next_values(
cursor=cursor, results=results, key=key, limit=limit, is_desc=is_desc
)
# Compute values for prev cursor
prev_value, prev_offset, has_prev = _build_prev_values(
cursor=cursor, results=results, key=key, limit=limit, is_desc=is_desc
)
if cursor.is_prev and has_prev:
# A prev cursor with more results should have the first item chopped off
# as this is the item that indicates we have more items before, and
# should not be included on this page.
results = results[1:]
elif not cursor.is_prev:
# For next page cursors we cut off the extra item that indicates there
# are more items.
results = results[:limit]
next_cursor = Cursor(next_value or 0, next_offset, False, has_next)
prev_cursor = Cursor(prev_value or 0, prev_offset, True, has_prev)
if on_results:
results = on_results(results)
return CursorResult(
results=results, next=next_cursor, prev=prev_cursor, hits=hits, max_hits=max_hits
)
| CursorResult |
python | huggingface__transformers | src/transformers/models/audioflamingo3/modular_audioflamingo3.py | {
"start": 4128,
"end": 4970
} | class ____(VoxtralMultiModalProjector):
"""
Audio adaptor (small MLP) that projects AudioFlamingo3Encoder features
to the LLM embedding space so they can replace `<sound>` tokens.
"""
def __init__(self, config: AudioFlamingo3Config):
super().__init__()
self.linear_1 = nn.Linear(
config.audio_config.hidden_size, config.text_config.hidden_size, bias=config.projector_bias
)
self.act = ACT2FN[config.projector_hidden_act]
self.linear_2 = nn.Linear(
config.text_config.hidden_size, config.text_config.hidden_size, bias=config.projector_bias
)
@auto_docstring(
custom_intro="""
The AudioFlamingo3 model which consists of a fine-tuned Whisper encoder, a multi-modal projector and a Qwen2 language model.
"""
)
| AudioFlamingo3MultiModalProjector |
python | getsentry__sentry | tests/sentry/incidents/test_metric_issue_post_process.py | {
"start": 1023,
"end": 11243
} | class ____(BaseWorkflowTest, BaseMetricIssueTest):
def setUp(self) -> None:
super().setUp()
self.critical_action, self.warning_action = self.create_metric_issue_workflow(self.detector)
@pytest.fixture(autouse=True)
def with_feature_flags(self):
with Feature(
{
"organizations:issue-metric-issue-ingest": True,
"organizations:issue-metric-issue-post-process-group": True,
}
):
yield
def create_metric_issue_workflow(self, detector: Detector):
# create the canonical workflow for a metric issue
workflow = self.create_workflow()
self.create_detector_workflow(detector=detector, workflow=workflow)
critical_dcg = self.create_data_condition_group(organization=self.organization)
self.create_workflow_data_condition_group(condition_group=critical_dcg, workflow=workflow)
self.create_data_condition(
comparison=DetectorPriorityLevel.HIGH,
condition_result=True,
type=Condition.ISSUE_PRIORITY_GREATER_OR_EQUAL,
condition_group=critical_dcg,
)
self.create_data_condition(
comparison=DetectorPriorityLevel.HIGH,
condition_result=True,
type=Condition.ISSUE_PRIORITY_DEESCALATING,
condition_group=critical_dcg,
)
critical_action = self.create_action(
integration_id=self.integration.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "channel-123",
"target_display": "Test Channel",
},
)
self.create_data_condition_group_action(critical_action, critical_dcg)
warning_dcg = self.create_data_condition_group(organization=self.organization)
self.create_workflow_data_condition_group(condition_group=warning_dcg, workflow=workflow)
self.create_data_condition(
comparison=DetectorPriorityLevel.MEDIUM,
condition_result=True,
type=Condition.ISSUE_PRIORITY_GREATER_OR_EQUAL,
condition_group=warning_dcg,
)
self.create_data_condition(
comparison=DetectorPriorityLevel.MEDIUM,
condition_result=True,
type=Condition.ISSUE_PRIORITY_DEESCALATING,
condition_group=warning_dcg,
)
warning_action = self.create_action(
integration_id=self.integration.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "channel-456",
"target_display": "Test Channel",
},
)
self.create_data_condition_group_action(warning_action, warning_dcg)
return (
critical_action,
warning_action,
)
def call_post_process_group(self, occurrence):
stored_occurrence = IssueOccurrence.fetch(occurrence.id, occurrence.project_id)
assert stored_occurrence
event = eventstore.backend.get_event_by_id(
occurrence.project_id, stored_occurrence.event_id
)
assert event
with self.tasks():
post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
cache_key=None,
occurrence_id=occurrence.id,
group_id=event.group_id,
project_id=occurrence.project_id,
eventstream_type=EventStreamEventType.Generic.value,
)
def get_group(self, occurrence):
stored_occurrence = IssueOccurrence.fetch(occurrence.id, occurrence.project_id)
assert stored_occurrence
event = eventstore.backend.get_event_by_id(
occurrence.project_id, stored_occurrence.event_id
)
assert event and event.group_id
return Group.objects.get(id=event.group_id)
def test_simple(self, mock_trigger: MagicMock) -> None:
value = self.critical_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
occurrence.save()
self.call_post_process_group(occurrence)
assert mock_trigger.call_count == 2 # warning + critical actions
def test_escalation(self, mock_trigger: MagicMock) -> None:
value = self.warning_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
occurrence.save()
self.call_post_process_group(occurrence)
assert mock_trigger.call_count == 1 # just warning action
mock_trigger.reset_mock()
value = self.critical_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value, 1000)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
occurrence.save()
self.call_post_process_group(occurrence)
assert mock_trigger.call_count == 2 # warning + critical actions
def test_escalation_with_deduped_actions(self, mock_trigger: MagicMock) -> None:
# make the warning action same as the critical action
self.warning_action.config = self.critical_action.config
self.warning_action.save()
value = self.warning_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
occurrence.save()
self.call_post_process_group(occurrence)
assert mock_trigger.call_count == 1 # just warning action
mock_trigger.reset_mock()
value = self.critical_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value, 1000)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
occurrence.save()
self.call_post_process_group(occurrence)
assert mock_trigger.call_count == 1 # just warning action (because we deduped the actions)
def test_deescalation(self, mock_trigger: MagicMock) -> None:
value = self.critical_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
occurrence.save()
self.call_post_process_group(occurrence)
assert mock_trigger.call_count == 2 # both actions
mock_trigger.reset_mock()
value = self.warning_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value, 1000)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
occurrence.save()
self.call_post_process_group(occurrence)
assert mock_trigger.call_count == 2 # both actions
def test_resolution_from_critical(self, mock_trigger: MagicMock) -> None:
value = self.critical_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
occurrence.save()
group = self.get_group(occurrence)
self.call_post_process_group(occurrence)
assert mock_trigger.call_count == 2 # both actions
mock_trigger.reset_mock()
value = 0
data_packet = self.create_subscription_packet(value, 1000)
evaluation_result = self.process_packet_and_return_result(data_packet)
assert isinstance(evaluation_result, StatusChangeMessage)
message = evaluation_result.to_dict()
# TODO: Actions don't trigger on resolution yet. Update this test when this functionality exists.
with patch("sentry.workflow_engine.tasks.workflows.metrics.incr") as mock_incr:
with self.tasks():
update_status(group, message)
mock_incr.assert_any_call(
"workflow_engine.tasks.process_workflows.activity_update.executed",
tags={
"activity_type": ActivityType.SET_RESOLVED.value,
"detector_type": self.detector.type,
},
sample_rate=1.0,
)
def test_resolution_from_warning(self, mock_trigger: MagicMock) -> None:
value = self.warning_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
occurrence.save()
group = self.get_group(occurrence)
self.call_post_process_group(occurrence)
assert mock_trigger.call_count == 1 # warning action
mock_trigger.reset_mock()
value = 0
data_packet = self.create_subscription_packet(value, 1000)
evaluation_result = self.process_packet_and_return_result(data_packet)
assert isinstance(evaluation_result, StatusChangeMessage)
message = evaluation_result.to_dict()
# TODO: Actions don't trigger on resolution yet. Update this test when this functionality exists.
with patch("sentry.workflow_engine.tasks.workflows.metrics.incr") as mock_incr:
with self.tasks():
update_status(group, message)
mock_incr.assert_any_call(
"workflow_engine.tasks.process_workflows.activity_update.executed",
tags={
"activity_type": ActivityType.SET_RESOLVED.value,
"detector_type": self.detector.type,
},
sample_rate=1.0,
)
| MetricIssueIntegrationTest |
python | tensorflow__tensorflow | tensorflow/python/data/ops/save_op.py | {
"start": 2811,
"end": 4770
} | class ____(dataset_ops.UnaryDataset):
""""A dataset that loads previously saved dataset."""
def __init__(self, dataset, path, shard_func, compression):
self._element_spec = dataset.element_spec
self._shard_func = shard_func
dataset, shard_func, use_shard_func, path = set_save_dataset_attributes(
dataset, shard_func, path)
variant_tensor = ged_ops.save_dataset_v2(
dataset._variant_tensor, # pylint: disable=protected-access
path=path,
shard_func_other_args=shard_func.captured_inputs,
shard_func=shard_func,
use_shard_func=use_shard_func,
compression=compression,
output_types=structure.get_flat_tensor_types(dataset.element_spec),
output_shapes=structure.get_flat_tensor_shapes(dataset.element_spec),
)
super().__init__(dataset, variant_tensor)
def _functions(self):
return [self._shard_func]
@property
def element_spec(self):
return self._element_spec
def set_save_dataset_attributes(dataset, shard_func, path):
"""Sets parameters for SaveDatasetOp and SaveDatasetV2Op."""
if shard_func is None:
use_shard_func = False
shard_func = lambda *x: None # a dummy function that will not be used
else:
use_shard_func = True
wrapped_func = structured_function.StructuredFunctionWrapper(
shard_func,
"save()",
input_structure=dataset.element_spec,
add_to_graph=False)
encoded = nested_structure_coder.encode_structure(dataset.element_spec)
gfile.MakeDirs(path)
with gfile.GFile(os.path.join(path, dataset_ops.DATASET_SPEC_FILENAME),
"wb") as f:
f.write(encoded.SerializeToString())
path = ops.convert_to_tensor(path, dtype=dtypes.string, name="path")
shard_func = wrapped_func.function
shard_func.add_to_graph(ops.get_default_graph())
# pylint: disable=protected-access
dataset._apply_debug_options()
return dataset, shard_func, use_shard_func, path
| _SaveDataset |
python | doocs__leetcode | solution/0800-0899/0836.Rectangle Overlap/Solution.py | {
"start": 0,
"end": 218
} | class ____:
def isRectangleOverlap(self, rec1: List[int], rec2: List[int]) -> bool:
x1, y1, x2, y2 = rec1
x3, y3, x4, y4 = rec2
return not (y3 >= y2 or y4 <= y1 or x3 >= x2 or x4 <= x1)
| Solution |
python | django__django | tests/model_fields/models.py | {
"start": 16537,
"end": 16957
} | class ____(models.Model):
a = models.IntegerField()
b = models.IntegerField()
field = models.GeneratedField(
expression=F("a") + F("b"),
output_field=models.IntegerField(),
db_persist=False,
)
fk = models.ForeignKey(Foo, on_delete=models.CASCADE, null=True, blank=True)
class Meta:
required_db_features = {"supports_virtual_generated_columns"}
| GeneratedModelVirtual |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/kubernetes_engine.py | {
"start": 34781,
"end": 38037
} | class ____(GKEOperatorMixin, GoogleCloudBaseOperator):
"""
Retrieve information about Job by given name.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKEDescribeJobOperator`
:param job_name: The name of the Job to delete
:param namespace: The name of the Google Kubernetes Engine namespace.
:param location: The name of the Google Kubernetes Engine zone or region in which the
cluster resides, e.g. 'us-central1-a'
:param cluster_name: The name of the Google Kubernetes Engine cluster.
:param use_internal_ip: Use the internal IP address as the endpoint.
:param use_dns_endpoint: Use the DNS address as the endpoint.
:param project_id: The Google Developers Console project id
:param gcp_conn_id: The Google cloud connection id to use. This allows for
users to specify a service account.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple({"job_name", "namespace"} | set(GKEOperatorMixin.template_fields))
operator_extra_links = (KubernetesEngineJobLink(),)
def __init__(
self,
job_name: str,
namespace: str,
location: str,
cluster_name: str,
use_internal_ip: bool = False,
use_dns_endpoint: bool = False,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.job_name = job_name
self.namespace = namespace
self.project_id = project_id
self.location = location
self.cluster_name = cluster_name
self.gcp_conn_id = gcp_conn_id
self.use_internal_ip = use_internal_ip
self.use_dns_endpoint = use_dns_endpoint
self.impersonation_chain = impersonation_chain
self.job: V1Job | None = None
def execute(self, context: Context) -> None:
self.job = self.hook.get_job(job_name=self.job_name, namespace=self.namespace)
self.log.info(
"Retrieved description of Job %s from cluster %s:\n %s",
self.job_name,
self.cluster_name,
self.job,
)
KubernetesEngineJobLink.persist(
context=context,
location=self.location,
cluster_name=self.cluster_name,
namespace=self.job.metadata.namespace,
job_name=self.job.metadata.name,
project_id=self.project_id,
)
return None
| GKEDescribeJobOperator |
python | huggingface__transformers | src/transformers/models/idefics3/modeling_idefics3.py | {
"start": 21139,
"end": 33346
} | class ____(Idefics3PreTrainedModel):
def __init__(self, config: Idefics3Config):
super().__init__(config)
self.padding_idx = self.config.text_config.pad_token_id
self.vocab_size = self.config.text_config.vocab_size
self.vision_model = Idefics3VisionTransformer._from_config(config.vision_config)
self.connector = Idefics3Connector(config)
self.text_model = AutoModel.from_config(config.text_config)
self.image_seq_len = int(
((config.vision_config.image_size // config.vision_config.patch_size) ** 2) / (config.scale_factor**2)
)
self.image_token_id = self.config.image_token_id
self.post_init()
# Copied from transformers.models.idefics2.modeling_idefics2.Idefics2Model.enable_input_require_grads
def enable_input_require_grads(self):
"""
Enables the gradients for the input embeddings.
This is useful for lora when using gradient checkpointing.
c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032
Override to set output.requires_grad = True for both the decoder's and vision model's embeddings.
"""
def get_lowest_module(module):
if len(list(module.children())) == 0:
# If the module has no children, it is a leaf module (e.g., Linear, Conv2d, etc.)
return module
else:
# Recursively call the function on each child module
return get_lowest_module(list(module.children())[0])
def make_inputs_require_grads(module, input, output):
output.requires_grad_(True)
self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
self._vision_require_grads_hook = get_lowest_module(self.vision_model).register_forward_hook(
make_inputs_require_grads
)
# Copied from transformers.models.idefics2.modeling_idefics2.Idefics2Model.disable_input_require_grads
def disable_input_require_grads(self):
self._text_require_grads_hook.remove()
self._vision_require_grads_hook.remove()
# Copied from transformers.models.idefics2.modeling_idefics2.Idefics2Model.get_input_embeddings
def get_input_embeddings(self):
return self.text_model.get_input_embeddings()
# Copied from transformers.models.idefics2.modeling_idefics2.Idefics2Model.set_input_embeddings
def set_input_embeddings(self, value):
self.text_model.set_input_embeddings(value)
def inputs_merger(
self,
input_ids: torch.LongTensor,
inputs_embeds: Optional[torch.Tensor],
image_hidden_states: Optional[torch.Tensor],
):
"""
This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM.
The merging happens as follows:
- The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`.
- We get the image hidden states for the image through the vision encoder and that hidden state, after a pixel shuffle operation, is then projected into the text embedding space.
We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer.
- The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_toke_around_image vector_tok_4`. That sequence is fed to the LM.
- To fit the format of that sequence, `input_ids`, `input_embeds`, `attention_mask` are all 3 adapted to insert the image hidden states.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
image_hidden_states = image_hidden_states.to(inputs_embeds.device, inputs_embeds.dtype)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_hidden_states)
return inputs_embeds
def get_image_features(
self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.LongTensor] = None
):
"""
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
pixel_attention_mask (`torch.LongTensor`, *optional*):
The attention mask indicating padded regions in the image.
"""
batch_size, num_images, num_channels, height, width = pixel_values.shape
pixel_values = pixel_values.to(dtype=self.dtype) # fp16 compatibility
pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])
# Remove padding images - padding images are full 0.
nb_values_per_image = pixel_values.shape[1:].numel()
real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
pixel_values = pixel_values[real_images_inds].contiguous()
# Handle the vision attention mask
if pixel_attention_mask is None:
pixel_attention_mask = torch.ones(
size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)),
dtype=torch.bool,
device=pixel_values.device,
)
else:
# Remove padding images from the mask
pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])
pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()
patch_size = self.config.vision_config.patch_size
patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
# Get sequence from the vision encoder
image_hidden_states = self.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
image_hidden_states.last_hidden_state
# Modality projection & resampling
image_hidden_states = self.connector(image_hidden_states.last_hidden_state)
return image_hidden_states
@can_return_tuple
@auto_docstring(
custom_intro="""
Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
max_num_images is the maximum number of images among the batch_size samples in the batch.
Padding images are not needed beyond padding the pixel_values at the entrance of the model.
For efficiency, we only pass through the vision_model's forward the real images by
discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
"""
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_attention_mask: Optional[torch.BoolTensor] = None,
image_hidden_states: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
return_dict: Optional[bool] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, Idefics3BaseModelOutputWithPast]:
r"""
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
Mask to avoid performing attention on padding pixel indices.
image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The hidden states of the image encoder after modality projection.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.training and self.text_model.gradient_checkpointing and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# retrieve input_ids and inputs_embeds
if input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.text_model.get_input_embeddings()(input_ids).to(self.device)
# START VISUAL INPUTS INTEGRATION
if pixel_values is not None and image_hidden_states is not None:
raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
elif pixel_values is not None:
image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask)
elif image_hidden_states is not None:
image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)
if image_hidden_states is not None:
# When we generate, we don't want to replace the potential image_token_id that we generated by images
# that simply don't exist
inputs_embeds = self.inputs_merger(
input_ids=input_ids,
inputs_embeds=inputs_embeds,
image_hidden_states=image_hidden_states,
)
outputs = self.text_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
return_dict=True,
**kwargs,
)
return Idefics3BaseModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_hidden_states,
)
@auto_docstring(
custom_intro="""
The Idefics3 Model with a language modeling head. It is made up a SigLIP vision encoder, with a language modeling head on top.
"""
)
| Idefics3Model |
python | django__django | tests/settings_tests/tests.py | {
"start": 15137,
"end": 17033
} | class ____(SimpleTestCase):
def test_configure(self):
s = LazySettings()
s.configure(SECRET_KEY="foo")
self.assertTrue(s.is_overridden("SECRET_KEY"))
def test_module(self):
settings_module = ModuleType("fake_settings_module")
settings_module.SECRET_KEY = "foo"
settings_module.USE_TZ = False
sys.modules["fake_settings_module"] = settings_module
try:
s = Settings("fake_settings_module")
self.assertTrue(s.is_overridden("SECRET_KEY"))
self.assertFalse(s.is_overridden("ALLOWED_HOSTS"))
finally:
del sys.modules["fake_settings_module"]
def test_override(self):
self.assertFalse(settings.is_overridden("ALLOWED_HOSTS"))
with override_settings(ALLOWED_HOSTS=[]):
self.assertTrue(settings.is_overridden("ALLOWED_HOSTS"))
def test_unevaluated_lazysettings_repr(self):
lazy_settings = LazySettings()
expected = "<LazySettings [Unevaluated]>"
self.assertEqual(repr(lazy_settings), expected)
def test_evaluated_lazysettings_repr(self):
lazy_settings = LazySettings()
module = os.environ.get(ENVIRONMENT_VARIABLE)
expected = '<LazySettings "%s">' % module
# Force evaluation of the lazy object.
lazy_settings.APPEND_SLASH
self.assertEqual(repr(lazy_settings), expected)
def test_usersettingsholder_repr(self):
lazy_settings = LazySettings()
lazy_settings.configure(APPEND_SLASH=False)
expected = "<UserSettingsHolder>"
self.assertEqual(repr(lazy_settings._wrapped), expected)
def test_settings_repr(self):
module = os.environ.get(ENVIRONMENT_VARIABLE)
lazy_settings = Settings(module)
expected = '<Settings "%s">' % module
self.assertEqual(repr(lazy_settings), expected)
| IsOverriddenTest |
python | ApeWorX__ape | src/ape/pytest/fixtures.py | {
"start": 17898,
"end": 18926
} | class ____(dict[Scope, Snapshot]):
def __init__(self):
super().__init__(
{
Scope.SESSION: Snapshot(Scope.SESSION),
Scope.PACKAGE: Snapshot(Scope.PACKAGE),
Scope.MODULE: Snapshot(Scope.MODULE),
Scope.CLASS: Snapshot(Scope.CLASS),
Scope.FUNCTION: Snapshot(Scope.FUNCTION),
}
)
def get_snapshot_id(self, scope: Scope) -> Optional["SnapshotID"]:
return self[scope].identifier
def set_snapshot_id(self, scope: Scope, snapshot_id: "SnapshotID"):
self[scope].identifier = snapshot_id
def clear_snapshot_id(self, scope: Scope):
self[scope].identifier = None
def next_snapshots(self, scope: Scope) -> Iterator[Snapshot]:
for scope_value in range(scope + 1, Scope.FUNCTION + 1):
yield self[scope_value] # type: ignore
def extend_fixtures(self, scope: Scope, fixtures: Iterable[str]):
self[scope].fixtures.extend(fixtures)
| SnapshotRegistry |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/serdes/serdes.py | {
"start": 21737,
"end": 27782
} | class ____(Serializer, Generic[T]):
# NOTE: See `whitelist_for_serdes` docstring for explanations of parameters.
def __init__(
self,
*,
klass: type[T],
storage_name: Optional[str] = None,
storage_field_names: Optional[Mapping[str, str]] = None,
old_fields: Optional[Mapping[str, JsonSerializableValue]] = None,
skip_when_empty_fields: Optional[AbstractSet[str]] = None,
skip_when_none_fields: Optional[AbstractSet[str]] = None,
field_serializers: Optional[Mapping[str, "FieldSerializer"]] = None,
kwargs_fields: Optional[AbstractSet[str]] = None,
):
self.klass = klass
self.storage_name = storage_name
self.storage_field_names = storage_field_names or {}
self.loaded_field_names = {v: k for k, v in self.storage_field_names.items()}
self.old_fields = old_fields or {}
self.skip_when_empty_fields = skip_when_empty_fields or set()
self.skip_when_none_fields = skip_when_none_fields or set()
self.field_serializers = field_serializers or {}
self.kwargs_fields = kwargs_fields
@abstractmethod
def object_as_mapping(self, value: T) -> Mapping[str, PackableValue]: ...
def unpack(
self,
unpacked_dict: dict[str, UnpackedValue],
whitelist_map: WhitelistMap,
context: UnpackContext,
) -> T:
try:
unpacked_dict = self.before_unpack(context, unpacked_dict)
unpacked: dict[str, PackableValue] = {}
for key, value in unpacked_dict.items():
loaded_name = self.loaded_field_names.get(key, key)
# Naively implements backwards compatibility by filtering arguments that aren't present in
# the constructor. If a property is present in the serialized object, but doesn't exist in
# the version of the class loaded into memory, that property will be completely ignored.
if loaded_name in self.constructor_param_names:
# custom unpack regardless of hook vs recursive descent
custom = self.field_serializers.get(loaded_name)
if custom:
unpacked[loaded_name] = custom.unpack(
value,
whitelist_map=whitelist_map,
context=context,
)
elif context.observed_unknown_serdes_values:
unpacked[loaded_name] = context.assert_no_unknown_values(value)
else:
unpacked[loaded_name] = value # type: ignore # 2 hot 4 cast()
else:
context.clear_ignored_unknown_values(value)
return self.klass(**unpacked)
except Exception as exc:
value = self.handle_unpack_error(exc, context, unpacked_dict)
if isinstance(context, UnpackContext):
context.assert_no_unknown_values(value)
context.clear_ignored_unknown_values(unpacked_dict)
return value
# Hook: Modify the contents of the unpacked dict before domain object construction during
# deserialization.
def before_unpack(
self,
context: UnpackContext,
unpacked_dict: dict[str, UnpackedValue],
) -> dict[str, UnpackedValue]:
return unpacked_dict
# Hook: Handle an error that occurs when unpacking an object. Can be used to return a default
# value.
def handle_unpack_error(
self,
exc: Exception,
context: UnpackContext,
storage_dict: dict[str, Any],
) -> Any:
raise exc
def pack_items(
self,
value: T,
whitelist_map: WhitelistMap,
object_handler: Callable[[SerializableObject, WhitelistMap, str], JsonSerializableValue],
descent_path: str,
) -> Iterator[tuple[str, JsonSerializableValue]]:
yield "__class__", self.get_storage_name()
for key, inner_value in self.object_as_mapping(self.before_pack(value)).items():
storage_key = self.storage_field_names.get(key, key)
custom = self.field_serializers.get(key)
if custom:
field_value = custom.pack(
inner_value,
whitelist_map=whitelist_map,
descent_path=f"{descent_path}.{key}",
)
else:
field_value = inner_value
if (key in self.skip_when_empty_fields and field_value in EMPTY_VALUES_TO_SKIP) or (
key in self.skip_when_none_fields and field_value is None
):
continue
yield (
storage_key,
_transform_for_serialization(
field_value,
whitelist_map=whitelist_map,
object_handler=object_handler,
descent_path=f"{descent_path}.{key}",
),
)
for key, default in self.old_fields.items():
yield key, default
# Hook: Modify the contents of the object before packing
def before_pack(self, value: T) -> T:
return value
@property
@abstractmethod
def constructor_param_names(self) -> Sequence[str]: ...
def get_storage_name(self) -> str:
return self.storage_name or self.klass.__name__
T_NamedTuple = TypeVar("T_NamedTuple", default=NamedTuple)
# T_NamedTuple previously had a `NamedTuple` bound, but the bound triggers type errors when a `@record`
# decorated class is bound to the variable. @record actually generates a NamedTuple variant of the
# class that was passed in, but we haven't found out how to communicate that to the type system.
# Instead the type signature of the `@record` decorator passes the input class through unmodified.
# Therefore, we forgo `bound` here so that `NamedTupleSerializer` can be used with `@record`
# classes.
| ObjectSerializer |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/check_ops_test.py | {
"start": 5804,
"end": 12135
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies([check_ops.assert_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_scalar_comparison(self):
const_true = constant_op.constant(True, name="true")
const_false = constant_op.constant(False, name="false")
with self.assertRaisesRegex(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(const_true, const_false, message="fail")
def test_returns_none_with_eager(self):
with context.eager_mode():
small = constant_op.constant([1, 2], name="small")
x = check_ops.assert_equal(small, small)
assert x is None
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_greater(self):
# Static check
static_small = constant_op.constant([1, 2], name="small")
static_big = constant_op.constant([3, 4], name="big")
with self.assertRaisesRegex(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(static_big, static_small, message="fail")
@test_util.run_deprecated_v1
def test_raises_when_greater_dynamic(self):
with self.cached_session():
small = array_ops.placeholder(dtypes.int32, name="small")
big = array_ops.placeholder(dtypes.int32, name="big")
with ops.control_dependencies(
[check_ops.assert_equal(big, small, message="fail")]):
out = array_ops.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval(feed_dict={small: [1, 2], big: [3, 4]})
def test_error_message_eager(self):
expected_error_msg_full = r"""big does not equal small
Condition x == y did not hold.
Indices of first 3 different values:
\[\[0 0\]
\[1 1\]
\[2 0\]\]
Corresponding x values:
\[2 3 6\]
Corresponding y values:
\[20 30 60\]
First 6 elements of x:
\[2 2 3 3 6 6\]
First 6 elements of y:
\[20 2 3 30 60 6\]"""
expected_error_msg_default = r"""big does not equal small
Condition x == y did not hold.
Indices of first 3 different values:
\[\[0 0\]
\[1 1\]
\[2 0\]\]
Corresponding x values:
\[2 3 6\]
Corresponding y values:
\[20 30 60\]
First 3 elements of x:
\[2 2 3\]
First 3 elements of y:
\[20 2 3\]"""
expected_error_msg_short = r"""big does not equal small
Condition x == y did not hold.
Indices of first 2 different values:
\[\[0 0\]
\[1 1\]\]
Corresponding x values:
\[2 3\]
Corresponding y values:
\[20 30\]
First 2 elements of x:
\[2 2\]
First 2 elements of y:
\[20 2\]"""
with context.eager_mode():
big = constant_op.constant([[2, 2], [3, 3], [6, 6]])
small = constant_op.constant([[20, 2], [3, 30], [60, 6]])
with self.assertRaisesRegex(errors.InvalidArgumentError,
expected_error_msg_full):
check_ops.assert_equal(big, small, message="big does not equal small",
summarize=10)
with self.assertRaisesRegex(errors.InvalidArgumentError,
expected_error_msg_default):
check_ops.assert_equal(big, small, message="big does not equal small")
with self.assertRaisesRegex(errors.InvalidArgumentError,
expected_error_msg_short):
check_ops.assert_equal(big, small, message="big does not equal small",
summarize=2)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_less(self):
# Static check
static_small = constant_op.constant([3, 1], name="small")
static_big = constant_op.constant([4, 2], name="big")
with self.assertRaisesRegex(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(static_big, static_small, message="fail")
@test_util.run_deprecated_v1
def test_raises_when_less_dynamic(self):
with self.cached_session():
small = array_ops.placeholder(dtypes.int32, name="small")
big = array_ops.placeholder(dtypes.int32, name="big")
with ops.control_dependencies([check_ops.assert_equal(small, big)]):
out = array_ops.identity(small)
with self.assertRaisesOpError("small.*big"):
out.eval(feed_dict={small: [3, 1], big: [4, 2]})
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal_and_broadcastable_shapes(self):
small = constant_op.constant([[1, 2], [1, 2]], name="small")
small_2 = constant_op.constant([1, 2], name="small_2")
with ops.control_dependencies([check_ops.assert_equal(small, small_2)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
small_2 = constant_op.constant([1, 1], name="small_2")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesIncompatibleShapesError(
(errors.InvalidArgumentError, ValueError)):
with ops.control_dependencies([check_ops.assert_equal(small, small_2)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_not_equal_and_broadcastable_shapes(self):
cond = constant_op.constant([True, False], name="small")
with self.assertRaisesRegex(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(cond, False, message="fail")
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_noop_when_both_identical(self):
larry = constant_op.constant([])
check_op = check_ops.assert_equal(larry, larry)
if context.executing_eagerly():
self.assertIs(check_op, None)
else:
self.assertEqual(check_op.type, "NoOp")
| AssertEqualTest |
python | getsentry__sentry | src/sentry/api/serializers/models/environment.py | {
"start": 158,
"end": 234
} | class ____(TypedDict):
id: str
name: str
| EnvironmentSerializerResponse |
python | huggingface__transformers | src/transformers/models/seed_oss/modeling_seed_oss.py | {
"start": 22303,
"end": 22412
} | class ____(GenericForSequenceClassification, SeedOssPreTrainedModel):
pass
| SeedOssForSequenceClassification |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance1.py | {
"start": 5458,
"end": 5536
} | class ____(Protocol):
def f0(self, /) -> None: ...
@runtime_checkable
| Proto1 |
python | PyCQA__pylint | tests/functional/n/none_dunder_protocols.py | {
"start": 607,
"end": 1267
} | class ____(metaclass=MetaContainer):
"""https://github.com/pylint-dev/pylint/issues/6366"""
__len__, __iter__ = [lambda x: x] * 2
def test():
1 in NonIterableClass # [unsupported-membership-test]
1 in OldNonIterableClass # [unsupported-membership-test]
1 in NonContainerClass # [unsupported-membership-test]
1 in NonIterableClass() # [unsupported-membership-test]
1 in OldNonIterableClass() # [unsupported-membership-test]
1 in NonContainerClass() # [unsupported-membership-test]
1 in MultipleAssignmentNonesClass() # [unsupported-membership-test]
1 in MultipleAssignmentLambdasClass()
| MultipleAssignmentLambdasClass |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_bedrock.py | {
"start": 1186,
"end": 1894
} | class ____:
def test_service_waiters(self):
"""Ensure that all custom Bedrock waiters have unit tests."""
def _class_tests_a_waiter(class_name: str) -> bool:
"""Check if the class name starts with 'Test' and ends with 'Waiter'."""
return bool(re.match(r"^Test[A-Za-z]+Waiter$", class_name))
# Collect WAITER_NAME from each waiter test class in this module.
test_classes = inspect.getmembers(sys.modules[__name__], inspect.isclass)
waiters_tested = [cls.WAITER_NAME for (name, cls) in test_classes if _class_tests_a_waiter(name)]
assert sorted(BedrockHook()._list_custom_waiters()) == sorted(waiters_tested)
| TestBedrockCustomWaiters |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 111212,
"end": 112728
} | class ____(nn.Module):
"""
Multimodal postprocessing for Perceiver. Can be used to combine modality-specific postprocessors into a single
postprocessor.
Args:
modalities (`Mapping[str, PostprocessorType]`):
Dictionary mapping modality name to postprocessor class for that modality.
input_is_dict (`bool`, *optional*, defaults to `False`):
If True, input is assumed to be dictionary structured, and outputs keep the same dictionary shape. If
False, input is a tensor which is sliced up during postprocessing by *modality_sizes*.
"""
def __init__(self, modalities: Mapping[str, PostprocessorType], input_is_dict: bool = False):
super().__init__()
self.modalities = nn.ModuleDict(modalities)
self.input_is_dict = input_is_dict
def forward(
self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None
) -> Mapping[str, torch.Tensor]:
if not self.input_is_dict:
# Slice up modalities by their sizes.
if modality_sizes is None:
raise ValueError("Modality sizes should be specified if input is not a dictionary.")
inputs = restructure(modality_sizes=modality_sizes, inputs=inputs)
outputs = {
modality: postprocessor(inputs[modality], pos=pos, modality_sizes=None)
for modality, postprocessor in self.modalities.items()
}
return outputs
| PerceiverMultimodalPostprocessor |
python | fsspec__filesystem_spec | fsspec/implementations/reference.py | {
"start": 2076,
"end": 2379
} | class ____(collections.abc.ItemsView):
def __iter__(self):
return zip(self._mapping.keys(), self._mapping.values())
def ravel_multi_index(idx, sizes):
val = 0
mult = 1
for i, s in zip(idx[::-1], sizes[::-1]):
val += i * mult
mult *= s
return val
| RefsItemsView |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor2.py | {
"start": 3975,
"end": 4077
} | class ____(Generic[_T1]):
def __new__(cls, o: _T1) -> Self: ...
plant: Plant[float] = Plant(0)
| Plant |
python | pytorch__pytorch | test/inductor/test_debug_trace.py | {
"start": 4572,
"end": 6921
} | class ____:
var_ranges = {p0: 256}
index0 = p0
def body(self, ops):
get_index = self.get_index('index0')
load = ops.load('buf0', get_index)
constant = ops.constant(2.0, torch.float32)
add = ops.add(load, constant)
get_index_1 = self.get_index('index0')
store = ops.store('buf1', get_index_1, add, None)
return store
op2: ExternKernelSchedulerNode(ExternKernelOut)
op2.writes = [StarDep(name='buf2', mode=None)]
op2.unmet_dependencies = [StarDep(name='buf1', mode=None)]
op2.met_dependencies = [StarDep(name='arg1_1', mode=None)]
op2.outputs = [
buf2: ExternKernelOut
buf2.layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
buf2.users = [NodeUser(node=OUTPUT, can_inplace=False, is_weak=False)]
]
op2.node.kernel = extern_kernels.mm""",
)
post_fusion_logs = post_fusion_stream.getvalue().strip()
self.assertExpectedInline(
post_fusion_logs,
"""\
AFTER FUSION
op0_op1: FusedSchedulerNode(SchedulerNode,SchedulerNode)
op0_op1.writes = [MemoryDep('buf0', c0, {c0: 256}), MemoryDep('buf1', c0, {c0: 256})]
op0_op1.unmet_dependencies = []
op0_op1.met_dependencies = [MemoryDep('arg0_1', c0, {c0: 256})]
op0_op1.outputs = [
buf0: ComputedBuffer
buf0.layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
buf0.users = [NodeUser(node=SchedulerNode(name='op1'), can_inplace=True, is_weak=False)]
buf1: ComputedBuffer
buf1.layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
buf1.users = [NodeUser(node=ExternKernelSchedulerNode(name='op2'), can_inplace=False, is_weak=False)]
]
op0_op1.snodes[0] =
op0: SchedulerNode(ComputedBuffer)
op0.writes = [MemoryDep('buf0', c0, {c0: 256})]
op0.unmet_dependencies = []
op0.met_dependencies = [MemoryDep('arg0_1', c0, {c0: 256})]
op0.outputs = [
buf0: ComputedBuffer
buf0.layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
buf0.users = [NodeUser(node=SchedulerNode(name='op1'), can_inplace=True, is_weak=False)]
]
op0.group.device = cpu
op0.group.iteration = ((256,), ())
op0.sizes = ([256], [])
arg0_1_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
buf0_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
| op1_loop_body |
python | scipy__scipy | scipy/stats/tests/test_kdeoth.py | {
"start": 10221,
"end": 22863
} | class ____(stats.gaussian_kde):
def covariance_factor(self):
return 0.5 * self.silverman_factor()
def test_gaussian_kde_subclassing():
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=50)
# gaussian_kde itself
kde = stats.gaussian_kde(x1)
ys = kde(xs)
# subclass 1
kde1 = _kde_subclass1(x1)
y1 = kde1(xs)
assert_array_almost_equal_nulp(ys, y1, nulp=10)
# subclass 2
kde2 = _kde_subclass2(x1)
y2 = kde2(xs)
assert_array_almost_equal_nulp(ys, y2, nulp=10)
# subclass 3 was removed because we have no obligation to maintain support
# for user invocation of private methods
# subclass 4
kde4 = _kde_subclass4(x1)
y4 = kde4(x1)
y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
assert_array_almost_equal(y_expected, y4, decimal=6)
# Not a subclass, but check for use of _compute_covariance()
kde5 = kde
kde5.covariance_factor = lambda: kde.factor
kde5._compute_covariance()
y5 = kde5(xs)
assert_array_almost_equal_nulp(ys, y5, nulp=10)
def test_gaussian_kde_covariance_caching():
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475]
# Set the bandwidth, then reset it to the default.
kde = stats.gaussian_kde(x1)
kde.set_bandwidth(bw_method=0.5)
kde.set_bandwidth(bw_method='scott')
y2 = kde(xs)
assert_array_almost_equal(y_expected, y2, decimal=7)
def test_gaussian_kde_monkeypatch():
"""Ugly, but people may rely on this. See scipy pull request 123,
specifically the linked ML thread "Width of the Gaussian in stats.kde".
If it is necessary to break this later on, that is to be discussed on ML.
"""
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=50)
# The old monkeypatched version to get at Silverman's Rule.
kde = stats.gaussian_kde(x1)
kde.covariance_factor = kde.silverman_factor
kde._compute_covariance()
y1 = kde(xs)
# The new saner version.
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
y2 = kde2(xs)
assert_array_almost_equal_nulp(y1, y2, nulp=10)
def test_kde_integer_input():
"""Regression test for #1181."""
x1 = np.arange(5)
kde = stats.gaussian_kde(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]
assert_array_almost_equal(kde(x1), y_expected, decimal=6)
_ftypes = ['float32', 'float64', 'float96', 'float128', 'int32', 'int64']
@pytest.mark.parametrize("bw_type", _ftypes + ["scott", "silverman"])
@pytest.mark.parametrize("dtype", _ftypes)
def test_kde_output_dtype(dtype, bw_type):
# Check whether the datatypes are available
dtype = getattr(np, dtype, None)
if bw_type in ["scott", "silverman"]:
bw = bw_type
else:
bw_type = getattr(np, bw_type, None)
bw = bw_type(3) if bw_type else None
if any(dt is None for dt in [dtype, bw]):
pytest.skip()
weights = np.arange(5, dtype=dtype)
dataset = np.arange(5, dtype=dtype)
k = stats.gaussian_kde(dataset, bw_method=bw, weights=weights)
points = np.arange(5, dtype=dtype)
result = k(points)
# weights are always cast to float64
assert result.dtype == np.result_type(dataset, points, np.float64(weights),
k.factor)
def test_pdf_logpdf_validation():
rng = np.random.default_rng(64202298293133848336925499069837723291)
xn = rng.standard_normal((2, 10))
gkde = stats.gaussian_kde(xn)
xs = rng.standard_normal((3, 10))
msg = "points have dimension 3, dataset has dimension 2"
with pytest.raises(ValueError, match=msg):
gkde.logpdf(xs)
def test_pdf_logpdf():
rng = np.random.default_rng(1)
n_basesample = 50
xn = rng.normal(0, 1, n_basesample)
# Default
gkde = stats.gaussian_kde(xn)
xs = np.linspace(-15, 12, 25)
pdf = gkde.evaluate(xs)
pdf2 = gkde.pdf(xs)
assert_almost_equal(pdf, pdf2, decimal=12)
logpdf = np.log(pdf)
logpdf2 = gkde.logpdf(xs)
assert_almost_equal(logpdf, logpdf2, decimal=12)
# There are more points than data
gkde = stats.gaussian_kde(xs)
pdf = np.log(gkde.evaluate(xn))
pdf2 = gkde.logpdf(xn)
assert_almost_equal(pdf, pdf2, decimal=12)
def test_pdf_logpdf_weighted():
rng = np.random.default_rng(1)
n_basesample = 50
xn = rng.normal(0, 1, n_basesample)
wn = rng.random(n_basesample)
# Default
gkde = stats.gaussian_kde(xn, weights=wn)
xs = np.linspace(-15, 12, 25)
pdf = gkde.evaluate(xs)
pdf2 = gkde.pdf(xs)
assert_almost_equal(pdf, pdf2, decimal=12)
logpdf = np.log(pdf)
logpdf2 = gkde.logpdf(xs)
assert_almost_equal(logpdf, logpdf2, decimal=12)
# There are more points than data
rng = np.random.default_rng(4531935345)
gkde = stats.gaussian_kde(xs, weights=rng.random(len(xs)))
pdf = np.log(gkde.evaluate(xn))
pdf2 = gkde.logpdf(xn)
assert_almost_equal(pdf, pdf2, decimal=12)
def test_marginal_1_axis():
rng = np.random.default_rng(6111799263660870475)
n_data = 50
n_dim = 10
dataset = rng.normal(size=(n_dim, n_data))
points = rng.normal(size=(n_dim, 3))
dimensions = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]) # dimensions to keep
kde = stats.gaussian_kde(dataset)
marginal = kde.marginal(dimensions)
pdf = marginal.pdf(points[dimensions])
def marginal_pdf_single(point):
def f(x):
x = np.concatenate(([x], point[dimensions]))
return kde.pdf(x)[0]
return integrate.quad(f, -np.inf, np.inf)[0]
def marginal_pdf(points):
return np.apply_along_axis(marginal_pdf_single, axis=0, arr=points)
ref = marginal_pdf(points)
assert_allclose(pdf, ref, rtol=1e-6)
@pytest.mark.xslow
def test_marginal_2_axis():
rng = np.random.default_rng(6111799263660870475)
n_data = 30
n_dim = 4
dataset = rng.normal(size=(n_dim, n_data))
points = rng.normal(size=(n_dim, 3))
dimensions = np.array([1, 3]) # dimensions to keep
kde = stats.gaussian_kde(dataset)
marginal = kde.marginal(dimensions)
pdf = marginal.pdf(points[dimensions])
def marginal_pdf(points):
def marginal_pdf_single(point):
def f(y, x):
w, z = point[dimensions]
x = np.array([x, w, y, z])
return kde.pdf(x)[0]
return integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)[0]
return np.apply_along_axis(marginal_pdf_single, axis=0, arr=points)
ref = marginal_pdf(points)
assert_allclose(pdf, ref, rtol=1e-6)
def test_marginal_iv():
# test input validation
rng = np.random.default_rng(6111799263660870475)
n_data = 30
n_dim = 4
dataset = rng.normal(size=(n_dim, n_data))
points = rng.normal(size=(n_dim, 3))
kde = stats.gaussian_kde(dataset)
# check that positive and negative indices are equivalent
dimensions1 = [-1, 1]
marginal1 = kde.marginal(dimensions1)
pdf1 = marginal1.pdf(points[dimensions1])
dimensions2 = [3, -3]
marginal2 = kde.marginal(dimensions2)
pdf2 = marginal2.pdf(points[dimensions2])
assert_equal(pdf1, pdf2)
# IV for non-integer dimensions
message = "Elements of `dimensions` must be integers..."
with pytest.raises(ValueError, match=message):
kde.marginal([1, 2.5])
# IV for uniqueness
message = "All elements of `dimensions` must be unique."
with pytest.raises(ValueError, match=message):
kde.marginal([1, 2, 2])
# IV for non-integer dimensions
message = (r"Dimensions \[-5 6\] are invalid for a distribution in 4...")
with pytest.raises(ValueError, match=message):
kde.marginal([1, -5, 6])
@pytest.mark.xslow
def test_logpdf_overflow():
# regression test for gh-12988; testing against linalg instability for
# very high dimensionality kde
rng = np.random.default_rng(1)
n_dimensions = 2500
n_samples = 5000
xn = np.array([rng.normal(0, 1, n_samples) + (n) for n in range(
0, n_dimensions)])
# Default
gkde = stats.gaussian_kde(xn)
logpdf = gkde.logpdf(np.arange(0, n_dimensions))
np.testing.assert_equal(np.isneginf(logpdf[0]), False)
np.testing.assert_equal(np.isnan(logpdf[0]), False)
def test_weights_intact():
# regression test for gh-9709: weights are not modified
rng = np.random.default_rng(12345)
vals = rng.lognormal(size=100)
weights = rng.choice([1.0, 10.0, 100], size=vals.size)
orig_weights = weights.copy()
stats.gaussian_kde(np.log10(vals), weights=weights)
assert_allclose(weights, orig_weights, atol=1e-14, rtol=1e-14)
def test_weights_integer():
# integer weights are OK, cf gh-9709 (comment)
values = [0.2, 13.5, 21.0, 75.0, 99.0]
weights = [1, 2, 4, 8, 16] # a list of integers
pdf_i = stats.gaussian_kde(values, weights=weights)
pdf_f = stats.gaussian_kde(values, weights=np.float64(weights))
xn = [0.3, 11, 88]
assert_allclose(pdf_i.evaluate(xn),
pdf_f.evaluate(xn), atol=1e-14, rtol=1e-14)
def test_seed():
# Test the seed option of the resample method
def test_seed_sub(gkde_trail):
n_sample = 200
# The results should be different without using seed
samp1 = gkde_trail.resample(n_sample)
samp2 = gkde_trail.resample(n_sample)
assert_raises(
AssertionError, assert_allclose, samp1, samp2, atol=1e-13
)
# Use integer seed
seed = 831
samp1 = gkde_trail.resample(n_sample, seed=seed)
samp2 = gkde_trail.resample(n_sample, seed=seed)
assert_allclose(samp1, samp2, atol=1e-13)
# Use RandomState
rstate1 = np.random.RandomState(seed=138)
samp1 = gkde_trail.resample(n_sample, seed=rstate1)
rstate2 = np.random.RandomState(seed=138)
samp2 = gkde_trail.resample(n_sample, seed=rstate2)
assert_allclose(samp1, samp2, atol=1e-13)
# check that np.random.Generator can be used (numpy >= 1.17)
if hasattr(np.random, 'default_rng'):
# obtain a np.random.Generator object
rng = np.random.default_rng(1234)
gkde_trail.resample(n_sample, seed=rng)
rng = np.random.default_rng(8765678)
n_basesample = 500
wn = rng.random(n_basesample)
# Test 1D case
xn_1d = rng.normal(0, 1, n_basesample)
gkde_1d = stats.gaussian_kde(xn_1d)
test_seed_sub(gkde_1d)
gkde_1d_weighted = stats.gaussian_kde(xn_1d, weights=wn)
test_seed_sub(gkde_1d_weighted)
# Test 2D case
mean = np.array([1.0, 3.0])
covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
xn_2d = rng.multivariate_normal(mean, covariance, size=n_basesample).T
gkde_2d = stats.gaussian_kde(xn_2d)
test_seed_sub(gkde_2d)
gkde_2d_weighted = stats.gaussian_kde(xn_2d, weights=wn)
test_seed_sub(gkde_2d_weighted)
def test_singular_data_covariance_gh10205():
# When the data lie in a lower-dimensional subspace and this causes
# and exception, check that the error message is informative.
rng = np.random.default_rng(2321583144339784787)
mu = np.array([1, 10, 20])
sigma = np.array([[4, 10, 0], [10, 25, 0], [0, 0, 100]])
data = rng.multivariate_normal(mu, sigma, 1000)
try: # doesn't raise any error on some platforms, and that's OK
stats.gaussian_kde(data.T)
except linalg.LinAlgError:
msg = "The data appears to lie in a lower-dimensional subspace..."
with assert_raises(linalg.LinAlgError, match=msg):
stats.gaussian_kde(data.T)
def test_fewer_points_than_dimensions_gh17436():
# When the number of points is fewer than the number of dimensions, the
# the covariance matrix would be singular, and the exception tested in
# test_singular_data_covariance_gh10205 would occur. However, sometimes
# this occurs when the user passes in the transpose of what `gaussian_kde`
# expects. This can result in a huge covariance matrix, so bail early.
rng = np.random.default_rng(2046127537594925772)
rvs = rng.multivariate_normal(np.zeros(3), np.eye(3), size=5)
message = "Number of dimensions is greater than number of samples..."
with pytest.raises(ValueError, match=message):
stats.gaussian_kde(rvs)
| _kde_subclass4 |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 48949,
"end": 49914
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("or_IN")
Faker.seed(0)
def test_first_names(self):
"""simple test to verify that we are pulling gender specific names"""
name = self.fake.first_name_female()
assert name in OrINProvider.first_names_female
name = self.fake.first_name_male()
assert name in OrINProvider.first_names_male
name = self.fake.first_name_unisex()
assert name in OrINProvider.first_names_unisex
name = self.fake.first_name()
assert name in OrINProvider.first_names
def test_middle_names(self):
"""test the middle name"""
name = self.fake.middle_name()
assert name in OrINProvider.middle_names
def test_last_names(self):
"""test the last name is generating from the provided tuple"""
last_name = self.fake.last_name()
assert last_name in OrINProvider.last_names
| TestOrIN |
python | rapidsai__cudf | docs/cudf/source/_ext/PandasCompat.py | {
"start": 734,
"end": 951
} | class ____(nodes.General, nodes.Element):
pass
def visit_PandasCompat_node(self, node):
self.visit_admonition(node)
def depart_PandasCompat_node(self, node):
self.depart_admonition(node)
| PandasCompatList |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/selector.py | {
"start": 12153,
"end": 12961
} | class ____:
"""The information needed to define partitions selection for a given asset key."""
asset_key: AssetKey
partitions: Optional[PartitionsSelector] = None
def to_graphql_input(self):
return {
"assetKey": self.asset_key.to_graphql_input(),
"partitions": self.partitions.to_graphql_input() if self.partitions else None,
}
@staticmethod
def from_graphql_input(graphql_data) -> "PartitionsByAssetSelector":
asset_key = graphql_data["assetKey"]
partitions = graphql_data.get("partitions")
return PartitionsByAssetSelector(
asset_key=AssetKey.from_graphql_input(asset_key),
partitions=PartitionsSelector.from_graphql_input(partitions) if partitions else None,
)
| PartitionsByAssetSelector |
python | spyder-ide__spyder | spyder/plugins/help/widgets.py | {
"start": 8690,
"end": 37419
} | class ____(PluginMainWidget):
ENABLE_SPINNER = True
# Signals
sig_item_found = Signal()
"""This signal is emitted when an item is found."""
sig_render_started = Signal()
"""This signal is emitted to inform a help text rendering has started."""
sig_render_finished = Signal()
"""This signal is emitted to inform a help text rendering has finished."""
def __init__(self, name=None, plugin=None, parent=None):
super().__init__(name, plugin, parent)
# Attributes
self._starting_up = True
self._current_color_scheme = None
self._last_texts = [None, None]
self._last_editor_doc = None
self._last_console_cb = None
self._last_editor_cb = None
self.css_path = self.get_conf('css_path', CSS_PATH, 'appearance')
self.no_docs = _("No documentation available")
self.docstring = True # TODO: What is this used for?
# Widgets
self._sphinx_thread = SphinxThread(
None,
html_text_no_doc=warning(self.no_docs, css_path=self.css_path),
css_path=self.css_path,
)
self.shell = None
self.internal_console = None
self.internal_shell = None
self.plain_text = PlainText(self)
self.rich_text = RichText(self)
self.source_label = QLabel(_("Source"))
self.source_label.ID = HelpWidgetToolbarItems.SourceLabel
self.source_combo = SpyderComboBox(self)
self.source_combo.ID = HelpWidgetToolbarItems.SourceCombo
self.object_label = QLabel(_("Object"))
self.object_label.ID = HelpWidgetToolbarItems.ObjectLabel
self.object_combo = ObjectComboBox(
self, HelpWidgetToolbarItems.ObjectCombo)
self.object_edit = QLineEdit(self)
self.object_edit.ID = HelpWidgetToolbarItems.ObjectEdit
# Setup
self.object_edit.setReadOnly(True)
self.object_combo.setMaxCount(self.get_conf('max_history_entries'))
self.object_combo.setItemText(0, '')
self.plain_text.set_wrap_mode(self.get_conf('wrap'))
self.source_combo.addItems([_("Console"), _("Editor")])
if (not programs.is_module_installed('rope') and
not programs.is_module_installed('jedi', '>=0.11.0')):
self.source_combo.hide()
self.source_label.hide()
# Layout
self.stacked_widget = QStackedWidget()
self.stacked_widget.addWidget(self.rich_text)
self.stacked_widget.addWidget(self.plain_text)
layout = QVBoxLayout()
layout.addWidget(self.stacked_widget)
self.setLayout(layout)
# Signals
self._sphinx_thread.html_ready.connect(
self._on_sphinx_thread_html_ready)
self._sphinx_thread.error_msg.connect(
self._on_sphinx_thread_error_msg)
self.object_combo.valid.connect(self.force_refresh)
self.rich_text.sig_link_clicked.connect(self.handle_link_clicks)
self.source_combo.currentIndexChanged.connect(
lambda x: self.source_changed())
self.sig_render_started.connect(self.start_spinner)
self.sig_render_finished.connect(self.stop_spinner)
# --- PluginMainWidget API
# ------------------------------------------------------------------------
def get_title(self):
return _('Help')
def setup(self):
self.wrap_action = self.create_action(
name=HelpWidgetActions.ToggleWrap,
text=_("Wrap lines"),
toggled=True,
initial=self.get_conf('wrap'),
option='wrap'
)
self.copy_action = self.create_action(
name=HelpWidgetActions.CopyAction,
text=_("Copy"),
triggered=lambda value: self.plain_text.copy(),
register_shortcut=False,
)
self.select_all_action = self.create_action(
name=HelpWidgetActions.SelectAll,
text=_("Select All"),
triggered=lambda value: self.plain_text.select_all(),
register_shortcut=False,
)
self.auto_import_action = self.create_action(
name=HelpWidgetActions.ToggleAutomaticImport,
text=_("Automatic import"),
toggled=True,
initial=self.get_conf('automatic_import'),
option='automatic_import'
)
self.show_source_action = self.create_action(
name=HelpWidgetActions.ToggleShowSource,
text=_("Show Source"),
toggled=True,
option='show_source'
)
self.rich_text_action = self.create_action(
name=HelpWidgetActions.ToggleRichMode,
text=_("Rich Text"),
toggled=True,
initial=self.get_conf('rich_mode'),
option='rich_mode'
)
self.plain_text_action = self.create_action(
name=HelpWidgetActions.TogglePlainMode,
text=_("Plain Text"),
toggled=True,
initial=self.get_conf('plain_mode'),
option='plain_mode'
)
self.locked_action = self.create_action(
name=HelpWidgetActions.ToggleLocked,
text=_("Lock/Unlock"),
toggled=True,
icon=self.create_icon('lock_open'),
initial=self.get_conf('locked'),
option='locked'
)
self.home_action = self.create_action(
name=HelpWidgetActions.Home,
text=_("Home"),
triggered=self.show_intro_message,
icon=self.create_icon('home'),
)
# Add the help actions to an exclusive QActionGroup
help_actions = QActionGroup(self)
help_actions.setExclusive(True)
help_actions.addAction(self.plain_text_action)
help_actions.addAction(self.rich_text_action)
# Menu
menu = self.get_options_menu()
for item in [self.rich_text_action, self.plain_text_action,
self.show_source_action]:
self.add_item_to_menu(
item,
menu=menu,
section=HelpWidgetOptionsMenuSections.Display,
)
self.add_item_to_menu(
self.auto_import_action,
menu=menu,
section=HelpWidgetOptionsMenuSections.Other,
)
# Plain text menu
self._plain_text_context_menu = self.create_menu(
"plain_text_context_menu")
self.add_item_to_menu(
self.copy_action,
self._plain_text_context_menu,
section="copy_section",
)
self.add_item_to_menu(
self.select_all_action,
self._plain_text_context_menu,
section="select_section",
)
self.add_item_to_menu(
self.wrap_action,
self._plain_text_context_menu,
section="wrap_section",
)
# Toolbar
toolbar = self.get_main_toolbar()
for item in [self.source_label, self.source_combo, self.object_label,
self.object_combo, self.object_edit, self.home_action,
self.locked_action]:
self.add_item_to_toolbar(
item,
toolbar=toolbar,
section=HelpWidgetMainToolbarSections.Main,
)
self.source_changed()
self.switch_to_rich_text()
self.show_intro_message()
# Signals
self.plain_text.sig_custom_context_menu_requested.connect(
self._show_plain_text_context_menu)
def _should_display_welcome_page(self):
"""Determine if the help welcome page should be displayed."""
return (self._last_editor_doc is None or
self._last_console_cb is None or
self._last_editor_cb is None)
@on_conf_change(option='wrap')
def on_wrap_option_update(self, value):
self.plain_text.set_wrap_mode(value)
@on_conf_change(option='locked')
def on_lock_update(self, value):
if value:
icon = self.create_icon('lock')
tip = _("Unlock")
else:
icon = self.create_icon('lock_open')
tip = _("Lock")
action = self.get_action(HelpWidgetActions.ToggleLocked)
action.setIcon(icon)
action.setToolTip(tip)
@on_conf_change(option='automatic_import')
def on_automatic_import_update(self, value):
self.object_combo.validate_current_text()
if self._should_display_welcome_page():
self.show_intro_message()
else:
self.force_refresh()
@on_conf_change(option='rich_mode')
def on_rich_mode_update(self, value):
if value:
# Plain Text OFF / Rich text ON
self.docstring = not value
self.stacked_widget.setCurrentWidget(self.rich_text)
self.get_action(HelpWidgetActions.ToggleShowSource).setChecked(
False)
else:
# Plain Text ON / Rich text OFF
self.docstring = value
self.stacked_widget.setCurrentWidget(self.plain_text)
if self._should_display_welcome_page():
self.show_intro_message()
else:
self.force_refresh()
@on_conf_change(option='show_source')
def on_show_source_update(self, value):
if value:
self.switch_to_plain_text()
self.get_action(HelpWidgetActions.ToggleRichMode).setChecked(
False)
self.docstring = not value
if self._should_display_welcome_page():
self.show_intro_message()
else:
self.force_refresh()
@on_conf_change(section='appearance', option=['selected', 'ui_theme'])
def change_color_scheme(self, option, value):
if option == 'ui_theme':
value = self.get_conf('selected', section='appearance')
self.set_plain_text_color_scheme(value)
def update_actions(self):
pass
def get_focus_widget(self):
self.object_combo.lineEdit().selectAll()
return self.object_combo
# --- Private API
# ------------------------------------------------------------------------
@Slot(QPoint)
def _show_plain_text_context_menu(self, point):
point = self.plain_text.mapToGlobal(point)
self._plain_text_context_menu.popup(point)
def _on_sphinx_thread_html_ready(self, html_text):
"""
Set our sphinx documentation based on thread result.
Parameters
----------
html_text: str
Html results text.
"""
self._sphinx_thread.wait()
self.set_rich_text_html(html_text, QUrl.fromLocalFile(self.css_path))
self.sig_render_finished.emit()
self.stop_spinner()
def _on_sphinx_thread_error_msg(self, error_msg):
"""
Display error message on Sphinx rich text failure.
Parameters
----------
error_msg: str
Error message text.
"""
self._sphinx_thread.wait()
self.plain_text_action.setChecked(True)
sphinx_ver = programs.get_module_version('sphinx')
QMessageBox.critical(
self,
_('Help'),
_("The following error occurred when calling "
"<b>Sphinx %s</b>. <br>Incompatible Sphinx "
"version or doc string decoding failed."
"<br><br>Error message:<br>%s"
) % (sphinx_ver, error_msg),
)
self.sig_render_finished.emit()
# --- Public API
# ------------------------------------------------------------------------
def source_is_console(self):
"""Return True if source is Console."""
return self.source_combo.currentIndex() == 0
def switch_to_editor_source(self):
"""Switch to editor view of the help viewer."""
self.source_combo.setCurrentIndex(1)
def switch_to_console_source(self):
"""Switch to console view of the help viewer."""
self.source_combo.setCurrentIndex(0)
def source_changed(self):
"""Handle a source (plain/rich) change."""
is_console = self.source_is_console()
if is_console:
self.object_combo.show()
self.object_edit.hide()
else:
# Editor
self.object_combo.hide()
self.object_edit.show()
self.get_action(HelpWidgetActions.ToggleShowSource).setEnabled(
is_console)
self.get_action(HelpWidgetActions.ToggleAutomaticImport).setEnabled(
is_console)
self.restore_text()
def save_text(self, callback):
"""
Save help text.
Parameters
----------
callback: callable
Method to call on save.
"""
if self.source_is_console():
self._last_console_cb = callback
else:
self._last_editor_cb = callback
def restore_text(self):
"""Restore last text using callback."""
if self.source_is_console():
cb = self._last_console_cb
else:
cb = self._last_editor_cb
if cb is None:
if self.get_conf('plain_mode'):
self.switch_to_plain_text()
else:
self.switch_to_rich_text()
else:
func = cb[0]
args = cb[1:]
func(*args)
if func.__self__ is self.rich_text:
self.switch_to_rich_text()
else:
self.switch_to_plain_text()
@property
def find_widget(self):
"""Show find widget."""
if self.get_conf('plain_mode'):
return self.plain_text.find_widget
else:
return self.rich_text.find_widget
def switch_to_plain_text(self):
"""Switch to plain text mode."""
self.get_action(HelpWidgetActions.TogglePlainMode).setChecked(True)
def switch_to_rich_text(self):
"""Switch to rich text mode."""
self.get_action(HelpWidgetActions.ToggleRichMode).setChecked(True)
def set_plain_text(self, text, is_code):
"""
Set plain text docs.
Parameters
----------
text: str
Text content.
is_code: bool
True if it is code text.
Notes
-----
Text is coming from utils.dochelpers.getdoc
"""
if type(text) is dict:
name = text['name']
if name:
rst_title = ''.join(['='*len(name), '\n', name, '\n',
'='*len(name), '\n\n'])
else:
rst_title = ''
try:
if text['argspec']:
definition = ''.join(
['Definition: ', name, text['argspec'], '\n\n'])
else:
definition = ''
if text['note']:
note = ''.join(['Type: ', text['note'], '\n\n----\n\n'])
else:
note = ''
except TypeError:
definition = self.no_docs
note = ''
full_text = ''.join([rst_title, definition, note,
text['docstring']])
else:
full_text = text
self.plain_text.set_text(full_text, is_code)
self.save_text([self.plain_text.set_text, full_text, is_code])
def set_rich_text_html(self, html_text, base_url):
"""
Set rich text.
Parameters
----------
html_text: str
Html string.
base_url: str
Location of stylesheets and images to load in the page.
"""
self.rich_text.set_html(html_text, base_url)
self.save_text([self.rich_text.set_html, html_text, base_url])
def show_loading_message(self):
"""Create html page to show while the documentation is generated."""
self.sig_render_started.emit()
loading_message = _("Retrieving documentation")
loading_img = get_image_path('loading_sprites')
if os.name == 'nt':
loading_img = loading_img.replace('\\', '/')
self.set_rich_text_html(
loading(loading_message, loading_img, css_path=self.css_path),
QUrl.fromLocalFile(self.css_path),
)
def show_intro_message(self):
"""Show message on Help with the right shortcuts."""
intro_message_eq = _(
"Here you can get help of any object by pressing "
"%s in front of it, either on the Editor or the "
"Console.%s")
intro_message_dif = _(
"Here you can get help of any object by pressing "
"%s in front of it on the Editor, or %s in front "
"of it on the Console.%s")
intro_message_common = _(
"Help can also be shown automatically after writing "
"a left parenthesis next to an object. You can "
"activate this behavior in %s.")
prefs = _("Preferences > Help")
shortcut_editor = self.get_conf(
'editor/inspect current object', section='shortcuts')
shortcut_console = self.get_conf(
'ipython_console/inspect current object', section='shortcuts')
if sys.platform == 'darwin':
shortcut_editor = shortcut_editor.replace('Ctrl', 'Cmd')
shortcut_console = shortcut_console.replace('Ctrl', 'Cmd')
if self.get_conf('rich_mode'):
title = _("Usage")
tutorial_message = _("New to Spyder? Read our")
tutorial = _("tutorial")
if shortcut_editor == shortcut_console:
intro_message = (intro_message_eq + intro_message_common) % (
"<b>"+shortcut_editor+"</b>", "<br><br>",
"<i>"+prefs+"</i>")
else:
intro_message = (intro_message_dif + intro_message_common) % (
"<b>"+shortcut_editor+"</b>",
"<b>"+shortcut_console+"</b>",
"<br><br>", "<i>"+prefs+"</i>")
self.set_rich_text_html(usage(title, intro_message,
tutorial_message, tutorial,
css_path=self.css_path),
QUrl.fromLocalFile(self.css_path))
else:
install_sphinx = "\n\n%s" % _("Please consider installing Sphinx "
"to get documentation rendered in "
"rich text.")
if shortcut_editor == shortcut_console:
intro_message = (intro_message_eq + intro_message_common) % (
shortcut_editor, "\n\n", prefs)
else:
intro_message = (intro_message_dif + intro_message_common) % (
shortcut_editor, shortcut_console, "\n\n", prefs)
intro_message += install_sphinx
self.set_plain_text(intro_message, is_code=False)
def show_rich_text(self, text, collapse=False, img_path=''):
"""
Show text in rich mode.
Parameters
----------
text: str
Plain text to display.
collapse: bool, optional
Show collapsable sections as collapsed/expanded. Default is False.
img_path: str, optional
Path to folder with additional images needed to correctly
display the rich text help. Default is ''.
"""
self.switch_to_rich_text()
context = generate_context(collapse=collapse, img_path=img_path,
css_path=self.css_path)
self.render_sphinx_doc(text, context)
def show_plain_text(self, text):
"""
Show text in plain mode.
Parameters
----------
text: str
Plain text to display.
"""
self.switch_to_plain_text()
self.set_plain_text(text, is_code=False)
@Slot()
def show_tutorial(self):
"""Show the Spyder tutorial."""
tutorial_path = get_module_source_path('spyder.plugins.help.utils')
tutorial = os.path.join(tutorial_path, 'tutorial.rst')
with open(tutorial, 'r') as fh:
text = fh.read()
self.show_rich_text(text, collapse=True)
def handle_link_clicks(self, url):
"""
Handle how url links should be opened.
Parameters
----------
url: QUrl
QUrl object containing the link to open.
"""
url = str(url.toString())
if url == "spy://tutorial":
self.show_tutorial()
elif url.startswith('http'):
start_file(url)
else:
self.rich_text.load_url(url)
@Slot()
@Slot(bool)
@Slot(bool, bool)
def force_refresh(self, valid=True, editing=True):
"""
Force a refresh/rerender of the help viewer content.
Parameters
----------
valid: bool, optional
Default is True.
editing: bool, optional
Default is True.
"""
if valid:
if self.source_is_console():
self.set_object_text(None, force_refresh=True)
elif self._last_editor_doc is not None:
self.set_editor_doc(self._last_editor_doc, force_refresh=True)
def set_object_text(self, text, force_refresh=False, ignore_unknown=False):
"""
Set object's name in Help's combobox.
Parameters
----------
text: str
Object name.
force_refresh: bool, optional
Force a refresh with the rendering.
ignore_unknown: bool, optional
Ignore not found object names.
See Also
--------
:py:meth:spyder.widgets.mixins.GetHelpMixin.show_object_info
"""
if self.get_conf('locked') and not force_refresh:
return
self.switch_to_console_source()
add_to_combo = True
if text is None:
text = str(self.object_combo.currentText())
add_to_combo = False
found = self.show_help(text, ignore_unknown=ignore_unknown)
if ignore_unknown and not found:
return
if add_to_combo:
self.object_combo.add_text(text)
if found:
self.sig_item_found.emit()
index = self.source_combo.currentIndex()
self._last_texts[index] = text
def set_editor_doc(self, help_data, force_refresh=False):
"""
Set content for help data sent from the editor.
Parameters
----------
help_data: dict
Dictionary with editor introspection information.
force_refresh: bool, optional
Force a refresh with the rendering.
Examples
--------
>>> help_data = {
'obj_text': str,
'name': str,
'argspec': str,
'note': str,
'docstring': str,
'path': str,
}
"""
if self.get_conf('locked') and not force_refresh:
return
self.switch_to_editor_source()
self._last_editor_doc = help_data
self.object_edit.setText(help_data['obj_text'])
if self.get_conf('rich_mode'):
self.render_sphinx_doc(help_data)
else:
self.set_plain_text(help_data, is_code=False)
index = self.source_combo.currentIndex()
self._last_texts[index] = help_data['docstring']
def set_shell(self, shell):
"""
Bind to shell.
Parameters
----------
shell: object
internal shell or ipython console shell
"""
self.shell = shell
def get_shell(self):
"""
Return shell which is currently bound to Help.
"""
if self.shell is None:
self.shell = self.internal_shell
return self.shell
def render_sphinx_doc(self, help_data, context=None, css_path=CSS_PATH):
"""
Transform help_data dictionary to HTML and show it.
Parameters
----------
help_data: str or dict
Dictionary with editor introspection information.
context: dict
Sphinx context.
css_path: str
Path to CSS file for styling.
"""
if isinstance(help_data, dict):
path = help_data.pop('path', '')
dname = os.path.dirname(path)
else:
dname = ''
# Math rendering option could have changed
self._sphinx_thread.render(help_data, context, self.get_conf('math'),
dname, css_path=self.css_path)
self.show_loading_message()
def show_help(self, obj_text, ignore_unknown=False):
"""
Show help for an object's name.
Parameters
----------
obj_text: str
Object's name.
ignore_unknown: bool, optional
Ignore unknown object's name.
"""
# TODO: This method makes active use of the shells. It would be better
# to use signals and pass information this way for better decoupling.
shell = self.get_shell()
if shell is None:
return
obj_text = str(obj_text)
if not shell.is_defined(obj_text):
if (self.get_conf('automatic_import')
and self.internal_shell.is_defined(obj_text,
force_import=True)):
shell = self.internal_shell
else:
shell = None
doc = None
source_text = None
if shell is not None:
doc = shell.get_doc(obj_text)
source_text = shell.get_source(obj_text)
is_code = False
if self.get_conf('rich_mode'):
self.render_sphinx_doc(doc, css_path=self.css_path)
return doc is not None
elif self.docstring:
hlp_text = doc
if hlp_text is None:
hlp_text = source_text
if hlp_text is None:
return False
else:
hlp_text = source_text
if hlp_text is None:
hlp_text = doc
if hlp_text is None:
hlp_text = _("No source code available.")
if ignore_unknown:
return False
else:
is_code = True
self.set_plain_text(hlp_text, is_code=is_code)
return True
def set_rich_text_font(self, font, fixed_font):
"""
Set rich text mode font.
Parameters
----------
fixed_font: QFont
The current rich text font to use.
"""
self.rich_text.set_font(font, fixed_font=fixed_font)
def set_plain_text_font(self, font, color_scheme=None):
"""
Set plain text mode font.
Parameters
----------
font: QFont
The current plain text font to use.
color_scheme: str
The selected color scheme.
"""
if color_scheme is None:
color_scheme = self._current_color_scheme
self.plain_text.set_font(font, color_scheme=color_scheme)
def set_plain_text_color_scheme(self, color_scheme):
"""
Set plain text mode color scheme.
Parameters
----------
color_scheme: str
The selected color scheme.
"""
self._current_color_scheme = color_scheme
self.plain_text.set_color_scheme(color_scheme)
def set_history(self, history):
"""
Set list of strings on object combo box.
Parameters
----------
history: list
List of strings of objects.
"""
self.object_combo.addItems(history)
def get_history(self):
"""
Return list of strings on object combo box.
"""
history = []
for index in range(self.object_combo.count()):
history.append(str(self.object_combo.itemText(index)))
return history
def set_internal_console(self, console):
"""
Set the internal console shell.
Parameters
----------
console: :py:class:spyder.plugins.console.plugin.Console
Console plugin.
"""
self.internal_console = console
if self.internal_console is not None:
self.internal_shell = console.get_widget().shell
| HelpWidget |
python | huggingface__transformers | src/transformers/models/rag/modeling_rag.py | {
"start": 8468,
"end": 15762
} | class ____(ModelOutput):
r"""
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
each vocabulary token.
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
(see `past_key_values` input) to speed up sequential decoding.
retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
the `doc_scores`.
retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
The indexes of the embedded documents retrieved by the retriever.
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden states at the output of the last layer of the question encoder pooled output of the
model.
question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
logits: Optional[torch.FloatTensor] = None
doc_scores: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
retrieved_doc_embeds: Optional[torch.FloatTensor] = None
retrieved_doc_ids: Optional[torch.LongTensor] = None
context_input_ids: Optional[torch.LongTensor] = None
context_attention_mask: Optional[torch.LongTensor] = None
question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
question_enc_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
question_enc_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
generator_enc_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
generator_enc_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
generator_dec_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
generator_dec_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
generator_cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@auto_docstring(
custom_intro="""
RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP
Tasks](https://huggingface.co/papers/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.
RAG is a retriever augmented model and encapsulate three components: a question encoder, a dataset retriever and a
generator, the encoder and generator are trainable while the retriever is just an indexed dataset.
"""
)
@auto_docstring
| RetrievAugLMOutput |
python | huggingface__transformers | src/transformers/image_utils.py | {
"start": 2237,
"end": 2351
} | class ____(ExplicitEnum):
COCO_DETECTION = "coco_detection"
COCO_PANOPTIC = "coco_panoptic"
| AnnotationFormat |
python | django__django | tests/admin_views/models.py | {
"start": 3591,
"end": 3695
} | class ____(models.Model):
content = models.TextField()
date = models.DateTimeField()
| CustomArticle |
python | pypa__warehouse | tests/unit/manage/test_views.py | {
"start": 221636,
"end": 228963
} | class ____:
def test_delete_role(self, db_request, monkeypatch):
project = ProjectFactory.create(name="foobar")
user = UserFactory.create(username="testuser")
role = RoleFactory.create(user=user, project=project, role_name="Owner")
user_2 = UserFactory.create()
db_request.method = "POST"
db_request.user = user_2
db_request.POST = MultiDict({"role_id": role.id})
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
send_collaborator_removed_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(
views, "send_collaborator_removed_email", send_collaborator_removed_email
)
send_removed_as_collaborator_email = pretend.call_recorder(
lambda *a, **kw: None
)
monkeypatch.setattr(
views,
"send_removed_as_collaborator_email",
send_removed_as_collaborator_email,
)
result = views.delete_project_role(project, db_request)
assert db_request.route_path.calls == [
pretend.call("manage.project.roles", project_name=project.name)
]
assert db_request.db.query(Role).all() == []
assert send_collaborator_removed_email.calls == [
pretend.call(
db_request, set(), user=user, submitter=user_2, project_name="foobar"
)
]
assert send_removed_as_collaborator_email.calls == [
pretend.call(db_request, user, submitter=user_2, project_name="foobar")
]
assert db_request.session.flash.calls == [
pretend.call("Removed collaborator", queue="success")
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
entry = (
db_request.db.query(JournalEntry)
.options(joinedload(JournalEntry.submitted_by))
.one()
)
assert entry.name == project.name
assert entry.action == "remove Owner testuser"
assert entry.submitted_by == db_request.user
def test_delete_missing_role(self, db_request):
project = ProjectFactory.create(name="foobar")
missing_role_id = str(uuid.uuid4())
db_request.method = "POST"
db_request.user = pretend.stub()
db_request.POST = MultiDict({"role_id": missing_role_id})
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
result = views.delete_project_role(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Could not find role", queue="error")
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
def test_delete_own_owner_role(self, db_request):
project = ProjectFactory.create(name="foobar")
user = UserFactory.create(username="testuser")
role = RoleFactory.create(user=user, project=project, role_name="Owner")
db_request.method = "POST"
db_request.user = user
db_request.POST = MultiDict({"role_id": role.id})
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
result = views.delete_project_role(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Cannot remove yourself as Sole Owner", queue="error")
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
def test_delete_not_sole_owner_role(self, db_request, monkeypatch):
project = ProjectFactory.create(name="foobar")
user = UserFactory.create()
RoleFactory.create(user=user, project=project, role_name="Owner")
user_2 = UserFactory.create(username="testuser")
role_2 = RoleFactory.create(user=user_2, project=project, role_name="Owner")
db_request.method = "POST"
db_request.user = user_2
db_request.POST = MultiDict({"role_id": role_2.id})
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
send_collaborator_removed_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(
views, "send_collaborator_removed_email", send_collaborator_removed_email
)
send_removed_as_collaborator_email = pretend.call_recorder(
lambda *a, **kw: None
)
monkeypatch.setattr(
views,
"send_removed_as_collaborator_email",
send_removed_as_collaborator_email,
)
result = views.delete_project_role(project, db_request)
assert db_request.route_path.calls == [pretend.call("manage.projects")]
assert db_request.db.query(Role).filter(Role.user_id == user_2.id).all() == []
assert send_collaborator_removed_email.calls == [
pretend.call(
db_request, {user}, user=user_2, submitter=user_2, project_name="foobar"
)
]
assert send_removed_as_collaborator_email.calls == [
pretend.call(db_request, user_2, submitter=user_2, project_name="foobar")
]
assert db_request.session.flash.calls == [
pretend.call("Removed collaborator", queue="success")
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
entry = (
db_request.db.query(JournalEntry)
.options(joinedload(JournalEntry.submitted_by))
.one()
)
assert entry.name == project.name
assert entry.action == "remove Owner testuser"
assert entry.submitted_by == db_request.user
def test_delete_non_owner_role(self, db_request):
project = ProjectFactory.create(name="foobar")
user = UserFactory.create(username="testuser")
role = RoleFactory.create(user=user, project=project, role_name="Owner")
some_other_user = UserFactory.create(username="someotheruser")
some_other_project = ProjectFactory.create(name="someotherproject")
db_request.method = "POST"
db_request.user = some_other_user
db_request.POST = MultiDict({"role_id": role.id})
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
result = views.delete_project_role(some_other_project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Could not find role", queue="error")
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
| TestDeleteProjectRole |
python | encode__starlette | starlette/responses.py | {
"start": 5935,
"end": 6565
} | class ____(Response):
media_type = "application/json"
def __init__(
self,
content: Any,
status_code: int = 200,
headers: Mapping[str, str] | None = None,
media_type: str | None = None,
background: BackgroundTask | None = None,
) -> None:
super().__init__(content, status_code, headers, media_type, background)
def render(self, content: Any) -> bytes:
return json.dumps(
content,
ensure_ascii=False,
allow_nan=False,
indent=None,
separators=(",", ":"),
).encode("utf-8")
| JSONResponse |
python | numpy__numpy | numpy/distutils/fcompiler/intel.py | {
"start": 3946,
"end": 5448
} | class ____(BaseIntelFCompiler):
compiler_type = 'intelv'
description = 'Intel Visual Fortran Compiler for 32-bit apps'
version_match = intel_version_match('32-bit|IA-32')
def update_executables(self):
f = dummy_fortran_file()
self.executables['version_cmd'] = ['<F77>', '/FI', '/c',
f + '.f', '/o', f + '.o']
ar_exe = 'lib.exe'
possible_executables = ['ifort', 'ifl']
executables = {
'version_cmd' : None,
'compiler_f77' : [None],
'compiler_fix' : [None],
'compiler_f90' : [None],
'linker_so' : [None],
'archiver' : [ar_exe, "/verbose", "/OUT:"],
'ranlib' : None
}
compile_switch = '/c '
object_switch = '/Fo' # No space after /Fo!
library_switch = '/OUT:' # No space after /OUT:!
module_dir_switch = '/module:' # No space after /module:
module_include_switch = '/I'
def get_flags(self):
opt = ['/nologo', '/MD', '/nbs', '/names:lowercase',
'/assume:underscore', '/fpp']
return opt
def get_flags_free(self):
return []
def get_flags_debug(self):
return ['/4Yb', '/d2']
def get_flags_opt(self):
return ['/O1', '/assume:minus0'] # Scipy test failures with /O2
def get_flags_arch(self):
return ["/arch:IA32", "/QaxSSE3"]
def runtime_library_dir_option(self, dir):
raise NotImplementedError
| IntelVisualFCompiler |
python | openai__openai-python | src/openai/resources/realtime/realtime.py | {
"start": 15533,
"end": 18348
} | class ____:
"""Represents a live websocket connection to the Realtime API"""
session: RealtimeSessionResource
response: RealtimeResponseResource
input_audio_buffer: RealtimeInputAudioBufferResource
conversation: RealtimeConversationResource
output_audio_buffer: RealtimeOutputAudioBufferResource
_connection: WebsocketConnection
def __init__(self, connection: WebsocketConnection) -> None:
self._connection = connection
self.session = RealtimeSessionResource(self)
self.response = RealtimeResponseResource(self)
self.input_audio_buffer = RealtimeInputAudioBufferResource(self)
self.conversation = RealtimeConversationResource(self)
self.output_audio_buffer = RealtimeOutputAudioBufferResource(self)
def __iter__(self) -> Iterator[RealtimeServerEvent]:
"""
An infinite-iterator that will continue to yield events until
the connection is closed.
"""
from websockets.exceptions import ConnectionClosedOK
try:
while True:
yield self.recv()
except ConnectionClosedOK:
return
def recv(self) -> RealtimeServerEvent:
"""
Receive the next message from the connection and parses it into a `RealtimeServerEvent` object.
Canceling this method is safe. There's no risk of losing data.
"""
return self.parse_event(self.recv_bytes())
def recv_bytes(self) -> bytes:
"""Receive the next message from the connection as raw bytes.
Canceling this method is safe. There's no risk of losing data.
If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does,
then you can call `.parse_event(data)`.
"""
message = self._connection.recv(decode=False)
log.debug(f"Received websocket message: %s", message)
return message
def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None:
data = (
event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True)
if isinstance(event, BaseModel)
else json.dumps(maybe_transform(event, RealtimeClientEventParam))
)
self._connection.send(data)
def close(self, *, code: int = 1000, reason: str = "") -> None:
self._connection.close(code=code, reason=reason)
def parse_event(self, data: str | bytes) -> RealtimeServerEvent:
"""
Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object.
This is helpful if you're using `.recv_bytes()`.
"""
return cast(
RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent))
)
| RealtimeConnection |
python | getsentry__sentry | tests/sentry/monitors/tasks/test_detect_broken_monitor_envs.py | {
"start": 947,
"end": 27837
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self._run_tasks = self.tasks()
self._run_tasks.__enter__()
def tearDown(self) -> None:
super().tearDown()
self._run_tasks.__exit__(None, None, None)
def create_monitor_env(self, monitor, environment_id):
return MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=environment_id,
status=MonitorStatus.OK,
)
def generate_cron_monitor_url(self, org_slug: str, project_slug: str, monitor_slug: str) -> str:
return "http://testserver" + reverse(
"sentry-organization-cron-monitor-details",
args=[org_slug, project_slug, monitor_slug],
)
def create_monitor_and_env(
self, name="test monitor", organization_id=None, project_id=None, environment_id=None
):
if organization_id is None:
organization_id = self.organization.id
if project_id is None:
project_id = self.project.id
if environment_id is None:
environment_id = self.environment.id
monitor = Monitor.objects.create(
name=name,
organization_id=organization_id,
project_id=project_id,
config={
"schedule": [1, "day"],
"schedule_type": ScheduleType.INTERVAL,
"failure_issue_threshold": 1,
"max_runtime": None,
"checkin_margin": None,
},
)
return (monitor, self.create_monitor_env(monitor=monitor, environment_id=environment_id))
def create_incident_for_monitor_env(self, monitor, monitor_environment):
first_checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.ERROR,
date_added=timezone.now() - timedelta(days=14),
)
incident = MonitorIncident.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
starting_checkin=first_checkin,
starting_timestamp=first_checkin.date_added,
grouphash=hash_from_values([uuid.uuid4()]),
)
for i in range(3, -1, -1):
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.ERROR,
date_added=timezone.now() - timedelta(days=i),
)
return incident
@patch("sentry.monitors.tasks.detect_broken_monitor_envs.MessageBuilder")
@patch("django.utils.timezone.now")
def test_creates_broken_detection_no_duplicates(
self, mock_now: MagicMock, builder: MagicMock
) -> None:
now = before_now()
mock_now.return_value = now
monitor, monitor_environment = self.create_monitor_and_env()
incident = self.create_incident_for_monitor_env(monitor, monitor_environment)
detect_broken_monitor_envs()
broken_detection = MonitorEnvBrokenDetection.objects.get(monitor_incident=incident)
assert broken_detection.user_notified_timestamp == now
assert builder.call_count == 1
# running the task again shouldn't create duplicates or send additional emails
detect_broken_monitor_envs()
assert len(MonitorEnvBrokenDetection.objects.filter(monitor_incident=incident)) == 1
assert builder.call_count == 1
def test_does_not_create_broken_detection_insufficient_duration(self) -> None:
monitor, monitor_environment = self.create_monitor_and_env()
first_checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.ERROR,
date_added=timezone.now() - timedelta(days=10),
)
incident = MonitorIncident.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
starting_checkin=first_checkin,
starting_timestamp=first_checkin.date_added,
grouphash=hash_from_values([uuid.uuid4()]),
)
for _ in range(4):
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.ERROR,
date_added=timezone.now() - timedelta(days=1),
)
detect_broken_monitor_envs()
assert len(MonitorEnvBrokenDetection.objects.filter(monitor_incident=incident)) == 0
def test_does_not_create_broken_detection_insufficient_checkins(self) -> None:
monitor, monitor_environment = self.create_monitor_and_env()
first_checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.ERROR,
date_added=timezone.now() - timedelta(days=14),
)
incident = MonitorIncident.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
starting_checkin=first_checkin,
starting_timestamp=first_checkin.date_added,
grouphash=hash_from_values([uuid.uuid4()]),
)
for i in range(1, -1, -1):
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.ERROR,
date_added=timezone.now() - timedelta(days=i),
)
detect_broken_monitor_envs()
assert len(MonitorEnvBrokenDetection.objects.filter(monitor_incident=incident)) == 0
def test_does_not_create_for_disabled_monitor(self) -> None:
monitor, monitor_environment = self.create_monitor_and_env()
monitor.status = ObjectStatus.DISABLED
monitor.save()
incident = self.create_incident_for_monitor_env(monitor, monitor_environment)
detect_broken_monitor_envs()
assert len(MonitorEnvBrokenDetection.objects.filter(monitor_incident=incident)) == 0
@patch("sentry.monitors.tasks.detect_broken_monitor_envs.MessageBuilder")
@patch("django.utils.timezone.now")
def test_sends_emails_to_all_users_across_orgs(
self, mock_now: MagicMock, builder: MagicMock
) -> None:
now = before_now()
mock_now.return_value = now
monitor, monitor_environment = self.create_monitor_and_env()
second_user = self.create_user("second_user@example.com")
second_org = self.create_organization(owner=second_user)
self.create_member(user=self.user, organization=second_org)
second_team = self.create_team(organization=second_org, members=[second_user, self.user])
second_project = self.create_project(organization=second_org, teams=[second_team])
second_env = self.create_environment(second_project, name="production")
second_monitor, second_monitor_environment = self.create_monitor_and_env(
name="second monitor",
organization_id=second_org.id,
project_id=second_project.id,
environment_id=second_env.id,
)
third_monitor, third_monitor_environment = self.create_monitor_and_env(
name="third monitor",
organization_id=second_org.id,
project_id=second_project.id,
environment_id=second_env.id,
)
self.create_incident_for_monitor_env(monitor, monitor_environment)
self.create_incident_for_monitor_env(second_monitor, second_monitor_environment)
self.create_incident_for_monitor_env(third_monitor, third_monitor_environment)
detect_broken_monitor_envs()
broken_detections = MonitorEnvBrokenDetection.objects.all()
assert len(broken_detections) == 3
assert broken_detections[0].user_notified_timestamp == now
assert broken_detections[1].user_notified_timestamp == now
assert broken_detections[2].user_notified_timestamp == now
# should build 3 emails, 2 for self.user from the 2 orgs, and 1 for second_user
expected_contexts = [
{
"broken_monitors": [
(
monitor.slug,
self.project.slug,
f"{self.generate_cron_monitor_url(self.organization.slug, self.project.slug, monitor.slug)}?environment={self.environment.name}",
timezone.now() - timedelta(days=14),
)
],
"view_monitors_link": f"http://testserver/organizations/{self.organization.slug}/insights/crons/",
},
{
"broken_monitors": [
(
second_monitor.slug,
second_project.slug,
f"{self.generate_cron_monitor_url(second_org.slug, second_project.slug, second_monitor.slug)}?environment={second_env.name}",
timezone.now() - timedelta(days=14),
),
(
third_monitor.slug,
second_project.slug,
f"{self.generate_cron_monitor_url(second_org.slug, second_project.slug, third_monitor.slug)}?environment={second_env.name}",
timezone.now() - timedelta(days=14),
),
],
"view_monitors_link": f"http://testserver/organizations/{second_org.slug}/insights/crons/",
},
{
"broken_monitors": [
(
second_monitor.slug,
second_project.slug,
f"{self.generate_cron_monitor_url(second_org.slug, second_project.slug, second_monitor.slug)}?environment={second_env.name}",
timezone.now() - timedelta(days=14),
),
(
third_monitor.slug,
second_project.slug,
f"{self.generate_cron_monitor_url(second_org.slug, second_project.slug, third_monitor.slug)}?environment={second_env.name}",
timezone.now() - timedelta(days=14),
),
],
"view_monitors_link": f"http://testserver/organizations/{second_org.slug}/insights/crons/",
},
]
expected_subjects = [
"1 of your Cron Monitors isn't working",
"2 of your Cron Monitors aren't working",
"2 of your Cron Monitors aren't working",
]
builder.assert_has_calls(
[
call(
**{
"subject": subject,
"template": "sentry/emails/crons/broken-monitors.txt",
"html_template": "sentry/emails/crons/broken-monitors.html",
"type": "crons.broken_monitors",
"context": context,
}
)
for subject, context in zip(expected_subjects, expected_contexts)
],
any_order=True,
)
@patch("sentry.monitors.tasks.detect_broken_monitor_envs.MessageBuilder")
@patch("django.utils.timezone.now")
def test_disables_environments_and_sends_email(
self, mock_now: MagicMock, builder: MagicMock
) -> None:
now = before_now()
mock_now.return_value = now
monitor, monitor_environment = self.create_monitor_and_env()
second_user = self.create_user("second_user@example.com")
second_org = self.create_organization(owner=second_user)
self.create_member(user=self.user, organization=second_org)
second_team = self.create_team(organization=second_org, members=[second_user, self.user])
second_project = self.create_project(organization=second_org, teams=[second_team])
second_env = self.create_environment(second_project, name="production")
second_monitor, second_monitor_environment = self.create_monitor_and_env(
name="second monitor",
organization_id=second_org.id,
project_id=second_project.id,
environment_id=second_env.id,
)
third_monitor, third_monitor_environment = self.create_monitor_and_env(
name="third monitor",
organization_id=second_org.id,
project_id=second_project.id,
environment_id=second_env.id,
)
incident = self.create_incident_for_monitor_env(monitor, monitor_environment)
second_incident = self.create_incident_for_monitor_env(
second_monitor, second_monitor_environment
)
third_incident = self.create_incident_for_monitor_env(
third_monitor, third_monitor_environment
)
broken_detection = MonitorEnvBrokenDetection.objects.create(
monitor_incident=incident,
detection_timestamp=now - timedelta(days=14),
user_notified_timestamp=now - timedelta(days=14),
)
second_broken_detection = MonitorEnvBrokenDetection.objects.create(
monitor_incident=second_incident,
detection_timestamp=now - timedelta(days=14),
user_notified_timestamp=now - timedelta(days=14),
)
third_broken_detection = MonitorEnvBrokenDetection.objects.create(
monitor_incident=third_incident,
detection_timestamp=now - timedelta(days=14),
user_notified_timestamp=now - timedelta(days=14),
)
detect_broken_monitor_envs()
# should have the two monitor environments as muted
monitor_environment.refresh_from_db()
second_monitor_environment.refresh_from_db()
third_monitor_environment.refresh_from_db()
assert monitor_environment.is_muted
assert second_monitor_environment.is_muted
assert third_monitor_environment.is_muted
broken_detection.refresh_from_db()
second_broken_detection.refresh_from_db()
third_broken_detection.refresh_from_db()
assert broken_detection.env_muted_timestamp == now
assert second_broken_detection.env_muted_timestamp == now
assert third_broken_detection.env_muted_timestamp == now
# should build 3 emails, 2 for self.user from the 2 orgs, and 1 for second_user
expected_contexts = [
{
"muted_monitors": [
(
monitor.slug,
self.project.slug,
f"{self.generate_cron_monitor_url(self.organization.slug, self.project.slug, monitor.slug)}?environment={self.environment.name}",
timezone.now() - timedelta(days=14),
)
],
"view_monitors_link": f"http://testserver/organizations/{self.organization.slug}/insights/crons/",
},
{
"muted_monitors": [
(
second_monitor.slug,
second_project.slug,
f"{self.generate_cron_monitor_url(second_org.slug, second_project.slug, second_monitor.slug)}?environment={second_env.name}",
timezone.now() - timedelta(days=14),
),
(
third_monitor.slug,
second_project.slug,
f"{self.generate_cron_monitor_url(second_org.slug, second_project.slug, third_monitor.slug)}?environment={second_env.name}",
timezone.now() - timedelta(days=14),
),
],
"view_monitors_link": f"http://testserver/organizations/{second_org.slug}/insights/crons/",
},
{
"muted_monitors": [
(
second_monitor.slug,
second_project.slug,
f"{self.generate_cron_monitor_url(second_org.slug, second_project.slug, second_monitor.slug)}?environment={second_env.name}",
timezone.now() - timedelta(days=14),
),
(
third_monitor.slug,
second_project.slug,
f"{self.generate_cron_monitor_url(second_org.slug, second_project.slug, third_monitor.slug)}?environment={second_env.name}",
timezone.now() - timedelta(days=14),
),
],
"view_monitors_link": f"http://testserver/organizations/{second_org.slug}/insights/crons/",
},
]
expected_subjects = [
"1 of your Cron Monitors has been muted",
"2 of your Cron Monitors have been muted",
"2 of your Cron Monitors have been muted",
]
builder.assert_has_calls(
[
call(
**{
"subject": subject,
"template": "sentry/emails/crons/muted-monitors.txt",
"html_template": "sentry/emails/crons/muted-monitors.html",
"type": "crons.muted_monitors",
"context": context,
}
)
for subject, context in zip(expected_subjects, expected_contexts)
],
any_order=True,
)
@patch("sentry.monitors.tasks.detect_broken_monitor_envs.MessageBuilder")
@patch("django.utils.timezone.now")
def test_disables_corrects_environments_and_sends_email(
self, mock_now: MagicMock, builder: MagicMock
) -> None:
now = before_now()
mock_now.return_value = now
monitor, monitor_environment = self.create_monitor_and_env()
second_user = self.create_user("second_user@example.com")
second_org = self.create_organization(owner=second_user)
self.create_member(user=self.user, organization=second_org)
second_team = self.create_team(organization=second_org, members=[second_user, self.user])
second_project = self.create_project(organization=second_org, teams=[second_team])
second_env = self.create_environment(second_project, name="production")
second_monitor, second_monitor_environment = self.create_monitor_and_env(
name="second monitor",
organization_id=second_org.id,
project_id=second_project.id,
environment_id=second_env.id,
)
incident = self.create_incident_for_monitor_env(monitor, monitor_environment)
second_incident = self.create_incident_for_monitor_env(
second_monitor, second_monitor_environment
)
# This broken detection shouldn't be automatically disabled, because it's not long enough
broken_detection = MonitorEnvBrokenDetection.objects.create(
monitor_incident=incident,
detection_timestamp=now - timedelta(days=0),
user_notified_timestamp=now - timedelta(days=0),
)
second_broken_detection = MonitorEnvBrokenDetection.objects.create(
monitor_incident=second_incident,
detection_timestamp=now - timedelta(days=14),
user_notified_timestamp=now - timedelta(days=14),
)
detect_broken_monitor_envs()
# should have the one monitor environment as muted
monitor_environment.refresh_from_db()
second_monitor_environment.refresh_from_db()
assert not monitor_environment.is_muted
assert second_monitor_environment.is_muted
broken_detection.refresh_from_db()
second_broken_detection.refresh_from_db()
assert broken_detection.env_muted_timestamp is None
assert second_broken_detection.env_muted_timestamp == now
# should build 3 emails, 2 for self.user from the 2 orgs, and 1 for second_user
expected_contexts = [
{
"muted_monitors": [
(
second_monitor.slug,
second_project.slug,
f"{self.generate_cron_monitor_url(second_org.slug, second_project.slug, second_monitor.slug)}?environment={second_env.name}",
timezone.now() - timedelta(days=14),
)
],
"view_monitors_link": f"http://testserver/organizations/{second_org.slug}/insights/crons/",
},
{
"muted_monitors": [
(
second_monitor.slug,
second_project.slug,
f"{self.generate_cron_monitor_url(second_org.slug, second_project.slug, second_monitor.slug)}?environment={second_env.name}",
timezone.now() - timedelta(days=14),
)
],
"view_monitors_link": f"http://testserver/organizations/{second_org.slug}/insights/crons/",
},
]
builder.assert_has_calls(
[
call(
**{
"subject": "1 of your Cron Monitors has been muted",
"template": "sentry/emails/crons/muted-monitors.txt",
"html_template": "sentry/emails/crons/muted-monitors.html",
"type": "crons.muted_monitors",
"context": context,
}
)
for context in expected_contexts
],
any_order=True,
)
@patch("sentry.monitors.tasks.detect_broken_monitor_envs.MessageBuilder")
@patch("django.utils.timezone.now")
def test_sends_emails_to_owners_user_id(self, mock_now: MagicMock, builder: MagicMock) -> None:
now = before_now()
mock_now.return_value = now
builder.return_value.send_async = Mock()
monitor, monitor_environment = self.create_monitor_and_env()
new_owner = self.create_user("newowner@example.com")
self.create_member(
user=new_owner,
organization=self.organization,
)
monitor.update(owner_user_id=new_owner.id)
self.create_incident_for_monitor_env(monitor, monitor_environment)
detect_broken_monitor_envs()
builder.return_value.send_async.assert_called_with(["newowner@example.com"])
@patch("sentry.monitors.tasks.detect_broken_monitor_envs.MessageBuilder")
@patch("django.utils.timezone.now")
def test_sends_emails_to_owners_team_id(self, mock_now: MagicMock, builder: MagicMock) -> None:
now = before_now()
mock_now.return_value = now
builder.return_value.send_async = Mock()
monitor, monitor_environment = self.create_monitor_and_env()
team_member1 = self.create_user("teammember1@example.com")
team_member2 = self.create_user("teammember2@example.com")
team_member3 = self.create_user("teammember3@example.com")
# Respects alternate email sending
with assume_test_silo_mode(SiloMode.CONTROL):
UserEmail.objects.create(
user=team_member3, email="newemail3@example.com", is_verified=True
)
UserOption.objects.create(
user=team_member3,
key="mail:email",
project_id=self.project.id,
value="newemail3@example.com",
)
# Test that it won't send to this unverified email
UserEmail.objects.create(
user=team_member2, email="unverified2@example.com", is_verified=False
)
UserOption.objects.create(
user=team_member2,
key="mail:email",
project_id=self.project.id,
value="unverified2@example.com",
)
self.create_member(user=team_member1, organization=self.organization)
self.create_member(user=team_member2, organization=self.organization)
self.create_member(user=team_member3, organization=self.organization)
team = self.create_team(members=[team_member1, team_member2, team_member3])
monitor.update(owner_team_id=team.id)
self.create_incident_for_monitor_env(monitor, monitor_environment)
detect_broken_monitor_envs()
builder.return_value.send_async.assert_has_calls(
[
call(["newemail3@example.com"]),
call(["teammember1@example.com"]),
]
)
@patch("sentry.monitors.tasks.detect_broken_monitor_envs.MessageBuilder")
@patch("django.utils.timezone.now")
def test_does_not_send_emails_to_users_with_disabled_nudges(
self, mock_now: MagicMock, builder: MagicMock
) -> None:
now = before_now()
mock_now.return_value = now
builder.return_value.send_async = Mock()
monitor, monitor_environment = self.create_monitor_and_env()
second_monitor, second_monitor_environment = self.create_monitor_and_env(
name="second monitor",
)
disabled_owner = self.create_user("disabled_owner@example.com")
self.create_member(
user=disabled_owner,
organization=self.organization,
)
enabled_owner = self.create_user("enabled_owner@example.com")
self.create_member(
user=enabled_owner,
organization=self.organization,
)
# Disable Nudges for this disabled_owner
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingOption.objects.create(
type="brokenMonitors",
scope_type="user",
scope_identifier=disabled_owner.id,
user_id=disabled_owner.id,
value="never",
)
monitor.update(owner_user_id=disabled_owner.id)
second_monitor.update(owner_user_id=enabled_owner.id)
self.create_incident_for_monitor_env(monitor, monitor_environment)
self.create_incident_for_monitor_env(second_monitor, second_monitor_environment)
detect_broken_monitor_envs()
builder.return_value.send_async.assert_called_with(["enabled_owner@example.com"])
| MonitorDetectBrokenMonitorEnvTaskTest |
python | huggingface__transformers | src/transformers/models/speech_to_text/modeling_speech_to_text.py | {
"start": 13531,
"end": 16415
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Speech2TextConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = Speech2TextAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
config=config,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: bool = False,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16:
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
return hidden_states, attn_weights
# copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Speech2Text, MBART->SPEECH_TO_TEXT
# TODO: change copy when applying cache class
| Speech2TextEncoderLayer |
python | numba__numba | numba/tests/test_parallel_backend.py | {
"start": 31573,
"end": 39187
} | class ____(ThreadLayerTestHelper):
_DEBUG = False
@linux_only # os.fork required.
def test_fork_from_non_main_thread(self):
# See issue #5973 and PR #6208 for original context.
# See issue #6963 for context on the following comments:
#
# Important things to note:
# 1. Compilation of code containing an objmode block will result in the
# use of and `ObjModeLiftedWith` as the dispatcher. This inherits
# from `LiftedCode` which handles the serialization. In that
# serialization is a call to uuid.uuid1() which causes a fork_exec in
# CPython internals.
# 2. The selected parallel backend thread pool is started during the
# compilation of a function that has `parallel=True`.
# 3. The TBB backend can handle forks from the main thread, it will
# safely reinitialise after so doing. If a fork occurs from a
# non-main thread it will warn and the state is invalid in the child
# process.
#
# Due to 1. and 2. the `obj_mode_func` function separated out and is
# `njit` decorated. This means during type inference of `work` it will
# trigger a standard compilation of the function and the thread pools
# won't have started yet as the parallelisation compiler passes for
# `work` won't yet have run. This mitigates the fork() call from 1.
# occurring after 2. The result of this is that 3. can be tested using
# the threading etc herein with the state being known as the above
# described, i.e. the TBB threading layer has not experienced a fork().
runme = """if 1:
import threading
import numba
numba.config.THREADING_LAYER='tbb'
from numba import njit, prange, objmode
from numba.core.serialize import PickleCallableByPath
import os
e_running = threading.Event()
e_proceed = threading.Event()
def indirect_core():
e_running.set()
# wait for forker() to have forked
while not e_proceed.isSet():
pass
indirect = PickleCallableByPath(indirect_core)
@njit
def obj_mode_func():
with objmode():
indirect()
@njit(parallel=True, nogil=True)
def work():
acc = 0
for x in prange(10):
acc += x
obj_mode_func()
return acc
def runner():
work()
def forker():
# wait for the jit function to say it's running
while not e_running.isSet():
pass
# then fork
os.fork()
# now fork is done signal the runner to proceed to exit
e_proceed.set()
numba_runner = threading.Thread(target=runner,)
fork_runner = threading.Thread(target=forker,)
threads = (numba_runner, fork_runner)
for t in threads:
t.start()
for t in threads:
t.join()
"""
cmdline = [sys.executable, '-c', runme]
out, err = self.run_cmd(cmdline)
# assert error message printed on stderr
msg_head = "Attempted to fork from a non-main thread, the TBB library"
self.assertIn(msg_head, err)
if self._DEBUG:
print("OUT:", out)
print("ERR:", err)
@linux_only # fork required.
def test_lifetime_of_task_scheduler_handle(self):
self.skip_if_no_external_compiler() # external compiler needed
# See PR #7280 for context.
BROKEN_COMPILERS = 'SKIP: COMPILATION FAILED'
runme = """if 1:
import ctypes
import sys
import multiprocessing as mp
from tempfile import TemporaryDirectory, NamedTemporaryFile
from numba.pycc.platform import Toolchain, external_compiler_works
from numba import njit, prange, threading_layer
import faulthandler
faulthandler.enable()
if not external_compiler_works():
raise AssertionError('External compilers are not found.')
with TemporaryDirectory() as tmpdir:
with NamedTemporaryFile(dir=tmpdir) as tmpfile:
try:
src = \"\"\"
#define TBB_PREVIEW_WAITING_FOR_WORKERS 1
#include <tbb/tbb.h>
static tbb::task_scheduler_handle tsh;
extern "C"
{
void launch(void)
{
tsh = tbb::task_scheduler_handle::get();
}
}
\"\"\"
cxxfile = f"{tmpfile.name}.cxx"
with open(cxxfile, 'wt') as f:
f.write(src)
tc = Toolchain()
object_files = tc.compile_objects([cxxfile,],
output_dir=tmpdir)
dso_name = f"{tmpfile.name}.so"
tc.link_shared(dso_name, object_files,
libraries=['tbb',],
export_symbols=['launch'])
# Load into the process, it doesn't matter whether the
# DSO exists on disk once it's loaded in.
DLL = ctypes.CDLL(dso_name)
except Exception as e:
# Something is broken in compilation, could be one of
# many things including, but not limited to: missing tbb
# headers, incorrect permissions, compilers that don't
# work for the above
print(e)
print('BROKEN_COMPILERS')
sys.exit(0)
# Do the test, launch this library and also execute a
# function with the TBB threading layer.
DLL.launch()
@njit(parallel=True)
def foo(n):
acc = 0
for i in prange(n):
acc += i
return acc
foo(1)
# Check the threading layer used was TBB
assert threading_layer() == 'tbb'
# Use mp context for a controlled version of fork, this triggers the
# reported bug.
ctx = mp.get_context('fork')
def nowork():
pass
p = ctx.Process(target=nowork)
p.start()
p.join(10)
print("SUCCESS")
""".replace('BROKEN_COMPILERS', BROKEN_COMPILERS)
cmdline = [sys.executable, '-c', runme]
env = os.environ.copy()
env['NUMBA_THREADING_LAYER'] = 'tbb'
out, err = self.run_cmd(cmdline, env=env)
if BROKEN_COMPILERS in out:
self.skipTest("Compilation of DSO failed. Check output for details")
else:
self.assertIn("SUCCESS", out)
if self._DEBUG:
print("OUT:", out)
print("ERR:", err)
@skip_parfors_unsupported
| TestTBBSpecificIssues |
python | apache__airflow | providers/samba/tests/unit/samba/hooks/test_samba.py | {
"start": 1359,
"end": 7564
} | class ____:
@pytest.mark.db_test
def test_get_conn_should_fail_if_conn_id_does_not_exist(self, sdk_connection_not_found):
with pytest.raises(AirflowNotFoundException):
SambaHook("non-existed-connection-id")
@mock.patch("smbclient.register_session")
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
def test_context_manager(self, get_conn_mock, register_session):
CONNECTION = Connection(
host="ip",
schema="share",
login="username",
password="password",
)
get_conn_mock.return_value = CONNECTION
register_session.return_value = None
with SambaHook("samba_default"):
args, kwargs = tuple(register_session.call_args_list[0])
assert args == (CONNECTION.host,)
assert kwargs == {
"username": CONNECTION.login,
"password": CONNECTION.password,
"port": 445,
"connection_cache": {},
}
cache = kwargs.get("connection_cache")
mock_connection = mock.Mock()
mock_connection.disconnect.return_value = None
cache["foo"] = mock_connection
# Test that the connection was disconnected upon exit.
mock_connection.disconnect.assert_called_once()
@pytest.mark.parametrize(
"name",
[
"getxattr",
"link",
"listdir",
"listxattr",
"lstat",
"makedirs",
"mkdir",
"open_file",
"readlink",
"remove",
"removedirs",
"removexattr",
"rename",
"replace",
"rmdir",
"scandir",
"setxattr",
"stat",
"stat_volume",
"symlink",
"truncate",
"unlink",
"utime",
"walk",
],
)
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
def test_method(self, get_conn_mock, name):
CONNECTION = Connection(
host="ip",
schema="share",
login="username",
password="password",
)
get_conn_mock.return_value = CONNECTION
hook = SambaHook("samba_default")
connection_settings = {
"connection_cache": {},
"username": CONNECTION.login,
"password": CONNECTION.password,
"port": 445,
}
with mock.patch("smbclient." + name) as p:
kwargs = {}
method = getattr(hook, name)
spec = getfullargspec(method)
if spec.defaults:
for default in reversed(spec.defaults):
arg = spec.args.pop()
kwargs[arg] = default
# Ignore "self" argument.
args = spec.args[1:]
method(*args, **kwargs)
assert len(p.mock_calls) == 1
# Verify positional arguments. If the argument is a path parameter, then we expect
# the hook implementation to fully qualify the path.
p_args, p_kwargs = tuple(p.call_args_list[0])
for arg, provided in zip(args, p_args):
if arg in PATH_PARAMETER_NAMES:
expected = "//" + CONNECTION.host + "/" + CONNECTION.schema + "/" + arg
else:
expected = arg
assert expected == provided
# We expect keyword arguments to include the connection settings.
assert dict(kwargs, **connection_settings) == p_kwargs
@pytest.mark.parametrize(
("path", "path_type", "full_path"),
[
# Linux path -> Linux path, no path_type (default)
("/start/path/with/slash", None, "//ip/share/start/path/with/slash"),
("start/path/without/slash", None, "//ip/share/start/path/without/slash"),
# Linux path -> Linux path, explicit path_type (posix)
("/start/path/with/slash/posix", "posix", "//ip/share/start/path/with/slash/posix"),
("start/path/without/slash/posix", "posix", "//ip/share/start/path/without/slash/posix"),
# Linux path -> Windows path, explicit path_type (windows)
("/start/path/with/slash/windows", "windows", r"\\ip\share\start\path\with\slash\windows"),
("start/path/without/slash/windows", "windows", r"\\ip\share\start\path\without\slash\windows"),
# Windows path -> Windows path, explicit path_type (windows)
(
r"\start\path\with\backslash\windows",
"windows",
r"\\ip\share\start\path\with\backslash\windows",
),
(
r"start\path\without\backslash\windows",
"windows",
r"\\ip\share\start\path\without\backslash\windows",
),
],
)
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
def test__join_path(
self,
get_conn_mock,
path,
path_type,
full_path,
):
CONNECTION = Connection(
host="ip",
schema="share",
login="username",
password="password",
)
get_conn_mock.return_value = CONNECTION
hook = SambaHook("samba_default", share_type=path_type)
assert hook._join_path(path) == full_path
@mock.patch("airflow.providers.samba.hooks.samba.smbclient.open_file", return_value=mock.Mock())
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
def test_open_file(self, get_conn_mock, open_file_mock):
CONNECTION = Connection(
host="ip",
schema="share",
login="username",
password="password",
)
get_conn_mock.return_value = CONNECTION
samba_hook = SambaHook("samba_default")
path = "test_file.txt"
mode = "wb"
result = samba_hook.open_file(path, mode=mode)
assert result is not None, "open_file method returned None"
assert hasattr(result, "write"), f"Error: {result} does not have a 'write' method"
| TestSambaHook |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/topk_op_test.py | {
"start": 1400,
"end": 9325
} | class ____(test.TestCase):
def _validateTopK(
self,
inputs,
k,
expected_values,
expected_indices,
sorted=True,
index_type=dtypes.int32,
): # pylint: disable=redefined-builtin
np_expected_values = np.array(expected_values)
np_expected_indices = np.array(expected_indices)
with self.cached_session():
values_op, indices_op = nn_ops.top_k(
inputs, k, sorted=sorted, index_type=index_type
)
values, indices = self.evaluate([values_op, indices_op])
self.assertEqual(indices.dtype, index_type)
self.assertShapeEqual(np_expected_values, values_op)
self.assertShapeEqual(np_expected_indices, indices_op)
if sorted:
self.assertAllClose(np_expected_values, values)
# Do some special casing of equality of indices: if indices
# are not the same, but values are floating type, ensure that
# the values are within epsilon of each other.
if not np.issubdtype(np_expected_values.dtype, np.floating) and \
np_expected_values.dtype != dtypes.bfloat16.as_numpy_dtype:
# Values are not floating point type; check indices exactly
self.assertAllEqual(np_expected_indices, indices)
else:
# Values are floating point; indices may be swapped for
# values near each other.
indices_not_equal = np_expected_indices != indices
if np.any(indices_not_equal):
values_unsure = values[indices_not_equal]
expected_values_unsure = expected_values[indices_not_equal]
self.assertAllClose(expected_values_unsure, values_unsure)
else:
np_inputs = np.array(inputs)
# Check that the indices are valid.
for result_index, src_index in np.ndenumerate(indices):
value = values[result_index]
expected_value = np_inputs[result_index[0], src_index]
np.testing.assert_almost_equal(value, expected_value)
# Check that if two elements are equal, the lower-index element appears
# first.
shape = values.shape
for batch_index in range(shape[0]):
for index in range(shape[1] - 1):
if np.isclose(values[batch_index, index],
values[batch_index, index + 1]):
self.assertLess(indices[batch_index, index],
indices[batch_index, index + 1])
# Now check the results, ignoring order.
self.assertAllEqual(np.sort(np_expected_indices), np.sort(indices))
self.assertAllClose(np.sort(np_expected_values), np.sort(values))
def testTop1(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
self._validateTopK(inputs, 1, [[0.4], [0.3]], [[3], [1]])
def testTop2(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.4, 0.2]]
self._validateTopK(inputs, 2, [[0.4, 0.3], [0.4, 0.3]], [[3, 1], [2, 1]])
def testOutputIndexType(self):
for index_type in [dtypes.int16, dtypes.int32, dtypes.int64]:
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.4, 0.2]]
self._validateTopK(
inputs,
2,
[[0.4, 0.3], [0.4, 0.3]],
[[3, 1], [2, 1]],
index_type=index_type,
)
def testKType(self):
for ktype in [dtypes.int32, dtypes.int64, dtypes.int16]:
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.4, 0.2]]
self._validateTopK(
inputs,
constant_op.constant(2, dtype=ktype),
[[0.4, 0.3], [0.4, 0.3]],
[[3, 1], [2, 1]],
)
def testTop3(self):
for k in range(3, 11, 2):
for dim in range(512, 12288, 512):
inputs = np.random.permutation(
np.linspace(0, 100, dim, dtype=np.float64))
indices = np.argsort(-inputs)[:k]
values = -np.sort(-inputs)[:k]
self._validateTopK(inputs, k, values, indices)
def testTop1AllNan(self):
inputs = [[np.nan, np.nan], [np.nan, np.nan]]
self._validateTopK(inputs, 1, [[np.nan], [np.nan]], [[0], [0]])
def _testLargeSort(self, dtype):
b = 10
n = 5000
inputs = np.random.permutation(
np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
indices = np.argsort(-inputs, axis=1)
values = -np.sort(-inputs, axis=1)
self._validateTopK(inputs, n, values, indices)
def testLargeSort(self):
self._testLargeSort(np.float32)
self._testLargeSort(np.float16)
self._testLargeSort(dtypes.bfloat16.as_numpy_dtype)
def _testLargeTopK(self, dtype):
b = 10
n = 5000
k = n - 1
inputs = np.random.permutation(
np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
indices = np.argsort(-inputs, axis=1)[:, :k]
values = -np.sort(-inputs, axis=1)[:, :k]
self._validateTopK(inputs, k, values, indices)
def testLargeTopK(self):
self._testLargeTopK(np.float32)
self._testLargeTopK(np.float16)
self._testLargeTopK(dtypes.bfloat16.as_numpy_dtype)
def _testMediumTopK(self, dtype):
b = 5
n = 500
k = 50
inputs = np.random.permutation(
np.linspace(0, 100, b * n, dtype=dtype)).reshape(b, n)
indices = np.argsort(-inputs, axis=1)[:, :k]
values = -np.sort(-inputs, axis=1)[:, :k]
self._validateTopK(inputs, k, values, indices)
def testMediumTopK(self):
self._testMediumTopK(np.float32)
self._testMediumTopK(np.float16)
self._testMediumTopK(dtypes.bfloat16.as_numpy_dtype)
def testStableSort(self):
b = 5
n = 500
for k in [1, 5, 50, 500]:
# Lots of repeated integers taking values in [0, 3]
inputs = np.random.permutation(
np.linspace(0, 3, b * n, dtype=np.int32)).reshape(b, n)
# Use mergesort, a stable sort, to get the indices.
indices = np.argsort(-inputs, axis=1, kind="mergesort")[:, :k]
values = -np.sort(-inputs, axis=1)[:, :k]
self._validateTopK(inputs, k, values, indices)
def testTopAll(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
self._validateTopK(inputs, 4, [[0.4, 0.3, 0.2, 0.1], [0.3, 0.3, 0.2, 0.1]],
[[3, 1, 2, 0], [1, 2, 3, 0]])
def testTop3Unsorted(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.4, 0.3, 0.2]]
self._validateTopK(
inputs,
3, [[0.2, 0.3, 0.4], [0.2, 0.4, 0.3]], [[2, 1, 3], [3, 1, 2]],
sorted=False)
def testTop3Vector(self):
inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
self._validateTopK(inputs, 3, [19, 18, 17], [11, 3, 7])
def testTensorK(self):
inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
k = constant_op.constant(3)
self._validateTopK(inputs, k, [19, 18, 17], [11, 3, 7])
def testTop3ZeroRows(self):
inputs = np.zeros([0, 10], dtype=np.float32)
self._validateTopK(inputs, 3, np.zeros([0, 3], dtype=np.float32),
np.zeros([0, 3], dtype=np.int32))
def testKNegative(self):
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Need k >= 0, got -7|non-negative",
):
self.evaluate(nn_ops.top_k([[0.1, 0.2], [0.3, 0.4]], -7))
def testKTooLarge(self):
inputs = [[0.1, 0.2], [0.3, 0.4]]
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
r"must have last dimension >= k = 4|must have at least k",
):
self.evaluate(nn_ops.top_k(inputs, 4))
@test_util.run_deprecated_v1
def testTopKGradients(self):
with self.session() as sess:
inputs = array_ops.placeholder(dtypes.float32, shape=[2, 5])
values, _ = nn_ops.top_k(inputs, 3)
grad = sess.run(
gradients_impl.gradients(
values, inputs, grad_ys=[[[1., 2., 3.], [4., 5., 6.]]]),
feed_dict={inputs: [[2., -1., 1000., 3., 4.],
[1., 5., 2., 4., 3.]]})[0]
self.assertEqual(
grad.tolist(), [[0., 0., 1., 3., 2.], [0., 4., 0., 5., 6.]])
| TopKTest |
python | huggingface__transformers | src/transformers/models/dots1/modular_dots1.py | {
"start": 1371,
"end": 1413
} | class ____(DeepseekV3MLP):
pass
| Dots1MLP |
python | pytorch__pytorch | test/test_scatter_gather_ops.py | {
"start": 791,
"end": 23765
} | class ____(TestCase):
# Fills an index tensor with valid indices
def _fill_indices(self, idx, dim, dim_size, elems_per_row, m, n, o, unique_indices=True):
for i in range(1 if dim == 0 else m):
for j in range(1 if dim == 1 else n):
for k in range(1 if dim == 2 else o):
ii = [i, j, k]
ii[dim] = slice(0, idx.size(dim) + 1)
if unique_indices:
idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row]
else:
idx[tuple(ii)] = torch.randint(dim_size, (elems_per_row,))
@dtypes(torch.float32, torch.complex64)
def test_gather(self, device, dtype):
m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
elems_per_row = random.randint(1, 10)
dim = random.randrange(3)
src = make_tensor((m, n, o), device=device, dtype=dtype)
idx_size = [m, n, o]
idx_size[dim] = elems_per_row
idx = make_tensor(idx_size, device=device, dtype=torch.long)
self._fill_indices(idx, dim, src.size(dim), elems_per_row, m, n, o)
actual = torch.gather(src, dim, idx)
expected = torch.zeros(idx_size, device=device, dtype=dtype)
for i in range(idx_size[0]):
for j in range(idx_size[1]):
for k in range(idx_size[2]):
ii = [i, j, k]
ii[dim] = idx[i, j, k]
expected[i, j, k] = src[tuple(ii)]
self.assertEqual(actual, expected, atol=0, rtol=0)
# Guarded because torch.max isn't defined for complex types
if not dtype.is_complex:
src = make_tensor((3, 4, 5), device=device, dtype=dtype)
expected, idx = src.max(2, True)
actual = torch.gather(src, 2, idx)
self.assertEqual(actual, expected, atol=0, rtol=0)
@serialTest()
@dtypes(torch.int8, torch.bfloat16)
def test_gather_large(self, device, dtype):
# test larger shapes to check vectorized implementation
for (m, n, k) in ((4096, 3072, 4096), (4096, 3072, 4100), (4, 4, 16384 * 8192)):
torch.cuda.empty_cache()
src = make_tensor((m, k), device=device, dtype=dtype)
alloc0 = torch.empty(src.nelement() * 2, device=device, dtype=dtype)
discontig = alloc0.view(m, 2 * k)[:, ::2].copy_(src)
alloc1 = torch.empty(src.nelement() + 1, device=device, dtype=dtype)
misaligned = alloc1[1:].view(m, k).copy_(src)
alloc2 = torch.empty(m, k + 4, device=device, dtype=dtype)
misaligned1 = alloc2[:, :-4].copy_(src)
num_ind = n
for dim in (0, 1):
max_ind = src.shape[dim]
ind0 = torch.randint(max_ind, (num_ind,), device=device)
ind_discontig0 = torch.empty(num_ind * 2, device=device, dtype=torch.int64)[::2].copy_(ind0)
shape_ind = [1] * src.ndim
shape_ind[dim] = ind0.shape[0]
shape_out = list(src.shape)
shape_out[dim] = ind0.shape[0]
ind = ind0.view(shape_ind).expand(shape_out)
ind_discontig = ind_discontig0.view(shape_ind).expand(shape_out)
res = torch.gather(src, dim=dim, index=ind)
ref = src[ind0] if dim == 0 else src[:, ind0]
self.assertEqual(res, ref, atol=0, rtol=0)
if res.device.type == "cuda":
ref_cpu = src.cpu()[ind0.cpu()] if dim == 0 else src.cpu()[:, ind0.cpu()]
self.assertEqual(res.cpu(), ref_cpu, atol=0, rtol=0)
res = torch.gather(src, dim=dim, index=ind_discontig)
self.assertEqual(res, ref, atol=0, rtol=0)
res_ind = src[ind_discontig0] if dim == 0 else src[:, ind_discontig0]
self.assertEqual(res_ind, ref, atol=0, rtol=0)
res_ind_neg = src[ind0 - src.shape[dim]] if dim == 0 else src[:, ind0 - src.shape[1]]
self.assertEqual(res_ind_neg, ref, atol=0, rtol=0)
res = torch.gather(discontig, dim=dim, index=ind)
self.assertEqual(res, ref, atol=0, rtol=0)
res_ind = discontig[ind0] if dim == 0 else discontig[:, ind0]
self.assertEqual(res_ind, ref, atol=0, rtol=0)
res = torch.gather(misaligned, dim=dim, index=ind)
self.assertEqual(res, ref, atol=0, rtol=0)
res_ind = misaligned[ind0] if dim == 0 else misaligned[:, ind0]
self.assertEqual(res_ind, ref, atol=0, rtol=0)
res_ind = misaligned1[ind0] if dim == 0 else misaligned[:, ind0]
self.assertEqual(res_ind, ref, atol=0, rtol=0)
res_gather = torch.gather(misaligned1, dim=dim, index=ind)
self.assertEqual(res_gather, ref, atol=0, rtol=0)
del src, alloc0, alloc1, alloc2
del discontig, misaligned, misaligned1
# test gather along 1st dim that can accidentally trigger fast path
# because due to index dimension in the gather dim being 1
# an unexpected squashing in tensorIterator happens
src = make_tensor((16, 2, 16), device=device, dtype=dtype)
ind = torch.randint(2, (16, 1), device=device).view(16, 1, 1).expand(16, 1, 16)
res = torch.gather(src, dim=1, index=ind)
if res.device.type == "cuda":
ref_cpu = torch.gather(src.cpu(), dim=1, index=ind.cpu())
self.assertEqual(res.cpu(), ref_cpu, atol=0, rtol=0)
@dtypes(torch.bool)
def test_gather_bool(self, device, dtype):
src = torch.tensor(((False, True), (True, True)), device=device, dtype=dtype)
idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)
actual = torch.gather(src, 1, idx)
expected = torch.tensor(((False, False), (True, True)), device=device, dtype=dtype)
self.assertEqual(actual, expected, atol=0, rtol=0)
@parametrize("sparse_grad", [False, True])
@dtypes(torch.float32, torch.float64)
def test_gather_backward_with_empty_index_tensor(self, device, dtype, sparse_grad):
dim = -1
input = torch.rand([10, 5], dtype=dtype, device=device, requires_grad=True)
index = torch.randint(0, 2, [3, 0], dtype=torch.int64, device=device)
res = torch.gather(input, dim, index, sparse_grad=sparse_grad)
res.sum().backward()
grad = input.grad.to_dense() if sparse_grad else input.grad
expected_grad = torch.zeros_like(input, requires_grad=False)
self.assertEqual(grad, expected_grad, atol=0, rtol=0)
def _test_scatter_base(self, fn, *, device, dtype, is_scalar, reduction,
unique_indices=True, include_self=True):
m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
elems_per_row = random.randint(1, 10)
dim = random.randrange(3)
idx_size = [m, n, o]
idx_size[dim] = elems_per_row
idx = torch.empty(tuple(idx_size), device=device, dtype=torch.long)
self._fill_indices(idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o, unique_indices)
if is_scalar:
src = random.random()
else:
src_size = [random.randint(1, 5) + s for s in idx_size]
src = make_tensor(tuple(src_size), device=device, dtype=dtype)
base = make_tensor((m, n, o), device=device, dtype=dtype)
if reduction is not None:
if fn is torch.Tensor.scatter_reduce_:
actual = fn(base.clone(), dim, idx, src, reduce=reduction, include_self=include_self)
else:
actual = fn(base.clone(), dim, idx, src, reduce=reduction)
else:
actual = fn(base.clone(), dim, idx, src)
expected = base.clone()
counts = torch.zeros(base.shape, dtype=torch.long, device=device) + include_self
for i in range(idx_size[0]):
for j in range(idx_size[1]):
for k in range(idx_size[2]):
ii = [i, j, k]
ii[dim] = idx[i, j, k]
if fn is torch.Tensor.scatter_add_:
expected[tuple(ii)] += src[i, j, k]
else:
# method may be 'scatter_', 'scatter', 'scatter_reduce'
# or 'scatter_reduce_', the former two might have a reduction argument
# while the latter two always do
value = src if is_scalar else src[i, j, k]
if ((not include_self) and counts[tuple(ii)] == 0):
expected[tuple(ii)] = value
else:
if reduction == "add" or reduction == "sum":
expected[tuple(ii)] += value
elif reduction == "multiply" or reduction == "prod":
expected[tuple(ii)] *= value
elif reduction == "amax":
expected[tuple(ii)] = max(expected[tuple(ii)], value)
elif reduction == "amin":
expected[tuple(ii)] = min(expected[tuple(ii)], value)
elif reduction == "mean":
expected[tuple(ii)] += value
else:
expected[tuple(ii)] = value
counts[tuple(ii)] += 1
if (reduction == "mean"):
counts.masked_fill_(counts == 0, 1)
if (dtype.is_floating_point or dtype.is_complex):
expected /= counts
else:
expected.div_(counts, rounding_mode="floor")
if dtype == torch.float16 or dtype == torch.bfloat16:
# Some CUDA kernels (e.g. indexing_backward_kernel_stride_1) that are called during
# the test use fp32 for internal accumulation for improved accuracy. When using 16 bit
# precision types can be small differences
self.assertEqual(actual, expected, atol=0.04, rtol=0.05)
else:
# When we are running opportunistic_fastatomics, we will expect some floating point rounding
# errors as the order of operation is not guaranteed.
if TEST_WITH_ROCM and CDNA3OrLater() \
and not torch.are_deterministic_algorithms_enabled():
self.assertEqual(actual, expected, atol=1e-9, rtol=1e-6)
else:
self.assertEqual(actual, expected, atol=0, rtol=0)
# Tests empty index
dst = make_tensor((2, 2), device=device, dtype=dtype)
idx = torch.tensor((), device=device, dtype=torch.long)
src = make_tensor((2, 2), device=device, dtype=dtype)
if reduction is not None:
actual = fn(dst, 0, idx, src, reduce=reduction)
else:
actual = fn(dst, 0, idx, src)
self.assertEqual(actual, dst, atol=0, rtol=0)
@dtypes(torch.float16, torch.float32, torch.complex64)
def test_scatter_(self, device, dtype):
for deterministic in [False, True]:
with DeterministicGuard(deterministic):
self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
is_scalar=False, reduction=None)
@dtypes(torch.float16, torch.float32, torch.complex64)
def test_scatter__scalar(self, device, dtype):
self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
is_scalar=True, reduction=None)
# FIXME: RuntimeError: "cuda_scatter_gather_base_kernel_reduce_multiply" not implemented for 'ComplexFloat'
@toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0)})
@dtypesIfCUDA(torch.float16, torch.float32)
@dtypes(torch.float16, torch.float32, torch.complex64)
def test_scatter__reductions(self, device, dtype):
for reduction in ("add", "multiply"):
self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
is_scalar=False, reduction=reduction)
self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
is_scalar=True, reduction=reduction)
@dtypes(torch.float16, torch.float32, torch.complex64)
def test_scatter_add_(self, device, dtype):
for deterministic in [False, True]:
with DeterministicGuard(deterministic):
self._test_scatter_base(torch.Tensor.scatter_add_, device=device, dtype=dtype,
is_scalar=False, reduction=None)
@dtypes(torch.float32)
def test_scatter_add_mult_index_base(self, device, dtype):
for deterministic in [False, True]:
with DeterministicGuard(deterministic):
m, n = 30, 40
idx = torch.zeros(m, n, device=device, dtype=torch.long)
src = torch.ones(m, n, device=device, dtype=dtype)
res0 = torch.zeros(m, n, device=device, dtype=dtype).scatter_add_(0, idx, src)
res1 = torch.zeros(m, n, device=device, dtype=dtype).scatter_add_(1, idx, src)
self.assertEqual(res0[0, :], m * torch.ones(n, device=device, dtype=dtype), atol=0, rtol=0)
self.assertEqual(res1[:, 0], n * torch.ones(m, device=device, dtype=dtype), atol=0, rtol=0)
# FIXME: discrepancy between bool ReduceAdd on CUDA and CPU (a + b on CPU and buggy a && b on CUDA)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_bool=False))
def test_scatter_reduce_sum(self, device, dtype):
for include_self in (True, False):
for deterministic in [False, True]:
with DeterministicGuard(deterministic):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
is_scalar=False, reduction='sum', unique_indices=False,
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True))
@dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_prod(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
is_scalar=False, reduction='prod', unique_indices=False,
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_bool=False))
@dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_mean(self, device, dtype):
for include_self in (True, False):
for deterministic in [False, True]:
with DeterministicGuard(deterministic):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
is_scalar=False, reduction='mean', unique_indices=False,
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
@dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_amax(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
is_scalar=False, reduction='amax', unique_indices=False,
include_self=include_self)
# simple test for nan/inf propagation
if (dtype.is_floating_point):
input = torch.zeros(3, device=device, dtype=dtype)
src = torch.tensor([1, float('nan'), -float('inf'), -float('inf'), 2, float('inf')], device=device, dtype=dtype)
idx = torch.tensor([0, 0, 1, 1, 2, 2], device=device)
input.scatter_reduce_(0, idx, src, 'amax', include_self=include_self)
expected_result = torch.tensor([float('nan'), -float('inf'), float('inf')], device=device, dtype=dtype)
if (include_self):
expected_result[1] = 0
self.assertEqual(input, expected_result)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
@dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_amin(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
is_scalar=False, reduction='amin', unique_indices=False,
include_self=include_self)
# simple test for nan/inf propagation
if (dtype.is_floating_point):
input = torch.zeros(3, device=device, dtype=dtype)
src = torch.tensor([1, float('nan'), -2, -float('inf'), float('inf'), float('inf')], device=device, dtype=dtype)
idx = torch.tensor([0, 0, 1, 1, 2, 2], device=device)
input.scatter_reduce_(0, idx, src, 'amin', include_self=include_self)
expected_result = torch.tensor([float('nan'), -float('inf'), float('inf')], device=device, dtype=dtype)
if (include_self):
expected_result[2] = 0
self.assertEqual(input, expected_result)
@onlyCPU
@dtypes(torch.float32, torch.float64, torch.bfloat16, torch.float16)
def test_scatter_expanded_index(self, device, dtype):
def helper(input_size, idx_size):
input = torch.randn(input_size, device=device).to(dtype=dtype)
input2 = input.clone()
shape = [1] * len(input_size)
shape[0] = idx_size
dim_size = input_size[0]
idx = torch.randint(0, dim_size, shape)
# The fast path on scatter when index is expanded
# will depend on sorted index where the collected src indice
# for each row in input will be mapped to rowptrs in a CSR format.
# Create some empty rows by masking:
mask = (idx > 1) * (idx < 4)
idx[mask] = 0
expanded_shape = input_size
expanded_shape[0] = idx_size
idx = idx.expand(expanded_shape)
idx2 = idx.contiguous()
src = torch.randn(expanded_shape, device=device).to(dtype=dtype)
out = input.scatter_add(0, idx, src)
out2 = input2.scatter_add(0, idx2, src)
self.assertEqual(out, out2)
for reduce in ["sum", "prod", "mean", "amax", "amin"]:
for include_self in [True, False]:
out = input.scatter_reduce(0, idx, src, reduce=reduce, include_self=include_self)
out2 = input2.scatter_reduce(0, idx2, src, reduce=reduce, include_self=include_self)
self.assertEqual(out, out2)
helper([50, 17], 100)
helper([50, 1], 100)
helper([50, 8, 7], 100)
helper([50, 3, 4, 5], 100)
@dtypes(torch.float32)
def test_scatter_add_broadcasted_index_deterministic(self, device, dtype):
for d in (0, 1):
inp = torch.randn(3, 4, 5, device=device, dtype=dtype)
idx_1d = torch.randint(3, (10,), device=device)
src_shape = list(inp.shape)
src_shape[d] = 10
src = torch.randn(src_shape, device=device, dtype=dtype)
idx_view_shape = [1] * inp.ndim
idx_view_shape[d] = 10
idx = idx_1d.view(idx_view_shape).expand(src_shape)
ref = inp.clone().scatter_add_(d, idx, src)
with DeterministicGuard(True):
res = inp.clone().scatter_add_(d, idx, src)
self.assertEqual(res, ref)
@onlyCPU
@dtypes(torch.float32, torch.float64, torch.bfloat16)
def test_gather_expanded_index(self, device, dtype):
# Test when index is [N, 1], which would have stride [1, 0]
# should be excluded from the fast path when index ix expanded
input = torch.arange(25).view(5, 5)
input2 = input.to(dtype=dtype)
idx = torch.arange(5).view(5, 1)
out = torch.gather(input, 0, idx)
out2 = torch.gather(input2, 0, idx)
self.assertEqual(out.to(dtype=dtype), out2)
def helper(input_size, idx_size):
input = torch.randn(input_size, device=device).to(dtype=dtype)
input2 = input.clone()
shape = [1] * len(input_size)
shape[0] = idx_size
dim_size = input_size[0]
idx = torch.randint(0, dim_size, shape)
# Test the fast path on gather when index is expanded
expanded_shape = input_size
expanded_shape[0] = idx_size
idx = idx.expand(expanded_shape)
idx2 = idx.contiguous()
out = torch.gather(input, 0, idx)
out2 = torch.gather(input2, 0, idx2)
self.assertEqual(out, out2)
# test unsqueezed index
# expanded_index kernel can not handle the case:
# the size > 1 and stride == 1 at a dimension.
# for example: the index with size of [1, 8, 7], stride of [1, 1, 0].
# see https://github.com/pytorch/pytorch/issues/129093
def unsqueeze_helper(idx, dim):
if dim == 2:
return idx.unsqueeze(1).t()
else:
return unsqueeze_helper(idx, dim - 1).unsqueeze(dim - 1)
idx = torch.randint(0, dim_size, (input.shape[1],))
idx = unsqueeze_helper(idx, len(input_size))
expanded_shape[0] = 1
idx = idx.expand(expanded_shape)
idx2 = idx.contiguous()
out = torch.gather(input, 0, idx)
out2 = torch.gather(input2, 0, idx2)
self.assertEqual(out, out2)
helper([50, 17], 100)
helper([50, 1], 100)
helper([50, 8, 7], 100)
helper([50, 3, 4, 5], 100)
# Generic Device Test Framework instantiation, see
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# for details.
instantiate_device_type_tests(TestScatterGather, globals())
if __name__ == '__main__':
run_tests()
| TestScatterGather |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 204433,
"end": 206886
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of CreateSponsorship"""
__schema__ = github_schema
__field_names__ = (
"sponsor_id",
"sponsor_login",
"sponsorable_id",
"sponsorable_login",
"tier_id",
"amount",
"is_recurring",
"receive_emails",
"privacy_level",
"client_mutation_id",
)
sponsor_id = sgqlc.types.Field(ID, graphql_name="sponsorId")
"""The ID of the user or organization who is acting as the sponsor,
paying for the sponsorship. Required if sponsorLogin is not given.
"""
sponsor_login = sgqlc.types.Field(String, graphql_name="sponsorLogin")
"""The username of the user or organization who is acting as the
sponsor, paying for the sponsorship. Required if sponsorId is not
given.
"""
sponsorable_id = sgqlc.types.Field(ID, graphql_name="sponsorableId")
"""The ID of the user or organization who is receiving the
sponsorship. Required if sponsorableLogin is not given.
"""
sponsorable_login = sgqlc.types.Field(String, graphql_name="sponsorableLogin")
"""The username of the user or organization who is receiving the
sponsorship. Required if sponsorableId is not given.
"""
tier_id = sgqlc.types.Field(ID, graphql_name="tierId")
"""The ID of one of sponsorable's existing tiers to sponsor at.
Required if amount is not specified.
"""
amount = sgqlc.types.Field(Int, graphql_name="amount")
"""The amount to pay to the sponsorable in US dollars. Required if a
tierId is not specified. Valid values: 1-12000.
"""
is_recurring = sgqlc.types.Field(Boolean, graphql_name="isRecurring")
"""Whether the sponsorship should happen monthly/yearly or just this
one time. Required if a tierId is not specified.
"""
receive_emails = sgqlc.types.Field(Boolean, graphql_name="receiveEmails")
"""Whether the sponsor should receive email updates from the
sponsorable.
"""
privacy_level = sgqlc.types.Field(SponsorshipPrivacy, graphql_name="privacyLevel")
"""Specify whether others should be able to see that the sponsor is
sponsoring the sponsorable. Public visibility still does not
reveal which tier is used.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| CreateSponsorshipInput |
python | Farama-Foundation__Gymnasium | tests/wrappers/test_array_conversion.py | {
"start": 1861,
"end": 9470
} | class ____(NamedTuple):
a: Any # Array API compatible object. Does not have proper typing support yet.
b: Any # Same as a
def _supports_higher_precision(xp, low_type, high_type):
"""Check if an array module supports higher precision type."""
return xp.result_type(low_type, high_type) == high_type
# When converting between array modules (source → target → source), we need to ensure that the
# precision used is supported by both modules. If either module only supports 32-bit types, we must
# use the lower precision to account for the conversion during the roundtrip.
def atleast_float32(source_xp, target_xp):
"""Return source_xp.float64 if both modules support it, otherwise source_xp.float32."""
source_supports_64 = _supports_higher_precision(
source_xp, source_xp.float32, source_xp.float64
)
target_supports_64 = _supports_higher_precision(
target_xp, target_xp.float32, target_xp.float64
)
return (
source_xp.float64
if (source_supports_64 and target_supports_64)
else source_xp.float32
)
def atleast_int32(source_xp, target_xp):
"""Return source_xp.int64 if both modules support it, otherwise source_xp.int32."""
source_supports_64 = _supports_higher_precision(
source_xp, source_xp.int32, source_xp.int64
)
target_supports_64 = _supports_higher_precision(
target_xp, target_xp.int32, target_xp.int64
)
return (
source_xp.int64
if (source_supports_64 and target_supports_64)
else source_xp.int32
)
def value_parametrization():
for source_xp, target_xp in installed_modules_combinations:
xp = module_namespace(source_xp)
source_xp = module_namespace(source_xp)
target_xp = module_namespace(target_xp)
for value, expected_value in [
(2, xp.asarray(2, dtype=atleast_int32(source_xp, target_xp))),
(
(3.0, 4),
(
xp.asarray(3.0, dtype=atleast_float32(source_xp, target_xp)),
xp.asarray(4, dtype=atleast_int32(source_xp, target_xp)),
),
),
(
[3.0, 4],
[
xp.asarray(3.0, dtype=atleast_float32(source_xp, target_xp)),
xp.asarray(4, dtype=atleast_int32(source_xp, target_xp)),
],
),
(
{
"a": 6.0,
"b": 7,
},
{
"a": xp.asarray(6.0, dtype=atleast_float32(source_xp, target_xp)),
"b": xp.asarray(7, dtype=atleast_int32(source_xp, target_xp)),
},
),
(xp.asarray(1.0, dtype=xp.float32), xp.asarray(1.0, dtype=xp.float32)),
(xp.asarray(1.0, dtype=xp.uint8), xp.asarray(1.0, dtype=xp.uint8)),
(xp.asarray([1, 2], dtype=xp.int32), xp.asarray([1, 2], dtype=xp.int32)),
(
xp.asarray([[1.0], [2.0]], dtype=xp.int32),
xp.asarray([[1.0], [2.0]], dtype=xp.int32),
),
(
{
"a": (
1,
xp.asarray(2.0, dtype=xp.float32),
xp.asarray([3, 4], dtype=xp.int32),
),
"b": {"c": 5},
},
{
"a": (
xp.asarray(1, dtype=atleast_int32(source_xp, target_xp)),
xp.asarray(2.0, dtype=xp.float32),
xp.asarray([3, 4], dtype=xp.int32),
),
"b": {
"c": xp.asarray(5, dtype=atleast_int32(source_xp, target_xp))
},
},
),
(
ExampleNamedTuple(
a=xp.asarray([1, 2], dtype=xp.int32),
b=xp.asarray([1.0, 2.0], dtype=xp.float32),
),
ExampleNamedTuple(
a=xp.asarray([1, 2], dtype=xp.int32),
b=xp.asarray([1.0, 2.0], dtype=xp.float32),
),
),
(None, None),
]:
yield (source_xp, target_xp, value, expected_value)
@pytest.mark.parametrize(
"source_xp,target_xp,value,expected_value", value_parametrization()
)
def test_roundtripping(source_xp, target_xp, value, expected_value):
"""Test roundtripping between different Array API compatible frameworks."""
roundtripped_value = array_conversion(
array_conversion(value, xp=target_xp), xp=source_xp
)
assert xp_data_equivalence(roundtripped_value, expected_value)
@pytest.mark.parametrize("env_xp, target_xp", installed_modules_combinations)
def test_array_conversion_wrapper(env_xp, target_xp):
# Define reset and step functions without partial to avoid pickling issues
def reset_func(self, seed=None, options=None):
"""A generic array API reset function."""
return env_xp.asarray([1.0, 2.0, 3.0]), {"data": env_xp.asarray([1, 2, 3])}
def step_func(self, action):
"""A generic array API step function."""
assert isinstance(action, type(env_xp.zeros(1)))
return (
env_xp.asarray([1, 2, 3]),
env_xp.asarray(5.0),
env_xp.asarray(True),
env_xp.asarray(False),
{"data": env_xp.asarray([1.0, 2.0])},
)
env = GenericTestEnv(reset_func=reset_func, step_func=step_func)
# Check that the reset and step for env_xp environment are as expected
obs, info = env.reset()
# env_xp is automatically converted to the compatible namespace by array_namespace, so we need
# to check against the compatible namespace of env_xp in array_api_compat
env_xp_compat = module_namespace(env_xp)
assert array_namespace(obs) is env_xp_compat
assert isinstance(info, dict) and array_namespace(info["data"]) is env_xp_compat
obs, reward, terminated, truncated, info = env.step(env_xp_compat.asarray([1, 2]))
assert array_namespace(obs) is env_xp_compat
assert array_namespace(reward) is env_xp_compat
assert array_namespace(terminated) is env_xp_compat
assert array_namespace(truncated) is env_xp_compat
assert isinstance(info, dict) and array_namespace(info["data"]) is env_xp_compat
# Check that the wrapped version is correct.
target_xp_compat = module_namespace(target_xp)
wrapped_env = ArrayConversion(env, env_xp=env_xp, target_xp=target_xp)
obs, info = wrapped_env.reset()
assert array_namespace(obs) is target_xp_compat
assert isinstance(info, dict) and array_namespace(info["data"]) is target_xp_compat
action = target_xp.asarray([1, 2], dtype=target_xp.int32)
obs, reward, terminated, truncated, info = wrapped_env.step(action)
assert array_namespace(obs) is target_xp_compat
assert isinstance(reward, float)
assert isinstance(terminated, bool) and isinstance(truncated, bool)
assert isinstance(info, dict) and array_namespace(info["data"]) is target_xp_compat
# Check that the wrapped environment can render. This implicitly returns None and requires a
# None -> None conversion
wrapped_env.render()
# Test that the wrapped environment can be pickled
env = gymnasium.make("CartPole-v1", disable_env_checker=True)
wrapped_env = ArrayConversion(env, env_xp=env_xp, target_xp=target_xp)
pkl = pickle.dumps(wrapped_env)
pickle.loads(pkl)
| ExampleNamedTuple |
python | huggingface__transformers | tests/models/qwen2_vl/test_modeling_qwen2_vl.py | {
"start": 1467,
"end": 5544
} | class ____:
def __init__(
self,
parent,
batch_size=3,
seq_length=7,
num_channels=3,
ignore_index=-100,
image_size=14,
text_config={
"bos_token_id": 0,
"eos_token_id": 1,
"pad_token_id": 2,
"hidden_act": "silu",
"hidden_size": 32,
"vocab_size": 99,
"intermediate_size": 37,
"max_position_embeddings": 512,
"max_window_layers": 3,
"num_attention_heads": 4,
"num_hidden_layers": 2,
"num_key_value_heads": 2,
"rope_theta": 10000,
"tie_word_embeddings": True,
"rope_parameters": {"type": "mrope", "mrope_section": [2, 1, 1]},
},
vision_start_token_id=3,
image_token_id=4,
video_token_id=5,
is_training=True,
vision_config={
"depth": 2,
"embed_dim": 32,
"hidden_act": "quick_gelu",
"hidden_size": 32,
"mlp_ratio": 4,
"num_heads": 4,
"patch_size": 14,
"spatial_merge_size": 1,
"temporal_patch_size": 2,
},
):
self.parent = parent
self.ignore_index = ignore_index
self.bos_token_id = text_config["bos_token_id"]
self.eos_token_id = text_config["eos_token_id"]
self.pad_token_id = text_config["pad_token_id"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.num_attention_heads = text_config["num_attention_heads"]
self.hidden_size = text_config["hidden_size"]
self.vision_start_token_id = vision_start_token_id
self.image_token_id = image_token_id
self.video_token_id = video_token_id
self.text_config = text_config
self.vision_config = vision_config
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.is_training = is_training
self.vocab_size = text_config["vocab_size"]
self.num_image_tokens = 32
self.seq_length = seq_length + self.num_image_tokens
def get_config(self):
return Qwen2VLConfig(
text_config=self.text_config,
vision_config=self.vision_config,
vision_start_token_id=self.vision_start_token_id,
image_token_id=self.image_token_id,
video_token_id=self.video_token_id,
)
def prepare_config_and_inputs(self):
config = self.get_config()
patch_size = config.vision_config.patch_size
temporal_patch_size = config.vision_config.temporal_patch_size
pixel_values = floats_tensor(
[
self.batch_size * (self.image_size**2) // (patch_size**2),
self.num_channels * (patch_size**2) * temporal_patch_size,
]
)
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
input_ids[:, -1] = self.pad_token_id
attention_mask[:, -1] = 0
input_ids[input_ids == self.video_token_id] = self.pad_token_id
input_ids[input_ids == self.image_token_id] = self.pad_token_id
input_ids[input_ids == self.vision_start_token_id] = self.pad_token_id
input_ids[:, self.num_image_tokens] = self.image_token_id
input_ids[:, self.num_image_tokens - 1] = self.vision_start_token_id
inputs_dict = {
"pixel_values": pixel_values,
"image_grid_thw": torch.tensor([[1, 1, 1]] * self.batch_size, device=torch_device),
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
| Qwen2VLVisionText2TextModelTester |
python | walkccc__LeetCode | solutions/3461. Check If Digits Are Equal in String After Operations I/3461.py | {
"start": 0,
"end": 903
} | class ____:
def hasSameDigits(self, s: str) -> bool:
n = len(s)
num1 = 0
num2 = 0
for i in range(n - 1):
coefficient = self._nCMOD10(n - 2, i)
num1 += (coefficient * (int(s[i]) - 0)) % 10
num1 %= 10
num2 += (coefficient * (int(s[i + 1]) - 0)) % 10
num2 %= 10
return num1 == num2
def _nCMOD10(self, n: int, k: int) -> int:
"""Returns (n, k) % 10."""
mod2 = self._lucasTheorem(n, k, 2)
mod5 = self._lucasTheorem(n, k, 5)
lookup = [
[0, 6, 2, 8, 4], # mod2 == 0
[5, 1, 7, 3, 9] # mod2 == 1
]
return lookup[mod2][mod5]
def _lucasTheorem(self, n: int, k: int, prime: int) -> int:
"""Returns (n, k) % prime."""
res = 1
while n > 0 or k > 0:
nMod = n % prime
MOD = k % prime
res *= math.comb(nMod, MOD)
res %= prime
n //= prime
k //= prime
return res
| Solution |
python | altair-viz__altair | altair/utils/display.py | {
"start": 1401,
"end": 5145
} | class ____(PluginRegistry[RendererType, MimeBundleType]):
entrypoint_err_messages = {
"notebook": textwrap.dedent(
"""
To use the 'notebook' renderer, you must install the vega package
and the associated Jupyter extension.
See https://altair-viz.github.io/getting_started/installation.html
for more information.
"""
),
}
def set_embed_options(
self,
defaultStyle: bool | str | None = None,
renderer: str | None = None,
width: int | None = None,
height: int | None = None,
padding: int | None = None,
scaleFactor: float | None = None,
actions: bool | dict[str, bool] | None = None,
format_locale: str | dict | None = None,
time_format_locale: str | dict | None = None,
**kwargs,
) -> PluginEnabler:
"""
Set options for embeddings of Vega & Vega-Lite charts.
Options are fully documented at https://github.com/vega/vega-embed.
Similar to the `enable()` method, this can be used as either
a persistent global switch, or as a temporary local setting using
a context manager (i.e. a `with` statement).
Parameters
----------
defaultStyle : bool or string
Specify a default stylesheet for embed actions.
renderer : string
The renderer to use for the view. One of "canvas" (default) or "svg"
width : integer
The view width in pixels
height : integer
The view height in pixels
padding : integer
The view padding in pixels
scaleFactor : number
The number by which to multiply the width and height (default 1)
of an exported PNG or SVG image.
actions : bool or dict
Determines if action links ("Export as PNG/SVG", "View Source",
"View Vega" (only for Vega-Lite), "Open in Vega Editor") are
included with the embedded view. If the value is true, all action
links will be shown and none if the value is false. This property
can take a key-value mapping object that maps keys (export, source,
compiled, editor) to boolean values for determining if
each action link should be shown.
format_locale : str or dict
d3-format locale name or dictionary. Defaults to "en-US" for United States English.
See https://github.com/d3/d3-format/tree/main/locale for available names and example
definitions.
time_format_locale : str or dict
d3-time-format locale name or dictionary. Defaults to "en-US" for United States English.
See https://github.com/d3/d3-time-format/tree/main/locale for available names and example
definitions.
**kwargs :
Additional options are passed directly to embed options.
"""
options: dict[str, bool | str | float | dict[str, bool] | None] = {
"defaultStyle": defaultStyle,
"renderer": renderer,
"width": width,
"height": height,
"padding": padding,
"scaleFactor": scaleFactor,
"actions": actions,
"formatLocale": format_locale,
"timeFormatLocale": time_format_locale,
}
kwargs.update({key: val for key, val in options.items() if val is not None})
return self.enable(None, embed_options=kwargs)
# ==============================================================================
# VegaLite v1/v2 renderer logic
# ==============================================================================
| RendererRegistry |
python | ipython__ipython | tests/test_pylabtools.py | {
"start": 5171,
"end": 11449
} | class ____(InteractiveShell):
def init_history(self):
"""Sets up the command history, and starts regular autosaves."""
self.config.HistoryManager.hist_file = ":memory:"
super().init_history()
def enable_gui(self, gui):
pass
def test_just_shell_no_leak(shell_pylab_fixture):
s = shell_pylab_fixture
def test_qt(shell_pylab_fixture):
s = shell_pylab_fixture
gui, backend = s.enable_matplotlib(None)
assert gui == "qt"
assert s.pylab_gui_select == "qt"
gui, backend = s.enable_matplotlib("inline")
assert gui is None
assert s.pylab_gui_select == "qt"
gui, backend = s.enable_matplotlib("qt")
assert gui == "qt"
assert s.pylab_gui_select == "qt"
gui, backend = s.enable_matplotlib("inline")
assert gui is None
assert s.pylab_gui_select == "qt"
gui, backend = s.enable_matplotlib()
assert gui == "qt"
assert s.pylab_gui_select == "qt"
s.configurables = []
s.history_manager = None
def test_inline(shell_pylab_fixture):
s = shell_pylab_fixture
gui, backend = s.enable_matplotlib("inline")
assert gui is None
assert s.pylab_gui_select == None
gui, backend = s.enable_matplotlib("inline")
assert gui is None
assert s.pylab_gui_select == None
gui, backend = s.enable_matplotlib("qt")
assert gui == "qt"
assert s.pylab_gui_select == "qt"
def test_inline_twice(shell_pylab_fixture):
"Using '%matplotlib inline' twice should not reset formatters"
ip = shell_pylab_fixture
gui, backend = ip.enable_matplotlib("inline")
assert gui is None
fmts = {"png"}
active_mimes = {_fmt_mime_map[fmt] for fmt in fmts}
pt.select_figure_formats(ip, fmts)
gui, backend = ip.enable_matplotlib("inline")
assert gui is None
for mime, f in ip.display_formatter.formatters.items():
if mime in active_mimes:
assert Figure in f
else:
assert Figure not in f
def test_qt_gtk(shell_pylab_fixture):
s = shell_pylab_fixture
gui, backend = s.enable_matplotlib("qt")
assert gui == "qt"
assert s.pylab_gui_select == "qt"
gui, backend = s.enable_matplotlib("gtk3")
assert gui == "qt"
assert s.pylab_gui_select == "qt"
@dec.skipif(not pt._matplotlib_manages_backends())
def test_backend_module_name_case_sensitive(shell_pylab_fixture):
# Matplotlib backend names are case insensitive unless explicitly specified using
# "module://some_module.some_name" syntax which are case sensitive for mpl >= 3.9.1
all_lowercase = "module://matplotlib_inline.backend_inline"
some_uppercase = "module://matplotlib_inline.Backend_inline"
mpl3_9_1 = matplotlib.__version_info__ >= (3, 9, 1)
s = shell_pylab_fixture
s.enable_matplotlib(all_lowercase)
if mpl3_9_1:
with pytest.raises(RuntimeError):
s.enable_matplotlib(some_uppercase)
else:
s.enable_matplotlib(some_uppercase)
s.run_line_magic("matplotlib", all_lowercase)
if mpl3_9_1:
with pytest.raises(RuntimeError):
s.run_line_magic("matplotlib", some_uppercase)
else:
s.run_line_magic("matplotlib", some_uppercase)
def test_no_gui_backends():
for k in ["agg", "svg", "pdf", "ps"]:
assert k not in pt.backend2gui
def test_figure_no_canvas():
fig = Figure()
fig.canvas = None
pt.print_figure(fig)
@pytest.mark.parametrize(
"name, expected_gui, expected_backend",
[
# name is gui
("gtk3", "gtk3", "gtk3agg"),
("gtk4", "gtk4", "gtk4agg"),
("headless", None, "agg"),
("osx", "osx", "macosx"),
("qt", "qt", "qtagg"),
("qt5", "qt5", "qt5agg"),
("qt6", "qt6", "qtagg"),
("tk", "tk", "tkagg"),
("wx", "wx", "wxagg"),
# name is backend
("agg", None, "agg"),
("cairo", None, "cairo"),
("pdf", None, "pdf"),
("ps", None, "ps"),
("svg", None, "svg"),
("template", None, "template"),
("gtk3agg", "gtk3", "gtk3agg"),
("gtk3cairo", "gtk3", "gtk3cairo"),
("gtk4agg", "gtk4", "gtk4agg"),
("gtk4cairo", "gtk4", "gtk4cairo"),
("macosx", "osx", "macosx"),
("nbagg", "nbagg", "nbagg"),
("notebook", "nbagg", "notebook"),
("qtagg", "qt", "qtagg"),
("qtcairo", "qt", "qtcairo"),
("qt5agg", "qt5", "qt5agg"),
("qt5cairo", "qt5", "qt5cairo"),
("tkagg", "tk", "tkagg"),
("tkcairo", "tk", "tkcairo"),
("webagg", "webagg", "webagg"),
("wxagg", "wx", "wxagg"),
("wxcairo", "wx", "wxcairo"),
],
)
def test_backend_builtin(name, expected_gui, expected_backend):
# Test correct identification of Matplotlib built-in backends without importing and using them,
# otherwise we would need to ensure all the complex dependencies such as windowing toolkits are
# installed.
mpl_manages_backends = pt._matplotlib_manages_backends()
if not mpl_manages_backends:
# Backends not supported before _matplotlib_manages_backends or supported
# but with different expected_gui or expected_backend.
if (
name.endswith("agg")
or name.endswith("cairo")
or name in ("headless", "macosx", "pdf", "ps", "svg", "template")
):
pytest.skip()
elif name == "qt6":
expected_backend = "qtagg"
elif name == "notebook":
expected_backend, expected_gui = expected_gui, expected_backend
gui, backend = pt.find_gui_and_backend(name)
if not mpl_manages_backends:
gui = gui.lower() if gui else None
backend = backend.lower() if backend else None
assert gui == expected_gui
assert backend == expected_backend
def test_backend_entry_point():
gui, backend = pt.find_gui_and_backend("inline")
assert gui is None
expected_backend = (
"inline"
if pt._matplotlib_manages_backends()
else "module://matplotlib_inline.backend_inline"
)
assert backend == expected_backend
def test_backend_unknown():
with pytest.raises(RuntimeError if pt._matplotlib_manages_backends() else KeyError):
pt.find_gui_and_backend("name-does-not-exist")
| Shell |
python | fluentpython__example-code-2e | 02-array-seq/lispy/py3.9/lis.py | {
"start": 1666,
"end": 5266
} | class ____(ChainMap[Symbol, Any]):
"A ChainMap that allows changing an item in-place."
def change(self, key: Symbol, value: object) -> None:
"Find where key is defined and change the value there."
for map in self.maps:
if key in map:
map[key] = value # type: ignore[index]
return
raise KeyError(key)
def standard_env() -> Environment:
"An environment with some Scheme standard procedures."
env = Environment()
env.update(vars(math)) # sin, cos, sqrt, pi, ...
env.update({
'+': op.add,
'-': op.sub,
'*': op.mul,
'/': op.truediv,
'quotient': op.floordiv,
'>': op.gt,
'<': op.lt,
'>=': op.ge,
'<=': op.le,
'=': op.eq,
'abs': abs,
'append': lambda *args: list(chain(*args)),
'apply': lambda proc, args: proc(*args),
'begin': lambda *x: x[-1],
'car': lambda x: x[0],
'cdr': lambda x: x[1:],
'cons': lambda x, y: [x] + y,
'display': lambda x: print(lispstr(x)),
'eq?': op.is_,
'equal?': op.eq,
'filter': lambda *args: list(filter(*args)),
'length': len,
'list': lambda *x: list(x),
'list?': lambda x: isinstance(x, list),
'map': lambda *args: list(map(*args)),
'max': max,
'min': min,
'not': op.not_,
'null?': lambda x: x == [],
'number?': lambda x: isinstance(x, (int, float)),
'procedure?': callable,
'round': round,
'symbol?': lambda x: isinstance(x, Symbol),
})
return env
################ Interaction: A REPL
def repl(prompt: str = 'lis.py> ') -> NoReturn:
"A prompt-read-eval-print loop."
global_env = Environment({}, standard_env())
while True:
ast = parse(input(prompt))
val = evaluate(ast, global_env)
if val is not None:
print(lispstr(val))
def lispstr(exp: object) -> str:
"Convert a Python object back into a Lisp-readable string."
if isinstance(exp, list):
return '(' + ' '.join(map(lispstr, exp)) + ')'
else:
return str(exp)
################ Evaluator
# tag::EVAL_IF_TOP[]
def evaluate(exp: Expression, env: Environment) -> Any:
"Evaluate an expression in an environment."
if isinstance(exp, Symbol): # variable reference
return env[exp]
# end::EVAL_IF_TOP[]
elif not isinstance(exp, list): # constant literal
return exp
# tag::EVAL_IF_MIDDLE[]
elif exp[0] == 'quote': # (quote exp)
(_, x) = exp
return x
elif exp[0] == 'if': # (if test conseq alt)
(_, test, consequence, alternative) = exp
if evaluate(test, env):
return evaluate(consequence, env)
else:
return evaluate(alternative, env)
elif exp[0] == 'lambda': # (lambda (parm…) body…)
(_, parms, *body) = exp
return Procedure(parms, body, env)
elif exp[0] == 'define':
(_, name, value_exp) = exp
env[name] = evaluate(value_exp, env)
# end::EVAL_IF_MIDDLE[]
elif exp[0] == 'set!':
(_, name, value_exp) = exp
env.change(name, evaluate(value_exp, env))
else: # (proc arg…)
(func_exp, *args) = exp
proc = evaluate(func_exp, env)
args = [evaluate(arg, env) for arg in args]
return proc(*args)
| Environment |
python | faif__python-patterns | patterns/creational/abstract_factory.py | {
"start": 1433,
"end": 1568
} | class ____(Pet):
def speak(self) -> None:
print("woof")
def __str__(self) -> str:
return f"Dog<{self.name}>"
| Dog |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/slots1.py | {
"start": 1138,
"end": 1240
} | class ____(Slots1, NoSlots2):
def __init__(self):
self.bbb = 1
self.fff = 1
| NoSlots1_1 |
python | getsentry__sentry | src/sentry/options/manager.py | {
"start": 2837,
"end": 5654
} | class ____(KeyError):
pass
DEFAULT_FLAGS = 1 << 0
# Value can't be changed at runtime
FLAG_IMMUTABLE = 1 << 1
# Don't check/set in the datastore. Option only exists from file.
FLAG_NOSTORE = 1 << 2
# Values that should only exist in datastore, and shouldn't exist in
# config files.
FLAG_STOREONLY = 1 << 3
# Values that must be defined for setup to be considered complete
FLAG_REQUIRED = 1 << 4
# If the value is defined on disk, use that and don't attempt to fetch from db.
# This also make the value immutable to changes from web UI.
FLAG_PRIORITIZE_DISK = 1 << 5
# If the value is allowed to be empty to be considered valid
FLAG_ALLOW_EMPTY = 1 << 6
# Values that are credentials should not show up in web UI.
FLAG_CREDENTIAL = 1 << 7
# Values that are meant to be modified live, eg. for rollout etc.
FLAG_ADMIN_MODIFIABLE = 1 << 8
# Values that are rates, between [0,1]
FLAG_RATE = 1 << 9
# Values that are bools
FLAG_BOOL = 1 << 10
# Value can be dynamically updated by automator
FLAG_AUTOMATOR_MODIFIABLE = 1 << 11
# Values that are scalar numeric integer values
FLAG_SCALAR = 1 << 12
FLAG_MODIFIABLE_RATE = FLAG_ADMIN_MODIFIABLE | FLAG_RATE
FLAG_MODIFIABLE_BOOL = FLAG_ADMIN_MODIFIABLE | FLAG_BOOL
FLAG_MODIFIABLE_SCALAR = FLAG_ADMIN_MODIFIABLE | FLAG_SCALAR
# These flags combinations prevent the `register` method from succeeding.
INVALID_COMBINATIONS = {
FLAG_ADMIN_MODIFIABLE | FLAG_NOSTORE,
FLAG_ADMIN_MODIFIABLE | FLAG_IMMUTABLE,
FLAG_ADMIN_MODIFIABLE | FLAG_CREDENTIAL,
FLAG_AUTOMATOR_MODIFIABLE | FLAG_NOSTORE,
FLAG_AUTOMATOR_MODIFIABLE | FLAG_IMMUTABLE,
FLAG_AUTOMATOR_MODIFIABLE | FLAG_CREDENTIAL,
# A flag may only be one of a bool, rate, or scalar.
FLAG_RATE | FLAG_BOOL,
FLAG_BOOL | FLAG_SCALAR,
FLAG_SCALAR | FLAG_RATE,
# An option being required does not strictly mean that it cannot be updated by
# the Automator. The issue is on why they exist. Most of them are set by the
# application itself during the first initialization.
# That flow cannot, like anything else in the application, cannot update the
# configMap
FLAG_AUTOMATOR_MODIFIABLE | FLAG_REQUIRED,
}
# How long will a cache key exist in local memory before being evicted
DEFAULT_KEY_TTL = 10
# How long will a cache key exist in local memory *after ttl* while the backing store is erroring
DEFAULT_KEY_GRACE = 60
# Some update channel can only update options that have a specific flag.
# This dictionary contains the mapping between update channels and required
# flag.
# If a channel is not in the dictionary it does not have restrictions.
WRITE_REQUIRED_FLAGS = {
UpdateChannel.ADMIN: FLAG_ADMIN_MODIFIABLE,
UpdateChannel.AUTOMATOR: FLAG_AUTOMATOR_MODIFIABLE,
}
def _make_cache_key(key):
return "o:%s" % md5_text(key).hexdigest()
| UnknownOption |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 2520,
"end": 2697
} | class ____(TypedDict):
action: Literal["compute", "transfer", "disk-read", "disk-write", "deserialize"]
start: float
stop: float
source: NotRequired[str]
| StartStop |
python | pallets__itsdangerous | tests/test_itsdangerous/test_url_safe.py | {
"start": 587,
"end": 793
} | class ____(TestURLSafeSerializer, TestTimedSerializer):
@pytest.fixture()
def serializer_factory(self):
return partial(URLSafeTimedSerializer, secret_key="secret-key")
| TestURLSafeTimedSerializer |
python | getsentry__sentry | src/sentry/snuba/entity_subscription.py | {
"start": 9878,
"end": 10053
} | class ____(BaseEventsAndTransactionEntitySubscription):
query_type = SnubaQuery.Type.PERFORMANCE
dataset = Dataset.Transactions
| PerformanceTransactionsEntitySubscription |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1045559,
"end": 1046702
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"created_at",
"emoji",
"emoji_html",
"expires_at",
"indicates_limited_availability",
"message",
"organization",
"updated_at",
"user",
)
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
emoji = sgqlc.types.Field(String, graphql_name="emoji")
emoji_html = sgqlc.types.Field(HTML, graphql_name="emojiHTML")
expires_at = sgqlc.types.Field(DateTime, graphql_name="expiresAt")
indicates_limited_availability = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="indicatesLimitedAvailability"
)
message = sgqlc.types.Field(String, graphql_name="message")
organization = sgqlc.types.Field(Organization, graphql_name="organization")
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
user = sgqlc.types.Field(sgqlc.types.non_null(User), graphql_name="user")
| UserStatus |
python | huggingface__transformers | src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py | {
"start": 10461,
"end": 11141
} | class ____(nn.Module):
def __init__(self, config: VitPoseBackboneConfig):
super().__init__()
in_features = out_features = config.hidden_size
hidden_features = int(config.hidden_size * config.mlp_ratio)
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
self.activation = ACT2FN[config.hidden_act]
self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.fc1(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.fc2(hidden_state)
return hidden_state
| VitPoseBackboneMLP |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 10464,
"end": 10633
} | class ____(ProjectVersionEditMixin, UpdateView):
success_message = _("Version updated")
template_name = "projects/project_version_detail.html"
| ProjectVersionDetail |
python | django__django | tests/forms_tests/templatetags/tags.py | {
"start": 66,
"end": 415
} | class ____(Node):
count = 0
def render(self, context):
self.count += 1
for v in context.flatten().values():
try:
str(v)
except AttributeError:
pass
return str(self.count)
@register.tag
def count_render(parser, token):
return CountRenderNode()
| CountRenderNode |
python | spack__spack | lib/spack/spack/solver/asp.py | {
"start": 4644,
"end": 9585
} | class ____(NamedTuple):
"""A named tuple describing an optimization criteria."""
priority: int
value: int
name: str
kind: OptimizationKind
def build_criteria_names(costs, arg_tuples):
"""Construct an ordered mapping from criteria names to costs."""
# pull optimization criteria names out of the solution
priorities_names = []
for args in arg_tuples:
priority, name = args[:2]
priority = int(priority)
# Add the priority of this opt criterion and its name
if priority < fixed_priority_offset:
# if the priority is less than fixed_priority_offset, then it
# has an associated build priority -- the same criterion but for
# nodes that we have to build.
priorities_names.append((priority, name, OptimizationKind.CONCRETE))
build_priority = priority + build_priority_offset
priorities_names.append((build_priority, name, OptimizationKind.BUILD))
else:
priorities_names.append((priority, name, OptimizationKind.OTHER))
# sort the criteria by priority
priorities_names = sorted(priorities_names, reverse=True)
# We only have opt-criterion values for non-error types
# error type criteria are excluded (they come first)
error_criteria = len(costs) - len(priorities_names)
costs = costs[error_criteria:]
return [
OptimizationCriteria(priority, value, name, status)
for (priority, name, status), value in zip(priorities_names, costs)
]
def specify(spec):
if isinstance(spec, spack.spec.Spec):
return spec
return spack.spec.Spec(spec)
def remove_facts(
*to_be_removed: str,
) -> Callable[[spack.spec.Spec, List[AspFunction]], List[AspFunction]]:
"""Returns a transformation function that removes facts from the input list of facts."""
def _remove(spec: spack.spec.Spec, facts: List[AspFunction]) -> List[AspFunction]:
return list(filter(lambda x: x.args[0] not in to_be_removed, facts))
return _remove
def dag_closure_by_deptype(spec: spack.spec.Spec, facts: List[AspFunction]) -> List[AspFunction]:
edges = spec.edges_to_dependencies()
# Compute the "link" transitive closure with `when: root ^[deptypes=link] <this_pkg>`
if len(edges) == 1:
edge = edges[0]
if not edge.direct and edge.depflag == dt.LINK | dt.RUN:
root, leaf = edge.parent.name, edge.spec.name
return [fn.attr("closure", root, leaf, "linkrun")]
return facts
def libc_is_compatible(lhs: spack.spec.Spec, rhs: spack.spec.Spec) -> bool:
return (
lhs.name == rhs.name
and lhs.external_path == rhs.external_path
and lhs.version >= rhs.version
)
def c_compiler_runs(compiler) -> bool:
return CompilerPropertyDetector(compiler).compiler_verbose_output() is not None
def extend_flag_list(flag_list, new_flags):
"""Extend a list of flags, preserving order and precedence.
Add new_flags at the end of flag_list. If any flags in new_flags are
already in flag_list, they are moved to the end so that they take
higher precedence on the compile line.
"""
for flag in new_flags:
if flag in flag_list:
flag_list.remove(flag)
flag_list.append(flag)
def _reorder_flags(flag_list: List[spack.spec.CompilerFlag]) -> List[spack.spec.CompilerFlag]:
"""Reorder a list of flags to ensure that the order matches that of the flag group."""
if not flag_list:
return []
if len({x.flag_group for x in flag_list}) != 1 or len({x.source for x in flag_list}) != 1:
raise InternalConcretizerError(
"internal solver error: cannot reorder compiler flags for concretized specs. "
"Please report a bug at https://github.com/spack/spack/issues"
)
flag_group = flag_list[0].flag_group
flag_source = flag_list[0].source
flag_propagate = flag_list[0].propagate
# Once we have the flag_group, no need to iterate over the flag_list because the
# group represents all of them
return [
spack.spec.CompilerFlag(
flag, propagate=flag_propagate, flag_group=flag_group, source=flag_source
)
for flag, propagate in spack.compilers.flags.tokenize_flags(
flag_group, propagate=flag_propagate
)
]
def check_packages_exist(specs):
"""Ensure all packages mentioned in specs exist."""
repo = spack.repo.PATH
for spec in specs:
for s in spec.traverse():
try:
check_passed = repo.repo_for_pkg(s).exists(s.name) or repo.is_virtual(s.name)
except Exception as e:
msg = "Cannot find package: {0}".format(str(e))
check_passed = False
tty.debug(msg)
if not check_passed:
raise spack.repo.UnknownPackageError(str(s.fullname))
| OptimizationCriteria |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/tests/test_registry.py | {
"start": 556,
"end": 3770
} | class ____:
"""Tests for _get_connector_type_from_registry_entry function."""
@pytest.fixture
def mock_source_entry(self):
"""Create a mock source registry entry."""
entry = Mock(spec=PolymorphicRegistryEntry)
setattr(entry, ConnectorTypePrimaryKey.SOURCE.value, "test-source-id")
return entry
@pytest.fixture
def mock_destination_entry(self):
"""Create a mock destination registry entry."""
entry = Mock(spec=PolymorphicRegistryEntry)
setattr(entry, ConnectorTypePrimaryKey.DESTINATION.value, "test-destination-id")
return entry
@pytest.fixture
def mock_invalid_entry(self):
"""Create a mock entry that is neither source nor destination."""
entry = Mock(spec=PolymorphicRegistryEntry)
return entry
@pytest.mark.parametrize(
"entry_fixture,expected_type,description",
[
("mock_source_entry", ConnectorTypes.SOURCE, "source entry"),
("mock_destination_entry", ConnectorTypes.DESTINATION, "destination entry"),
],
)
def test_get_connector_type_from_registry_entry_types(self, entry_fixture, expected_type, description, request):
"""Test connector type detection from registry entries."""
registry_entry = request.getfixturevalue(entry_fixture)
result = _get_connector_type_from_registry_entry(registry_entry)
assert result == expected_type
assert isinstance(result, ConnectorTypes)
@pytest.mark.parametrize(
"entry_fixture,expected_type,has_attribute,not_has_attribute,description",
[
(
"mock_source_entry",
ConnectorTypes.SOURCE,
ConnectorTypePrimaryKey.SOURCE.value,
ConnectorTypePrimaryKey.DESTINATION.value,
"source entry",
),
(
"mock_destination_entry",
ConnectorTypes.DESTINATION,
ConnectorTypePrimaryKey.DESTINATION.value,
ConnectorTypePrimaryKey.SOURCE.value,
"destination entry",
),
],
)
def test_get_connector_type_from_registry_entry_has_correct_attribute(
self, entry_fixture, expected_type, has_attribute, not_has_attribute, description, request
):
registry_entry = request.getfixturevalue(entry_fixture)
assert hasattr(registry_entry, has_attribute)
assert not hasattr(registry_entry, not_has_attribute)
result = _get_connector_type_from_registry_entry(registry_entry)
assert result == expected_type
def test_get_connector_type_from_registry_entry_invalid_raises_error(self, mock_invalid_entry):
"""Test that invalid entry raises ValueError."""
assert not hasattr(mock_invalid_entry, ConnectorTypePrimaryKey.SOURCE.value)
assert not hasattr(mock_invalid_entry, ConnectorTypePrimaryKey.DESTINATION.value)
with pytest.raises(ValueError) as exc_info:
_get_connector_type_from_registry_entry(mock_invalid_entry)
assert "Registry entry is not a source or destination" in str(exc_info.value)
| TestGetConnectorTypeFromRegistryEntry |
python | takluyver__flit | flit/init.py | {
"start": 1572,
"end": 4385
} | class ____:
def __init__(self, directory='.'):
self.directory = Path(directory)
self.defaults = get_defaults()
def validate_email(self, s):
# Properly validating an email address is much more complex
return bool(re.match(r'.+@.+', s)) or s == ""
def validate_homepage(self, s):
return not s or s.startswith(('http://', 'https://'))
def guess_module_name(self):
packages, modules = [], []
for p in self.directory.iterdir():
if not p.stem.isidentifier():
continue
if p.is_dir() and (p / '__init__.py').is_file():
if p.name not in {'test', 'tests'}:
packages.append(p.name)
elif p.is_file() and p.suffix == '.py':
if p.stem not in {'setup'} and not p.name.startswith('test_'):
modules.append(p.stem)
src_dir = self.directory / 'src'
if src_dir.is_dir():
for p in src_dir.iterdir():
if not p.stem.isidentifier():
continue
if p.is_dir() and (p / '__init__.py').is_file():
if p.name not in {'test', 'tests'}:
packages.append(p.name)
elif p.is_file() and p.suffix == '.py':
if p.stem not in {'setup'} and not p.name.startswith('test_'):
modules.append(p.stem)
if len(packages) == 1:
return packages[0]
elif len(packages) == 0 and len(modules) == 1:
return modules[0]
else:
return None
def update_defaults(self, author, author_email, module, home_page, license):
new_defaults = {'author': author, 'author_email': author_email,
'license': license}
name_chunk_pat = rf'\b{re.escape(module)}\b'
if re.search(name_chunk_pat, home_page):
new_defaults['home_page_template'] = \
re.sub(name_chunk_pat, '{modulename}', home_page, flags=re.I)
if any(new_defaults[k] != self.defaults.get(k) for k in new_defaults):
self.defaults.update(new_defaults)
store_defaults(self.defaults)
def write_license(self, name, author):
if (self.directory / 'LICENSE').exists():
return
year = date.today().year
license_text = (license_templates_dir / name).read_text('utf-8')
(self.directory / 'LICENSE').write_text(
license_text.format(year=year, author=author), encoding='utf-8'
)
def find_readme(self):
allowed = ("readme.md","readme.rst","readme.txt")
for fl in self.directory.glob("*.*"):
if fl.name.lower() in allowed:
return fl.name
return None
| IniterBase |
python | pypa__hatch | tests/project/test_frontend.py | {
"start": 4233,
"end": 7527
} | class ____:
@pytest.mark.parametrize(
("backend_pkg", "backend_api"),
[pytest.param(backend_pkg, backend_api, id=backend_pkg) for backend_pkg, backend_api in BACKENDS],
)
def test_standard(self, temp_dir, temp_dir_data, platform, global_application, backend_pkg, backend_api):
project_dir = temp_dir / "project"
project_dir.mkdir()
(project_dir / "pyproject.toml").write_text(
f"""\
[build-system]
requires = ["{backend_pkg}"]
build-backend = "{backend_api}"
[project]
name = "foo"
version = "9000.42"
description = "text"
"""
)
package_dir = project_dir / "foo"
package_dir.mkdir()
(package_dir / "__init__.py").touch()
project = Project(project_dir)
project.build_env = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
temp_dir_data,
temp_dir_data,
platform,
0,
global_application,
)
output_dir = temp_dir / "output"
output_dir.mkdir()
script = project.build_frontend.scripts.build_wheel(output_dir=str(output_dir), project_root=str(project_dir))
platform.check_command([sys.executable, "-c", script])
work_dir = output_dir / "work"
output = json.loads((output_dir / "output.json").read_text())
wheel_path = work_dir / output["return_val"]
assert wheel_path.is_file()
assert wheel_path.name.startswith("foo-9000.42-")
assert wheel_path.name.endswith(".whl")
@pytest.mark.parametrize(
("backend_pkg", "backend_api"),
[pytest.param(backend_pkg, backend_api, id=backend_pkg) for backend_pkg, backend_api in BACKENDS],
)
def test_editable(self, temp_dir, temp_dir_data, platform, global_application, backend_pkg, backend_api):
project_dir = temp_dir / "project"
project_dir.mkdir()
(project_dir / "pyproject.toml").write_text(
f"""\
[build-system]
requires = ["{backend_pkg}"]
build-backend = "{backend_api}"
[project]
name = "foo"
version = "9000.42"
description = "text"
"""
)
package_dir = project_dir / "foo"
package_dir.mkdir()
(package_dir / "__init__.py").touch()
project = Project(project_dir)
project.build_env = MockEnvironment(
temp_dir,
project.metadata,
"default",
project.config.envs["default"],
{},
temp_dir_data,
temp_dir_data,
platform,
0,
global_application,
)
output_dir = temp_dir / "output"
output_dir.mkdir()
script = project.build_frontend.scripts.build_wheel(
output_dir=str(output_dir), project_root=str(project_dir), editable=True
)
platform.check_command([sys.executable, "-c", script])
work_dir = output_dir / "work"
output = json.loads((output_dir / "output.json").read_text())
wheel_path = work_dir / output["return_val"]
assert wheel_path.is_file()
assert wheel_path.name.startswith("foo-9000.42-")
assert wheel_path.name.endswith(".whl")
| TestBuildWheel |
python | cython__cython | Cython/Distutils/old_build_ext.py | {
"start": 1365,
"end": 2303
} | class ____:
def __init__(self):
self.flags = (
'OPT',
'CFLAGS',
'CPPFLAGS',
'EXTRA_CFLAGS',
'BASECFLAGS',
'PY_CFLAGS',
)
self.state = sysconfig.get_config_vars(*self.flags)
self.config_vars = sysconfig.get_config_vars()
def disable_optimization(self):
"disable optimization for the C or C++ compiler"
badoptions = ('-O1', '-O2', '-O3')
for flag, option in zip(self.flags, self.state):
if option is not None:
L = [opt for opt in option.split() if opt not in badoptions]
self.config_vars[flag] = ' '.join(L)
def restore_state(self):
"restore the original state"
for flag, option in zip(self.flags, self.state):
if option is not None:
self.config_vars[flag] = option
optimization = Optimization()
| Optimization |
python | ray-project__ray | python/ray/util/client/server/server_stubs.py | {
"start": 1346,
"end": 1717
} | class ____(ClientReferenceSentinel):
def get_remote_obj(self):
global _current_server
real_ref_id = self.get_real_ref_from_server()
if real_ref_id is None:
return None
return _current_server.lookup_or_register_func(
real_ref_id, self.client_id, None
)
def identity(x):
return x
| ClientReferenceFunction |
python | sqlalchemy__sqlalchemy | test/orm/test_syntax_extensions.py | {
"start": 4225,
"end": 7677
} | class ____(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_select_post_select_clause(self):
User = self.classes.User
stmt = select(User).ext(PostSelectClause()).where(User.name == "x")
self.assert_compile(
stmt,
"SELECT POST SELECT KEYWORD users.id, users.name "
"FROM users WHERE users.name = :name_1",
)
def test_select_pre_columns_clause(self):
User = self.classes.User
stmt = select(User).ext(PreColumnsClause()).where(User.name == "x")
self.assert_compile(
stmt,
"SELECT PRE COLUMNS users.id, users.name FROM users "
"WHERE users.name = :name_1",
)
def test_select_post_criteria_clause(self):
User = self.classes.User
stmt = select(User).ext(PostCriteriaClause()).where(User.name == "x")
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"WHERE users.name = :name_1 POST CRITERIA",
)
def test_select_post_criteria_clause_multiple(self):
User = self.classes.User
stmt = (
select(User)
.ext(PostCriteriaClause())
.ext(PostCriteriaClause2())
.where(User.name == "x")
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"WHERE users.name = :name_1 POST CRITERIA 2 POST CRITERIA 2",
)
def test_select_post_select_body(self):
User = self.classes.User
stmt = select(User).ext(PostBodyClause()).where(User.name == "x")
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"WHERE users.name = :name_1 POST SELECT BODY",
)
def test_insert_post_values(self):
User = self.classes.User
self.assert_compile(
insert(User).ext(PostValuesClause()),
"INSERT INTO users (id, name) VALUES (:id, :name) POST VALUES",
)
def test_update_post_criteria(self):
User = self.classes.User
self.assert_compile(
update(User).ext(PostCriteriaClause()).where(User.name == "hi"),
"UPDATE users SET id=:id, name=:name "
"WHERE users.name = :name_1 POST CRITERIA",
)
@testing.combinations(
(lambda User: select(1).ext(PostCriteriaClauseCols(User.id)), True),
(
lambda User: select(1).ext(PostCriteriaClauseColsNoProp(User.id)),
False,
),
(
lambda User, users: users.update().ext(
PostCriteriaClauseCols(User.id)
),
True,
),
(
lambda User, users: users.delete().ext(
PostCriteriaClauseCols(User.id)
),
True,
),
(lambda User, users: users.delete(), False),
)
def test_propagate_attrs(self, stmt, expected):
User = self.classes.User
user_table = self.tables.users
stmt = testing.resolve_lambda(stmt, User=User, users=user_table)
if expected:
eq_(
stmt._propagate_attrs,
{
"compile_state_plugin": "orm",
"plugin_subject": inspect(User),
},
)
else:
eq_(stmt._propagate_attrs, {})
| TestExtensionPoints |
python | mlflow__mlflow | mlflow/models/evaluation/evaluators/regressor.py | {
"start": 376,
"end": 3342
} | class ____(BuiltInEvaluator):
"""
A built-in evaluator for regressor models.
"""
name = "regressor"
@classmethod
def can_evaluate(cls, *, model_type, evaluator_config, **kwargs):
return model_type == _ModelType.REGRESSOR
def _evaluate(
self,
model: Optional["mlflow.pyfunc.PyFuncModel"],
extra_metrics: list[EvaluationMetric],
custom_artifacts=None,
**kwargs,
) -> EvaluationResult | None:
self.y_true = self.dataset.labels_data
self.sample_weights = self.evaluator_config.get("sample_weights", None)
input_df = self.X.copy_to_avoid_mutation()
self.y_pred = self._generate_model_predictions(model, input_df)
self._compute_buildin_metrics(model)
self.evaluate_metrics(extra_metrics, prediction=self.y_pred, target=self.y_true)
self.evaluate_and_log_custom_artifacts(
custom_artifacts, prediction=self.y_pred, target=self.y_true
)
self.log_metrics()
self.log_eval_table(self.y_pred)
return EvaluationResult(
metrics=self.aggregate_metrics, artifacts=self.artifacts, run_id=self.run_id
)
def _generate_model_predictions(self, model, input_df):
if predict_fn := _extract_predict_fn(model):
preds = predict_fn(input_df)
y_pred, _, _ = _extract_output_and_other_columns(preds, self.predictions)
return y_pred
else:
return self.dataset.predictions_data
def _compute_buildin_metrics(self, model):
self._evaluate_sklearn_model_score_if_scorable(model, self.y_true, self.sample_weights)
self.metrics_values.update(
_get_aggregate_metrics_values(
_get_regressor_metrics(self.y_true, self.y_pred, self.sample_weights)
)
)
def _get_regressor_metrics(y, y_pred, sample_weights):
from mlflow.metrics.metric_definitions import _root_mean_squared_error
sum_on_target = (
(np.array(y) * np.array(sample_weights)).sum() if sample_weights is not None else sum(y)
)
return {
"example_count": len(y),
"mean_absolute_error": sk_metrics.mean_absolute_error(
y, y_pred, sample_weight=sample_weights
),
"mean_squared_error": sk_metrics.mean_squared_error(
y, y_pred, sample_weight=sample_weights
),
"root_mean_squared_error": _root_mean_squared_error(
y_true=y,
y_pred=y_pred,
sample_weight=sample_weights,
),
"sum_on_target": sum_on_target,
"mean_on_target": sum_on_target / len(y),
"r2_score": sk_metrics.r2_score(y, y_pred, sample_weight=sample_weights),
"max_error": sk_metrics.max_error(y, y_pred),
"mean_absolute_percentage_error": sk_metrics.mean_absolute_percentage_error(
y, y_pred, sample_weight=sample_weights
),
}
| RegressorEvaluator |
python | jina-ai__jina | jina/jaml/parsers/gateway/legacy.py | {
"start": 239,
"end": 2508
} | class ____(BaseLegacyParser):
"""Legacy parser for gateway."""
def parse(
self,
cls: Type['BaseGateway'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseGateway':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: gateway yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Gateway YAML parser given the syntax version number
"""
data['metas'] = {}
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
for key in {
'name',
'port',
'protocol',
'host',
'tracing',
'graph_description',
'graph_conditions',
'deployments_addresses',
'deployments_metadata',
'deployments_no_reduce',
'timeout_send',
'retries',
'compression',
'runtime_name',
'prefetch',
'meter',
'log_config',
}:
if runtime_args and not runtime_args.get(key) and data.get(key):
runtime_args[key] = data.get(key)
if runtime_args.get('default_port'):
yaml_port = data.get('port')
if isinstance(yaml_port, int):
yaml_port = [yaml_port]
runtime_args['port'] = yaml_port or runtime_args.get('port')
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
runtime_args=runtime_args,
req_handler_cls=GatewayRequestHandler
)
cls._init_from_yaml = False
obj.is_updated = False
return obj
def dump(self, data: 'BaseGateway') -> Dict:
"""
:param data: versioned gateway object
:return: the dictionary given a versioned gateway object
"""
a = {k: v for k, v in data._init_kwargs_dict.items()}
r = {}
if a:
r['with'] = a
return r
| GatewayLegacyParser |
python | django__django | tests/admin_inlines/admin.py | {
"start": 2588,
"end": 2670
} | class ____(PhotoInlineMixin, admin.StackedInline):
pass
| PhotoStackedExtra2Inline |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum6.py | {
"start": 807,
"end": 940
} | class ____(EnumWithoutValue):
x = 0
# This should generate an error because enums with values
# cannot be subclassed.
| EnumWithValue |
python | Textualize__textual | src/textual/command.py | {
"start": 14122,
"end": 14631
} | class ____(Static, inherit_css=False):
"""Widget for displaying a search icon before the command input."""
DEFAULT_CSS = """
SearchIcon {
color: #000; /* required for snapshot tests */
margin-left: 1;
margin-top: 1;
width: 2;
}
"""
icon: var[str] = var("🔎")
"""The icon to display."""
def render(self) -> VisualType:
"""Render the icon.
Returns:
The icon renderable.
"""
return self.icon
| SearchIcon |
python | huggingface__transformers | src/transformers/quantizers/quantizer_compressed_tensors.py | {
"start": 873,
"end": 5082
} | class ____(HfQuantizer):
"""
Quantizer for the compressed_tensors package. Loads and restores models to
quantized state with compressed_tensors
"""
requires_calibration = True
required_packages = ["compressed_tensors"]
def __init__(self, quantization_config: CompressedTensorsConfig, **kwargs):
super().__init__(quantization_config, **kwargs)
if not is_compressed_tensors_available():
raise ImportError(
"Using `compressed_tensors` quantized models requires the compressed-tensors library: "
"`pip install compressed-tensors`"
)
# Call post_init here to ensure proper config setup when `run_compressed`
# is provided directly via CompressedTensorsConfig, and to avoid duplicate logging.
quantization_config.post_init()
from compressed_tensors.compressors import ModelCompressor
self.compressor = ModelCompressor.from_compression_config(quantization_config)
self.run_compressed = quantization_config.run_compressed
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_compressed_tensors_available():
raise ImportError(
"Using `compressed_tensors` quantized models requires the compressed-tensors library: "
"`pip install compressed-tensors`"
)
if not is_torch_available():
# torch already should be installed as part of compressed tensors
raise ImportError("torch is required for using compressed-tensors quantization")
def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
if dtype is None:
logger.info("Loading model using torch.float16 for compressed-tensors quantization")
dtype = torch.float16
elif dtype != torch.float16:
logger.info("We suggest you to set `dtype=torch.float16` for better efficiency with compressed_tensors.")
return dtype
def _process_model_before_weight_loading(self, model, **kwargs):
from compressed_tensors.quantization import apply_quantization_config
ct_quantization_config = self.compressor.quantization_config
# Always initialize compressed wrappers to match the checkpoint
apply_quantization_config(model, ct_quantization_config, self.run_compressed)
if (
self.quantization_config.is_quantization_compressed
or self.quantization_config.is_sparsification_compressed
):
self.compressor.compress_model(model=model)
def _process_model_after_weight_loading(self, model, **kwargs):
"""Decompress loaded model if necessary - need for qat"""
if (
self.quantization_config.is_quantization_compressed and not self.run_compressed
) or self.quantization_config.is_sparsification_compressed:
self.compressor.decompress_model(model=model)
def update_tp_plan(self, config):
additional_plan = {
"layers.*.feed_forward.experts.*.gate_proj.weight": "local_colwise",
"layers.*.feed_forward.experts.*.gate_proj.weight_scale": "local_colwise",
"layers.*.feed_forward.experts.*.up_proj.weight": "local_colwise",
"layers.*.feed_forward.experts.*.up_proj.weight_scale": "local_colwise",
"layers.*.feed_forward.experts.*.down_proj.weight": "local_rowwise",
}
if config.get_text_config() is not None and config.get_text_config().base_model_tp_plan is not None:
config.get_text_config().base_model_tp_plan.update(additional_plan)
return config
@property
def is_trainable(self):
return True
def is_qat_trainable(self) -> bool:
"""Loaded Models can carry out quantization aware training"""
# models need to be decompressed carry out qat
return not self.run_compressed or not self.quantization_config.is_quantization_compressed
def is_serializable(self, safe_serialization=None) -> bool:
"""Models quantized using compressed tensors can be saved to disk"""
return True
| CompressedTensorsHfQuantizer |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 40151,
"end": 41249
} | class ____(VOWarning, ValueError):
"""Unknown datatype on a field.
The supported datatypes are::
double, float, bit, boolean, unsignedByte, short, int, long,
floatComplex, doubleComplex, char, unicodeChar
The following non-standard aliases are also supported, but in
these case :ref:`W13 <W13>` will be raised::
string -> char
unicodeString -> unicodeChar
int16 -> short
int32 -> int
int64 -> long
float32 -> float
float64 -> double
unsignedInt -> long
unsignedShort -> int
To add more datatype mappings during parsing, use the
``datatype_mapping`` keyword to `astropy.io.votable.parse`.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Unknown datatype '{}' on field '{}'"
default_args = ("x", "y")
# E07: Deprecated
| E06 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.