language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | streamlit__streamlit | lib/tests/streamlit/runtime/media_file_manager_test.py | {
"start": 17049,
"end": 33559
} | class ____(TestCase):
"""Tests for deferred callable functionality in MediaFileManager."""
def setUp(self):
super().setUp()
self.storage = MemoryMediaFileStorage("/mock/endpoint")
self.media_file_manager = MediaFileManager(self.storage)
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_add_deferred_generates_file_id(self):
"""Test that add_deferred generates a unique file_id."""
def callable1():
return b"content1"
def callable2():
return b"content2"
file_id1 = self.media_file_manager.add_deferred(
callable1, "text/plain", random_coordinates()
)
file_id2 = self.media_file_manager.add_deferred(
callable2, "text/plain", random_coordinates()
)
# File IDs should be different
assert file_id1 != file_id2
# File IDs should be non-empty
assert file_id1 != ""
assert file_id2 != ""
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_add_deferred_stores_callable(self):
"""Test that add_deferred stores the callable with metadata."""
def generate_data():
return b"test data"
file_id = self.media_file_manager.add_deferred(
generate_data, "application/pdf", random_coordinates(), file_name="test.pdf"
)
# Callable should be stored
assert file_id in self.media_file_manager._deferred_callables
deferred = self.media_file_manager._deferred_callables[file_id]
assert deferred["callable"] == generate_data
assert deferred["mimetype"] == "application/pdf"
assert deferred["filename"] == "test.pdf"
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_add_deferred_tracks_session_coordinate(self):
"""Test that add_deferred tracks session and coordinate mapping."""
coord = random_coordinates()
def generate_data():
return b"data"
file_id = self.media_file_manager.add_deferred(
generate_data, "text/plain", coord
)
# Should be tracked by session and coordinate
assert (
self.media_file_manager._files_by_session_and_coord["mock_session"][coord]
== file_id
)
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_calls_callable(self):
"""Test that execute_deferred invokes the callable."""
call_count = 0
def generate_data():
nonlocal call_count
call_count += 1
return b"data"
file_id = self.media_file_manager.add_deferred(
generate_data, "text/plain", random_coordinates()
)
assert call_count == 0
self.media_file_manager.execute_deferred(file_id)
assert call_count == 1
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_returns_url(self):
"""Test that execute_deferred returns a valid URL."""
def generate_data():
return b"test data"
file_id = self.media_file_manager.add_deferred(
generate_data, "text/plain", random_coordinates()
)
url = self.media_file_manager.execute_deferred(file_id)
assert url.startswith("/mock/endpoint/")
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_handles_str_return(self):
"""Test that execute_deferred handles string return values."""
def generate_string():
return "string data"
file_id = self.media_file_manager.add_deferred(
generate_string, "text/plain", random_coordinates()
)
url = self.media_file_manager.execute_deferred(file_id)
assert url.startswith("/mock/endpoint/")
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_handles_bytes_return(self):
"""Test that execute_deferred handles bytes return values."""
def generate_bytes():
return b"bytes data"
file_id = self.media_file_manager.add_deferred(
generate_bytes, "application/octet-stream", random_coordinates()
)
url = self.media_file_manager.execute_deferred(file_id)
assert url.startswith("/mock/endpoint/")
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_handles_bytesio_return(self):
"""Test that execute_deferred handles BytesIO return values."""
def generate_bytesio():
return io.BytesIO(b"bytesio data")
file_id = self.media_file_manager.add_deferred(
generate_bytesio, "application/octet-stream", random_coordinates()
)
url = self.media_file_manager.execute_deferred(file_id)
assert url.startswith("/mock/endpoint/")
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_handles_buffered_reader_return(self):
"""Test that execute_deferred handles BufferedReader return values."""
def generate_buffered_reader():
return io.BufferedReader(io.BytesIO(b"buffered data"))
file_id = self.media_file_manager.add_deferred(
generate_buffered_reader, "application/octet-stream", random_coordinates()
)
url = self.media_file_manager.execute_deferred(file_id)
assert url.startswith("/mock/endpoint/")
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_keeps_callable(self):
"""Test that execute_deferred keeps callable for multiple downloads."""
def generate_data():
return b"data"
file_id = self.media_file_manager.add_deferred(
generate_data, "text/plain", random_coordinates()
)
# Callable should exist before execution
assert file_id in self.media_file_manager._deferred_callables
self.media_file_manager.execute_deferred(file_id)
# Callable should still exist after execution for multiple downloads
assert file_id in self.media_file_manager._deferred_callables
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_file_not_found(self):
"""Test that execute_deferred raises error for non-existent file_id."""
with pytest.raises(MediaFileStorageError, match=r"Deferred file .* not found"):
self.media_file_manager.execute_deferred("nonexistent_id")
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_callable_raises_exception(self):
"""Test that execute_deferred propagates callable execution errors."""
def failing_callable():
raise ValueError("Test error")
file_id = self.media_file_manager.add_deferred(
failing_callable, "text/plain", random_coordinates()
)
with pytest.raises(MediaFileStorageError, match="Callable execution failed"):
self.media_file_manager.execute_deferred(file_id)
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_callable_returns_invalid_type(self):
"""Test that execute_deferred handles invalid return types."""
def invalid_callable():
return 123 # Invalid type
file_id = self.media_file_manager.add_deferred(
invalid_callable, "text/plain", random_coordinates()
)
with pytest.raises(
MediaFileStorageError, match="Callable returned unsupported type"
):
self.media_file_manager.execute_deferred(file_id)
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_multiple_times_same_callable(self):
"""Test that the same deferred callable can be executed multiple times."""
call_count = 0
def generate_data():
nonlocal call_count
call_count += 1
return f"data_{call_count}".encode()
file_id = self.media_file_manager.add_deferred(
generate_data, "text/plain", random_coordinates()
)
# First execution should work
url1 = self.media_file_manager.execute_deferred(file_id)
assert url1.startswith("/mock/endpoint/")
assert call_count == 1
# Second execution should also work (callable is kept)
url2 = self.media_file_manager.execute_deferred(file_id)
assert url2.startswith("/mock/endpoint/")
assert call_count == 2
# URLs can be different if the callable returns different data
# but both should be valid
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_clear_session_refs_and_remove_orphaned_cleans_deferred(self):
"""Test that deferred callables are cleaned up after clear_session_refs + remove_orphaned_files."""
def generate_data():
return b"data"
# Add a deferred callable
file_id = self.media_file_manager.add_deferred(
generate_data, "text/plain", random_coordinates()
)
# Callable should exist
assert file_id in self.media_file_manager._deferred_callables
assert len(self.media_file_manager._deferred_callables) == 1
# Clear session refs (doesn't immediately delete callables)
self.media_file_manager.clear_session_refs("mock_session")
# Callable should still exist (not immediately deleted to avoid race conditions)
assert file_id in self.media_file_manager._deferred_callables
assert len(self.media_file_manager._deferred_callables) == 1
# Remove orphaned files (this cleans up orphaned deferred callables)
self.media_file_manager.remove_orphaned_files()
# Now the deferred callable should be removed
assert file_id not in self.media_file_manager._deferred_callables
assert len(self.media_file_manager._deferred_callables) == 0
@mock.patch("streamlit.runtime.media_file_manager._get_session_id")
def test_remove_orphaned_only_cleans_unreferenced_deferred(
self, mock_get_session_id
):
"""Test that only truly orphaned deferred callables are removed."""
def generate_data():
return b"data"
# Add deferred callable for session 1
mock_get_session_id.return_value = "session_1"
file_id_1 = self.media_file_manager.add_deferred(
generate_data, "text/plain", random_coordinates()
)
# Add deferred callable for session 2
mock_get_session_id.return_value = "session_2"
file_id_2 = self.media_file_manager.add_deferred(
generate_data, "text/plain", random_coordinates()
)
# Both callables should exist
assert file_id_1 in self.media_file_manager._deferred_callables
assert file_id_2 in self.media_file_manager._deferred_callables
assert len(self.media_file_manager._deferred_callables) == 2
# Clear session 1 refs (doesn't immediately delete)
self.media_file_manager.clear_session_refs("session_1")
# Both callables still exist
assert file_id_1 in self.media_file_manager._deferred_callables
assert file_id_2 in self.media_file_manager._deferred_callables
# Remove orphaned files - only session 1's callable should be cleaned
self.media_file_manager.remove_orphaned_files()
assert file_id_1 not in self.media_file_manager._deferred_callables
assert file_id_2 in self.media_file_manager._deferred_callables
assert len(self.media_file_manager._deferred_callables) == 1
# Clear session 2 refs and remove orphans
self.media_file_manager.clear_session_refs("session_2")
self.media_file_manager.remove_orphaned_files()
# Now both should be removed
assert file_id_2 not in self.media_file_manager._deferred_callables
assert len(self.media_file_manager._deferred_callables) == 0
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_handles_text_io_wrapper_return(self):
"""Test that execute_deferred handles TextIOWrapper (text stream) return values."""
def generate_text_wrapper():
# Create a TextIOWrapper over BytesIO containing UTF-8 text
byte_stream = io.BytesIO(b"wrapped text")
return io.TextIOWrapper(byte_stream, encoding="utf-8")
file_id = self.media_file_manager.add_deferred(
generate_text_wrapper, "text/plain", random_coordinates()
)
url = self.media_file_manager.execute_deferred(file_id)
assert url.startswith("/mock/endpoint/")
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_infers_text_plain_for_string_when_mimetype_none(self):
"""If mimetype is None, infer text/plain for str returns."""
def generate_text():
return "hello world"
file_id = self.media_file_manager.add_deferred(
generate_text, None, random_coordinates()
)
url = self.media_file_manager.execute_deferred(file_id)
assert url.startswith("/mock/endpoint/")
assert url.endswith(".txt")
# Verify stored mimetype is text/plain
filename = url.split("/")[-1]
stored = self.storage.get_file(filename)
assert stored.mimetype == "text/plain"
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_respects_provided_mimetype_over_inferred(self):
"""Test that provided mimetype is used even when data type suggests different."""
def generate_text():
return "hello world"
# Even though data is string (would infer text/plain), use provided mimetype
file_id = self.media_file_manager.add_deferred(
generate_text, "text/csv", random_coordinates()
)
url = self.media_file_manager.execute_deferred(file_id)
assert url.startswith("/mock/endpoint/")
# Verify stored mimetype is text/csv (the provided one)
filename = url.split("/")[-1]
stored = self.storage.get_file(filename)
assert stored.mimetype == "text/csv"
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session"),
)
def test_execute_deferred_infers_octet_stream_for_bytes_when_mimetype_none(self):
"""If mimetype is None, infer application/octet-stream for bytes returns."""
def generate_bytes():
return b"binary data"
file_id = self.media_file_manager.add_deferred(
generate_bytes, None, random_coordinates()
)
url = self.media_file_manager.execute_deferred(file_id)
assert url.startswith("/mock/endpoint/")
# Verify stored mimetype is application/octet-stream
filename = url.split("/")[-1]
stored = self.storage.get_file(filename)
assert stored.mimetype == "application/octet-stream"
| MediaFileManagerDeferredTest |
python | celery__celery | t/unit/backends/test_database.py | {
"start": 2025,
"end": 8461
} | class ____:
@pytest.fixture(autouse=True)
def remmove_db(self):
yield
if os.path.exists(DB_PATH):
os.remove(DB_PATH)
def setup_method(self):
self.uri = 'sqlite:///' + DB_PATH
self.app.conf.result_serializer = 'pickle'
def test_retry_helper(self):
from celery.backends.database import DatabaseError
calls = [0]
@retry
def raises():
calls[0] += 1
raise DatabaseError(1, 2, 3)
with pytest.raises(DatabaseError):
raises(max_retries=5)
assert calls[0] == 5
def test_missing_dburi_raises_ImproperlyConfigured(self):
self.app.conf.database_url = None
with pytest.raises(ImproperlyConfigured):
DatabaseBackend(app=self.app)
def test_table_schema_config(self):
self.app.conf.database_table_schemas = {
'task': 'foo',
'group': 'bar',
}
# disable table creation because schema foo and bar do not exist
# and aren't created if they don't exist.
self.app.conf.database_create_tables_at_setup = False
tb = DatabaseBackend(self.uri, app=self.app)
assert tb.task_cls.__table__.schema == 'foo'
assert tb.task_cls.__table__.c.id.default.schema == 'foo'
assert tb.taskset_cls.__table__.schema == 'bar'
assert tb.taskset_cls.__table__.c.id.default.schema == 'bar'
def test_table_name_config(self):
self.app.conf.database_table_names = {
'task': 'foo',
'group': 'bar',
}
tb = DatabaseBackend(self.uri, app=self.app)
assert tb.task_cls.__table__.name == 'foo'
assert tb.taskset_cls.__table__.name == 'bar'
def test_table_creation_at_setup_config(self):
from sqlalchemy import inspect
self.app.conf.database_create_tables_at_setup = True
tb = DatabaseBackend(self.uri, app=self.app)
engine = tb.session_manager.get_engine(tb.url)
inspect(engine).has_table("celery_taskmeta")
inspect(engine).has_table("celery_tasksetmeta")
def test_missing_task_id_is_PENDING(self):
tb = DatabaseBackend(self.uri, app=self.app)
assert tb.get_state('xxx-does-not-exist') == states.PENDING
def test_missing_task_meta_is_dict_with_pending(self):
tb = DatabaseBackend(self.uri, app=self.app)
meta = tb.get_task_meta('xxx-does-not-exist-at-all')
assert meta['status'] == states.PENDING
assert meta['task_id'] == 'xxx-does-not-exist-at-all'
assert meta['result'] is None
assert meta['traceback'] is None
def test_mark_as_done(self):
tb = DatabaseBackend(self.uri, app=self.app)
tid = uuid()
assert tb.get_state(tid) == states.PENDING
assert tb.get_result(tid) is None
tb.mark_as_done(tid, 42)
assert tb.get_state(tid) == states.SUCCESS
assert tb.get_result(tid) == 42
def test_is_pickled(self):
tb = DatabaseBackend(self.uri, app=self.app)
tid2 = uuid()
result = {'foo': 'baz', 'bar': SomeClass(12345)}
tb.mark_as_done(tid2, result)
# is serialized properly.
rindb = tb.get_result(tid2)
assert rindb.get('foo') == 'baz'
assert rindb.get('bar').data == 12345
def test_mark_as_started(self):
tb = DatabaseBackend(self.uri, app=self.app)
tid = uuid()
tb.mark_as_started(tid)
assert tb.get_state(tid) == states.STARTED
def test_mark_as_revoked(self):
tb = DatabaseBackend(self.uri, app=self.app)
tid = uuid()
tb.mark_as_revoked(tid)
assert tb.get_state(tid) == states.REVOKED
def test_mark_as_retry(self):
tb = DatabaseBackend(self.uri, app=self.app)
tid = uuid()
try:
raise KeyError('foo')
except KeyError as exception:
import traceback
trace = '\n'.join(traceback.format_stack())
tb.mark_as_retry(tid, exception, traceback=trace)
assert tb.get_state(tid) == states.RETRY
assert isinstance(tb.get_result(tid), KeyError)
assert tb.get_traceback(tid) == trace
def test_mark_as_failure(self):
tb = DatabaseBackend(self.uri, app=self.app)
tid3 = uuid()
try:
raise KeyError('foo')
except KeyError as exception:
import traceback
trace = '\n'.join(traceback.format_stack())
tb.mark_as_failure(tid3, exception, traceback=trace)
assert tb.get_state(tid3) == states.FAILURE
assert isinstance(tb.get_result(tid3), KeyError)
assert tb.get_traceback(tid3) == trace
def test_forget(self):
tb = DatabaseBackend(self.uri, backend='memory://', app=self.app)
tid = uuid()
tb.mark_as_done(tid, {'foo': 'bar'})
tb.mark_as_done(tid, {'foo': 'bar'})
x = self.app.AsyncResult(tid, backend=tb)
x.forget()
assert x.result is None
def test_process_cleanup(self):
tb = DatabaseBackend(self.uri, app=self.app)
tb.process_cleanup()
@pytest.mark.usefixtures('depends_on_current_app')
def test_reduce(self):
tb = DatabaseBackend(self.uri, app=self.app)
assert loads(dumps(tb))
def test_save__restore__delete_group(self):
tb = DatabaseBackend(self.uri, app=self.app)
tid = uuid()
res = {'something': 'special'}
assert tb.save_group(tid, res) == res
res2 = tb.restore_group(tid)
assert res2 == res
tb.delete_group(tid)
assert tb.restore_group(tid) is None
assert tb.restore_group('xxx-nonexisting-id') is None
def test_cleanup(self):
tb = DatabaseBackend(self.uri, app=self.app)
for i in range(10):
tb.mark_as_done(uuid(), 42)
tb.save_group(uuid(), {'foo': 'bar'})
s = tb.ResultSession()
for t in s.query(Task).all():
t.date_done = datetime.now() - tb.expires * 2
for t in s.query(TaskSet).all():
t.date_done = datetime.now() - tb.expires * 2
s.commit()
s.close()
tb.cleanup()
def test_Task__repr__(self):
assert 'foo' in repr(Task('foo'))
def test_TaskSet__repr__(self):
assert 'foo', repr(TaskSet('foo' in None))
@skip.if_pypy
| test_DatabaseBackend |
python | pyinstaller__pyinstaller | PyInstaller/fake-modules/_pyi_rth_utils/_win32.py | {
"start": 1045,
"end": 1187
} | class ____(ctypes.Structure):
_fields_ = [
("User", SID_AND_ATTRIBUTES),
]
PTOKEN_USER = ctypes.POINTER(TOKEN_USER)
| TOKEN_USER |
python | marshmallow-code__marshmallow | src/marshmallow/validate.py | {
"start": 18589,
"end": 19597
} | class ____(Validator):
"""Validator which fails if ``value`` is a member of ``iterable``.
:param iterable: A sequence of invalid values.
:param error: Error message to raise in case of a validation error. Can be
interpolated using `{input}` and `{values}`.
"""
default_message = "Invalid input."
def __init__(self, iterable: typing.Iterable, *, error: str | None = None):
self.iterable = iterable
self.values_text = ", ".join(str(each) for each in self.iterable)
self.error: str = error or self.default_message
def _repr_args(self) -> str:
return f"iterable={self.iterable!r}"
def _format_error(self, value) -> str:
return self.error.format(input=value, values=self.values_text)
def __call__(self, value: typing.Any) -> typing.Any:
try:
if value in self.iterable:
raise ValidationError(self._format_error(value))
except TypeError:
pass
return value
| NoneOf |
python | numba__llvmlite | llvmlite/ir/instructions.py | {
"start": 12914,
"end": 13379
} | class ____(CompareInstr):
OPNAME = 'icmp'
VALID_OP = {
'eq': 'equal',
'ne': 'not equal',
'ugt': 'unsigned greater than',
'uge': 'unsigned greater or equal',
'ult': 'unsigned less than',
'ule': 'unsigned less or equal',
'sgt': 'signed greater than',
'sge': 'signed greater or equal',
'slt': 'signed less than',
'sle': 'signed less or equal',
}
VALID_FLAG = set()
| ICMPInstr |
python | pypa__hatch | tests/publish/plugin/test_interface.py | {
"start": 214,
"end": 1986
} | class ____:
def test_default(self, isolation):
project_config = {}
plugin_config = {}
publisher = MockPublisher(None, isolation, None, project_config, plugin_config)
assert publisher.disable is publisher.disable is False
def test_project_config(self, isolation):
project_config = {"disable": True}
plugin_config = {}
publisher = MockPublisher(None, isolation, None, project_config, plugin_config)
assert publisher.disable is True
def test_project_config_not_boolean(self, isolation):
project_config = {"disable": 9000}
plugin_config = {}
publisher = MockPublisher(None, isolation, None, project_config, plugin_config)
with pytest.raises(TypeError, match="Field `tool.hatch.publish.mock.disable` must be a boolean"):
_ = publisher.disable
def test_plugin_config(self, isolation):
project_config = {}
plugin_config = {"disable": True}
publisher = MockPublisher(None, isolation, None, project_config, plugin_config)
assert publisher.disable is True
def test_plugin_config_not_boolean(self, isolation):
project_config = {}
plugin_config = {"disable": 9000}
publisher = MockPublisher(None, isolation, None, project_config, plugin_config)
with pytest.raises(TypeError, match="Global plugin configuration `publish.mock.disable` must be a boolean"):
_ = publisher.disable
def test_project_config_overrides_plugin_config(self, isolation):
project_config = {"disable": False}
plugin_config = {"disable": True}
publisher = MockPublisher(None, isolation, None, project_config, plugin_config)
assert publisher.disable is False
| TestDisable |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-make-array-elements-zero.py | {
"start": 42,
"end": 543
} | class ____(object):
def minOperations(self, queries):
"""
:type queries: List[List[int]]
:rtype: int
"""
result = 0
for l, r in queries:
total = 0
base = i = 1
while base <= r:
nl, nr = max(l, base), min(r, 4*base-1)
if nl <= nr:
total += i*(nr-nl+1)
i += 1
base *= 4
result += (total+1)//2
return result
| Solution |
python | scrapy__scrapy | tests/test_downloadermiddleware_httpcache.py | {
"start": 25031,
"end": 25267
} | class ____(
TestBase, StorageTestMixin, DummyPolicyTestMixin
):
storage_class = "scrapy.extensions.httpcache.FilesystemCacheStorage"
policy_class = "scrapy.extensions.httpcache.DummyPolicy"
| TestFilesystemStorageWithDummyPolicy |
python | pypa__pipenv | pipenv/patched/pip/_internal/index/sources.py | {
"start": 6092,
"end": 8707
} | class ____(LinkSource):
"""``--[extra-]index-url=<path-to-directory>``.
This is treated like a remote URL; ``candidates_from_page`` contains logic
for this by appending ``index.html`` to the link.
"""
def __init__(
self,
candidates_from_page: CandidatesFromPage,
link: Link,
) -> None:
self._candidates_from_page = candidates_from_page
self._link = link
@property
def link(self) -> Optional[Link]:
return self._link
def page_candidates(self) -> FoundCandidates:
yield from self._candidates_from_page(self._link)
def file_links(self) -> FoundLinks:
return ()
def build_source(
location: str,
*,
candidates_from_page: CandidatesFromPage,
page_validator: PageValidator,
expand_dir: bool,
cache_link_parsing: bool,
project_name: str,
) -> Tuple[Optional[str], Optional[LinkSource]]:
path: Optional[str] = None
url: Optional[str] = None
if os.path.exists(location): # Is a local path.
url = path_to_url(location)
path = location
elif location.startswith("file:"): # A file: URL.
url = location
path = url_to_path(location)
elif is_url(location):
url = location
if url is None:
msg = (
"Location '%s' is ignored: "
"it is either a non-existing path or lacks a specific scheme."
)
logger.warning(msg, location)
return (None, None)
if path is None:
source: LinkSource = _RemoteFileSource(
candidates_from_page=candidates_from_page,
page_validator=page_validator,
link=Link(url, cache_link_parsing=cache_link_parsing),
)
return (url, source)
if os.path.isdir(path):
if expand_dir:
source = _FlatDirectorySource(
candidates_from_page=candidates_from_page,
path=path,
project_name=project_name,
)
else:
source = _IndexDirectorySource(
candidates_from_page=candidates_from_page,
link=Link(url, cache_link_parsing=cache_link_parsing),
)
return (url, source)
elif os.path.isfile(path):
source = _LocalFileSource(
candidates_from_page=candidates_from_page,
link=Link(url, cache_link_parsing=cache_link_parsing),
)
return (url, source)
logger.warning(
"Location '%s' is ignored: it is neither a file nor a directory.",
location,
)
return (url, None)
| _IndexDirectorySource |
python | mlflow__mlflow | mlflow/models/evaluation/artifacts.py | {
"start": 1863,
"end": 2243
} | class ____(EvaluationArtifact):
def _save(self, output_artifact_path):
with open(output_artifact_path, "w") as f:
json.dump(self._content, f)
def _load_content_from_file(self, local_artifact_path):
with open(local_artifact_path) as f:
self._content = json.load(f)
return self._content
@developer_stable
| JsonEvaluationArtifact |
python | tensorflow__tensorflow | tensorflow/python/ops/init_ops_v2_test.py | {
"start": 3357,
"end": 6993
} | class ____(InitializersTest):
@test_util.run_in_graph_and_eager_modes
def testZeros(self):
self._range_test(
init_ops_v2.Zeros(), shape=(4, 5), target_mean=0., target_max=0.)
@test_util.run_in_graph_and_eager_modes
def testZerosPartition(self):
init = init_ops_v2.Zeros()
self._partition_test(init)
@test_util.run_in_graph_and_eager_modes
def testZerosInvalidKwargs(self):
init = init_ops_v2.Zeros()
with self.assertRaisesRegex(
TypeError, r"Keyword argument should be one of .* Received: dtpye"):
init((2, 2), dtpye=dtypes.float32)
@test_util.run_in_graph_and_eager_modes
def testOnes(self):
self._range_test(
init_ops_v2.Ones(), shape=(4, 5), target_mean=1., target_max=1.)
@test_util.run_in_graph_and_eager_modes
def testOnesPartition(self):
init = init_ops_v2.Ones()
self._partition_test(init)
@test_util.run_in_graph_and_eager_modes
def testConstantInt(self):
self._range_test(
init_ops_v2.Constant(2),
shape=(5, 6, 4),
target_mean=2,
target_max=2,
target_min=2)
@test_util.run_in_graph_and_eager_modes
def testConstantPartition(self):
init = init_ops_v2.Constant([1, 2, 3, 4])
with self.assertRaisesWithLiteralMatch(
ValueError,
r"Constant initializer doesn't support partition-related arguments"):
init((4, 2), dtype=dtypes.float32, partition_shape=(2, 2))
@test_util.run_in_graph_and_eager_modes
def testConstantTuple(self):
init = init_ops_v2.constant_initializer((10, 20, 30))
tensor = init(shape=[3])
self.assertAllEqual(self.evaluate(tensor), [10, 20, 30])
self.assertEqual(tensor.shape, [3])
@test_util.run_in_graph_and_eager_modes
def testConstantInvalidValue(self):
c = constant_op.constant([1.0, 2.0, 3.0])
with self.assertRaisesRegex(TypeError,
r"Invalid type for initial value: .*Tensor.*"):
init_ops_v2.constant_initializer(c)
v = variables.Variable([3.0, 2.0, 1.0])
with self.assertRaisesRegex(
TypeError, r"Invalid type for initial value: .*Variable.*"):
init_ops_v2.constant_initializer(v)
def _testNDimConstantInitializer(self, value, shape, expected):
with test_util.use_gpu():
init = init_ops_v2.constant_initializer(value)
x = init(shape)
actual = self.evaluate(array_ops.reshape(x, [-1]))
self.assertEqual(len(actual), len(expected))
for a, e in zip(actual, expected):
self.assertEqual(a, e)
@test_util.run_in_graph_and_eager_modes
def testNDimConstantInitializer(self):
value = [0, 1, 2, 3, 4, 5]
shape = [2, 3]
expected = list(value)
self._testNDimConstantInitializer(value, shape, expected)
self._testNDimConstantInitializer(np.asarray(value), shape, expected)
self._testNDimConstantInitializer(
np.asarray(value).reshape(tuple(shape)), shape, expected)
def _testNDimConstantInitializerIncorrectNumberValues(self, value, shape):
with test_util.use_gpu():
init = init_ops_v2.constant_initializer(value)
self.assertRaises(TypeError, init, shape=shape)
@test_util.run_in_graph_and_eager_modes
def testNDimConstantInitializerIncorrectNumberValues(self):
value = [0, 1, 2, 3, 4, 5]
for shape in [[2, 4], [2, 2]]:
self._testNDimConstantInitializerIncorrectNumberValues(value, shape)
self._testNDimConstantInitializerIncorrectNumberValues(
np.asarray(value), shape)
self._testNDimConstantInitializerIncorrectNumberValues(
np.asarray(value).reshape(tuple([2, 3])), shape)
| ConstantInitializersTest |
python | tornadoweb__tornado | tornado/template.py | {
"start": 7578,
"end": 8428
} | class ____:
pass
_UNSET = _UnsetMarker()
def filter_whitespace(mode: str, text: str) -> str:
"""Transform whitespace in ``text`` according to ``mode``.
Available modes are:
* ``all``: Return all whitespace unmodified.
* ``single``: Collapse consecutive whitespace with a single whitespace
character, preserving newlines.
* ``oneline``: Collapse all runs of whitespace into a single space
character, removing all newlines in the process.
.. versionadded:: 4.3
"""
if mode == "all":
return text
elif mode == "single":
text = re.sub(r"([\t ]+)", " ", text)
text = re.sub(r"(\s*\n\s*)", "\n", text)
return text
elif mode == "oneline":
return re.sub(r"(\s+)", " ", text)
else:
raise Exception("invalid whitespace mode %s" % mode)
| _UnsetMarker |
python | ray-project__ray | python/ray/air/_internal/usage.py | {
"start": 1460,
"end": 9200
} | class ____(Enum):
TUNER = "Tuner.fit"
TRAINER = "Trainer.fit"
TUNE_RUN = "tune.run"
TUNE_RUN_EXPERIMENTS = "tune.run_experiments"
def _find_class_name(obj, allowed_module_path_prefix: str, whitelist: Set[str]):
"""Find the class name of the object. If the object is not
under `allowed_module_path_prefix` or if its class is not in the whitelist,
return "Custom".
Args:
obj: The object under inspection.
allowed_module_path_prefix: If the `obj`'s class is not under
the `allowed_module_path_prefix`, its class name will be anonymized.
whitelist: If the `obj`'s class is not in the `whitelist`,
it will be anonymized.
Returns:
The class name to be tagged with telemetry.
"""
module_path = obj.__module__
cls_name = obj.__class__.__name__
if module_path.startswith(allowed_module_path_prefix) and cls_name in whitelist:
return cls_name
else:
return "Custom"
def tag_air_trainer(trainer: "BaseTrainer"):
from ray.train.trainer import BaseTrainer
assert isinstance(trainer, BaseTrainer)
trainer_name = _find_class_name(trainer, "ray.train", AIR_TRAINERS)
record_extra_usage_tag(TagKey.AIR_TRAINER, trainer_name)
def tag_train_v2_trainer(trainer):
from ray.train.v2.api.data_parallel_trainer import DataParallelTrainer
assert isinstance(trainer, DataParallelTrainer)
trainer_name = _find_class_name(trainer, "ray.train", TRAIN_V2_TRAINERS)
record_extra_usage_tag(TagKey.TRAIN_TRAINER, trainer_name)
def tag_searcher(searcher: Union["BasicVariantGenerator", "Searcher"]):
    """Record the (possibly anonymized) search algorithm used by Tune."""
    from ray.tune.search import BasicVariantGenerator, Searcher

    if isinstance(searcher, BasicVariantGenerator):
        # Note this could be highly inflated as all train flows are treated
        # as using BasicVariantGenerator.
        record_extra_usage_tag(TagKey.TUNE_SEARCHER, "BasicVariantGenerator")
    elif isinstance(searcher, Searcher):
        searcher_name = _find_class_name(
            searcher, "ray.tune.search", TUNE_SEARCHERS.union(TUNE_SEARCHER_WRAPPERS)
        )
        if searcher_name in TUNE_SEARCHER_WRAPPERS:
            # ignore to avoid double tagging with wrapper name.
            return
        record_extra_usage_tag(TagKey.TUNE_SEARCHER, searcher_name)
    else:
        # Only the two types in the signature are expected; anything else is
        # a programming error on the caller's side.
        assert False, (
            "Not expecting a non-BasicVariantGenerator, "
            "non-Searcher type passed in for `tag_searcher`."
        )
def tag_scheduler(scheduler: "TrialScheduler"):
    """Record the (possibly anonymized) trial scheduler class used by Tune."""
    from ray.tune.schedulers import TrialScheduler

    assert isinstance(scheduler, TrialScheduler)
    # Anonymized to "Custom" unless it is a known scheduler under
    # ray.tune.schedulers.
    scheduler_name = _find_class_name(scheduler, "ray.tune.schedulers", TUNE_SCHEDULERS)
    record_extra_usage_tag(TagKey.TUNE_SCHEDULER, scheduler_name)
def tag_setup_wandb():
    """Record that the Weights & Biases integration setup was used."""
    record_extra_usage_tag(TagKey.AIR_SETUP_WANDB_INTEGRATION_USED, "1")
def tag_setup_mlflow():
    """Record that the MLflow integration setup was used."""
    record_extra_usage_tag(TagKey.AIR_SETUP_MLFLOW_INTEGRATION_USED, "1")
def _count_callbacks(callbacks: Optional[List["Callback"]]) -> Dict[str, int]:
    """Creates a map of callback class name -> count given a list of callbacks.

    Built-in callbacks are counted under their own class name; user-defined
    ones are bucketed as "CustomLoggerCallback" or "CustomCallback" depending
    on which interface they subclass.
    """
    # Lazily import the callback classes needed for classification.
    from ray.air.integrations.comet import CometLoggerCallback
    from ray.air.integrations.mlflow import MLflowLoggerCallback
    from ray.air.integrations.wandb import WandbLoggerCallback
    from ray.tune import Callback
    from ray.tune.logger import LoggerCallback
    from ray.tune.logger.aim import AimLoggerCallback
    from ray.tune.utils.callback import DEFAULT_CALLBACK_CLASSES

    built_in_callbacks = (
        WandbLoggerCallback,
        MLflowLoggerCallback,
        CometLoggerCallback,
        AimLoggerCallback,
    ) + DEFAULT_CALLBACK_CLASSES
    # Classification is by class *name*, not identity.
    callback_names = [callback_cls.__name__ for callback_cls in built_in_callbacks]
    callback_counts = collections.defaultdict(int)
    callbacks = callbacks or []
    for callback in callbacks:
        if not isinstance(callback, Callback):
            # This will error later, but don't include this as custom usage.
            continue
        callback_name = callback.__class__.__name__
        if callback_name in callback_names:
            callback_counts[callback_name] += 1
        elif isinstance(callback, LoggerCallback):
            callback_counts["CustomLoggerCallback"] += 1
        else:
            callback_counts["CustomCallback"] += 1
    return callback_counts
def tag_callbacks(callbacks: Optional[List["Callback"]]) -> bool:
    """Records built-in callback usage via a JSON str representing a
    dictionary mapping callback class name -> counts.

    User-defined callbacks will increment the count under the `CustomLoggerCallback`
    or `CustomCallback` key depending on which of the provided interfaces they subclass.

    NOTE: This will NOT track the name of the user-defined callback,
    nor its implementation.
    This will NOT report telemetry if no callbacks are provided by the user.

    Returns:
        bool: True if usage was recorded, False otherwise.
    """
    if not callbacks:
        # User didn't pass in any callbacks -> no usage recorded.
        return False
    callback_counts = _count_callbacks(callbacks)
    if callback_counts:
        callback_counts_str = json.dumps(callback_counts)
        record_extra_usage_tag(TagKey.AIR_CALLBACKS, callback_counts_str)
        # BUGFIX: documented and annotated to return bool, but this path
        # previously fell through and returned None (cf. tag_ray_air_env_vars,
        # which returns True after recording).
        return True
    return False
def tag_storage_type(storage: "StorageContext"):
    """Records the storage configuration of an experiment.

    The storage configuration is set by `RunConfig(storage_path, storage_filesystem)`.

    The possible storage types (defined by `pyarrow.fs.FileSystem.type_name`) are:
    - 'local' = pyarrow.fs.LocalFileSystem. This includes NFS usage.
    - 'mock' = pyarrow.fs._MockFileSystem. This is used for testing.
    - ('s3', 'gcs', 'abfs', 'hdfs'): Various remote storage schemes
        with default implementations in pyarrow.
    - 'custom' = All other storage schemes, which includes ALL cases where a
        custom `storage_filesystem` is provided.
    - 'other' = catches any other cases not explicitly handled above.
    """
    whitelist = {"local", "mock", "s3", "gcs", "abfs", "hdfs"}
    # A user-provided filesystem always counts as "custom", regardless of
    # its underlying type_name.
    if storage.custom_fs_provided:
        storage_config_tag = "custom"
    elif storage.storage_filesystem.type_name in whitelist:
        storage_config_tag = storage.storage_filesystem.type_name
    else:
        storage_config_tag = "other"
    record_extra_usage_tag(TagKey.AIR_STORAGE_CONFIGURATION, storage_config_tag)
def tag_ray_air_env_vars() -> bool:
    """Records usage of environment variables exposed by the Ray AIR libraries.

    NOTE: This does not track the values of the environment variables, nor
    does this track environment variables not explicitly included in the
    `all_ray_air_env_vars` allow-list.

    Returns:
        bool: True if at least one environment var is supplied by the user.
    """
    from ray.air.constants import AIR_ENV_VARS
    from ray.train.constants import TRAIN_ENV_VARS
    from ray.tune.constants import TUNE_ENV_VARS

    # Sorted to give a deterministic ordering in the recorded JSON list.
    all_ray_air_env_vars = sorted(
        set().union(AIR_ENV_VARS, TUNE_ENV_VARS, TRAIN_ENV_VARS)
    )
    user_supplied_env_vars = []
    for env_var in all_ray_air_env_vars:
        if env_var in os.environ:
            user_supplied_env_vars.append(env_var)
    if user_supplied_env_vars:
        # Only the names are recorded, never the values.
        env_vars_str = json.dumps(user_supplied_env_vars)
        record_extra_usage_tag(TagKey.AIR_ENV_VARS, env_vars_str)
        return True
    return False
def tag_air_entrypoint(entrypoint: AirEntrypoint) -> None:
    """Records the entrypoint to an AIR training run."""
    # Sanity-check that a real AirEntrypoint member was passed.
    assert entrypoint in AirEntrypoint
    record_extra_usage_tag(TagKey.AIR_ENTRYPOINT, entrypoint.value)
| AirEntrypoint |
python | neetcode-gh__leetcode | python/1288-remove-covered-intervals.py | {
"start": 0,
"end": 467
} | class ____:
def removeCoveredIntervals(self, intervals: List[List[int]]) -> int:
# sort on the basis of inc li first and then on the basis of dec length (=> -ri)
intervals.sort(key=lambda x: (x[0], -x[1]))
covered, maxri = 0, 0
for _, ri in intervals:
if ri > maxri:
maxri = ri
else:
covered += 1
return len(intervals) - covered | Solution |
python | ray-project__ray | python/ray/serve/schema.py | {
"start": 33086,
"end": 33389
class ____(str, Enum):
    """The current status of the application.

    Mixes in ``str`` so members compare and serialize as plain strings.
    """

    NOT_STARTED = "NOT_STARTED"
    DEPLOYING = "DEPLOYING"
    DEPLOY_FAILED = "DEPLOY_FAILED"
    RUNNING = "RUNNING"
    UNHEALTHY = "UNHEALTHY"
    DELETING = "DELETING"
@PublicAPI(stability="alpha")
@dataclass
| ApplicationStatus |
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 12700,
"end": 14054
class ____(greentest.TestCase):
    """Tests for the string representation of Greenlet objects.

    The ``assert_nstr_*`` helpers come from the base test case and appear to
    normalize variable parts (e.g. memory addresses) to ``X`` before
    comparing -- TODO confirm against the base class.
    """

    def test_function(self):
        # A plain function target is shown by name in the repr.
        g = gevent.Greenlet.spawn(dummy_test_func)
        self.assert_nstr_endswith(g, 'at X: dummy_test_func>')
        self.assert_greenlet_not_ready(g)
        g.join()
        # The target remains visible in the repr after the greenlet finishes.
        self.assert_greenlet_ready(g)
        self.assert_nstr_endswith(g, 'at X: dummy_test_func>')

    def test_method(self):
        # A bound-method target shows the full bound-method repr.
        g = gevent.Greenlet.spawn(A().method)
        self.assert_nstr_startswith(g, '<Greenlet at X:')
        # Accessing the name to generate a minimal_ident will cause it to be included.
        getattr(g, 'name')
        self.assert_nstr_startswith(g, '<Greenlet "Greenlet-')
        # Assigning to the name changes it
        g.name = 'Foo'
        self.assert_nstr_startswith(g, '<Greenlet "Foo"')
        self.assert_nstr_endswith(g, 'at X: <bound method A.method of <module.A object at X>>>')
        self.assert_greenlet_not_ready(g)
        g.join()
        self.assert_greenlet_ready(g)
        self.assert_nstr_endswith(g, 'at X: <bound method A.method of <module.A object at X>>>')

    def test_subclass(self):
        # Subclasses report their own class name and their _run target.
        g = Subclass()
        self.assert_nstr_startswith(g, '<Subclass ')
        self.assert_nstr_endswith(g, 'at X: _run>')
        # Constructor args/kwargs are echoed in the repr.
        g = Subclass(None, 'question', answer=42)
        self.assert_nstr_endswith(g, " at X: _run('question', answer=42)>")
| TestStr |
python | joke2k__faker | faker/providers/ssn/mt_MT/__init__.py | {
"start": 42,
"end": 404
class ____(BaseProvider):
    """
    A Faker provider for the Maltese VAT IDs
    """

    # "MT" prefix followed by eight digits; each "#" is filled with a random
    # digit by bothify().
    vat_id_formats = ("MT########",)

    def vat_id(self) -> str:
        """
        http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
        :return: A random Maltese VAT ID
        """
        return self.bothify(self.random_element(self.vat_id_formats))
| Provider |
python | spyder-ide__spyder | external-deps/spyder-remote-services/spyder_remote_services/services/files/handlers.py | {
"start": 7900,
"end": 8161
class ____(BaseFSHandler):
    """Handler that moves a filesystem path to a new destination."""

    @web.authenticated
    @authorized
    def post(self):
        """Move ``path`` to ``dest`` and write the operation result as JSON."""
        path = self.get_path_argument("path")
        dest = self.get_path_argument("dest")
        result = self.fs_move(path, dest)
        self.write_json(result)
| MoveHandler |
python | Pylons__pyramid | tests/test_path.py | {
"start": 79,
"end": 1151
class ____(unittest.TestCase):
    """Tests for ``pyramid.path.caller_path``."""

    def tearDown(self):
        # caller_path caches the caller's base directory on the module as
        # ``__abspath__`` (see test_memoization_success); remove it so the
        # tests stay independent of each other.
        from . import test_path

        if hasattr(test_path, '__abspath__'):
            del test_path.__abspath__

    def _callFUT(self, path, level=2):
        # "FUT" = function under test.
        from pyramid.path import caller_path

        return caller_path(path, level)

    def test_isabs(self):
        # Absolute paths are returned unchanged.
        result = self._callFUT('/a/b/c')
        self.assertEqual(result, '/a/b/c')

    def test_pkgrelative(self):
        import os

        # Relative paths are resolved against this module's directory
        # (``here`` is a module-level constant defined elsewhere in the file).
        result = self._callFUT('a/b/c')
        self.assertEqual(result, os.path.join(here, 'a/b/c'))

    def test_memoization_has_abspath(self):
        import os
        from . import test_path

        # A pre-set __abspath__ takes precedence over the computed location.
        test_path.__abspath__ = '/foo/bar'
        result = self._callFUT('a/b/c')
        self.assertEqual(result, os.path.join('/foo/bar', 'a/b/c'))

    def test_memoization_success(self):
        import os
        from . import test_path

        result = self._callFUT('a/b/c')
        self.assertEqual(result, os.path.join(here, 'a/b/c'))
        # The computed base path is cached on the module for later calls.
        self.assertEqual(test_path.__abspath__, here)
| TestCallerPath |
python | falconry__falcon | falcon/errors.py | {
"start": 3303,
"end": 3425
class ____(InvalidMediaType):
    """The media range contains an invalid media type and/or an invalid q value."""
| InvalidMediaRange |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-tablestore/llama_index/vector_stores/tablestore/base.py | {
"start": 509,
"end": 31015
} | class ____(BasePydanticVectorStore):
"""
Tablestore vector store.
In this vector store we store the text, its embedding and
its metadata in Tablestore.
Args:
tablestore_client (OTSClient, optional): External tablestore(ots) client.
If this parameter is set, the following endpoint/instance_name/access_key_id/access_key_secret will be ignored.
endpoint (str, optional): Tablestore instance endpoint.
instance_name (str, optional): Tablestore instance name.
access_key_id (str, optional): Aliyun access key id.
access_key_secret (str, optional): Aliyun access key secret.
table_name (str, optional): Tablestore table name.
index_name (str, optional): Tablestore SearchIndex index name.
text_field (str, optional): Name of the Tablestore field that stores the text.
vector_field (str, optional): Name of the Tablestore field that stores the embedding.
ref_doc_id_field (str, optional): Name of the Tablestore field that stores the ref doc id.
vector_dimension (int): The dimension of the embedding vectors.
vector_metric_type (VectorMetricType, optional): The similarity metric type to use.
metadata_mappings (list[FieldSchema], optional): Custom metadata mapping is used to filter non-vector fields.
See the following documentation for details:
https://help.aliyun.com/zh/tablestore/developer-reference/create-search-indexes-by-using-python-sdk
kwargs (Any): Additional arguments to pass to the tablestore(ots) client.
Returns:
TablestoreVectorStore: Vectorstore that supports add, delete, and query.
Examples:
`pip install llama-index-vector-stores-tablestore`
```python
import tablestore
# create a vector store that does not support filtering non-vector fields
vector_store = TablestoreVectorStore(
endpoint="<end_point>",
instance_name="<instance_name>",
access_key_id="<access_key_id>",
access_key_secret="<access_key_secret>",
vector_dimension=512,
)
# create a vector store that support filtering non-vector fields
vector_store_with_meta_data = TablestoreVectorStore(
endpoint="<end_point>",
instance_name="<instance_name>",
access_key_id="<access_key_id>",
access_key_secret="<access_key_secret>",
vector_dimension=512,
# optional: custom metadata mapping is used to filter non-vector fields.
metadata_mappings=[
tablestore.FieldSchema(
"type", # non-vector fields
tablestore.FieldType.KEYWORD,
index=True,
enable_sort_and_agg=True,
),
tablestore.FieldSchema(
"time", # non-vector fields
tablestore.FieldType.LONG,
index=True,
enable_sort_and_agg=True,
),
],
)
```
"""
stores_text: bool = True
_vector_dimension: int = PrivateAttr(default=512)
_logger: Any = PrivateAttr(default=None)
_tablestore_client: tablestore.OTSClient = PrivateAttr(default=None)
_table_name: str = PrivateAttr(default="llama_index_vector_store_ots_v1")
_index_name: str = PrivateAttr(default="llama_index_vector_store_ots_index_v1")
_text_field: str = PrivateAttr(default="content")
_vector_field: str = PrivateAttr(default="embedding")
_ref_doc_id_field: str = PrivateAttr(default="ref_doc_id")
_metadata_mappings: List[tablestore.FieldSchema] = PrivateAttr(default=None)
def __init__(
self,
tablestore_client: Optional[tablestore.OTSClient] = None,
endpoint: Optional[str] = None,
instance_name: Optional[str] = None,
access_key_id: Optional[str] = None,
access_key_secret: Optional[str] = None,
table_name: str = "llama_index_vector_store_ots_v1",
index_name: str = "llama_index_vector_store_ots_index_v1",
text_field: str = "content",
vector_field: str = "embedding",
ref_doc_id_field: str = "ref_doc_id",
vector_dimension: int = 512,
vector_metric_type: tablestore.VectorMetricType = tablestore.VectorMetricType.VM_COSINE,
metadata_mappings: Optional[List[tablestore.FieldSchema]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__()
self._logger = getLogger(__name__)
if not tablestore_client:
self._tablestore_client = tablestore.OTSClient(
endpoint,
access_key_id,
access_key_secret,
instance_name,
retry_policy=tablestore.WriteRetryPolicy(),
**kwargs, # pass additional arguments
)
else:
self._tablestore_client = tablestore_client
self._vector_dimension = vector_dimension
self._table_name = table_name
self._index_name = index_name
self._text_field = text_field
self._vector_field = vector_field
self._ref_doc_id_field = ref_doc_id_field
self._metadata_mappings = [
tablestore.FieldSchema(
text_field,
tablestore.FieldType.TEXT,
index=True,
enable_sort_and_agg=False,
store=False,
analyzer=tablestore.AnalyzerType.MAXWORD,
),
tablestore.FieldSchema(
ref_doc_id_field,
tablestore.FieldType.KEYWORD,
index=True,
enable_sort_and_agg=True,
store=False,
),
tablestore.FieldSchema(
vector_field,
tablestore.FieldType.VECTOR,
vector_options=tablestore.VectorOptions(
data_type=tablestore.VectorDataType.VD_FLOAT_32,
dimension=vector_dimension,
metric_type=vector_metric_type,
),
),
]
if metadata_mappings:
for mapping in metadata_mappings:
if (
mapping.field_name == text_field
or mapping.field_name == vector_field
or mapping.field_name == ref_doc_id_field
):
continue
self._metadata_mappings.append(mapping)
def create_table_if_not_exist(self) -> None:
"""Create table if not exist."""
table_list = self._tablestore_client.list_table()
if self._table_name in table_list:
self._logger.info(
"Tablestore system table[%s] already exists", self._table_name
)
return
self._logger.info(
"Tablestore system table[%s] does not exist, try to create the table.",
self._table_name,
)
schema_of_primary_key = [("id", "STRING")]
table_meta = tablestore.TableMeta(self._table_name, schema_of_primary_key)
table_options = tablestore.TableOptions()
reserved_throughput = tablestore.ReservedThroughput(
tablestore.CapacityUnit(0, 0)
)
try:
self._tablestore_client.create_table(
table_meta, table_options, reserved_throughput
)
self._logger.info(
"Tablestore create table[%s] successfully.", self._table_name
)
except tablestore.OTSClientError as e:
traceback.print_exc()
self._logger.exception(
"Tablestore create system table[%s] failed with client error, http_status:%d, error_message:%s",
self._table_name,
e.get_http_status(),
e.get_error_message(),
)
except tablestore.OTSServiceError as e:
traceback.print_exc()
self._logger.exception(
"Tablestore create system table[%s] failed with client error, http_status:%d, error_code:%s, error_message:%s, request_id:%s",
self._table_name,
e.get_http_status(),
e.get_error_code(),
e.get_error_message(),
e.get_request_id(),
)
def create_search_index_if_not_exist(self) -> None:
"""Create search index if not exist."""
search_index_list = self._tablestore_client.list_search_index(
table_name=self._table_name
)
if self._index_name in [t[1] for t in search_index_list]:
self._logger.info(
"Tablestore system index[%s] already exists", self._index_name
)
return
index_meta = tablestore.SearchIndexMeta(self._metadata_mappings)
self._tablestore_client.create_search_index(
self._table_name, self._index_name, index_meta
)
self._logger.info(
"Tablestore create system index[%s] successfully.", self._index_name
)
def delete_table_if_exists(self):
"""Delete table if exists."""
search_index_list = self._tablestore_client.list_search_index(
table_name=self._table_name
)
for resp_tuple in search_index_list:
self._tablestore_client.delete_search_index(resp_tuple[0], resp_tuple[1])
self._logger.info(
"Tablestore delete index[%s] successfully.", self._index_name
)
self._tablestore_client.delete_table(self._table_name)
self._logger.info(
"Tablestore delete system table[%s] successfully.", self._index_name
)
def delete_search_index(self, table_name, index_name) -> None:
self._tablestore_client.delete_search_index(table_name, index_name)
self._logger.info("Tablestore delete index[%s] successfully.", self._index_name)
def _write_row(
self,
row_id: str,
content: str,
embedding_vector: List[float],
metadata: Dict[str, Any],
) -> None:
primary_key = [("id", row_id)]
attribute_columns = [
(self._text_field, content),
(self._vector_field, json.dumps(embedding_vector)),
]
for k, v in metadata.items():
item = (k, v)
attribute_columns.append(item)
row = tablestore.Row(primary_key, attribute_columns)
try:
self._tablestore_client.put_row(self._table_name, row)
self._logger.debug(
"Tablestore put row successfully. id:%s, content:%s, meta_data:%s",
row_id,
content,
metadata,
)
except tablestore.OTSClientError as e:
self._logger.exception(
"Tablestore put row failed with client error:%s, id:%s, content:%s, meta_data:%s",
e,
row_id,
content,
metadata,
)
except tablestore.OTSServiceError as e:
self._logger.exception(
"Tablestore put row failed with client error:%s, id:%s, content:%s, meta_data:%s, http_status:%d, error_code:%s, error_message:%s, request_id:%s",
e,
row_id,
content,
metadata,
e.get_http_status(),
e.get_error_code(),
e.get_error_message(),
e.get_request_id(),
)
def _delete_row(self, row_id: str) -> None:
primary_key = [("id", row_id)]
try:
self._tablestore_client.delete_row(self._table_name, primary_key, None)
self._logger.info("Tablestore delete row successfully. id:%s", row_id)
except tablestore.OTSClientError as e:
self._logger.exception(
"Tablestore delete row failed with client error:%s, id:%s", e, row_id
)
except tablestore.OTSServiceError as e:
self._logger.exception(
"Tablestore delete row failed with client error:%s, id:%s, http_status:%d, error_code:%s, error_message:%s, request_id:%s",
e,
row_id,
e.get_http_status(),
e.get_error_code(),
e.get_error_message(),
e.get_request_id(),
)
def _delete_all(self) -> None:
inclusive_start_primary_key = [("id", tablestore.INF_MIN)]
exclusive_end_primary_key = [("id", tablestore.INF_MAX)]
total = 0
try:
while True:
(
consumed,
next_start_primary_key,
row_list,
next_token,
) = self._tablestore_client.get_range(
self._table_name,
tablestore.Direction.FORWARD,
inclusive_start_primary_key,
exclusive_end_primary_key,
[],
5000,
max_version=1,
)
for row in row_list:
self._tablestore_client.delete_row(
self._table_name, row.primary_key, None
)
total += 1
if next_start_primary_key is not None:
inclusive_start_primary_key = next_start_primary_key
else:
break
except tablestore.OTSClientError as e:
self._logger.exception(
"Tablestore delete row failed with client error:%s", e
)
except tablestore.OTSServiceError as e:
self._logger.exception(
"Tablestore delete row failed with client error:%s, http_status:%d, error_code:%s, error_message:%s, request_id:%s",
e,
e.get_http_status(),
e.get_error_code(),
e.get_error_message(),
e.get_request_id(),
)
self._logger.info("delete all rows count:%d", total)
def _search(
self, query: VectorStoreQuery, knn_top_k: int
) -> VectorStoreQueryResult:
filter_query = self._parse_filters(query.filters)
query_mode = query.mode
query_str = query.query_str
query_embedding = query.query_embedding
ots_text_query = tablestore.BoolQuery(
must_queries=[
filter_query,
tablestore.MatchQuery(field_name=self._text_field, text=query_str),
],
must_not_queries=[],
filter_queries=[],
should_queries=[],
)
ots_vector_query = tablestore.KnnVectorQuery(
field_name=self._vector_field,
top_k=knn_top_k,
float32_query_vector=query_embedding,
filter=filter_query,
)
if query_mode == VectorStoreQueryMode.HYBRID:
if query_str is None:
raise ValueError("query_str cannot be None")
ots_query = tablestore.BoolQuery(
must_queries=[],
must_not_queries=[],
filter_queries=[],
should_queries=[
ots_text_query,
ots_vector_query,
],
minimum_should_match=1,
)
elif query_mode == VectorStoreQueryMode.TEXT_SEARCH:
if query_str is None:
raise ValueError("query_str cannot be None")
ots_query = ots_text_query
else:
ots_query = ots_vector_query
sort = tablestore.Sort(
sorters=[tablestore.ScoreSort(sort_order=tablestore.SortOrder.DESC)]
)
search_query = tablestore.SearchQuery(
ots_query, limit=query.similarity_top_k, get_total_count=False, sort=sort
)
try:
search_response = self._tablestore_client.search(
table_name=self._table_name,
index_name=self._index_name,
search_query=search_query,
columns_to_get=tablestore.ColumnsToGet(
return_type=tablestore.ColumnReturnType.ALL
),
)
self._logger.info(
"Tablestore search successfully. request_id:%s",
search_response.request_id,
)
return self._to_query_result(search_response)
except tablestore.OTSClientError as e:
self._logger.exception("Tablestore search failed with client error:%s", e)
except tablestore.OTSServiceError as e:
self._logger.exception(
"Tablestore search failed with client error:%s, http_status:%d, error_code:%s, error_message:%s, request_id:%s",
e,
e.get_http_status(),
e.get_error_code(),
e.get_error_message(),
e.get_request_id(),
)
def _filter(
    self,
    filters: Optional[MetadataFilters] = None,
    return_type: Optional[
        tablestore.ColumnReturnType
    ] = tablestore.ColumnReturnType.ALL,
    limit: Optional[int] = 100,
) -> List:
    """Return every index row matching ``filters``, paging via next_token.

    Args:
        filters: Metadata filters to apply; ``None`` yields an empty list.
        return_type: Which columns to fetch for each matching row.
        limit: Page size used for each search request.

    Returns:
        A list of matching rows. If a search error occurs mid-pagination it
        is logged (not raised) and the rows collected so far are returned.
    """
    if filters is None:
        return []
    filter_query = self._parse_filters(filters)
    # BUGFIX: the ``limit`` parameter was previously ignored -- the page size
    # was hard-coded to 1, issuing one search request per matching row.
    search_query = tablestore.SearchQuery(
        filter_query, limit=limit, get_total_count=False
    )
    all_rows = []
    try:
        # First page.
        search_response = self._tablestore_client.search(
            table_name=self._table_name,
            index_name=self._index_name,
            search_query=search_query,
            columns_to_get=tablestore.ColumnsToGet(return_type=return_type),
        )
        all_rows.extend(search_response.rows)
        # Follow-up pages until the server stops returning a next_token.
        while search_response.next_token:
            search_query.next_token = search_response.next_token
            search_response = self._tablestore_client.search(
                table_name=self._table_name,
                index_name=self._index_name,
                search_query=search_query,
                columns_to_get=tablestore.ColumnsToGet(return_type=return_type),
            )
            all_rows.extend(search_response.rows)
    except tablestore.OTSClientError as e:
        self._logger.exception("Tablestore search failed with client error:%s", e)
    except tablestore.OTSServiceError as e:
        self._logger.exception(
            "Tablestore search failed with client error:%s, http_status:%d, error_code:%s, error_message:%s, request_id:%s",
            e,
            e.get_http_status(),
            e.get_error_code(),
            e.get_error_message(),
            e.get_request_id(),
        )
    # BUGFIX: the error paths previously returned None implicitly even though
    # the signature declares List and callers iterate the result.
    return all_rows
def _to_get_nodes_result(self, rows) -> List[TextNode]:
nodes = []
for row in rows:
node_id = row[0][0][1]
meta_data = {}
text = None
embedding = None
for col in row[1]:
key = col[0]
val = col[1]
if key == self._text_field:
text = val
continue
if key == self._vector_field:
embedding = json.loads(val)
continue
meta_data[key] = val
node = TextNode(
id_=node_id,
text=text,
metadata=meta_data,
embedding=embedding,
)
nodes.append(node)
return nodes
def _get_row(self, row_id: str) -> Optional[TextNode]:
primary_key = [("id", row_id)]
try:
_, row, _ = self._tablestore_client.get_row(
self._table_name, primary_key, None, None, 1
)
self._logger.debug("Tablestore get row successfully. id:%s", row_id)
if row is None:
return None
node_id = row.primary_key[0][1]
meta_data = {}
text = None
embedding = None
for col in row.attribute_columns:
key = col[0]
val = col[1]
if key == self._text_field:
text = val
continue
if key == self._vector_field:
embedding = json.loads(val)
continue
meta_data[key] = val
return TextNode(
id_=node_id,
text=text,
metadata=meta_data,
embedding=embedding,
)
except tablestore.OTSClientError as e:
self._logger.exception(
"Tablestore get row failed with client error:%s, id:%s", e, row_id
)
except tablestore.OTSServiceError as e:
self._logger.exception(
"Tablestore get row failed with client error:%s, "
"id:%s, http_status:%d, error_code:%s, error_message:%s, request_id:%s",
e,
row_id,
e.get_http_status(),
e.get_error_code(),
e.get_error_message(),
e.get_request_id(),
)
def _to_query_result(self, search_response) -> VectorStoreQueryResult:
nodes = []
ids = []
similarities = []
for hit in search_response.search_hits:
row = hit.row
score = hit.score
node_id = row[0][0][1]
meta_data = {}
text = None
embedding = None
for col in row[1]:
key = col[0]
val = col[1]
if key == self._text_field:
text = val
continue
if key == self._vector_field:
embedding = json.loads(val)
continue
meta_data[key] = val
node = TextNode(
id_=node_id,
text=text,
metadata=meta_data,
embedding=embedding,
)
ids.append(node_id)
nodes.append(node)
similarities.append(score)
return VectorStoreQueryResult(nodes=nodes, ids=ids, similarities=similarities)
def _parse_filters_recursively(
self, filters: MetadataFilters
) -> tablestore.BoolQuery:
"""Parse (possibly nested) MetadataFilters to equivalent tablestore search expression."""
bool_query = tablestore.BoolQuery(
must_queries=[],
must_not_queries=[],
filter_queries=[],
should_queries=[],
minimum_should_match=None,
)
if filters.condition is FilterCondition.AND:
bool_clause = bool_query.must_queries
elif filters.condition is FilterCondition.OR:
bool_clause = bool_query.should_queries
else:
raise ValueError(f"Unsupported filter condition: {filters.condition}")
for filter_item in filters.filters:
if isinstance(filter_item, MetadataFilter):
bool_clause.append(self._parse_filter(filter_item))
elif isinstance(filter_item, MetadataFilters):
bool_clause.append(self._parse_filters_recursively(filter_item))
else:
raise ValueError(f"Unsupported filter type: {type(filter_item)}")
return bool_query
def _parse_filters(self, filters: Optional[MetadataFilters]) -> tablestore.Query:
    """Parse MetadataFilters into the equivalent Tablestore search query."""
    # No filters means "match every row".
    if filters is None:
        return tablestore.MatchAllQuery()
    return self._parse_filters_recursively(filters=filters)
@staticmethod
def _parse_filter(filter_item: MetadataFilter) -> tablestore.Query:
    """Translate a single ``MetadataFilter`` into a Tablestore query object.

    Raises:
        ValueError: If the filter operator has no Tablestore equivalent.
    """
    key = filter_item.key
    val = filter_item.value
    op = filter_item.operator
    if op == FilterOperator.EQ:
        # Exact match on a single value.
        return tablestore.TermQuery(field_name=key, column_value=val)
    elif op == FilterOperator.GT:
        return tablestore.RangeQuery(
            field_name=key, range_from=val, include_lower=False
        )
    elif op == FilterOperator.GTE:
        return tablestore.RangeQuery(
            field_name=key, range_from=val, include_lower=True
        )
    elif op == FilterOperator.LT:
        return tablestore.RangeQuery(
            field_name=key, range_to=val, include_upper=False
        )
    elif op == FilterOperator.LTE:
        return tablestore.RangeQuery(
            field_name=key, range_to=val, include_upper=True
        )
    elif op == FilterOperator.NE:
        # "not equal" is expressed as a bool query with a must_not clause.
        bq = tablestore.BoolQuery(
            must_queries=[],
            must_not_queries=[],
            filter_queries=[],
            should_queries=[],
            minimum_should_match=None,
        )
        bq.must_not_queries.append(
            tablestore.TermQuery(field_name=key, column_value=val)
        )
        return bq
    elif op in [FilterOperator.IN, FilterOperator.ANY]:
        # Match any one of the listed values.
        return tablestore.TermsQuery(field_name=key, column_values=val)
    elif op == FilterOperator.NIN:
        # "not in" = must_not match any of the listed values.
        bq = tablestore.BoolQuery(
            must_queries=[],
            must_not_queries=[],
            filter_queries=[],
            should_queries=[],
            minimum_should_match=None,
        )
        bq.must_not_queries.append(
            tablestore.TermsQuery(field_name=key, column_values=val)
        )
        return bq
    elif op == FilterOperator.ALL:
        # Every listed value must match (one TermQuery per value).
        bq = tablestore.BoolQuery(
            must_queries=[],
            must_not_queries=[],
            filter_queries=[],
            should_queries=[],
            minimum_should_match=None,
        )
        for val_item in val:
            bq.must_queries.append(
                tablestore.TermQuery(field_name=key, column_value=val_item)
            )
        return bq
    elif op == FilterOperator.TEXT_MATCH:
        return tablestore.MatchQuery(field_name=key, text=val)
    elif op == FilterOperator.CONTAINS:
        # Substring containment via a wildcard pattern.
        return tablestore.WildcardQuery(field_name=key, value=f"*{val}*")
    else:
        raise ValueError(f"Unsupported filter operator: {filter_item.operator}")
@property
def client(self) -> Any:
"""Get client."""
return self._tablestore_client
def add(self, nodes: List[BaseNode], **kwargs: Any) -> List[str]:
"""Add nodes to vector store."""
if len(nodes) == 0:
return []
ids = []
for node in nodes:
if len(node.get_embedding()) != self._vector_dimension:
raise RuntimeError(
"node embedding size:%d is not the same as vector store dim:%d"
% (len(node.get_embedding()), self._vector_dimension)
)
self._write_row(
row_id=node.node_id,
content=node.text,
embedding_vector=node.get_embedding(),
metadata=node.metadata,
)
ids.append(node.node_id)
return ids
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""Delete nodes from vector store."""
if node_ids is None and filters is None:
raise RuntimeError("node_ids and filters cannot be None at the same time.")
if node_ids is not None and filters is not None:
raise RuntimeError("node_ids and filters cannot be set at the same time.")
if filters is not None:
rows = self._filter(
filters=filters, return_type=tablestore.ColumnReturnType.NONE
)
for row in rows:
self._delete_row(row[0][0][1])
if node_ids is not None:
for node_id in node_ids:
self._delete_row(node_id)
def get_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""Get nodes from vector store."""
if node_ids is None and filters is None:
raise RuntimeError("node_ids and filters cannot be None at the same time.")
if node_ids is not None and filters is not None:
raise RuntimeError("node_ids and filters cannot be set at the same time.")
if filters is not None:
rows = self._filter(
filters=filters, return_type=tablestore.ColumnReturnType.ALL
)
return self._to_get_nodes_result(rows)
if node_ids is not None:
nodes = []
for node_id in node_ids:
nodes.append(self._get_row(node_id))
return nodes
return []
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes using with ref_doc_id."""
rows = self._filter(
filters=MetadataFilters(
filters=[
MetadataFilter(
key=self._ref_doc_id_field,
value=ref_doc_id,
operator=FilterOperator.EQ,
),
],
condition=FilterCondition.AND,
),
return_type=tablestore.ColumnReturnType.NONE,
)
for row in rows:
self._delete_row(row[0][0][1])
def clear(self) -> None:
"""Clear all nodes from configured vector store."""
self._delete_all()
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store."""
knn_top_k = query.similarity_top_k
if "knn_top_k" in kwargs:
knn_top_k = kwargs["knn_top_k"]
return self._search(query=query, knn_top_k=knn_top_k)
| TablestoreVectorStore |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/_settings.py | {
"start": 20584,
"end": 47718
} | class ____(metaclass=settingsMeta):
"""
A settings object controls the following aspects of test behavior:
|~settings.max_examples|, |~settings.derandomize|, |~settings.database|,
|~settings.verbosity|, |~settings.phases|, |~settings.stateful_step_count|,
|~settings.report_multiple_bugs|, |~settings.suppress_health_check|,
|~settings.deadline|, |~settings.print_blob|, and |~settings.backend|.
A settings object can be applied as a decorator to a test function, in which
case that test function will use those settings. A test may only have one
settings object applied to it. A settings object can also be passed to
|settings.register_profile| or as a parent to another |settings|.
Attribute inheritance
---------------------
Settings objects are immutable once created. When a settings object is created,
it uses the value specified for each attribute. Any attribute which is
not specified will inherit from its value in the ``parent`` settings object.
If ``parent`` is not passed, any attributes which are not specified will inherit
from the current settings profile instead.
For instance, ``settings(max_examples=10)`` will have a ``max_examples`` of ``10``,
and the value of all other attributes will be equal to its value in the
current settings profile.
Changes made from activating a new settings profile with |settings.load_profile|
will be reflected in settings objects created after the profile was loaded,
but not in existing settings objects.
.. _builtin-profiles:
Built-in profiles
-----------------
While you can register additional profiles with |settings.register_profile|,
Hypothesis comes with two built-in profiles: ``default`` and ``ci``.
By default, the ``default`` profile is active. If the ``CI`` environment
variable is set to any value, the ``ci`` profile is active by default. Hypothesis
also automatically detects various vendor-specific CI environment variables.
The attributes of the currently active settings profile can be retrieved with
``settings()`` (so ``settings().max_examples`` is the currently active default
for |settings.max_examples|).
The settings attributes for the built-in profiles are as follows:
.. code-block:: python
default = settings.register_profile(
"default",
max_examples=100,
derandomize=False,
database=not_set, # see settings.database for the default database
verbosity=Verbosity.normal,
phases=tuple(Phase),
stateful_step_count=50,
report_multiple_bugs=True,
suppress_health_check=(),
deadline=duration(milliseconds=200),
print_blob=False,
backend="hypothesis",
)
ci = settings.register_profile(
"ci",
parent=default,
derandomize=True,
deadline=None,
database=None,
print_blob=True,
suppress_health_check=[HealthCheck.too_slow],
)
You can replace either of the built-in profiles with |settings.register_profile|:
.. code-block:: python
# run more examples in CI
settings.register_profile(
"ci",
settings.get_profile("ci"),
max_examples=1000,
)
"""
_profiles: ClassVar[dict[str, "settings"]] = {}
_current_profile: ClassVar[str | None] = None
def __init__(
self,
parent: Optional["settings"] = None,
*,
# This looks pretty strange, but there's good reason: we want Mypy to detect
# bad calls downstream, but not to freak out about the `= not_set` part even
# though it's not semantically valid to pass that as an argument value.
# The intended use is "like **kwargs, but more tractable for tooling".
max_examples: int = not_set, # type: ignore
derandomize: bool = not_set, # type: ignore
database: Optional["ExampleDatabase"] = not_set, # type: ignore
verbosity: "Verbosity" = not_set, # type: ignore
phases: Collection["Phase"] = not_set, # type: ignore
stateful_step_count: int = not_set, # type: ignore
report_multiple_bugs: bool = not_set, # type: ignore
suppress_health_check: Collection["HealthCheck"] = not_set, # type: ignore
deadline: int | float | datetime.timedelta | None = not_set, # type: ignore
print_blob: bool = not_set, # type: ignore
backend: str = not_set, # type: ignore
) -> None:
self._in_definition = True
if parent is not None:
check_type(settings, parent, "parent")
if derandomize not in (not_set, False):
if database not in (not_set, None): # type: ignore
raise InvalidArgument(
"derandomize=True implies database=None, so passing "
f"{database=} too is invalid."
)
database = None
# fallback is None if we're creating the default settings object, and
# the parent (or default settings object) otherwise
self._fallback = parent or settings.default
self._max_examples = (
self._fallback.max_examples # type: ignore
if max_examples is not_set # type: ignore
else _validate_max_examples(max_examples)
)
self._derandomize = (
self._fallback.derandomize # type: ignore
if derandomize is not_set # type: ignore
else _validate_choices("derandomize", derandomize, choices=[True, False])
)
if database is not not_set: # type: ignore
database = _validate_database(database)
self._database = database
self._cached_database = None
self._verbosity = (
self._fallback.verbosity # type: ignore
if verbosity is not_set # type: ignore
else _validate_enum_value(Verbosity, verbosity, name="verbosity")
)
self._phases = (
self._fallback.phases # type: ignore
if phases is not_set # type: ignore
else _validate_phases(phases)
)
self._stateful_step_count = (
self._fallback.stateful_step_count # type: ignore
if stateful_step_count is not_set # type: ignore
else _validate_stateful_step_count(stateful_step_count)
)
self._report_multiple_bugs = (
self._fallback.report_multiple_bugs # type: ignore
if report_multiple_bugs is not_set # type: ignore
else _validate_choices(
"report_multiple_bugs", report_multiple_bugs, choices=[True, False]
)
)
self._suppress_health_check = (
self._fallback.suppress_health_check # type: ignore
if suppress_health_check is not_set # type: ignore
else _validate_suppress_health_check(suppress_health_check)
)
self._deadline = (
self._fallback.deadline # type: ignore
if deadline is not_set # type: ignore
else _validate_deadline(deadline)
)
self._print_blob = (
self._fallback.print_blob # type: ignore
if print_blob is not_set # type: ignore
else _validate_choices("print_blob", print_blob, choices=[True, False])
)
self._backend = (
self._fallback.backend # type: ignore
if backend is not_set # type: ignore
else _validate_backend(backend)
)
self._in_definition = False
@property
def max_examples(self):
"""
Once this many satisfying examples have been considered without finding any
counter-example, Hypothesis will stop looking.
Note that we might call your test function fewer times if we find a bug early
or can tell that we've exhausted the search space; or more if we discard some
examples due to use of .filter(), assume(), or a few other things that can
prevent the test case from completing successfully.
The default value is chosen to suit a workflow where the test will be part of
a suite that is regularly executed locally or on a CI server, balancing total
running time against the chance of missing a bug.
If you are writing one-off tests, running tens of thousands of examples is
quite reasonable as Hypothesis may miss uncommon bugs with default settings.
For very complex code, we have observed Hypothesis finding novel bugs after
*several million* examples while testing :pypi:`SymPy <sympy>`.
If you are running more than 100k examples for a test, consider using our
:ref:`integration for coverage-guided fuzzing <fuzz_one_input>` - it really
shines when given minutes or hours to run.
The default max examples is ``100``.
"""
return self._max_examples
@property
def derandomize(self):
"""
If True, seed Hypothesis' random number generator using a hash of the test
function, so that every run will test the same set of examples until you
update Hypothesis, Python, or the test function.
This allows you to `check for regressions and look for bugs
<https://blog.nelhage.com/post/two-kinds-of-testing/>`__ using separate
settings profiles - for example running
quick deterministic tests on every commit, and a longer non-deterministic
nightly testing run.
The default is ``False``. If running on CI, the default is ``True`` instead.
"""
return self._derandomize
@property
def database(self):
"""
An instance of |ExampleDatabase| that will be used to save examples to
and load previous examples from.
If not set, a |DirectoryBasedExampleDatabase| is created in the current
working directory under ``.hypothesis/examples``. If this location is
unusable, e.g. due to the lack of read or write permissions, Hypothesis
will emit a warning and fall back to an |InMemoryExampleDatabase|.
If ``None``, no storage will be used.
See the :ref:`database documentation <database>` for a list of database
classes, and how to define custom database classes.
"""
from hypothesis.database import _db_for_path
# settings.database has two conflicting requirements:
# * The default settings should respect changes to set_hypothesis_home_dir
# in-between accesses
# * `s.database is s.database` should be true, except for the default settings
#
# We therefore cache s.database for everything except the default settings,
# which always recomputes dynamically.
if self._fallback is None:
# if self._fallback is None, we are the default settings, at which point
# we should recompute the database dynamically
assert self._database is not_set
return _db_for_path(not_set)
# otherwise, we cache the database
if self._cached_database is None:
self._cached_database = (
self._fallback.database if self._database is not_set else self._database
)
return self._cached_database
@property
def verbosity(self):
"""
Control the verbosity level of Hypothesis messages.
To see what's going on while Hypothesis runs your tests, you can turn
up the verbosity setting.
.. code-block:: pycon
>>> from hypothesis import settings, Verbosity
>>> from hypothesis.strategies import lists, integers
>>> @given(lists(integers()))
... @settings(verbosity=Verbosity.verbose)
... def f(x):
... assert not any(x)
... f()
Trying example: []
Falsifying example: [-1198601713, -67, 116, -29578]
Shrunk example to [-1198601713]
Shrunk example to [-128]
Shrunk example to [32]
Shrunk example to [1]
[1]
The four levels are |Verbosity.quiet|, |Verbosity.normal|,
|Verbosity.verbose|, and |Verbosity.debug|. |Verbosity.normal| is the
default. For |Verbosity.quiet|, Hypothesis will not print anything out,
not even the final falsifying example. |Verbosity.debug| is basically
|Verbosity.verbose| but a bit more so. You probably don't want it.
Verbosity can be passed either as a |Verbosity| enum value, or as the
corresponding string value, or as the corresponding integer value. For
example:
.. code-block:: python
# these three are equivalent
settings(verbosity=Verbosity.verbose)
settings(verbosity="verbose")
If you are using :pypi:`pytest`, you may also need to :doc:`disable
output capturing for passing tests <pytest:how-to/capture-stdout-stderr>`
to see verbose output as tests run.
"""
return self._verbosity
@property
def phases(self):
"""
Control which phases should be run.
Hypothesis divides tests into logically distinct phases.
- |Phase.explicit|: Running explicit examples from |@example|.
- |Phase.reuse|: Running examples from the database which previously failed.
- |Phase.generate|: Generating new random examples.
- |Phase.target|: Mutating examples for :ref:`targeted property-based
testing <targeted>`. Requires |Phase.generate|.
- |Phase.shrink|: Shrinking failing examples.
- |Phase.explain|: Attempting to explain why a failure occurred.
Requires |Phase.shrink|.
The phases argument accepts a collection with any subset of these. E.g.
``settings(phases=[Phase.generate, Phase.shrink])`` will generate new examples
and shrink them, but will not run explicit examples or reuse previous failures,
while ``settings(phases=[Phase.explicit])`` will only run explicit examples
from |@example|.
Phases can be passed either as a |Phase| enum value, or as the corresponding
string value. For example:
.. code-block:: python
# these two are equivalent
settings(phases=[Phase.explicit])
settings(phases=["explicit"])
Following the first failure, Hypothesis will (usually, depending on
which |Phase| is enabled) track which lines of code are always run on
failing but never on passing inputs. On 3.12+, this uses
:mod:`sys.monitoring`, while 3.11 and earlier uses :func:`python:sys.settrace`.
For python 3.11 and earlier, we therefore automatically disable the explain
phase on PyPy, or if you are using :pypi:`coverage` or a debugger. If
there are no clearly suspicious lines of code, :pep:`we refuse the
temptation to guess <20>`.
After shrinking to a minimal failing example, Hypothesis will try to find
parts of the example -- e.g. separate args to |@given|
-- which can vary freely without changing the result
of that minimal failing example. If the automated experiments run without
finding a passing variation, we leave a comment in the final report:
.. code-block:: python
test_x_divided_by_y(
x=0, # or any other generated value
y=0,
)
Just remember that the *lack* of an explanation sometimes just means that
Hypothesis couldn't efficiently find one, not that no explanation (or
simpler failing example) exists.
"""
return self._phases
@property
def stateful_step_count(self):
"""
The maximum number of times to call an additional |@rule| method in
:ref:`stateful testing <stateful>` before we give up on finding a bug.
Note that this setting is effectively multiplicative with max_examples,
as each example will run for a maximum of ``stateful_step_count`` steps.
The default stateful step count is ``50``.
"""
return self._stateful_step_count
@property
def report_multiple_bugs(self):
"""
Because Hypothesis runs the test many times, it can sometimes find multiple
bugs in a single run. Reporting all of them at once is usually very useful,
but replacing the exceptions can occasionally clash with debuggers.
If disabled, only the exception with the smallest minimal example is raised.
The default value is ``True``.
"""
return self._report_multiple_bugs
@property
def suppress_health_check(self):
"""
Suppress the given |HealthCheck| exceptions. Those health checks will not
be raised by Hypothesis. To suppress all health checks, you can pass
``suppress_health_check=list(HealthCheck)``.
Health checks can be passed either as a |HealthCheck| enum value, or as
the corresponding string value. For example:
.. code-block:: python
# these two are equivalent
settings(suppress_health_check=[HealthCheck.filter_too_much])
settings(suppress_health_check=["filter_too_much"])
Health checks are proactive warnings, not correctness errors, so we
encourage suppressing health checks where you have evaluated they will
not pose a problem, or where you have evaluated that fixing the underlying
issue is not worthwhile.
.. seealso::
See also the :doc:`/how-to/suppress-healthchecks` how-to.
"""
return self._suppress_health_check
@property
def deadline(self):
"""
The maximum allowed duration of an individual test case, in milliseconds.
You can pass an integer, float, or timedelta. If ``None``, the deadline
is disabled entirely.
We treat the deadline as a soft limit in some cases, where that would
avoid flakiness due to timing variability.
The default deadline is 200 milliseconds. If running on CI, the default is
``None`` instead.
"""
return self._deadline
@property
def print_blob(self):
"""
If set to ``True``, Hypothesis will print code for failing examples that
can be used with |@reproduce_failure| to reproduce the failing example.
The default value is ``False``. If running on CI, the default is ``True`` instead.
"""
return self._print_blob
@property
def backend(self):
"""
.. warning::
EXPERIMENTAL AND UNSTABLE - see :ref:`alternative-backends`.
The importable name of a backend which Hypothesis should use to generate
primitive types. We support heuristic-random, solver-based, and fuzzing-based
backends.
"""
return self._backend
def __call__(self, test: T) -> T:
"""Make the settings object (self) an attribute of the test.
The settings are later discovered by looking them up on the test itself.
"""
# Aliasing as Any avoids mypy errors (attr-defined) when accessing and
# setting custom attributes on the decorated function or class.
_test: Any = test
# Using the alias here avoids a mypy error (return-value) later when
# ``test`` is returned, because this check results in type refinement.
if not callable(_test):
raise InvalidArgument(
"settings objects can be called as a decorator with @given, "
f"but decorated {test=} is not callable."
)
if inspect.isclass(test):
from hypothesis.stateful import RuleBasedStateMachine
if issubclass(_test, RuleBasedStateMachine):
attr_name = "_hypothesis_internal_settings_applied"
if getattr(test, attr_name, False):
raise InvalidArgument(
"Applying the @settings decorator twice would "
"overwrite the first version; merge their arguments "
"instead."
)
setattr(test, attr_name, True)
_test.TestCase.settings = self
return test
else:
raise InvalidArgument(
"@settings(...) can only be used as a decorator on "
"functions, or on subclasses of RuleBasedStateMachine."
)
if hasattr(_test, "_hypothesis_internal_settings_applied"):
# Can't use _hypothesis_internal_use_settings as an indicator that
# @settings was applied, because @given also assigns that attribute.
descr = get_pretty_function_description(test)
raise InvalidArgument(
f"{descr} has already been decorated with a settings object.\n"
f" Previous: {_test._hypothesis_internal_use_settings!r}\n"
f" This: {self!r}"
)
_test._hypothesis_internal_use_settings = self
_test._hypothesis_internal_settings_applied = True
return test
def __setattr__(self, name: str, value: object) -> None:
if not name.startswith("_") and not self._in_definition:
raise AttributeError("settings objects are immutable")
return super().__setattr__(name, value)
def __repr__(self) -> str:
bits = sorted(
f"{name}={getattr(self, name)!r}"
for name in all_settings
if (name != "backend" or len(AVAILABLE_PROVIDERS) > 1) # experimental
)
return "settings({})".format(", ".join(bits))
def show_changed(self) -> str:
bits = []
for name in all_settings:
value = getattr(self, name)
if value != getattr(default, name):
bits.append(f"{name}={value!r}")
return ", ".join(sorted(bits, key=len))
@staticmethod
def register_profile(
name: str,
parent: Optional["settings"] = None,
**kwargs: Any,
) -> None:
"""
Register a settings object as a settings profile, under the name ``name``.
The ``parent`` and ``kwargs`` arguments to this method are as for
|settings|.
If a settings profile already exists under ``name``, it will be overwritten.
Registering a profile with the same name as the currently active profile
will cause those changes to take effect in the active profile immediately,
and do not require reloading the profile.
Registered settings profiles can be retrieved later by name with
|settings.get_profile|.
"""
check_type(str, name, "name")
if (
default_variable.value
and settings._current_profile
and default_variable.value != settings._profiles[settings._current_profile]
):
note_deprecation(
"Cannot register a settings profile when the current settings differ "
"from the current profile (usually due to an @settings decorator). "
"Register profiles at module level instead.",
since="2025-11-15",
has_codemod=False,
)
# if we just pass the parent and no kwargs, like
# settings.register_profile(settings(max_examples=10))
# then optimize out the pointless intermediate settings object which
# would just forward everything to the parent.
settings._profiles[name] = (
parent
if parent is not None and not kwargs
else settings(parent=parent, **kwargs)
)
if settings._current_profile == name:
settings.load_profile(name)
@staticmethod
def get_profile(name: str) -> "settings":
"""
Returns the settings profile registered under ``name``. If no settings
profile is registered under ``name``, raises |InvalidArgument|.
"""
check_type(str, name, "name")
try:
return settings._profiles[name]
except KeyError:
raise InvalidArgument(f"Profile {name!r} is not registered") from None
@staticmethod
def load_profile(name: str) -> None:
"""
Makes the settings profile registered under ``name`` the active profile.
If no settings profile is registered under ``name``, raises |InvalidArgument|.
"""
check_type(str, name, "name")
settings._current_profile = name
default_variable.value = settings.get_profile(name)
@staticmethod
def get_current_profile_name() -> str:
"""
The name of the current settings profile. For example:
.. code-block:: python
>>> settings.load_profile("myprofile")
>>> settings.get_current_profile_name()
'myprofile'
"""
assert settings._current_profile is not None
return settings._current_profile
@contextlib.contextmanager
def local_settings(s: settings) -> Generator[settings, None, None]:
with default_variable.with_value(s):
yield s
def note_deprecation(
message: str, *, since: str, has_codemod: bool, stacklevel: int = 0
) -> None:
if since != "RELEASEDAY":
date = datetime.date.fromisoformat(since)
assert datetime.date(2021, 1, 1) <= date
if has_codemod:
message += (
"\n The `hypothesis codemod` command-line tool can automatically "
"refactor your code to fix this warning."
)
warnings.warn(HypothesisDeprecationWarning(message), stacklevel=2 + stacklevel)
default = settings(
max_examples=100,
derandomize=False,
database=not_set, # type: ignore
verbosity=Verbosity.normal,
phases=tuple(Phase),
stateful_step_count=50,
report_multiple_bugs=True,
suppress_health_check=(),
deadline=duration(milliseconds=200),
print_blob=False,
backend="hypothesis",
)
settings.register_profile("default", default)
settings.load_profile("default")
assert settings.default is not None
CI = settings(
derandomize=True,
deadline=None,
database=None,
print_blob=True,
suppress_health_check=[HealthCheck.too_slow],
)
settings.register_profile("ci", CI)
if is_in_ci(): # pragma: no cover # covered in ci, but not locally
settings.load_profile("ci")
assert settings.default is not None
# Check that the kwonly args to settings.__init__ is the same as the set of
# defined settings - in case we've added or remove something from one but
# not the other.
assert set(all_settings) == {
p.name
for p in inspect.signature(settings.__init__).parameters.values()
if p.kind == inspect.Parameter.KEYWORD_ONLY
}
| settings |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/spanner.py | {
"start": 5620,
"end": 8528
} | class ____(GoogleCloudBaseOperator):
"""
Delete a Cloud Spanner instance; if an instance does not exist, no action is taken and the task succeeds.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeleteInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_delete_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_spanner_delete_template_fields]
def __init__(
self,
*,
instance_id: str,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
def execute(self, context: Context) -> bool | None:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if hook.get_instance(project_id=self.project_id, instance_id=self.instance_id):
return hook.delete_instance(project_id=self.project_id, instance_id=self.instance_id)
self.log.info(
"Instance '%s' does not exist in project '%s'. Aborting delete.",
self.instance_id,
self.project_id,
)
return True
| SpannerDeleteInstanceOperator |
python | django__django | django/db/models/fields/tuple_lookups.py | {
"start": 8707,
"end": 9856
} | class ____(TupleLookupMixin, LessThan):
def get_fallback_sql(self, compiler, connection):
# Process right-hand-side to trigger sanitization.
self.process_rhs(compiler, connection)
# e.g.: (a, b, c) < (x, y, z) as SQL:
# WHERE a < x OR (a = x AND (b < y OR (b = y AND c < z)))
lookups = itertools.cycle([LessThan, Exact])
connectors = itertools.cycle([OR, AND])
cols_list = [col for col in self.lhs for _ in range(2)]
vals_list = [val for val in self.rhs for _ in range(2)]
cols_iter = iter(cols_list[:-1])
vals_iter = iter(vals_list[:-1])
col = next(cols_iter)
val = next(vals_iter)
lookup = next(lookups)
connector = next(connectors)
root = node = WhereNode([lookup(col, val)], connector=connector)
for col, val in zip(cols_iter, vals_iter):
lookup = next(lookups)
connector = next(connectors)
child = WhereNode([lookup(col, val)], connector=connector)
node.children.append(child)
node = child
return root.as_sql(compiler, connection)
| TupleLessThan |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/edges.py | {
"start": 1041,
"end": 7310
} | class ____(DependencyMixin):
"""
Class that represents edge information to be added between two tasks/operators.
Has shorthand factory functions, like Label("hooray").
Current implementation supports
t1 >> Label("Success route") >> t2
t2 << Label("Success route") << t2
Note that due to the potential for use in either direction, this waits
to make the actual connection between both sides until both are declared,
and will do so progressively if multiple ups/downs are added.
This and EdgeInfo are related - an EdgeModifier is the Python object you
use to add information to (potentially multiple) edges, and EdgeInfo
is the representation of the information for one specific edge.
"""
def __init__(self, label: str | None = None):
self.label = label
self._upstream: list[DependencyMixin] = []
self._downstream: list[DependencyMixin] = []
@property
def roots(self):
return self._downstream
@property
def leaves(self):
return self._upstream
@staticmethod
def _make_list(
item_or_list: DependencyMixin | Sequence[DependencyMixin],
) -> Sequence[DependencyMixin]:
if not isinstance(item_or_list, Sequence):
return [item_or_list]
return item_or_list
def _save_nodes(
self,
nodes: DependencyMixin | Sequence[DependencyMixin],
stream: list[DependencyMixin],
):
from airflow.sdk.definitions._internal.node import DAGNode
from airflow.sdk.definitions.taskgroup import TaskGroup
from airflow.sdk.definitions.xcom_arg import XComArg
for node in self._make_list(nodes):
if isinstance(node, (TaskGroup, XComArg, DAGNode)):
stream.append(node)
else:
raise TypeError(
f"Cannot use edge labels with {type(node).__name__}, only tasks, XComArg or TaskGroups"
)
def _convert_streams_to_task_groups(self):
"""
Convert a node to a TaskGroup or leave it as a DAGNode.
Requires both self._upstream and self._downstream.
To do this, we keep a set of group_ids seen among the streams. If we find that
the nodes are from the same TaskGroup, we will leave them as DAGNodes and not
convert them to TaskGroups
"""
from airflow.sdk.definitions._internal.node import DAGNode
from airflow.sdk.definitions.taskgroup import TaskGroup
from airflow.sdk.definitions.xcom_arg import XComArg
group_ids = set()
for node in [*self._upstream, *self._downstream]:
if isinstance(node, DAGNode) and node.task_group:
if node.task_group.is_root:
group_ids.add("root")
else:
group_ids.add(node.task_group.group_id)
elif isinstance(node, TaskGroup):
group_ids.add(node.group_id)
elif isinstance(node, XComArg):
if isinstance(node.operator, DAGNode) and node.operator.task_group:
if node.operator.task_group.is_root:
group_ids.add("root")
else:
group_ids.add(node.operator.task_group.group_id)
# If all nodes originate from the same TaskGroup, we will not convert them
if len(group_ids) != 1:
self._upstream = self._convert_stream_to_task_groups(self._upstream)
self._downstream = self._convert_stream_to_task_groups(self._downstream)
def _convert_stream_to_task_groups(self, stream: Sequence[DependencyMixin]) -> Sequence[DependencyMixin]:
from airflow.sdk.definitions._internal.node import DAGNode
return [
node.task_group
if isinstance(node, DAGNode) and node.task_group and not node.task_group.is_root
else node
for node in stream
]
def set_upstream(
self,
other: DependencyMixin | Sequence[DependencyMixin],
edge_modifier: EdgeModifier | None = None,
):
"""
Set the given task/list onto the upstream attribute, then attempt to resolve the relationship.
Providing this also provides << via DependencyMixin.
"""
self._save_nodes(other, self._upstream)
if self._upstream and self._downstream:
# Convert _upstream and _downstream to task_groups only after both are set
self._convert_streams_to_task_groups()
for node in self._downstream:
node.set_upstream(other, edge_modifier=self)
def set_downstream(
self,
other: DependencyMixin | Sequence[DependencyMixin],
edge_modifier: EdgeModifier | None = None,
):
"""
Set the given task/list onto the downstream attribute, then attempt to resolve the relationship.
Providing this also provides >> via DependencyMixin.
"""
self._save_nodes(other, self._downstream)
if self._upstream and self._downstream:
# Convert _upstream and _downstream to task_groups only after both are set
self._convert_streams_to_task_groups()
for node in self._upstream:
node.set_downstream(other, edge_modifier=self)
def update_relative(
self,
other: DependencyMixin,
upstream: bool = True,
edge_modifier: EdgeModifier | None = None,
) -> None:
"""Update relative if we're not the "main" side of a relationship; still run the same logic."""
if upstream:
self.set_upstream(other)
else:
self.set_downstream(other)
def add_edge_info(self, dag: DAG, upstream_id: str, downstream_id: str):
"""
Add or update task info on the Dag for this specific pair of tasks.
Called either from our relationship trigger methods above, or directly
by set_upstream/set_downstream in operators.
"""
dag.set_edge_info(upstream_id, downstream_id, {"label": self.label})
# Factory functions
def Label(label: str):
"""Create an EdgeModifier that sets a human-readable label on the edge."""
return EdgeModifier(label=label)
| EdgeModifier |
python | getsentry__sentry | src/sentry/uptime/consumers/results_consumer.py | {
"start": 17523,
"end": 17870
} | class ____(ResultsStrategyFactory[CheckResult, UptimeSubscription]):
result_processor_cls = UptimeResultProcessor
topic_for_codec = Topic.UPTIME_RESULTS
identifier = "uptime"
def build_payload_grouping_key(self, result: CheckResult) -> str:
return self.result_processor.get_subscription_id(result)
| UptimeResultsStrategyFactory |
python | google__jax | jax_plugins/cuda/plugin_setup.py | {
"start": 1793,
"end": 4599
} | class ____(Distribution):
"""This class makes 'bdist_wheel' include an ABI tag on the wheel."""
def has_ext_modules(self):
return True
setup(
name=project_name,
version=__version__,
cmdclass=_cmdclass,
description="JAX Plugin for NVIDIA GPUs",
long_description="",
long_description_content_type="text/markdown",
author="JAX team",
author_email="jax-dev@google.com",
packages=[package_name],
python_requires=">=3.11",
install_requires=[f"jax-cuda{cuda_version}-pjrt=={__version__}"],
extras_require={
'with-cuda': [
f"nvidia-cublas{cuda_wheel_suffix}{nvidia_cublas_version}",
f"nvidia-cuda-cupti{cuda_wheel_suffix}{nvidia_cuda_cupti_version}",
f"nvidia-cuda-nvcc{cuda_wheel_suffix}{nvidia_cuda_nvcc_version}",
f"nvidia-cuda-runtime{cuda_wheel_suffix}{nvidia_cuda_runtime_version}",
f"nvidia-cudnn-cu{cuda_version}{nvidia_cudnn_version}",
f"nvidia-cufft{cuda_wheel_suffix}{nvidia_cufft_version}",
f"nvidia-cusolver{cuda_wheel_suffix}{nvidia_cusolver_version}",
f"nvidia-cusparse{cuda_wheel_suffix}{nvidia_cusparse_version}",
f"nvidia-nccl-cu{cuda_version}{nvidia_nccl_version}",
# nvjitlink is not a direct dependency of JAX, but it is a transitive
# dependency via, for example, cuSOLVER. NVIDIA's cuSOLVER packages
# do not have a version constraint on their dependencies, so the
# package doesn't get upgraded even though not doing that can cause
# problems (https://github.com/jax-ml/jax/issues/18027#issuecomment-1756305196)
# Until NVIDIA add version constraints, add a version constraint
# here.
f"nvidia-nvjitlink{cuda_wheel_suffix}{nvidia_nvjitlink_version}",
# nvrtc is a transitive and undeclared dep of cudnn.
f"nvidia-cuda-nvrtc{cuda_wheel_suffix}{nvidia_cuda_nvrtc_version}",
# NVSHMEM is used by Mosaic GPU collectives and can be used by XLA to
# speed up collectives too.
f"nvidia-nvshmem-cu{cuda_version}{nvidia_nvshmem_version}",
] + (["nvidia-nvvm"] if cuda_version == 13 else []),
},
url="https://github.com/jax-ml/jax",
license="Apache-2.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
"Programming Language :: Python :: Free Threading :: 3 - Stable",
],
package_data={
package_name: [
"*",
"nvvm/libdevice/libdevice*",
],
},
zip_safe=False,
distclass=BinaryDistribution,
)
| BinaryDistribution |
python | getsentry__sentry | src/sentry/preprod/size_analysis/download.py | {
"start": 287,
"end": 485
} | class ____(Exception):
def __init__(self, message: str, status_code: int):
self.message = message
self.status_code = status_code
super().__init__(message)
| SizeAnalysisError |
python | ansible__ansible | lib/ansible/module_utils/_internal/_json/_profiles/_module_legacy_c2m.py | {
"start": 175,
"end": 1000
} | class ____(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
@classmethod
def post_init(cls) -> None:
cls.serialize_map = {}
cls.serialize_map.update(cls._common_discard_tags)
cls.serialize_map.update(
{
set: cls.serialize_as_list, # legacy _json_encode_fallback behavior
tuple: cls.serialize_as_list, # JSONEncoder built-in behavior
_datetime.date: cls.serialize_as_isoformat,
_datetime.time: cls.serialize_as_isoformat, # always failed pre-2.18, so okay to include for consistency
_datetime.datetime: cls.serialize_as_isoformat,
}
)
cls.handle_key = cls._handle_key_str_fallback # type: ignore[method-assign] # legacy stdlib-compatible key behavior
| _Profile |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/sensor.py | {
"start": 3987,
"end": 4095
} | class ____(NamedTuple):
"""Placeholder for launched backfills."""
backfill_id: str
| BackfillSubmission |
python | google__pytype | pytype/rewrite/convert_test.py | {
"start": 3602,
"end": 3929
} | class ____(ConverterTestBase):
def test_alias(self):
alias = self.build_pytd('import os.path', name='os.path')
module = self.conv.pytd_alias_to_value(alias)
self.assertIsInstance(module, abstract.Module)
self.assertEqual(module.name, 'os.path')
if __name__ == '__main__':
unittest.main()
| PytdAliasToValueTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_quote_name07.py | {
"start": 314,
"end": 1509
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("quote_name07.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
sheet_name = "Sheet'1"
worksheet = workbook.add_worksheet(sheet_name)
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [48135552, 54701056]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.repeat_rows(0, 1)
worksheet.set_portrait()
worksheet.vertical_dpi = 200
chart.add_series({"values": [sheet_name, 0, 0, 4, 0]})
chart.add_series({"values": [sheet_name, 0, 1, 4, 1]})
chart.add_series({"values": [sheet_name, 0, 2, 4, 2]})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 122362,
"end": 123454
} | class ____(torch.nn.Module):
def __init__(self, in_channels, out_channels, scale=8, kernel_size=3, dilation=1):
super().__init__()
in_channel = in_channels // scale
hidden_channel = out_channels // scale
self.blocks = nn.ModuleList(
[
TimeDelayNetBlock(
in_channel,
hidden_channel,
kernel_size=kernel_size,
dilation=dilation,
)
for i in range(scale - 1)
]
)
self.scale = scale
def forward(self, hidden_states):
outputs = []
for i, hidden_part in enumerate(torch.chunk(hidden_states, self.scale, dim=1)):
if i == 0:
output_part = hidden_part
elif i == 1:
output_part = self.blocks[i - 1](hidden_part)
else:
output_part = self.blocks[i - 1](hidden_part + output_part)
outputs.append(output_part)
output = torch.cat(outputs, dim=1)
return output
| Res2NetBlock |
python | huggingface__transformers | src/transformers/models/kosmos2/processing_kosmos2.py | {
"start": 1486,
"end": 1562
} | class ____(TextKwargs, total=False):
add_eos_token: bool
| Kosmos2TextKwargs |
python | conda__conda | tests/plugins/test_solvers.py | {
"start": 436,
"end": 783
} | class ____(solve.Solver):
def solve_final_state(self, *args, **kwargs):
log.info("My verbose solver!")
return super().solve_final_state(*args, **kwargs)
@staticmethod
def user_agent():
return verbose_user_agent
classic_solver = plugins.CondaSolver(
name="classic",
backend=solve.Solver,
)
| VerboseSolver |
python | kamyu104__LeetCode-Solutions | Python/array-of-doubled-pairs.py | {
"start": 58,
"end": 392
} | class ____(object):
def canReorderDoubled(self, A):
"""
:type A: List[int]
:rtype: bool
"""
count = collections.Counter(A)
for x in sorted(count, key=abs):
if count[x] > count[2*x]:
return False
count[2*x] -= count[x]
return True
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/types.py | {
"start": 7066,
"end": 9646
} | class ____(sqltypes.TypeEngine[BitString]):
"""Represent the PostgreSQL BIT type.
The :class:`_postgresql.BIT` type yields values in the form of the
:class:`_postgresql.BitString` Python value type.
.. versionchanged:: 2.1 The :class:`_postgresql.BIT` type now works
with :class:`_postgresql.BitString` values rather than plain strings.
"""
render_bind_cast = True
__visit_name__ = "BIT"
operator_classes = (
OperatorClass.BASE | OperatorClass.COMPARISON | OperatorClass.BITWISE
)
def __init__(
self, length: Optional[int] = None, varying: bool = False
) -> None:
if varying:
# BIT VARYING can be unlimited-length, so no default
self.length = length
else:
# BIT without VARYING defaults to length 1
self.length = length or 1
self.varying = varying
def bind_processor(
self, dialect: Dialect
) -> _BindProcessorType[BitString]:
def bound_value(value: Any) -> Any:
if isinstance(value, BitString):
return str(value)
return value
return bound_value
def result_processor(
self, dialect: Dialect, coltype: object
) -> _ResultProcessorType[BitString]:
def from_result_value(value: Any) -> Any:
if value is not None:
value = BitString(value)
return value
return from_result_value
def coerce_compared_value(
self, op: OperatorType | None, value: Any
) -> TypeEngine[Any]:
if isinstance(value, str):
return self
return super().coerce_compared_value(op, value)
@property
def python_type(self) -> type[Any]:
return BitString
class comparator_factory(TypeEngine.Comparator[BitString]):
def __lshift__(self, other: Any) -> ColumnOperators:
return self.bitwise_lshift(other)
def __rshift__(self, other: Any) -> ColumnOperators:
return self.bitwise_rshift(other)
def __and__(self, other: Any) -> ColumnOperators:
return self.bitwise_and(other)
def __or__(self, other: Any) -> ColumnOperators:
return self.bitwise_or(other)
# NOTE: __xor__ is not defined on sql.operators.ColumnOperators.
# Use `bitwise_xor` directly instead.
# def __xor__(self, other: Any) -> ColumnOperators:
# return self.bitwise_xor(other)
def __invert__(self) -> ColumnOperators:
return self.bitwise_not()
PGBit = BIT
| BIT |
python | ray-project__ray | python/ray/_private/memory_monitor.py | {
"start": 2419,
"end": 3391
} | class ____(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
@staticmethod
def get_message(used_gb, total_gb, threshold):
proc_str = get_top_n_memory_usage(n=10)
return (
"More than {}% of the memory on ".format(int(100 * threshold))
+ "node {} is used ({} / {} GB). ".format(
platform.node(), round(used_gb, 2), round(total_gb, 2)
)
+ f"The top 10 memory consumers are:\n\n{proc_str}"
+ "\n\nIn addition, up to {} GiB of shared memory is ".format(
round(get_shared(psutil.virtual_memory()) / (1024**3), 2)
)
+ "currently being used by the Ray object store.\n---\n"
"--- Tip: Use the `ray memory` command to list active "
"objects in the cluster.\n"
"--- To disable OOM exceptions, set "
"RAY_DISABLE_MEMORY_MONITOR=1.\n---\n"
)
| RayOutOfMemoryError |
python | gevent__gevent | src/greentest/3.10/test_subprocess.py | {
"start": 2336,
"end": 2608
} | class ____(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
| PopenExecuteChildRaises |
python | Netflix__metaflow | test/core/tests/basic_parallel.py | {
"start": 72,
"end": 1473
} | class ____(MetaflowTest):
PRIORITY = 1
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@steps(0, ["parallel-split"], required=True)
def split(self):
self.my_node_index = None
@steps(0, ["parallel-step"], required=True)
def inner(self):
from metaflow import current
assert_equals(4, current.parallel.num_nodes)
self.my_node_index = current.parallel.node_index
assert_equals(self.my_node_index, self.input)
@steps(0, ["join"], required=True)
def join(self, inputs):
got = sorted([inp.my_node_index for inp in inputs])
assert_equals(list(range(4)), got)
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
run = checker.get_run()
if type(checker).__name__ == "CliCheck":
# CliCheck doesn't support enlisting of tasks.
assert run is None
else:
assert run is not None
tasks = run["parallel_inner"].tasks()
task_list = list(tasks)
assert_equals(4, len(task_list))
assert_equals(1, len(list(run["parallel_inner"].control_tasks())))
| BasicParallelTest |
python | huggingface__transformers | src/transformers/models/pegasus/modeling_pegasus.py | {
"start": 46790,
"end": 55347
} | class ____(PegasusPreTrainedModel, GenerationMixin):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = ["final_logits_bias"]
_tied_weights_keys = {
"lm_head.weight": "model.shared.weight",
}
def __init__(self, config: PegasusConfig):
super().__init__(config)
self.model = PegasusModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
# Initialize weights and apply final processing
self.post_init()
def resize_token_embeddings(
self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True
) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
self._resize_final_logits_bias(new_embeddings.weight.shape[0])
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.model.encoder.resize_position_embeddings(new_num_position_embeddings)
self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings())
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[tuple[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, Seq2SeqLMOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example Summarization:
```python
>>> from transformers import AutoTokenizer, PegasusForConditionalGeneration
>>> model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="pt")
>>> # Generate Summary
>>> summary_ids = model.generate(inputs["input_ids"])
>>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"California's largest electricity provider has turned off power to hundreds of thousands of customers."
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Pegasus
| PegasusForConditionalGeneration |
python | django-compressor__django-compressor | compressor/tests/test_offline.py | {
"start": 29456,
"end": 30158
} | class ____(OfflineTestCaseMixin, TestCase):
"""
Test that templates extending templates with the same name
(e.g. admin/index.html) don't cause an infinite test_extends_recursion
"""
templates_dir = "test_extends_recursion"
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.staticfiles",
"compressor",
]
@override_settings(INSTALLED_APPS=INSTALLED_APPS)
def _test_offline(self, engine, verbosity=0):
count, _ = CompressCommand().handle_inner(engines=[engine], verbosity=verbosity)
self.assertEqual(count, 1)
| OfflineCompressExtendsRecursionTestCase |
python | facelessuser__soupsieve | tests/test_level4/test_target_within.py | {
"start": 58,
"end": 826
} | class ____(util.TestCase):
"""Test target within selectors."""
MARKUP = """
<a href="#head-2">Jump</a>
<article id="article">
<h2 id="head-1">Header 1</h1>
<div><p>content</p></div>
<h2 id="head-2">Header 2</h1>
<div><p>content</p></div>
</article>
"""
def test_target_within(self):
"""Test target within."""
self.assert_selector(
self.MARKUP,
"article:target-within",
[],
flags=util.HTML
)
def test_not_target_within(self):
"""Test inverse of target within."""
self.assert_selector(
self.MARKUP,
"article:not(:target-within)",
["article"],
flags=util.HTML
)
| TestTargetWithin |
python | dagster-io__dagster | .buildkite/buildkite-shared/buildkite_shared/step_builders/command_step_builder.py | {
"start": 928,
"end": 2181
} | class ____(Enum):
KUBERNETES_GKE = os.getenv("BUILDKITE_KUBERNETES_QUEUE_GKE", "kubernetes-gke")
KUBERNETES_EKS = os.getenv("BUILDKITE_KUBERNETES_QUEUE_EKS", "kubernetes-eks")
DOCKER = os.getenv("BUILDKITE_DOCKER_QUEUE", "buildkite-docker-october22")
MEDIUM = os.getenv("BUILDKITE_MEDIUM_QUEUE") or "buildkite-medium-october22"
WINDOWS = os.getenv("BUILDKITE_WINDOWS_QUEUE") or "buildkite-windows-october22"
@classmethod
def contains(cls, value):
return isinstance(value, cls)
CommandStepConfiguration = TypedDict(
"CommandStepConfiguration",
{
"agents": dict[str, str],
"label": str,
"timeout_in_minutes": int,
"plugins": list[dict[str, object]],
"retry": dict[str, object],
"commands": NotRequired[list[str]],
"depends_on": NotRequired[list[str]],
"key": NotRequired[str],
"skip": NotRequired[Optional[str]],
"artifact_paths": NotRequired[list[str]],
"concurrency": NotRequired[int],
"concurrency_group": NotRequired[str],
"allow_dependency_failure": NotRequired[bool],
"soft_fail": NotRequired[bool],
"if": NotRequired[str], # Reserved word handled with quotes
},
)
| BuildkiteQueue |
python | getsentry__sentry | src/sentry/api/serializers/models/event.py | {
"start": 5852,
"end": 5971
} | class ____(
BaseEventSerializerResponse, ErrorEventFields, TransactionEventFields
):
pass
| EventSerializerResponse |
python | patrick-kidger__equinox | equinox/nn/_conv.py | {
"start": 25571,
"end": 26651
} | class ____(ConvTranspose):
"""As [`equinox.nn.ConvTranspose`][] with `num_spatial_dims=2`."""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int | Sequence[int],
stride: int | Sequence[int] = (1, 1),
output_padding: int | Sequence[int] = (0, 0),
padding: str | int | Sequence[int] | Sequence[tuple[int, int]] = (0, 0),
dilation: int | Sequence[int] = (1, 1),
groups: int = 1,
use_bias: bool = True,
padding_mode: str = "ZEROS",
dtype=None,
*,
key: PRNGKeyArray,
):
super().__init__(
num_spatial_dims=2,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
output_padding=output_padding,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
padding_mode=padding_mode,
dtype=dtype,
key=key,
)
| ConvTranspose2d |
python | wandb__wandb | landfill/functional_tests/artifacts/link-model.py | {
"start": 146,
"end": 1099
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def main():
my_model = Net()
wandb.init()
best_model = log_model(my_model, "my-model", aliases=["boom"], scope_project=True)
link_model(best_model, "project/test_portfolio")
wandb.finish()
if __name__ == "__main__":
main()
| Net |
python | nedbat__coveragepy | tests/test_coverage.py | {
"start": 45115,
"end": 46079
} | class ____(CoverageTest):
"""Tests of some reporting behavior."""
def test_no_data_to_report_on_annotate(self) -> None:
# Reporting with no data produces a nice message and no output
# directory.
with pytest.raises(NoDataError, match="No data to report."):
self.command_line("annotate -d ann")
self.assert_doesnt_exist("ann")
def test_no_data_to_report_on_html(self) -> None:
# Reporting with no data produces a nice message and no output
# directory.
with pytest.raises(NoDataError, match="No data to report."):
self.command_line("html -d htmlcov")
self.assert_doesnt_exist("htmlcov")
def test_no_data_to_report_on_xml(self) -> None:
# Reporting with no data produces a nice message.
with pytest.raises(NoDataError, match="No data to report."):
self.command_line("xml")
self.assert_doesnt_exist("coverage.xml")
| ReportingTest |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/static_analysis/reaching_fndefs_test.py | {
"start": 1321,
"end": 2162
} | class ____(test.TestCase):
def _parse_and_analyze(self, test_fn):
# TODO(mdan): Use a custom FunctionTransformer here.
node, source = parser.parse_entity(test_fn, future_features=())
entity_info = transformer.EntityInfo(
name=test_fn.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace={})
node = qual_names.resolve(node)
namer = naming.Namer({})
ctx = transformer.Context(entity_info, namer, None)
node = activity.resolve(node, ctx)
graphs = cfg.build(node)
node = reaching_definitions.resolve(node, ctx, graphs)
node = reaching_fndefs.resolve(node, ctx, graphs)
return node
def assertHasFnDefs(self, node):
anno.getanno(node, anno.Static.DEFINED_FNS_IN)
if __name__ == '__main__':
test.main()
| ReachingFndefsAnalyzerTest |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/shuffle.py | {
"start": 1606,
"end": 4118
} | class ____: # pragma: no cover
"""cuDF-Polars protocol for rapidsmpf shuffler."""
@staticmethod
@nvtx_annotate_cudf_polars(message="RMPFIntegration.insert_partition")
def insert_partition(
df: DataFrame,
partition_id: int, # Not currently used
partition_count: int,
shuffler: Any,
options: ShuffleOptions,
*other: Any,
) -> None:
"""Add cudf-polars DataFrame chunks to an RMP shuffler."""
from rapidsmpf.integrations.cudf.partition import partition_and_pack
if options["cluster_kind"] == "dask":
from rapidsmpf.integrations.dask import get_worker_context
else:
from rapidsmpf.integrations.single import get_worker_context
context = get_worker_context()
on = options["on"]
assert not other, f"Unexpected arguments: {other}"
columns_to_hash = tuple(df.column_names.index(val) for val in on)
packed_inputs = partition_and_pack(
df.table,
columns_to_hash=columns_to_hash,
num_partitions=partition_count,
br=context.br,
stream=DEFAULT_STREAM,
)
shuffler.insert_chunks(packed_inputs)
@staticmethod
@nvtx_annotate_cudf_polars(message="RMPFIntegration.extract_partition")
def extract_partition(
partition_id: int,
shuffler: Any,
options: ShuffleOptions,
) -> DataFrame:
"""Extract a finished partition from the RMP shuffler."""
from rapidsmpf.integrations.cudf.partition import (
unpack_and_concat,
unspill_partitions,
)
if options["cluster_kind"] == "dask":
from rapidsmpf.integrations.dask import get_worker_context
else:
from rapidsmpf.integrations.single import get_worker_context
context = get_worker_context()
shuffler.wait_on(partition_id)
column_names = options["column_names"]
dtypes = options["dtypes"]
return DataFrame.from_table(
unpack_and_concat(
unspill_partitions(
shuffler.extract(partition_id),
br=context.br,
allow_overbooking=True,
statistics=context.statistics,
),
br=context.br,
stream=DEFAULT_STREAM,
),
column_names,
dtypes,
get_dask_cuda_stream(),
)
| RMPFIntegration |
python | django__django | django/core/serializers/jsonl.py | {
"start": 1247,
"end": 2258
} | class ____(PythonDeserializer):
"""Deserialize a stream or string of JSON data."""
def __init__(self, stream_or_string, **options):
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode()
if isinstance(stream_or_string, str):
stream_or_string = stream_or_string.splitlines()
super().__init__(Deserializer._get_lines(stream_or_string), **options)
def _handle_object(self, obj):
try:
yield from super()._handle_object(obj)
except (GeneratorExit, DeserializationError):
raise
except Exception as exc:
raise DeserializationError(f"Error deserializing object: {exc}") from exc
@staticmethod
def _get_lines(stream):
for line in stream:
if not line.strip():
continue
try:
yield json.loads(line)
except Exception as exc:
raise DeserializationError() from exc
| Deserializer |
python | pandas-dev__pandas | pandas/tests/io/test_common.py | {
"start": 14404,
"end": 24991
} | class ____:
@pytest.mark.skipif(WASM, reason="limited file system access on WASM")
def test_constructor_bad_file(self, mmap_file):
non_file = StringIO("I am not a file")
non_file.fileno = lambda: -1
# the error raised is different on Windows
if is_platform_windows():
msg = "The parameter is incorrect"
err = OSError
else:
msg = "[Errno 22]"
err = mmap.error
with pytest.raises(err, match=msg):
icom._maybe_memory_map(non_file, True)
with open(mmap_file, encoding="utf-8") as target:
pass
msg = "I/O operation on closed file"
with pytest.raises(ValueError, match=msg):
icom._maybe_memory_map(target, True)
@pytest.mark.skipif(WASM, reason="limited file system access on WASM")
def test_next(self, mmap_file):
with open(mmap_file, encoding="utf-8") as target:
lines = target.readlines()
with icom.get_handle(
target, "r", is_text=True, memory_map=True
) as wrappers:
wrapper = wrappers.handle
assert isinstance(wrapper.buffer.buffer, mmap.mmap)
for line in lines:
next_line = next(wrapper)
assert next_line.strip() == line.strip()
with pytest.raises(StopIteration, match=r"^$"):
next(wrapper)
def test_unknown_engine(self, temp_file):
df = pd.DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=pd.Index(list("ABCD")),
index=pd.Index([f"i-{i}" for i in range(30)]),
)
df.to_csv(temp_file)
with pytest.raises(ValueError, match="Unknown engine"):
pd.read_csv(temp_file, engine="pyt")
def test_binary_mode(self, temp_file):
"""
'encoding' shouldn't be passed to 'open' in binary mode.
GH 35058
"""
df = pd.DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=pd.Index(list("ABCD")),
index=pd.Index([f"i-{i}" for i in range(30)]),
)
df.to_csv(temp_file, mode="w+b")
tm.assert_frame_equal(df, pd.read_csv(temp_file, index_col=0))
@pytest.mark.parametrize("encoding", ["utf-16", "utf-32"])
@pytest.mark.parametrize("compression_", ["bz2", "xz"])
def test_warning_missing_utf_bom(self, encoding, compression_, temp_file):
"""
bz2 and xz do not write the byte order mark (BOM) for utf-16/32.
https://stackoverflow.com/questions/55171439
GH 35681
"""
df = pd.DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=pd.Index(list("ABCD")),
index=pd.Index([f"i-{i}" for i in range(30)]),
)
with tm.assert_produces_warning(UnicodeWarning, match="byte order mark"):
df.to_csv(temp_file, compression=compression_, encoding=encoding)
# reading should fail (otherwise we wouldn't need the warning)
msg = (
r"UTF-\d+ stream does not start with BOM|"
r"'utf-\d+' codec can't decode byte"
)
with pytest.raises(UnicodeError, match=msg):
pd.read_csv(temp_file, compression=compression_, encoding=encoding)
def test_is_fsspec_url():
assert icom.is_fsspec_url("gcs://pandas/somethingelse.com")
assert icom.is_fsspec_url("gs://pandas/somethingelse.com")
# the following is the only remote URL that is handled without fsspec
assert not icom.is_fsspec_url("http://pandas/somethingelse.com")
assert not icom.is_fsspec_url("random:pandas/somethingelse.com")
assert not icom.is_fsspec_url("/local/path")
assert not icom.is_fsspec_url("relative/local/path")
# fsspec URL in string should not be recognized
assert not icom.is_fsspec_url("this is not fsspec://url")
assert not icom.is_fsspec_url("{'url': 'gs://pandas/somethingelse.com'}")
# accept everything that conforms to RFC 3986 schema
assert icom.is_fsspec_url("RFC-3986+compliant.spec://something")
def test_is_fsspec_url_chained():
# GH#48978 Support chained fsspec URLs
# See https://filesystem-spec.readthedocs.io/en/latest/features.html#url-chaining.
assert icom.is_fsspec_url("filecache::s3://pandas/test.csv")
assert icom.is_fsspec_url("zip://test.csv::filecache::gcs://bucket/file.zip")
assert icom.is_fsspec_url("filecache::zip://test.csv::gcs://bucket/file.zip")
assert icom.is_fsspec_url("filecache::dask::s3://pandas/test.csv")
assert not icom.is_fsspec_url("filecache:s3://pandas/test.csv")
assert not icom.is_fsspec_url("filecache:::s3://pandas/test.csv")
assert not icom.is_fsspec_url("filecache::://pandas/test.csv")
@pytest.mark.parametrize("format", ["csv", "json"])
def test_codecs_encoding(format, temp_file):
    # GH39247
    # Writing through a caller-opened text handle must honor that handle's
    # encoding; round-trip a frame through such a handle and check that it
    # survives unchanged for both csv and json writers.
    expected = pd.DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=pd.Index(list("ABCD")),
        index=pd.Index([f"i-{i}" for i in range(30)]),
    )
    with open(temp_file, mode="w", encoding="utf-8") as handle:
        getattr(expected, f"to_{format}")(handle)
    with open(temp_file, encoding="utf-8") as handle:
        if format == "csv":
            # index_col=0 restores the index column that to_csv wrote out
            df = pd.read_csv(handle, index_col=0)
        else:
            df = pd.read_json(handle)
    tm.assert_frame_equal(expected, df)
def test_codecs_get_writer_reader(temp_file):
# GH39247
expected = pd.DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=pd.Index(list("ABCD")),
index=pd.Index([f"i-{i}" for i in range(30)]),
)
with open(temp_file, "wb") as handle:
with codecs.getwriter("utf-8")(handle) as encoded:
expected.to_csv(encoded)
with open(temp_file, "rb") as handle:
with codecs.getreader("utf-8")(handle) as encoded:
df = pd.read_csv(encoded, index_col=0)
tm.assert_frame_equal(expected, df)
@pytest.mark.parametrize(
    "io_class,mode,msg",
    [
        (BytesIO, "t", "a bytes-like object is required, not 'str'"),
        (StringIO, "b", "string argument expected, got 'bytes'"),
    ],
)
def test_explicit_encoding(io_class, mode, msg):
    # GH39247; this test makes sure that if a user provides mode="*t" or "*b",
    # it is used. In the case of this test it leads to an error as intentionally the
    # wrong mode is requested
    expected = pd.DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=pd.Index(list("ABCD")),
        index=pd.Index([f"i-{i}" for i in range(30)]),
    )
    with io_class() as buffer:
        # Text mode against a BytesIO (and binary mode against a StringIO)
        # must fail with the buffer type's own TypeError message, proving
        # the explicit mode was forwarded rather than inferred.
        with pytest.raises(TypeError, match=msg):
            expected.to_csv(buffer, mode=f"w{mode}")
@pytest.mark.parametrize("encoding_errors", ["strict", "replace"])
@pytest.mark.parametrize("format", ["csv", "json"])
def test_encoding_errors(encoding_errors, format, temp_file):
    # GH39450
    # "strict" must raise UnicodeDecodeError on undecodable bytes, while
    # "replace" must substitute the replacement character and still parse.
    msg = "'utf-8' codec can't decode byte"
    # 0xe4 is not a valid UTF-8 start byte on its own
    bad_encoding = b"\xe4"
    if format == "csv":
        # one header cell, one index cell and one value cell, all undecodable
        content = b"," + bad_encoding + b"\n" + bad_encoding * 2 + b"," + bad_encoding
        reader = partial(pd.read_csv, index_col=0)
    else:
        # {"<bad><bad>": {"<bad>": "<bad>"}} with orient="index"
        content = (
            b'{"'
            + bad_encoding * 2
            + b'": {"'
            + bad_encoding
            + b'":"'
            + bad_encoding
            + b'"}}'
        )
        reader = partial(pd.read_json, orient="index")
    file = temp_file
    file.write_bytes(content)

    if encoding_errors != "replace":
        with pytest.raises(UnicodeDecodeError, match=msg):
            reader(temp_file, encoding_errors=encoding_errors)
    else:
        df = reader(temp_file, encoding_errors=encoding_errors)
        # decoding the bad byte with the same error policy yields the
        # replacement character used in every cell of the expected frame
        decoded = bad_encoding.decode(errors=encoding_errors)
        expected = pd.DataFrame({decoded: [decoded]}, index=[decoded * 2])
        tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("encoding_errors", [0, None])
def test_encoding_errors_badtype(encoding_errors):
    # GH 59075
    # Non-string encoding_errors values must be rejected up front with a
    # ValueError that names the offending type.
    content = StringIO("A,B\n1,2\n3,4\n")
    reader = partial(pd.read_csv, encoding_errors=encoding_errors)
    expected_error = "encoding_errors must be a string, got "
    expected_error += f"{type(encoding_errors).__name__}"
    with pytest.raises(ValueError, match=expected_error):
        reader(content)
def test_bad_encdoing_errors(temp_file):
    # GH 39777
    # NOTE(review): "encdoing" in the function name is a typo for "encoding";
    # left as-is here since renaming would change the collected test id.
    # An unknown error-handler name passed through get_handle must surface
    # the codecs LookupError instead of being swallowed.
    with pytest.raises(LookupError, match="unknown error handler name"):
        icom.get_handle(temp_file, "w", errors="bad")
@pytest.mark.skipif(WASM, reason="limited file system access on WASM")
def test_errno_attribute():
    """The FileNotFoundError for a missing file must carry errno.ENOENT.

    GH 13872.  ``pytest.raises`` yields an ``ExceptionInfo`` object, not the
    exception itself; the raised exception lives on its ``.value`` attribute.
    The original assertion read ``err.errno``, which is an AttributeError on
    ``ExceptionInfo`` — the errno check must go through ``err.value``.
    """
    with pytest.raises(FileNotFoundError, match="\\[Errno 2\\]") as err:
        pd.read_csv("doesnt_exist")
    assert err.value.errno == errno.ENOENT
def test_fail_mmap():
    # memory mapping needs a real file descriptor; BytesIO exposes no fileno,
    # so get_handle must fail with UnsupportedOperation
    with BytesIO() as in_memory_buffer:
        with pytest.raises(UnsupportedOperation, match="fileno"):
            icom.get_handle(in_memory_buffer, "rb", memory_map=True)
def test_close_on_error():
    # GH 47136: an error raised while closing one of the created handles
    # must propagate out of the IOHandles context manager
    class _ExplodingHandle:
        def close(self):
            raise OSError("test")

    with pytest.raises(OSError, match="test"):
        with BytesIO() as buffer, icom.get_handle(buffer, "rb") as handles:
            handles.created_handles.append(_ExplodingHandle())
@td.skip_if_no("fsspec")
@pytest.mark.parametrize("compression", [None, "infer"])
def test_read_csv_chained_url_no_error(compression):
    # GH 60100
    # A chained fsspec URL (tar://...::file://...) must be readable both with
    # compression disabled and with compression inference enabled.
    tar_file_path = "pandas/tests/io/data/tar/test-csv.tar"
    chained_file_url = f"tar://test.csv::file://{tar_file_path}"
    result = pd.read_csv(chained_file_url, compression=compression, sep=";")
    expected = pd.DataFrame({"1": {0: 3}, "2": {0: 4}})
    tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize(
    "reader",
    [
        pd.read_csv,
        pd.read_fwf,
        pd.read_excel,
        pd.read_feather,
        pd.read_hdf,
        pd.read_stata,
        pd.read_sas,
        pd.read_json,
        pd.read_pickle,
    ],
)
def test_pickle_reader(reader):
    # GH 22265
    # The top-level reader functions must remain picklable (e.g. so they can
    # be shipped to worker processes); dumping into a throwaway buffer
    # without an exception is the whole test.
    with BytesIO() as buffer:
        pickle.dump(reader, buffer)
@td.skip_if_no("pyarrow")
def test_pyarrow_read_csv_datetime_dtype():
    # GH 59904
    # With the pyarrow dtype backend, parse_dates + dayfirst must parse
    # day-first dates and turn the empty string into a missing value (NaT).
    data = '"date"\n"20/12/2025"\n""\n"31/12/2020"'
    result = pd.read_csv(
        StringIO(data), parse_dates=["date"], dayfirst=True, dtype_backend="pyarrow"
    )
    expect_data = pd.to_datetime(["20/12/2025", pd.NaT, "31/12/2020"], dayfirst=True)
    expect = pd.DataFrame({"date": expect_data})
    tm.assert_frame_equal(expect, result)
| TestMMapWrapper |
python | great-expectations__great_expectations | tests/expectations/test_condition_validators.py | {
"start": 2493,
"end": 4760
class ____:
    """Tests for total condition count validator when passed to Expectation."""

    def test_error_on_more_than_100_conditions(self):
        """Test that more than 100 conditions raises error in Expectation."""
        column = Column("column_1")
        # Create 101 conditions (initial condition plus 100 AND-ed ones)
        row_condition = column == 0
        for i in range(1, 101):
            row_condition = row_condition & (column == i)
        with pytest.raises(
            ValueError, match="100 conditions is the maximum, but 101 conditions are defined"
        ):
            ExpectColumnValuesToBeInSet(
                column="test_column", value_set=["a", "b"], row_condition=row_condition
            )

    def test_exactly_100_conditions_allowed(self):
        """Test that exactly 100 conditions is allowed in Expectation."""
        column = Column("column_1")
        # Create exactly 100 conditions (initial condition plus 99 AND-ed ones)
        row_condition = column == 0
        for i in range(1, 100):
            row_condition = row_condition & (column == i)
        # This should not raise an error
        expectation = ExpectColumnValuesToBeInSet(
            column="test_column", value_set=["a", "b"], row_condition=row_condition
        )
        assert len(expectation.row_condition.conditions) == 100

    def test_nested_and_conditions_count_towards_limit(self):
        """Test that nested AndConditions within OR are counted towards the 100 limit."""
        column = Column("column_1")
        # Create first AND group with 50 conditions
        first_and_group = column == 0
        for i in range(1, 50):
            first_and_group = first_and_group & (column == i)
        # Create second AND group with 51 conditions
        second_and_group = column == 50
        for i in range(51, 101):
            second_and_group = second_and_group & (column == i)
        # Combine with OR - total: 50 + 51 = 101 conditions
        row_condition = first_and_group | second_and_group
        with pytest.raises(
            ValueError, match="100 conditions is the maximum, but 101 conditions are defined"
        ):
            ExpectColumnValuesToBeInSet(
                column="test_column", value_set=["a", "b"], row_condition=row_condition
            )
| TestTotalConditionCountValidator |
python | xlwings__xlwings | docs/conf.py | {
"start": 1054,
"end": 11419
class ____(object):
    """Stand-in installed into ``sys.modules`` for platform-specific modules.

    Lets Sphinx autodoc import the package on machines where the mocked
    modules (win32com, appscript, ...) are unavailable: every attribute
    access, call, and item lookup just yields another Mock.
    """

    __all__ = []

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        if name in ("__file__", "__path__"):
            # module-level dunders looked up when the mock masquerades as a module
            return "/dev/null"
        elif name[0] == name[0].upper():
            # Capitalized attribute: treat it as a class so it can be subclassed
            mockType = Mock()  # type(name, (), {})
            mockType.__module__ = __name__
            return mockType
        else:
            return Mock()

    @classmethod
    def __getitem__(cls, key):
        return Mock()
MOCK_MODULES = [
"appscript",
"appscript.reference",
"psutil",
"xlplatform",
"atexit",
"aem",
"osax",
]
if not sys.platform.startswith("win"):
MOCK_MODULES += [
"win32com",
"win32com.client",
"pywintypes",
"pythoncom",
"win32timezone",
"win32com.server",
"win32com.server.util",
"win32com.server.dispatcher",
"win32com.server.policy",
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# -- General configuration -----------------------------------------------------
# READTHEDOCS
# on_rtd is whether we are on readthedocs.org
# Note: under Admin > Advanced Settings, check the box 'Install your project inside a
# virtualenv...' and provide a setup.py and requirements.txt file for the extension
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# Define the canonical URL if you are using a custom domain on Read the Docs
html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "")
# Tell Jinja2 templates the build is running on Read the Docs
html_context = {}
if os.environ.get("READTHEDOCS", "") == "True":
html_context["READTHEDOCS"] = True
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.mathjax",
"sphinx.ext.extlinks",
"sphinx.ext.autosectionlabel", # To make easy intra-page links: :ref:`Title`
"sphinx_copybutton",
"sphinx_design",
]
# For sphinx.ext.autosectionlabel
autosectionlabel_prefix_document = True
# autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "xlwings"
copyright = "Zoomer Analytics LLC"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import xlwings
version = xlwings.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# extlinks alias
extlinks = {"issue": ("https://github.com/xlwings/xlwings/issues/%s", "GH %s")}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"sidebar_hide_name": True,
"light_logo": "logo-light.svg",
"dark_logo": "logo-dark.svg",
"light_css_variables": {
"color-brand-primary": "black",
"color-brand-content": "#28a745",
"color-sidebar-caption-text": "#28a745",
"sidebar-caption-font-size": "1em",
"color-announcement-background": "#28a745",
},
"dark_css_variables": {
"color-brand-primary": "white",
"color-announcement-background": "#28a745",
},
"announcement": '<a href="https://lite.xlwings.org/" target="_blank"> xlwings Lite</a> is now available in the add-in store for free!</a>',
}
html_show_sourcelink = False
html_title = "xlwings Documentation"
html_favicon = "_static/favicon.png"
html_extra_path = ["_static/opensource_licenses2.html"]
copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
copybutton_prompt_is_regexp = True
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "xlwingsdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
"pointsize": "11pt",
# Additional stuff for the LaTeX preamble.
# "preamble": "\\usepackage[UTF8]{ctex}\n",
# Index: '' for no index, '\\printindex' to generate one
"printindex": "\\printindex",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual], toctree_only).
latex_documents = [
(
"index_latex",
"xlwings.tex",
"xlwings - Make Excel Fly!",
"Zoomer Analytics LLC",
"manual",
True,
),
]
# latex_engine = "xelatex"
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# man_pages = [
# ('index', 'xlwings', u'xlwings Documentation',
# [u'Zoomer Analytics LLC'], 1)
# ]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# texinfo_documents = [
# ('index', 'xlwings', u'xlwings Documentation',
# u'Zoomer Analytics LLC', 'xlwings', 'One line description of project.',
# 'Miscellaneous'),
# ]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
texinfo_domain_indices = False
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| Mock |
python | huggingface__transformers | src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py | {
"start": 10304,
"end": 11217
class ____(nn.Module):
    """Single predictor layer: Conv1d -> ReLU -> LayerNorm -> Dropout.

    Operates on Conv1d-style inputs of shape (batch, channels, time); the
    layer norm is applied over the channel axis by transposing it to the
    last position and back.
    """

    def __init__(self, input_channels, num_chans, kernel_size, dropout_rate):
        super().__init__()
        # padding of (kernel_size - 1) // 2 with stride 1 keeps the time
        # dimension unchanged for odd kernel sizes
        self.conv = nn.Conv1d(
            input_channels,
            num_chans,
            kernel_size,
            stride=1,
            padding=(kernel_size - 1) // 2,
        )
        self.activation = nn.ReLU()
        self.layer_norm = nn.LayerNorm(num_chans)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.activation(hidden_states)
        # Perform layer norm on dimension 1
        # (LayerNorm normalizes the last axis, so swap channels there and back)
        hidden_states = hidden_states.transpose(1, -1)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(1, -1)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
python | airbytehq__airbyte | airbyte-integrations/connectors/source-salesforce/unit_tests/salesforce_job_response_builder.py | {
"start": 1195,
"end": 2072
class ____:
    """Fluent builder for a mocked Salesforce job-info HTTP response.

    Starts from the ``job_response`` template and lets tests override
    individual fields before producing an ``HttpResponse``.
    """

    def __init__(self) -> None:
        self._response = find_template("job_response", __file__)
        self._status_code = 200  # default to a successful response

    def with_id(self, id: str) -> "JobInfoResponseBuilder":
        self._response["id"] = id
        return self

    def with_state(self, state: str) -> "JobInfoResponseBuilder":
        self._response["state"] = state
        return self

    def with_status_code(self, status_code: int) -> "JobInfoResponseBuilder":
        self._status_code = status_code
        return self

    def with_error_message(self, error_message: str) -> "JobInfoResponseBuilder":
        self._response["errorMessage"] = error_message
        return self

    def get_response(self) -> any:
        # NOTE(review): the annotation uses the builtin ``any``;
        # ``typing.Any`` (or ``dict``) was probably intended — confirm.
        return self._response

    def build(self) -> HttpResponse:
        return HttpResponse(json.dumps(self._response), self._status_code)
python | google__jax | jax/experimental/sparse/bcoo.py | {
"start": 5247,
"end": 5339
class ____(NamedTuple):
    """Static dimension partition of a BCOO sparse array.

    Splits the array's rank into batch, sparse, and dense dimension counts,
    plus ``nse`` — presumably the number of specified (stored) elements;
    confirm against the BCOO documentation.
    """

    n_batch: int  # number of leading batch dimensions
    n_sparse: int  # number of sparse dimensions
    n_dense: int  # number of trailing dense dimensions
    nse: int  # number of specified elements
| BCOOProperties |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass6.py | {
"start": 127,
"end": 308
} | class ____(str, Enum):
bar = "bar"
for member in Foo:
reveal_type(member, expected_text="Foo")
foo_members = list(Foo)
reveal_type(foo_members, expected_text="list[Foo]")
| Foo |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 190203,
"end": 192012
} | class ____:
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
| TestFancyIndexing |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/beta.py | {
"start": 5342,
"end": 6047
} | class ____:
def __init__(self, beta: AsyncBeta) -> None:
self._beta = beta
@cached_property
def models(self) -> AsyncModelsWithStreamingResponse:
return AsyncModelsWithStreamingResponse(self._beta.models)
@cached_property
def messages(self) -> AsyncMessagesWithStreamingResponse:
return AsyncMessagesWithStreamingResponse(self._beta.messages)
@cached_property
def files(self) -> AsyncFilesWithStreamingResponse:
return AsyncFilesWithStreamingResponse(self._beta.files)
@cached_property
def skills(self) -> AsyncSkillsWithStreamingResponse:
return AsyncSkillsWithStreamingResponse(self._beta.skills)
| AsyncBetaWithStreamingResponse |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 4933,
"end": 5071
} | class ____(Web3Exception):
"""
Raised when there are insufficient data points to
complete a calculation
"""
| InsufficientData |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_events_meta.py | {
"start": 853,
"end": 11430
} | class ____(
APITestCase,
MetricsEnhancedPerformanceTestCase,
SearchIssueTestMixin,
SpanTestCase,
OurLogTestCase,
):
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1)
self.login_as(user=self.user)
self.project = self.create_project()
self.url = reverse(
"sentry-api-0-organization-events-meta",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
self.features = {"organizations:discover-basic": True}
def test_simple(self) -> None:
self.store_event(data={"timestamp": self.min_ago.isoformat()}, project_id=self.project.id)
with self.feature(self.features):
response = self.client.get(self.url, format="json")
assert response.status_code == 200, response.content
assert response.data["count"] == 1
def test_spans_dataset(self) -> None:
self.store_spans([self.create_span(start_ts=self.min_ago)], is_eap=True)
with self.feature(self.features):
response = self.client.get(self.url, format="json", data={"dataset": "spans"})
assert response.status_code == 200, response.content
assert response.data["count"] == 1
def test_logs_dataset(self) -> None:
self.store_ourlogs(
[
self.create_ourlog(
{"body": "foo"},
timestamp=self.min_ago,
),
self.create_ourlog(
{"body": "bar"},
timestamp=self.min_ago,
),
]
)
with self.feature(self.features):
response = self.client.get(self.url, format="json", data={"dataset": "logs"})
assert response.status_code == 200, response.content
assert response.data["count"] == 2
def test_search(self) -> None:
self.store_event(
data={"timestamp": self.min_ago.isoformat(), "message": "how to make fast"},
project_id=self.project.id,
)
self.store_event(
data={"timestamp": self.min_ago.isoformat(), "message": "Delete the Data"},
project_id=self.project.id,
)
with self.feature(self.features):
response = self.client.get(self.url, {"query": "delete"}, format="json")
assert response.status_code == 200, response.content
assert response.data["count"] == 1
def test_custom_measurements_query_uses_units(self) -> None:
self.store_transaction_metric(
33,
metric="measurements.custom",
internal_metric="d:transactions/measurements.custom@second",
entity="metrics_distributions",
tags={"transaction": "foo_transaction"},
timestamp=self.min_ago,
)
data = load_data("transaction", timestamp=self.min_ago)
data["measurements"] = {
"custom": {"value": 0.199, "unit": "second"},
}
self.store_event(data, self.project.id)
data = load_data("transaction", timestamp=self.min_ago)
data["measurements"] = {
"custom": {"value": 0.201, "unit": "second"},
}
self.store_event(data, self.project.id)
url = reverse(
"sentry-api-0-organization-events-meta",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
features = {
"organizations:discover-basic": True,
"organizations:performance-use-metrics": True,
}
for dataset in ["discover", "transactions"]:
query = {
"field": ["measurements.custom"],
"query": "measurements.custom:>200",
"dataset": dataset,
}
with self.feature(features):
response = self.client.get(url, query, format="json")
assert response.status_code == 200, response.content
assert response.data["count"] == 1
def test_invalid_query(self) -> None:
with self.feature(self.features):
response = self.client.get(
self.url, {"query": "is:unresolved priority:[high, medium]"}, format="json"
)
assert response.status_code == 400, response.content
def test_no_projects(self) -> None:
no_project_org = self.create_organization(owner=self.user)
url = reverse(
"sentry-api-0-organization-events-meta",
kwargs={"organization_id_or_slug": no_project_org.slug},
)
with self.feature(self.features):
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["count"] == 0
def test_transaction_event(self) -> None:
data = {
"event_id": "a" * 32,
"type": "transaction",
"transaction": "api.issue.delete",
"spans": [],
"contexts": {"trace": {"op": "foobar", "trace_id": "a" * 32, "span_id": "a" * 16}},
"tags": {"important": "yes"},
"timestamp": before_now(minutes=1).isoformat(),
"start_timestamp": before_now(minutes=1, seconds=3).isoformat(),
}
self.store_event(data=data, project_id=self.project.id)
url = reverse(
"sentry-api-0-organization-events-meta",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
with self.feature(self.features):
response = self.client.get(url, {"query": "transaction.duration:>1"}, format="json")
assert response.status_code == 200, response.content
assert response.data["count"] == 1
def test_generic_event(self) -> None:
"""Test that the issuePlatform dataset returns data for a generic issue's short ID"""
_, _, group_info = self.store_search_issue(
self.project.id,
self.user.id,
[f"{ProfileFileIOGroupType.type_id}-group1"],
"prod",
before_now(hours=1),
)
assert group_info is not None
url = reverse(
"sentry-api-0-organization-events-meta",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
with self.feature(self.features):
response = self.client.get(
url,
{
"query": f"issue:{group_info.group.qualified_short_id}",
"dataset": "issuePlatform",
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["count"] == 1
def test_errors_dataset_event(self) -> None:
"""Test that the errors dataset returns data for an issue's short ID"""
group_1 = self.store_event(
data={"timestamp": self.min_ago.isoformat()}, project_id=self.project.id
).group
url = reverse(
"sentry-api-0-organization-events-meta",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
with self.feature(self.features):
response = self.client.get(
url,
{
"query": f"issue:{group_1.qualified_short_id} is:unresolved",
"dataset": "errors",
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["count"] == 1
def test_transaction_event_with_last_seen(self) -> None:
data = {
"event_id": "a" * 32,
"type": "transaction",
"transaction": "api.issue.delete",
"spans": [],
"contexts": {"trace": {"op": "foobar", "trace_id": "a" * 32, "span_id": "a" * 16}},
"tags": {"important": "yes"},
"timestamp": before_now(minutes=1).isoformat(),
"start_timestamp": before_now(minutes=1, seconds=3).isoformat(),
}
self.store_event(data=data, project_id=self.project.id)
with self.feature(self.features):
response = self.client.get(
self.url, {"query": "event.type:transaction last_seen():>2012-12-31"}, format="json"
)
assert response.status_code == 200, response.content
assert response.data["count"] == 1
def test_out_of_retention(self) -> None:
with self.feature(self.features):
with self.options({"system.event-retention-days": 10}):
response = self.client.get(
self.url,
format="json",
data={
"start": before_now(days=20).isoformat(),
"end": before_now(days=15).isoformat(),
},
)
assert response.status_code == 400
@mock.patch("sentry.search.events.builder.base.raw_snql_query")
def test_handling_snuba_errors(self, mock_snql_query: mock.MagicMock) -> None:
mock_snql_query.side_effect = ParseError("test")
with self.feature(self.features):
response = self.client.get(self.url, format="json")
assert response.status_code == 400, response.content
@mock.patch("sentry.utils.snuba.quantize_time")
def test_quantize_dates(self, mock_quantize: mock.MagicMock) -> None:
mock_quantize.return_value = before_now(days=1)
with self.feature(self.features):
# Don't quantize short time periods
self.client.get(
self.url,
format="json",
data={"statsPeriod": "1h", "query": "", "field": ["id", "timestamp"]},
)
# Don't quantize absolute date periods
self.client.get(
self.url,
format="json",
data={
"start": before_now(days=20).isoformat(),
"end": before_now(days=15).isoformat(),
"query": "",
"field": ["id", "timestamp"],
},
)
assert len(mock_quantize.mock_calls) == 0
# Quantize long date periods
self.client.get(
self.url,
format="json",
data={"field": ["id", "timestamp"], "statsPeriod": "90d", "query": ""},
)
assert len(mock_quantize.mock_calls) == 2
| OrganizationEventsMetaEndpoint |
python | numba__numba | numba/tests/test_interpreter.py | {
"start": 29481,
"end": 30293
} | class ____(MemoryLeakMixin, TestCase):
"""Test `fn(pos_arg0, pos_arg1, *args)` where args is a non-tuple iterable.
Python 3.9+ will generate LIST_EXTEND bytecode to combine the positional
arguments with the `*args`.
See #8059
NOTE: At the moment, there are no meaningful tests for NoPython because the
lack of support for `tuple(iterable)` for most iterable types.
"""
def test_list_extend_forceobj(self):
def consumer(*x):
return x
@jit(forceobj=True)
def foo(x):
return consumer(1, 2, *x)
got = foo("ijo")
expect = foo.py_func("ijo")
self.assertEqual(got, (1, 2, "i", "j", "o"))
self.assertEqual(got, expect)
if __name__ == "__main__":
unittest.main()
| TestListExtendInStarArgNonTupleIterable |
python | hynek__structlog | src/structlog/dev.py | {
"start": 5870,
"end": 6364
} | class ____:
"""
A column defines the way a key-value pair is formatted, and, by it's
position to the *columns* argument of `ConsoleRenderer`, the order in which
it is rendered.
Args:
key:
The key for which this column is responsible. Leave empty to define
it as the default formatter.
formatter: The formatter for columns with *key*.
.. versionadded:: 23.3.0
"""
key: str
formatter: ColumnFormatter
@dataclass
| Column |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 2840,
"end": 4405
} | class ____(ContractDataError):
"""
Raised when calling a contract method with the wrong number of arguments.
"""
def __init__(
self,
arguments_length: int,
inputs: Union["MethodABI", "ConstructorABI", int, list, None] = None,
**kwargs,
):
prefix = (
f"The number of the given arguments ({arguments_length}) "
f"do not match what is defined in the ABI"
)
if inputs is None:
super().__init__(f"{prefix}.")
return
inputs_ls: list[Union[MethodABI, ConstructorABI, int]] = (
inputs if isinstance(inputs, list) else [inputs]
)
if not inputs_ls:
suffix = ""
elif any(not isinstance(x, int) for x in inputs_ls):
# Handle ABI arguments
parts = ""
for idx, ipt in enumerate(inputs_ls):
if isinstance(ipt, int):
part = f"{ipt}"
else:
# Signature without outputs.
input_args = ", ".join(i.signature for i in ipt.inputs)
part = f"{getattr(ipt, 'name', '__init__')}({input_args})"
parts = f"{parts}\n\t{part}"
suffix = f":\n{parts}"
else:
# Was only given integers.
options = ", ".join([str(x) for x in inputs_ls])
one_of = "one of " if len(inputs_ls) > 1 else ""
suffix = f" ({one_of}{options})"
super().__init__(f"{prefix}{suffix}")
| ArgumentsLengthError |
python | PyCQA__pylint | tests/functional/a/access/access_to__name__.py | {
"start": 156,
"end": 296
} | class ____:
"""old class"""
def __init__(self):
print(self.__name__) # [no-member]
print(self.__class__.__name__)
| Aaaa |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/hybrid.py | {
"start": 47594,
"end": 61100
} | class ____(interfaces.InspectionAttrInfo, ORMDescriptor[_T]):
"""A decorator which allows definition of a Python descriptor with both
instance-level and class-level behavior.
"""
is_attribute = True
extension_type = HybridExtensionType.HYBRID_PROPERTY
__name__: str
def __init__(
self,
fget: _HybridGetterType[_T],
fset: Optional[_HybridSetterType[_T]] = None,
fdel: Optional[_HybridDeleterType[_T]] = None,
expr: Optional[_HybridExprCallableType[_T]] = None,
custom_comparator: Optional[Comparator[_T]] = None,
update_expr: Optional[_HybridUpdaterType[_T]] = None,
bulk_dml_setter: Optional[_HybridBulkDMLType[_T]] = None,
):
"""Create a new :class:`.hybrid_property`.
Usage is typically via decorator::
from sqlalchemy.ext.hybrid import hybrid_property
class SomeClass:
@hybrid_property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
"""
self.fget = fget
self.fset = fset
self.fdel = fdel
self.expr = _unwrap_classmethod(expr)
self.custom_comparator = _unwrap_classmethod(custom_comparator)
self.update_expr = _unwrap_classmethod(update_expr)
self.bulk_dml_setter = _unwrap_classmethod(bulk_dml_setter)
util.update_wrapper(self, fget) # type: ignore[arg-type]
@overload
def __get__(self, instance: Any, owner: Literal[None]) -> Self: ...
@overload
def __get__(
self, instance: Literal[None], owner: Type[object]
) -> _HybridClassLevelAccessor[_T]: ...
@overload
def __get__(self, instance: object, owner: Type[object]) -> _T: ...
def __get__(
self, instance: Optional[object], owner: Optional[Type[object]]
) -> Union[hybrid_property[_T], _HybridClassLevelAccessor[_T], _T]:
if owner is None:
return self
elif instance is None:
return self._expr_comparator(owner)
else:
return self.fget(instance)
def __set__(
self, instance: object, value: Union[SQLCoreOperations[_T], _T]
) -> None:
if self.fset is None:
raise AttributeError("can't set attribute")
self.fset(instance, value) # type: ignore[arg-type]
def __delete__(self, instance: object) -> None:
if self.fdel is None:
raise AttributeError("can't delete attribute")
self.fdel(instance)
def _copy(self, **kw: Any) -> hybrid_property[_T]:
defaults = {
key: value
for key, value in self.__dict__.items()
if not key.startswith("_")
}
defaults.update(**kw)
return type(self)(**defaults)
@property
def overrides(self) -> Self:
"""Prefix for a method that is overriding an existing attribute.
The :attr:`.hybrid_property.overrides` accessor just returns
this hybrid object, which when called at the class level from
a parent class, will de-reference the "instrumented attribute"
normally returned at this level, and allow modifying decorators
like :meth:`.hybrid_property.expression` and
:meth:`.hybrid_property.comparator`
to be used without conflicting with the same-named attributes
normally present on the :class:`.QueryableAttribute`::
class SuperClass:
# ...
@hybrid_property
def foobar(self):
return self._foobar
class SubClass(SuperClass):
# ...
@SuperClass.foobar.overrides.expression
def foobar(cls):
return func.subfoobar(self._foobar)
.. seealso::
:ref:`hybrid_reuse_subclass`
"""
return self
class _InPlace(Generic[_TE]):
"""A builder helper for .hybrid_property.
.. versionadded:: 2.0.4
"""
__slots__ = ("attr",)
def __init__(self, attr: hybrid_property[_TE]):
self.attr = attr
def _set(self, **kw: Any) -> hybrid_property[_TE]:
for k, v in kw.items():
setattr(self.attr, k, _unwrap_classmethod(v))
return self.attr
def getter(self, fget: _HybridGetterType[_TE]) -> hybrid_property[_TE]:
return self._set(fget=fget)
def setter(self, fset: _HybridSetterType[_TE]) -> hybrid_property[_TE]:
return self._set(fset=fset)
def deleter(
self, fdel: _HybridDeleterType[_TE]
) -> hybrid_property[_TE]:
return self._set(fdel=fdel)
def expression(
self, expr: _HybridExprCallableType[_TE]
) -> hybrid_property[_TE]:
return self._set(expr=expr)
def comparator(
self, comparator: _HybridComparatorCallableType[_TE]
) -> hybrid_property[_TE]:
return self._set(custom_comparator=comparator)
def update_expression(
self, meth: _HybridUpdaterType[_TE]
) -> hybrid_property[_TE]:
return self._set(update_expr=meth)
def bulk_dml(
self, meth: _HybridBulkDMLType[_TE]
) -> hybrid_property[_TE]:
return self._set(bulk_dml_setter=meth)
@property
def inplace(self) -> _InPlace[_T]:
"""Return the inplace mutator for this :class:`.hybrid_property`.
This is to allow in-place mutation of the hybrid, allowing the first
hybrid method of a certain name to be re-used in order to add
more methods without having to name those methods the same, e.g.::
class Interval(Base):
# ...
@hybrid_property
def radius(self) -> float:
return abs(self.length) / 2
@radius.inplace.setter
def _radius_setter(self, value: float) -> None:
self.length = value * 2
@radius.inplace.expression
def _radius_expression(cls) -> ColumnElement[float]:
return type_coerce(func.abs(cls.length) / 2, Float)
.. versionadded:: 2.0.4
.. seealso::
:ref:`hybrid_pep484_naming`
"""
return hybrid_property._InPlace(self)
def getter(self, fget: _HybridGetterType[_T]) -> hybrid_property[_T]:
"""Provide a modifying decorator that defines a getter method."""
return self._copy(fget=fget)
def setter(self, fset: _HybridSetterType[_T]) -> hybrid_property[_T]:
"""Provide a modifying decorator that defines a setter method."""
return self._copy(fset=fset)
def deleter(self, fdel: _HybridDeleterType[_T]) -> hybrid_property[_T]:
"""Provide a modifying decorator that defines a deletion method."""
return self._copy(fdel=fdel)
def expression(
self, expr: _HybridExprCallableType[_T]
) -> hybrid_property[_T]:
"""Provide a modifying decorator that defines a SQL-expression
producing method.
When a hybrid is invoked at the class level, the SQL expression given
here is wrapped inside of a specialized :class:`.QueryableAttribute`,
which is the same kind of object used by the ORM to represent other
mapped attributes. The reason for this is so that other class-level
attributes such as docstrings and a reference to the hybrid itself may
be maintained within the structure that's returned, without any
modifications to the original SQL expression passed in.
.. note::
When referring to a hybrid property from an owning class (e.g.
``SomeClass.some_hybrid``), an instance of
:class:`.QueryableAttribute` is returned, representing the
expression or comparator object as well as this hybrid object.
However, that object itself has accessors called ``expression`` and
``comparator``; so when attempting to override these decorators on a
subclass, it may be necessary to qualify it using the
:attr:`.hybrid_property.overrides` modifier first. See that
modifier for details.
.. seealso::
:ref:`hybrid_distinct_expression`
"""
return self._copy(expr=expr)
def comparator(
self, comparator: _HybridComparatorCallableType[_T]
) -> hybrid_property[_T]:
"""Provide a modifying decorator that defines a custom
comparator producing method.
The return value of the decorated method should be an instance of
:class:`~.hybrid.Comparator`.
.. note:: The :meth:`.hybrid_property.comparator` decorator
**replaces** the use of the :meth:`.hybrid_property.expression`
decorator. They cannot be used together.
When a hybrid is invoked at the class level, the
:class:`~.hybrid.Comparator` object given here is wrapped inside of a
specialized :class:`.QueryableAttribute`, which is the same kind of
object used by the ORM to represent other mapped attributes. The
reason for this is so that other class-level attributes such as
docstrings and a reference to the hybrid itself may be maintained
within the structure that's returned, without any modifications to the
original comparator object passed in.
.. note::
When referring to a hybrid property from an owning class (e.g.
``SomeClass.some_hybrid``), an instance of
:class:`.QueryableAttribute` is returned, representing the
expression or comparator object as this hybrid object. However,
that object itself has accessors called ``expression`` and
``comparator``; so when attempting to override these decorators on a
subclass, it may be necessary to qualify it using the
:attr:`.hybrid_property.overrides` modifier first. See that
modifier for details.
"""
return self._copy(custom_comparator=comparator)
def update_expression(
self, meth: _HybridUpdaterType[_T]
) -> hybrid_property[_T]:
"""Provide a modifying decorator that defines an UPDATE tuple
producing method.
The method accepts a single value, which is the value to be
rendered into the SET clause of an UPDATE statement. The method
should then process this value into individual column expressions
that fit into the ultimate SET clause, and return them as a
sequence of 2-tuples. Each tuple
contains a column expression as the key and a value to be rendered.
E.g.::
class Person(Base):
# ...
first_name = Column(String)
last_name = Column(String)
@hybrid_property
def fullname(self):
return first_name + " " + last_name
@fullname.update_expression
def fullname(cls, value):
fname, lname = value.split(" ", 1)
return [(cls.first_name, fname), (cls.last_name, lname)]
"""
return self._copy(update_expr=meth)
def bulk_dml(self, meth: _HybridBulkDMLType[_T]) -> hybrid_property[_T]:
"""Define a setter for bulk dml.
.. versionadded:: 2.1
"""
return self._copy(bulk_dml=meth)
@util.memoized_property
def _expr_comparator(
self,
) -> Callable[[Any], _HybridClassLevelAccessor[_T]]:
if self.custom_comparator is not None:
return self._get_comparator(self.custom_comparator)
elif self.expr is not None:
return self._get_expr(self.expr)
else:
return self._get_expr(cast(_HybridExprCallableType[_T], self.fget))
def _get_expr(
self, expr: _HybridExprCallableType[_T]
) -> Callable[[Any], _HybridClassLevelAccessor[_T]]:
def _expr(cls: Any) -> ExprComparator[_T]:
return ExprComparator(cls, expr(cls), self)
util.update_wrapper(_expr, expr)
return self._get_comparator(_expr)
def _get_comparator(
self, comparator: Any
) -> Callable[[Any], _HybridClassLevelAccessor[_T]]:
proxy_attr = attributes._create_proxied_attribute(self)
def expr_comparator(
owner: Type[object],
) -> _HybridClassLevelAccessor[_T]:
# because this is the descriptor protocol, we don't really know
# what our attribute name is. so search for it through the
# MRO.
for lookup in owner.__mro__:
if self.__name__ in lookup.__dict__:
if lookup.__dict__[self.__name__] is self:
name = self.__name__
break
else:
name = attributes._UNKNOWN_ATTR_KEY # type: ignore[assignment]
return cast(
"_HybridClassLevelAccessor[_T]",
proxy_attr(
owner,
name,
self,
comparator(owner),
doc=comparator.__doc__ or self.__doc__,
),
)
return expr_comparator
| hybrid_property |
python | google__jax | jax/_src/core.py | {
"start": 99788,
"end": 106459
} | class ____(Exception):
"""Raised when we cannot conclusively compute with symbolic dimensions."""
def is_symbolic_dim(v: Any) -> bool:
"""Checks if a value is a symbolic dimension used for shape polymorphism.
This should be used very rarely, because symbolic dimensions overload all
operators, and should just work.
"""
return hasattr(v, "dimension_as_value")
def is_constant_dim(d: DimSize) -> bool:
# Whether the dimension is a static integer constant.
# Try using a fast path for non-concrete Tracers.
if isinstance(d, Tracer) and not is_concrete(d):
return False
try:
operator.index(d)
return True
except:
return False
def is_dim(v: Any) -> bool:
return is_symbolic_dim(v) or is_constant_dim(v)
def is_constant_shape(s: Shape) -> bool:
# Whether the shape is a static constant.
return all(is_constant_dim(d) for d in s)
def definitely_equal_one_of_dim(d1: DimSize, dlist: Sequence[DimSize]) -> bool:
return any(definitely_equal(d1, d) for d in dlist)
def definitely_equal_shape(s1: Shape, s2: Shape) -> bool:
"""Check that two shapes are guaranteed to be element-wise equal.
In presence of dynamic shapes may return False even when the shapes may
be equal at runtime.
"""
return (len(s1) == len(s2) and
all(unsafe_map(definitely_equal, s1, s2)))
def divide_shape_sizes(s1: Shape, s2: Shape) -> DimSize:
"""Returns an integer "i" s.t., i * size(s2) == size(s1).
Raises InconclusiveDimensionOperation if there is no such integer."""
sz1 = math.prod(s1)
sz2 = math.prod(s2)
if definitely_equal(sz1, sz2): # Takes care of sz1 and sz2 being 0
return 1
q, r = divmod(sz1, sz2)
if isinstance(r, Tracer) or r != 0:
raise InconclusiveDimensionOperation(
f"Cannot divide evenly the sizes of shapes {tuple(s1)} and {tuple(s2)}. "
f"The remainder {r} should be 0.")
return q
def cancel_divide_tracers(num, denom):
partition = lambda l: partition_list([isinstance(d, Tracer) for d in l], l)
num, num_tracers = partition(num)
denom, denom_tracers = partition(denom)
if num_tracers or denom_tracers:
factor = _cancel_divide(num_tracers, denom_tracers)
if factor is not None:
size1 = math.prod(num)
size2 = math.prod(denom)
if size1 == size2 or size2 != 0:
return factor * (size1 // size2 if size1 != size2 else 1)
def _cancel_divide(num, denom):
num = list(num)
for a in denom:
i = next((i for i, b in enumerate(num) if definitely_equal(a, b)), None)
if i is None:
break # couldn't cancel
del num[i]
else:
return math.prod(num)
def is_empty_shape(s: Shape) -> bool:
return any(definitely_equal(d, 0) for d in s)
def dilate_dim(d: DimSize, dilation: DimSize) -> DimSize:
"""max(0, 1 + dilation * (d - 1)).
Assumes dilation >= 1.
"""
if definitely_equal(dilation, 1): # fast path
return d
return max_dim(1 + dilation * (d - 1), 0)
def stride_dim(d: DimSize, window_size: DimSize, window_stride: DimSize) -> DimSize:
"""max(0, (d - window_size) // window_stride + 1)
If d < window_size, returns 0.
We assume window_size >= 1 and window_stride >= 1.
"""
# If d < window_size then (d - window_size) // window_stride < 0
return max_dim((d - window_size) // window_stride + 1, 0)
def min_dim(d1: DimSize, d2: DimSize) -> DimSize:
"""Like min(d1, d2) but for both constant and symbolic dimensions."""
d1_is_constant = is_constant_dim(d1)
if d1_is_constant and is_constant_dim(d2):
return min(d1, d2)
d1 = concrete_dim_or_error(d1, "argument `d1` of `core.min_dim`")
d2 = concrete_dim_or_error(d2, "argument `d2` of `core.min_dim`")
if d1_is_constant:
return d2.rmin(d1)
else:
return d1.min(d2)
def max_dim(d1: DimSize, d2: DimSize) -> DimSize:
"""Like max(d1, d2) but for both constant and symbolic dimensions."""
d1_is_constant = is_constant_dim(d1)
if d1_is_constant and is_constant_dim(d2):
return max(d1, d2)
d1 = concrete_dim_or_error(d1, "argument `d1` of `core.max_dim`")
d2 = concrete_dim_or_error(d2, "argument `d2` of `core.max_dim`")
if d1_is_constant:
return d2.rmax(d1)
else:
return d1.max(d2)
def dimension_as_value(d: DimSize):
"""Turns a dimension size into a JAX array.
This is the identity function for constant dimensions.
Has the same abstract value as Python constants.
"""
if isinstance(d, (int, Tracer, np.int32, np.int64)): return d
# For shape_poly._DimPolynomial
if hasattr(d, "dimension_as_value"): return d.dimension_as_value()
return operator.index(d)
def canonicalize_slice(
s: slice,
axis_size: DimSize
) -> tuple[DimSize, DimSize, DimSize]:
"""Computes the start index, step, and size of the slice `x[s]`.
This is similar to `s.indices(axis_size)`, except that it returns
`(start, step, size)`, and it works when the slice and/or the
`axis_size` are symbolic.
See https://numpy.org/doc/stable/user/basics.indexing.html#slicing-and-striding
"""
def convert_to_index(d: DimSize) -> DimSize:
# Convert np.array and jax.Array to int, leave symbolic dimensions alone
try:
return operator.index(d)
except:
return d
# Must resolve statically if step is {<0, ==0, >0}
step = convert_to_index(s.step) if s.step is not None else 1
try:
if step == 0:
raise ValueError("slice step cannot be zero")
step_gt_0 = (step > 0)
except InconclusiveDimensionOperation as e:
raise InconclusiveDimensionOperation(
f"In slice with non-constant elements the step ({step}) must " +
f"be resolved statically if it is > 0 or < 0.\nDetails: {e}")
def clamp_index(i: DimSize, which: str):
try:
i_ge_0 = (i >= 0)
except InconclusiveDimensionOperation as e:
raise InconclusiveDimensionOperation(
f"In slice with non-constant elements the {which} ({i}) must " +
f"be resolved statically if it is >= 0.\nDetails: {e}")
if i_ge_0:
if step_gt_0:
return min_dim(axis_size, i)
else:
return min_dim(axis_size - 1, i)
else:
if step_gt_0:
return max_dim(0, axis_size + i)
else:
return max_dim(-1, axis_size + i)
if s.start is None:
start = 0 if step_gt_0 else axis_size - 1
else:
start = clamp_index(convert_to_index(s.start), "start")
if s.stop is None:
stop = axis_size if step_gt_0 else -1
else:
stop = clamp_index(convert_to_index(s.stop), "stop")
gap = step if step_gt_0 else - step
distance = (stop - start) if step_gt_0 else (start - stop)
slice_size = max_dim(0, distance + gap - 1) // gap
return start, step, slice_size
| InconclusiveDimensionOperation |
python | python-excel__xlrd | xlrd/formatting.py | {
"start": 11199,
"end": 39577
} | class ____(BaseObject, EqNeAttrs):
"""
"Number format" information from a ``FORMAT`` record.
.. versionadded:: 0.6.1
"""
#: The key into :attr:`~xlrd.book.Book.format_map`
format_key = 0
#: A classification that has been inferred from the format string.
#: Currently, this is used only to distinguish between numbers and dates.
#: Values::
#:
#: FUN = 0 # unknown
#: FDT = 1 # date
#: FNU = 2 # number
#: FGE = 3 # general
#: FTX = 4 # text
type = FUN
#: The format string
format_str = UNICODE_LITERAL('')
def __init__(self, format_key, ty, format_str):
self.format_key = format_key
self.type = ty
self.format_str = format_str
std_format_strings = {
# "std" == "standard for US English locale"
# #### TODO ... a lot of work to tailor these to the user's locale.
# See e.g. gnumeric-1.x.y/src/formats.c
0x00: "General",
0x01: "0",
0x02: "0.00",
0x03: "#,##0",
0x04: "#,##0.00",
0x05: "$#,##0_);($#,##0)",
0x06: "$#,##0_);[Red]($#,##0)",
0x07: "$#,##0.00_);($#,##0.00)",
0x08: "$#,##0.00_);[Red]($#,##0.00)",
0x09: "0%",
0x0a: "0.00%",
0x0b: "0.00E+00",
0x0c: "# ?/?",
0x0d: "# ??/??",
0x0e: "m/d/yy",
0x0f: "d-mmm-yy",
0x10: "d-mmm",
0x11: "mmm-yy",
0x12: "h:mm AM/PM",
0x13: "h:mm:ss AM/PM",
0x14: "h:mm",
0x15: "h:mm:ss",
0x16: "m/d/yy h:mm",
0x25: "#,##0_);(#,##0)",
0x26: "#,##0_);[Red](#,##0)",
0x27: "#,##0.00_);(#,##0.00)",
0x28: "#,##0.00_);[Red](#,##0.00)",
0x29: "_(* #,##0_);_(* (#,##0);_(* \"-\"_);_(@_)",
0x2a: "_($* #,##0_);_($* (#,##0);_($* \"-\"_);_(@_)",
0x2b: "_(* #,##0.00_);_(* (#,##0.00);_(* \"-\"??_);_(@_)",
0x2c: "_($* #,##0.00_);_($* (#,##0.00);_($* \"-\"??_);_(@_)",
0x2d: "mm:ss",
0x2e: "[h]:mm:ss",
0x2f: "mm:ss.0",
0x30: "##0.0E+0",
0x31: "@",
}
fmt_code_ranges = [ # both-inclusive ranges of "standard" format codes
# Source: the openoffice.org doc't
# and the OOXML spec Part 4, section 3.8.30
( 0, 0, FGE),
( 1, 13, FNU),
(14, 22, FDT),
(27, 36, FDT), # CJK date formats
(37, 44, FNU),
(45, 47, FDT),
(48, 48, FNU),
(49, 49, FTX),
# Gnumeric assumes (or assumed) that built-in formats finish at 49, not at 163
(50, 58, FDT), # CJK date formats
(59, 62, FNU), # Thai number (currency?) formats
(67, 70, FNU), # Thai number (currency?) formats
(71, 81, FDT), # Thai date formats
]
std_format_code_types = {}
for lo, hi, ty in fmt_code_ranges:
for x in xrange(lo, hi+1):
std_format_code_types[x] = ty
del lo, hi, ty, x
date_chars = UNICODE_LITERAL('ymdhs') # year, month/minute, day, hour, second
date_char_dict = {}
for _c in date_chars + date_chars.upper():
date_char_dict[_c] = 5
del _c, date_chars
skip_char_dict = {}
for _c in UNICODE_LITERAL('$-+/(): '):
skip_char_dict[_c] = 1
num_char_dict = {
UNICODE_LITERAL('0'): 5,
UNICODE_LITERAL('#'): 5,
UNICODE_LITERAL('?'): 5,
}
non_date_formats = {
UNICODE_LITERAL('0.00E+00'):1,
UNICODE_LITERAL('##0.0E+0'):1,
UNICODE_LITERAL('General') :1,
UNICODE_LITERAL('GENERAL') :1, # OOo Calc 1.1.4 does this.
UNICODE_LITERAL('general') :1, # pyExcelerator 0.6.3 does this.
UNICODE_LITERAL('@') :1,
}
fmt_bracketed_sub = re.compile(r'\[[^]]*\]').sub
# Boolean format strings (actual cases)
# '"Yes";"Yes";"No"'
# '"True";"True";"False"'
# '"On";"On";"Off"'
def is_date_format_string(book, fmt):
# Heuristics:
# Ignore "text" and [stuff in square brackets (aarrgghh -- see below)].
# Handle backslashed-escaped chars properly.
# E.g. hh\hmm\mss\s should produce a display like 23h59m59s
# Date formats have one or more of ymdhs (caseless) in them.
# Numeric formats have # and 0.
# N.B. 'General"."' hence get rid of "text" first.
# TODO: Find where formats are interpreted in Gnumeric
# TODO: '[h]\\ \\h\\o\\u\\r\\s' ([h] means don't care about hours > 23)
state = 0
s = ''
for c in fmt:
if state == 0:
if c == UNICODE_LITERAL('"'):
state = 1
elif c in UNICODE_LITERAL(r"\_*"):
state = 2
elif c in skip_char_dict:
pass
else:
s += c
elif state == 1:
if c == UNICODE_LITERAL('"'):
state = 0
elif state == 2:
# Ignore char after backslash, underscore or asterisk
state = 0
assert 0 <= state <= 2
if book.verbosity >= 4:
print("is_date_format_string: reduced format is %s" % REPR(s), file=book.logfile)
s = fmt_bracketed_sub('', s)
if s in non_date_formats:
return False
state = 0
separator = ";"
got_sep = 0
date_count = num_count = 0
for c in s:
if c in date_char_dict:
date_count += date_char_dict[c]
elif c in num_char_dict:
num_count += num_char_dict[c]
elif c == separator:
got_sep = 1
# print num_count, date_count, repr(fmt)
if date_count and not num_count:
return True
if num_count and not date_count:
return False
if date_count:
if book.verbosity:
fprintf(book.logfile,
'WARNING *** is_date_format: ambiguous d=%d n=%d fmt=%r\n',
date_count, num_count, fmt)
elif not got_sep:
if book.verbosity:
fprintf(book.logfile,
"WARNING *** format %r produces constant result\n",
fmt)
return date_count > num_count
def handle_format(self, data, rectype=XL_FORMAT):
DEBUG = 0
bv = self.biff_version
if rectype == XL_FORMAT2:
bv = min(bv, 30)
if not self.encoding:
self.derive_encoding()
strpos = 2
if bv >= 50:
fmtkey = unpack('<H', data[0:2])[0]
else:
fmtkey = self.actualfmtcount
if bv <= 30:
strpos = 0
self.actualfmtcount += 1
if bv >= 80:
unistrg = unpack_unicode(data, 2)
else:
unistrg = unpack_string(data, strpos, self.encoding, lenlen=1)
blah = DEBUG or self.verbosity >= 3
if blah:
fprintf(self.logfile,
"FORMAT: count=%d fmtkey=0x%04x (%d) s=%r\n",
self.actualfmtcount, fmtkey, fmtkey, unistrg)
is_date_s = self.is_date_format_string(unistrg)
ty = [FGE, FDT][is_date_s]
if not(fmtkey > 163 or bv < 50):
# user_defined if fmtkey > 163
# N.B. Gnumeric incorrectly starts these at 50 instead of 164 :-(
# if earlier than BIFF 5, standard info is useless
std_ty = std_format_code_types.get(fmtkey, FUN)
# print "std ty", std_ty
is_date_c = std_ty == FDT
if self.verbosity and 0 < fmtkey < 50 and (is_date_c ^ is_date_s):
DEBUG = 2
fprintf(self.logfile,
"WARNING *** Conflict between "
"std format key %d and its format string %r\n",
fmtkey, unistrg)
if DEBUG == 2:
fprintf(self.logfile,
"ty: %d; is_date_c: %r; is_date_s: %r; fmt_strg: %r",
ty, is_date_c, is_date_s, unistrg)
fmtobj = Format(fmtkey, ty, unistrg)
if blah:
fmtobj.dump(self.logfile,
header="--- handle_format [%d] ---" % (self.actualfmtcount-1, ))
self.format_map[fmtkey] = fmtobj
self.format_list.append(fmtobj)
# =============================================================================
def handle_palette(book, data):
if not book.formatting_info:
return
blah = DEBUG or book.verbosity >= 2
n_colours, = unpack('<H', data[:2])
expected_n_colours = (16, 56)[book.biff_version >= 50]
if (DEBUG or book.verbosity >= 1) and n_colours != expected_n_colours:
fprintf(book.logfile,
"NOTE *** Expected %d colours in PALETTE record, found %d\n",
expected_n_colours, n_colours)
elif blah:
fprintf(book.logfile,
"PALETTE record with %d colours\n", n_colours)
fmt = '<xx%di' % n_colours # use i to avoid long integers
expected_size = 4 * n_colours + 2
actual_size = len(data)
tolerance = 4
if not expected_size <= actual_size <= expected_size + tolerance:
raise XLRDError('PALETTE record: expected size %d, actual size %d' % (expected_size, actual_size))
colours = unpack(fmt, data[:expected_size])
assert book.palette_record == [] # There should be only 1 PALETTE record
# a colour will be 0xbbggrr
# IOW, red is at the little end
for i in xrange(n_colours):
c = colours[i]
red = c & 0xff
green = (c >> 8) & 0xff
blue = (c >> 16) & 0xff
old_rgb = book.colour_map[8+i]
new_rgb = (red, green, blue)
book.palette_record.append(new_rgb)
book.colour_map[8+i] = new_rgb
if blah:
if new_rgb != old_rgb:
print("%2d: %r -> %r" % (i, old_rgb, new_rgb), file=book.logfile)
def palette_epilogue(book):
# Check colour indexes in fonts etc.
# This must be done here as FONT records
# come *before* the PALETTE record :-(
for font in book.font_list:
if font.font_index == 4: # the missing font record
continue
cx = font.colour_index
if cx == 0x7fff: # system window text colour
continue
if cx in book.colour_map:
book.colour_indexes_used[cx] = 1
elif book.verbosity:
print("Size of colour table:", len(book.colour_map), file=book.logfile)
fprintf(book.logfile, "*** Font #%d (%r): colour index 0x%04x is unknown\n",
font.font_index, font.name, cx)
if book.verbosity >= 1:
used = sorted(book.colour_indexes_used.keys())
print("\nColour indexes used:\n%r\n" % used, file=book.logfile)
def handle_style(book, data):
if not book.formatting_info:
return
blah = DEBUG or book.verbosity >= 2
bv = book.biff_version
flag_and_xfx, built_in_id, level = unpack('<HBB', data[:4])
xf_index = flag_and_xfx & 0x0fff
if data == b"\0\0\0\0" and "Normal" not in book.style_name_map:
# Erroneous record (doesn't have built-in bit set).
# Example file supplied by Jeff Bell.
built_in = 1
built_in_id = 0
xf_index = 0
name = "Normal"
level = 255
elif flag_and_xfx & 0x8000:
# built-in style
built_in = 1
name = built_in_style_names[built_in_id]
if 1 <= built_in_id <= 2:
name += str(level + 1)
else:
# user-defined style
built_in = 0
built_in_id = 0
level = 0
if bv >= 80:
try:
name = unpack_unicode(data, 2, lenlen=2)
except UnicodeDecodeError:
print("STYLE: built_in=%d xf_index=%d built_in_id=%d level=%d"
% (built_in, xf_index, built_in_id, level), file=book.logfile)
print("raw bytes:", repr(data[2:]), file=book.logfile)
raise
else:
name = unpack_string(data, 2, book.encoding, lenlen=1)
if blah and not name:
print("WARNING *** A user-defined style has a zero-length name", file=book.logfile)
book.style_name_map[name] = (built_in, xf_index)
if blah:
fprintf(book.logfile, "STYLE: built_in=%d xf_index=%d built_in_id=%d level=%d name=%r\n",
built_in, xf_index, built_in_id, level, name)
def check_colour_indexes_in_obj(book, obj, orig_index):
alist = sorted(obj.__dict__.items())
for attr, nobj in alist:
if hasattr(nobj, 'dump'):
check_colour_indexes_in_obj(book, nobj, orig_index)
elif attr.find('colour_index') >= 0:
if nobj in book.colour_map:
book.colour_indexes_used[nobj] = 1
continue
oname = obj.__class__.__name__
print("*** xf #%d : %s.%s = 0x%04x (unknown)"
% (orig_index, oname, attr, nobj), file=book.logfile)
def fill_in_standard_formats(book):
for x in std_format_code_types.keys():
if x not in book.format_map:
ty = std_format_code_types[x]
# Note: many standard format codes (mostly CJK date formats) have
# format strings that vary by locale; xlrd does not (yet)
# handle those; the type (date or numeric) is recorded but the fmt_str will be None.
fmt_str = std_format_strings.get(x)
fmtobj = Format(x, ty, fmt_str)
book.format_map[x] = fmtobj
def handle_xf(self, data):
# self is a Book instance
# DEBUG = 0
blah = DEBUG or self.verbosity >= 3
bv = self.biff_version
xf = XF()
xf.alignment = XFAlignment()
xf.alignment.indent_level = 0
xf.alignment.shrink_to_fit = 0
xf.alignment.text_direction = 0
xf.border = XFBorder()
xf.border.diag_up = 0
xf.border.diag_down = 0
xf.border.diag_colour_index = 0
xf.border.diag_line_style = 0 # no line
xf.background = XFBackground()
xf.protection = XFProtection()
# fill in the known standard formats
if bv >= 50 and not self.xfcount:
# i.e. do this once before we process the first XF record
fill_in_standard_formats(self)
if bv >= 80:
unpack_fmt = '<HHHBBBBIiH'
(
xf.font_index, xf.format_key, pkd_type_par,
pkd_align1, xf.alignment.rotation, pkd_align2,
pkd_used, pkd_brdbkg1, pkd_brdbkg2, pkd_brdbkg3,
) = unpack(unpack_fmt, data[0:20])
upkbits(xf.protection, pkd_type_par, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_par, (
(2, 0x0004, 'is_style'),
# Following is not in OOo docs, but is mentioned
# in Gnumeric source and also in (deep breath)
# org.apache.poi.hssf.record.ExtendedFormatRecord.java
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
(4, 0xFFF0, 'parent_style_index'),
))
upkbits(xf.alignment, pkd_align1, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
(4, 0x70, 'vert_align'),
))
upkbits(xf.alignment, pkd_align2, (
(0, 0x0f, 'indent_level'),
(4, 0x10, 'shrink_to_fit'),
(6, 0xC0, 'text_direction'),
))
reg = pkd_used >> 2
attr_stems = [
'format',
'font',
'alignment',
'border',
'background',
'protection',
]
for attr_stem in attr_stems:
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbitsL(xf.border, pkd_brdbkg1, (
(0, 0x0000000f, 'left_line_style'),
(4, 0x000000f0, 'right_line_style'),
(8, 0x00000f00, 'top_line_style'),
(12, 0x0000f000, 'bottom_line_style'),
(16, 0x007f0000, 'left_colour_index'),
(23, 0x3f800000, 'right_colour_index'),
(30, 0x40000000, 'diag_down'),
(31, 0x80000000, 'diag_up'),
))
upkbits(xf.border, pkd_brdbkg2, (
(0, 0x0000007F, 'top_colour_index'),
(7, 0x00003F80, 'bottom_colour_index'),
(14, 0x001FC000, 'diag_colour_index'),
(21, 0x01E00000, 'diag_line_style'),
))
upkbitsL(xf.background, pkd_brdbkg2, (
(26, 0xFC000000, 'fill_pattern'),
))
upkbits(xf.background, pkd_brdbkg3, (
(0, 0x007F, 'pattern_colour_index'),
(7, 0x3F80, 'background_colour_index'),
))
elif bv >= 50:
unpack_fmt = '<HHHBBIi'
(
xf.font_index, xf.format_key, pkd_type_par,
pkd_align1, pkd_orient_used,
pkd_brdbkg1, pkd_brdbkg2,
) = unpack(unpack_fmt, data[0:16])
upkbits(xf.protection, pkd_type_par, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_par, (
(2, 0x0004, 'is_style'),
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
(4, 0xFFF0, 'parent_style_index'),
))
upkbits(xf.alignment, pkd_align1, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
(4, 0x70, 'vert_align'),
))
orientation = pkd_orient_used & 0x03
xf.alignment.rotation = [0, 255, 90, 180][orientation]
reg = pkd_orient_used >> 2
attr_stems = [
'format',
'font',
'alignment',
'border',
'background',
'protection',
]
for attr_stem in attr_stems:
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbitsL(xf.background, pkd_brdbkg1, (
( 0, 0x0000007F, 'pattern_colour_index'),
( 7, 0x00003F80, 'background_colour_index'),
(16, 0x003F0000, 'fill_pattern'),
))
upkbitsL(xf.border, pkd_brdbkg1, (
(22, 0x01C00000, 'bottom_line_style'),
(25, 0xFE000000, 'bottom_colour_index'),
))
upkbits(xf.border, pkd_brdbkg2, (
( 0, 0x00000007, 'top_line_style'),
( 3, 0x00000038, 'left_line_style'),
( 6, 0x000001C0, 'right_line_style'),
( 9, 0x0000FE00, 'top_colour_index'),
(16, 0x007F0000, 'left_colour_index'),
(23, 0x3F800000, 'right_colour_index'),
))
elif bv >= 40:
unpack_fmt = '<BBHBBHI'
(
xf.font_index, xf.format_key, pkd_type_par,
pkd_align_orient, pkd_used,
pkd_bkg_34, pkd_brd_34,
) = unpack(unpack_fmt, data[0:12])
upkbits(xf.protection, pkd_type_par, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_par, (
(2, 0x0004, 'is_style'),
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
(4, 0xFFF0, 'parent_style_index'),
))
upkbits(xf.alignment, pkd_align_orient, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
(4, 0x30, 'vert_align'),
))
orientation = (pkd_align_orient & 0xC0) >> 6
xf.alignment.rotation = [0, 255, 90, 180][orientation]
reg = pkd_used >> 2
attr_stems = [
'format',
'font',
'alignment',
'border',
'background',
'protection',
]
for attr_stem in attr_stems:
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbits(xf.background, pkd_bkg_34, (
( 0, 0x003F, 'fill_pattern'),
( 6, 0x07C0, 'pattern_colour_index'),
(11, 0xF800, 'background_colour_index'),
))
upkbitsL(xf.border, pkd_brd_34, (
( 0, 0x00000007, 'top_line_style'),
( 3, 0x000000F8, 'top_colour_index'),
( 8, 0x00000700, 'left_line_style'),
(11, 0x0000F800, 'left_colour_index'),
(16, 0x00070000, 'bottom_line_style'),
(19, 0x00F80000, 'bottom_colour_index'),
(24, 0x07000000, 'right_line_style'),
(27, 0xF8000000, 'right_colour_index'),
))
elif bv == 30:
unpack_fmt = '<BBBBHHI'
(
xf.font_index, xf.format_key, pkd_type_prot,
pkd_used, pkd_align_par,
pkd_bkg_34, pkd_brd_34,
) = unpack(unpack_fmt, data[0:12])
upkbits(xf.protection, pkd_type_prot, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_prot, (
(2, 0x0004, 'is_style'),
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
))
upkbits(xf.alignment, pkd_align_par, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
))
upkbits(xf, pkd_align_par, (
(4, 0xFFF0, 'parent_style_index'),
))
reg = pkd_used >> 2
attr_stems = [
'format',
'font',
'alignment',
'border',
'background',
'protection',
]
for attr_stem in attr_stems:
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbits(xf.background, pkd_bkg_34, (
( 0, 0x003F, 'fill_pattern'),
( 6, 0x07C0, 'pattern_colour_index'),
(11, 0xF800, 'background_colour_index'),
))
upkbitsL(xf.border, pkd_brd_34, (
( 0, 0x00000007, 'top_line_style'),
( 3, 0x000000F8, 'top_colour_index'),
( 8, 0x00000700, 'left_line_style'),
(11, 0x0000F800, 'left_colour_index'),
(16, 0x00070000, 'bottom_line_style'),
(19, 0x00F80000, 'bottom_colour_index'),
(24, 0x07000000, 'right_line_style'),
(27, 0xF8000000, 'right_colour_index'),
))
xf.alignment.vert_align = 2 # bottom
xf.alignment.rotation = 0
elif bv == 21:
## Warning: incomplete treatment; formatting_info not fully supported.
## Probably need to offset incoming BIFF2 XF[n] to BIFF8-like XF[n+16],
## and create XF[0:16] like the standard ones in BIFF8 *AND* add 16 to
## all XF references in cell records :-(
(xf.font_index, format_etc, halign_etc) = unpack('<BxBB', data)
xf.format_key = format_etc & 0x3F
upkbits(xf.protection, format_etc, (
(6, 0x40, 'cell_locked'),
(7, 0x80, 'formula_hidden'),
))
upkbits(xf.alignment, halign_etc, (
(0, 0x07, 'hor_align'),
))
for mask, side in ((0x08, 'left'), (0x10, 'right'), (0x20, 'top'), (0x40, 'bottom')):
if halign_etc & mask:
colour_index, line_style = 8, 1 # black, thin
else:
colour_index, line_style = 0, 0 # none, none
setattr(xf.border, side + '_colour_index', colour_index)
setattr(xf.border, side + '_line_style', line_style)
bg = xf.background
if halign_etc & 0x80:
bg.fill_pattern = 17
else:
bg.fill_pattern = 0
bg.background_colour_index = 9 # white
bg.pattern_colour_index = 8 # black
xf.parent_style_index = 0 # ???????????
xf.alignment.vert_align = 2 # bottom
xf.alignment.rotation = 0
attr_stems = [
'format',
'font',
'alignment',
'border',
'background',
'protection',
]
for attr_stem in attr_stems:
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, 1)
else:
raise XLRDError('programmer stuff-up: bv=%d' % bv)
xf.xf_index = len(self.xf_list)
self.xf_list.append(xf)
self.xfcount += 1
if blah:
xf.dump(
self.logfile,
header="--- handle_xf: xf[%d] ---" % xf.xf_index,
footer=" ",
)
try:
fmt = self.format_map[xf.format_key]
cellty = _cellty_from_fmtty[fmt.type]
except KeyError:
cellty = XL_CELL_NUMBER
self._xf_index_to_xl_type_map[xf.xf_index] = cellty
# Now for some assertions ...
if self.formatting_info:
if self.verbosity and xf.is_style and xf.parent_style_index != 0x0FFF:
msg = "WARNING *** XF[%d] is a style XF but parent_style_index is 0x%04x, not 0x0fff\n"
fprintf(self.logfile, msg, xf.xf_index, xf.parent_style_index)
check_colour_indexes_in_obj(self, xf, xf.xf_index)
if xf.format_key not in self.format_map:
msg = "WARNING *** XF[%d] unknown (raw) format key (%d, 0x%04x)\n"
if self.verbosity:
fprintf(self.logfile, msg,
xf.xf_index, xf.format_key, xf.format_key)
xf.format_key = 0
def xf_epilogue(self):
# self is a Book instance.
self._xf_epilogue_done = 1
num_xfs = len(self.xf_list)
blah = DEBUG or self.verbosity >= 3
blah1 = DEBUG or self.verbosity >= 1
if blah:
fprintf(self.logfile, "xf_epilogue called ...\n")
def check_same(book_arg, xf_arg, parent_arg, attr):
# the _arg caper is to avoid a Warning msg from Python 2.1 :-(
if getattr(xf_arg, attr) != getattr(parent_arg, attr):
fprintf(book_arg.logfile,
"NOTE !!! XF[%d] parent[%d] %s different\n",
xf_arg.xf_index, parent_arg.xf_index, attr)
for xfx in xrange(num_xfs):
xf = self.xf_list[xfx]
try:
fmt = self.format_map[xf.format_key]
cellty = _cellty_from_fmtty[fmt.type]
except KeyError:
cellty = XL_CELL_TEXT
self._xf_index_to_xl_type_map[xf.xf_index] = cellty
# Now for some assertions etc
if not self.formatting_info:
continue
if xf.is_style:
continue
if not(0 <= xf.parent_style_index < num_xfs):
if blah1:
fprintf(self.logfile,
"WARNING *** XF[%d]: is_style=%d but parent_style_index=%d\n",
xf.xf_index, xf.is_style, xf.parent_style_index)
# make it conform
xf.parent_style_index = 0
if self.biff_version >= 30:
if blah1:
if xf.parent_style_index == xf.xf_index:
fprintf(self.logfile,
"NOTE !!! XF[%d]: parent_style_index is also %d\n",
xf.xf_index, xf.parent_style_index)
elif not self.xf_list[xf.parent_style_index].is_style:
fprintf(self.logfile,
"NOTE !!! XF[%d]: parent_style_index is %d; style flag not set\n",
xf.xf_index, xf.parent_style_index)
if blah1 and xf.parent_style_index > xf.xf_index:
fprintf(self.logfile,
"NOTE !!! XF[%d]: parent_style_index is %d; out of order?\n",
xf.xf_index, xf.parent_style_index)
parent = self.xf_list[xf.parent_style_index]
if not xf._alignment_flag and not parent._alignment_flag:
if blah1: check_same(self, xf, parent, 'alignment')
if not xf._background_flag and not parent._background_flag:
if blah1: check_same(self, xf, parent, 'background')
if not xf._border_flag and not parent._border_flag:
if blah1: check_same(self, xf, parent, 'border')
if not xf._protection_flag and not parent._protection_flag:
if blah1: check_same(self, xf, parent, 'protection')
if not xf._format_flag and not parent._format_flag:
if blah1 and xf.format_key != parent.format_key:
fprintf(self.logfile,
"NOTE !!! XF[%d] fmtk=%d, parent[%d] fmtk=%r\n%r / %r\n",
xf.xf_index, xf.format_key, parent.xf_index, parent.format_key,
self.format_map[xf.format_key].format_str,
self.format_map[parent.format_key].format_str)
if not xf._font_flag and not parent._font_flag:
if blah1 and xf.font_index != parent.font_index:
fprintf(self.logfile,
"NOTE !!! XF[%d] fontx=%d, parent[%d] fontx=%r\n",
xf.xf_index, xf.font_index, parent.xf_index, parent.font_index)
def initialise_book(book):
initialise_colour_map(book)
book._xf_epilogue_done = 0
methods = (
handle_font,
handle_efont,
handle_format,
is_date_format_string,
handle_palette,
palette_epilogue,
handle_style,
handle_xf,
xf_epilogue,
)
for method in methods:
setattr(book.__class__, method.__name__, method)
| Format |
python | kamyu104__LeetCode-Solutions | Python/minimum-bit-flips-to-convert-number.py | {
"start": 51,
"end": 250
} | class ____(object):
def minBitFlips(self, start, goal):
"""
:type start: int
:type goal: int
:rtype: int
"""
return bin(start^goal).count('1')
| Solution |
python | getsentry__sentry | src/sentry/users/services/user_option/model.py | {
"start": 500,
"end": 702
} | class ____(TypedDict, total=False):
user_ids: list[int]
keys: list[str]
key: str
project_id: int | None
project_ids: list[int] | None
organization_id: int | None
| UserOptionFilterArgs |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/database.py | {
"start": 24599,
"end": 25857
} | class ____(ExampleDatabase):
"""A wrapper to make the given database read-only.
The implementation passes through ``fetch``, and turns ``save``, ``delete``, and
``move`` into silent no-ops.
Note that this disables Hypothesis' automatic discarding of stale examples.
It is designed to allow local machines to access a shared database (e.g. from CI
servers), without propagating changes back from a local or in-development branch.
"""
def __init__(self, db: ExampleDatabase) -> None:
super().__init__()
assert isinstance(db, ExampleDatabase)
self._wrapped = db
def __repr__(self) -> str:
return f"ReadOnlyDatabase({self._wrapped!r})"
def __eq__(self, other: object) -> bool:
return isinstance(other, ReadOnlyDatabase) and self._wrapped == other._wrapped
def fetch(self, key: bytes) -> Iterable[bytes]:
yield from self._wrapped.fetch(key)
def save(self, key: bytes, value: bytes) -> None:
pass
def delete(self, key: bytes, value: bytes) -> None:
pass
def _start_listening(self) -> None:
# we're read only, so there are no changes to broadcast.
pass
def _stop_listening(self) -> None:
pass
| ReadOnlyDatabase |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py | {
"start": 125508,
"end": 126375
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("pipeline_job.PipelineJobHook"))
def test_execute(self, mock_hook):
op = DeletePipelineJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
pipeline_job_id=TEST_PIPELINE_JOB_ID,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.delete_pipeline_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
pipeline_job_id=TEST_PIPELINE_JOB_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestVertexAIDeletePipelineJobOperator |
python | huggingface__transformers | src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | {
"start": 15663,
"end": 18047
} | class ____(ASTPreTrainedModel):
def __init__(self, config: ASTConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.audio_spectrogram_transformer = ASTModel(config)
# Classifier head
self.classifier = ASTMLPHead(config)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> SequenceClassifierOutput:
r"""
input_values (`torch.FloatTensor` of shape `(batch_size, max_length, num_mel_bins)`):
Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by
loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via
the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the
mel features, padding and conversion into a tensor of type `torch.FloatTensor`.
See [`~ASTFeatureExtractor.__call__`]
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the audio classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.audio_spectrogram_transformer(input_values, **kwargs)
pooled_output = outputs.pooler_output
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["ASTForAudioClassification", "ASTModel", "ASTPreTrainedModel"]
| ASTForAudioClassification |
python | pytorch__pytorch | torch/fx/traceback.py | {
"start": 2080,
"end": 16561
} | class ____:
"""
NodeSource is a data structure that contains the provenance information of a node.
If node `a` is created from node `b`, then `a.meta["from_node"]` may contain NodeSource(b).
"""
class NodeInfo:
def __init__(self, name: str, target: str, graph_id: int):
self.name = name
self.target = target
self.graph_id = graph_id
pass_name: str
action: list["NodeSourceAction"]
from_node: list["NodeSource"]
node_info: Optional["NodeInfo"]
_dict: Optional[dict[str, Any]]
_action_string: Optional[str]
def __init__(
self,
node: Optional[Node],
pass_name: str = "",
action: Optional[Union["NodeSourceAction", list["NodeSourceAction"]]] = None,
):
self.pass_name = pass_name
if action is None:
action = []
elif not isinstance(action, list):
action = [action]
for a in action:
assert isinstance(a, NodeSourceAction)
self.action = action
if node:
self.node_info = self.NodeInfo(
name=node.name, target=str(node.target), graph_id=id(node.graph)
)
self.from_node = (
copy.deepcopy(node.meta["from_node"])
if "from_node" in node.meta
else []
)
else:
self.node_info = None
self.from_node = []
# cache the action string and dict representation for performance.
self._action_string: Optional[str] = None
self._dict: Optional[dict[str, Any]] = None
@property
def name(self) -> str:
return self.node_info.name if self.node_info else ""
@property
def target(self) -> str:
return self.node_info.target if self.node_info else ""
@property
def graph_id(self) -> int:
return self.node_info.graph_id if self.node_info else -1
def __repr__(self):
return self.print_readable()
def _get_action_string(self):
if self._action_string is None:
self._action_string = "+".join([a.name.lower() for a in self.action])
return self._action_string
def print_readable(self, indent=0):
if indent > 9:
return ""
result = ""
action_string = self._get_action_string()
result += (
" " * indent * 4
+ f"(name={self.name}, pass_name={self.pass_name}, action={action_string}, graph_id={self.graph_id})\n"
)
for item in self.from_node:
result += item.print_readable(indent + 1)
return result
def to_dict(self) -> dict:
if self._dict is None:
# Convert the object to a dictionary
action_string = self._get_action_string()
self._dict = {
"name": self.name,
"target": self.target,
"graph_id": self.graph_id,
"pass_name": self.pass_name,
"action": action_string,
"from_node": [node.to_dict() for node in self.from_node],
}
assert self._dict is not None
return self._dict
def __eq__(self, other: object):
if not isinstance(other, NodeSource):
return False
return self.to_dict() == other.to_dict()
def __hash__(self):
# Create a hash based on the dictionary representation
# We need to convert the dict to a hashable form
def _make_hashable(obj):
if isinstance(obj, dict):
return tuple(sorted((k, _make_hashable(v)) for k, v in obj.items()))
elif isinstance(obj, list):
return tuple(_make_hashable(item) for item in obj)
else:
return obj
return hash(_make_hashable(self.to_dict()))
@classmethod
def _from_dict(cls, d: Optional[dict]) -> Optional["NodeSource"]:
"""
Recursively deserialize from_node metadata from dictionary data.
It is used to deserialize the from_node field from serialized metadata.
Please use constructor NodeSource(node, ...) to create a NodeSource object.
"""
if d is None:
return None
assert isinstance(d, dict), f"Expected a dict, got {type(d)}"
# Create a NodeSource object directly without going through the constructor
# to avoid issues with graph ID and node creation
node_source = NodeSource.__new__(NodeSource)
# Reset the cached properties
node_source._action_string = None
node_source._dict = None
# Set the basic attributes
node_source.pass_name = d.get("pass_name", "")
# Parse action string back to NodeSourceAction enum list
action_str = d.get("action", "")
actions = []
if action_str:
for action_name in action_str.split("+"):
if action_name.upper() == "CREATE":
actions.append(NodeSourceAction.CREATE)
elif action_name.upper() == "REPLACE":
actions.append(NodeSourceAction.REPLACE)
node_source.action = actions
# Create the NodeInfo object directly
if "name" in d and "target" in d and "graph_id" in d:
node_info = NodeSource.NodeInfo(
d.get("name", ""), d.get("target", ""), d.get("graph_id", -1)
)
node_source.node_info = node_info
else:
node_source.node_info = None
# Recursively deserialize nested from_node
if d.get("from_node", None) is not None:
node_source.from_node = [
result
for fn in d.get("from_node", [])
if (result := cls._from_dict(fn)) is not None
]
else:
node_source.from_node = []
return node_source
@compatibility(is_backward_compatible=False)
@contextmanager
def preserve_node_meta(enable=True):
global should_preserve_node_meta
global current_meta
saved_should_preserve_node_meta = should_preserve_node_meta
# Shallow copy is OK since fields of current_meta are not mutated
saved_current_meta = current_meta.copy()
try:
should_preserve_node_meta = enable
yield
finally:
should_preserve_node_meta = saved_should_preserve_node_meta
current_meta = saved_current_meta
@compatibility(is_backward_compatible=False)
def set_stack_trace(stack: list[str]):
global current_meta
if should_preserve_node_meta and stack:
current_meta["stack_trace"] = "".join(stack)
@compatibility(is_backward_compatible=False)
@contextmanager
def annotate(annotation_dict: dict):
"""
Temporarily adds custom annotations to the current tracing context.
The fx_node produced from this tracing context will have the
custom annotations in node.metadata["custom"] field.
This context manager allows you to insert arbitrary metadata into the PT2
tracing system by updating the global `current_meta["custom"]` dictionary.
The annotations are automatically reverted after the context exits.
Gradient accumulation nodes will not be annotated.
This is intended for advanced users who need to attach additional metadata to the fx nodes
(e.g., for debugging, analysis, or external tooling) during export tracing.
Note:
This API is **not backward compatible** and may evolve in future releases.
Note:
This API is not compatible with fx.symbolic_trace or jit.trace. It's intended
to be used with PT2 family of tracers, e.g. torch.export and dynamo.
Args:
annotation_dict (dict): A dictionary of custom key-value pairs to inject
into the FX trace metadata.
Example:
After exiting the context, custom annotations are removed.
>>> with annotate({"source": "custom_pass", "tag": 42}):
... pass # Your computation here
"""
global current_meta
has_custom = "custom" in current_meta
old_custom = copy.copy(current_meta.get("custom", {}))
try:
if not has_custom:
current_meta["custom"] = {}
# Update with all key-value pairs from the input dict
current_meta["custom"].update(annotation_dict)
yield
finally:
if has_custom:
# Restore the original custom dict
current_meta["custom"] = old_custom
else:
del current_meta["custom"]
@compatibility(is_backward_compatible=False)
def annotate_fn(annotation_dict: dict):
"""
A decorator that wraps a function with the annotate context manager.
Use this when you want to annotate an entire function instead of a specific code block.
Note:
This API is **not backward compatible** and may evolve in future releases.
Note:
This API is not compatible with fx.symbolic_trace or jit.trace. It's intended
to be used with PT2 family of tracers, e.g. torch.export and dynamo.
Args:
annotation_dict (dict): A dictionary of custom key-value pairs to inject
into the FX trace metadata for all operations in the function.
Example:
All operations in my_function will have {"pp_stage": 1} in their metadata.
>>> @annotate_fn({"pp_stage": 1})
... def my_function(x):
... return x + 1
"""
from functools import wraps
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
with annotate(annotation_dict):
return func(*args, **kwargs)
return wrapper
return decorator
@compatibility(is_backward_compatible=False)
def set_grad_fn_seq_nr(seq_nr):
global current_meta
if should_preserve_node_meta:
# The seq_nr is captured by eager mode in the grad_fn during forward
current_meta["grad_fn_seq_nr"] = current_meta.get("grad_fn_seq_nr", []) + [
seq_nr
]
current_meta["in_grad_fn"] = current_meta.get("in_grad_fn", 0) + 1
@compatibility(is_backward_compatible=False)
def reset_grad_fn_seq_nr():
# NB: reset state properly, this would be helpful towards supporting
# reentrant autograd if we actually wanted to do that.
global current_meta
if should_preserve_node_meta:
current_level = current_meta.get("in_grad_fn", 0)
assert current_level > 0
if current_level == 1:
del current_meta["in_grad_fn"]
del current_meta["grad_fn_seq_nr"]
else:
current_meta["in_grad_fn"] = current_level - 1
current_meta["grad_fn_seq_nr"] = current_meta["grad_fn_seq_nr"][:-1]
@compatibility(is_backward_compatible=False)
def format_stack() -> list[str]:
if should_preserve_node_meta:
return [current_meta.get("stack_trace", "")]
else:
# fallback to traceback.format_stack()
return traceback.format_list(traceback.extract_stack()[:-1])
@compatibility(is_backward_compatible=False)
def has_preserved_node_meta() -> bool:
return should_preserve_node_meta
@compatibility(is_backward_compatible=False)
@contextmanager
def set_current_meta(node, pass_name=""):
global current_meta
if should_preserve_node_meta and node.meta:
saved_meta = current_meta
try:
current_meta = node.meta.copy()
# Update the "from_node" field in current_meta for provenance tracking.
# Instead of appending, overwrite the "from_node" field because current_meta
# will be assigned to the new node. The new NodeSource(node, ...) will
# include the information from the previous current_meta["from_node"].
current_meta["from_node"] = [
NodeSource(node, pass_name, NodeSourceAction.CREATE)
]
yield
finally:
current_meta = saved_meta
else:
yield
@compatibility(is_backward_compatible=False)
def get_current_meta() -> dict[str, Any]:
return current_meta
@compatibility(is_backward_compatible=False)
@contextmanager
def set_current_replay_node(node):
"""
Set the currently replay node. If `current_replay_node` is not None,
then we're re-generating the `current_replay_node` in FunctionalTensorMode.
"""
# See [Note] annotation for more details.
global current_replay_node
saved_current_replay_node = current_replay_node
try:
current_replay_node = node
yield
finally:
current_replay_node = saved_current_replay_node
@compatibility(is_backward_compatible=False)
def get_current_replay_node():
"""
Get the currently replay node
"""
return current_replay_node
@compatibility(is_backward_compatible=False)
def get_graph_provenance_json(graph: Graph) -> dict[str, Any]:
"""
Given an fx.Graph, return a json that contains the provenance information of each node.
"""
try:
provenance_tracking_json = {}
for node in graph.nodes:
if node.op == "call_function":
provenance_tracking_json[node.name] = (
[source.to_dict() for source in node.meta["from_node"]]
if "from_node" in node.meta
else []
)
return provenance_tracking_json
except Exception as e:
# Since this is just debugging, it should never interfere with regular
# program execution, so we use this try-except to guard against any error
signpost_event(
"inductor",
"provenance_tracking_error",
{
"function": "get_graph_provenance_json",
"error_msg": str(e),
"stack_trace": traceback.format_exc(),
},
)
return {}
def _get_custom_metadata(gm: GraphModule) -> str:
assert isinstance(gm, GraphModule)
def helper(gm: GraphModule):
custom_metadata = []
for node in gm.graph.nodes:
if hasattr(node, "meta") and node.meta.get("custom", None):
custom_metadata.append((node.op, node.name, node.meta["custom"]))
if node.op == "get_attr" and isinstance(
getattr(gm, node.target), GraphModule
):
custom_metadata.append(helper(getattr(gm, node.target)))
return custom_metadata
return "\n".join(str(x) for x in helper(gm))
| NodeSource |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 103317,
"end": 112687
} | class ____(sgqlc.types.Enum):
"""Represents countries or regions for billing and residence for a
GitHub Sponsors profile.
Enumeration Choices:
* `AD`: Andorra
* `AE`: United Arab Emirates
* `AF`: Afghanistan
* `AG`: Antigua and Barbuda
* `AI`: Anguilla
* `AL`: Albania
* `AM`: Armenia
* `AO`: Angola
* `AQ`: Antarctica
* `AR`: Argentina
* `AS`: American Samoa
* `AT`: Austria
* `AU`: Australia
* `AW`: Aruba
* `AX`: Åland
* `AZ`: Azerbaijan
* `BA`: Bosnia and Herzegovina
* `BB`: Barbados
* `BD`: Bangladesh
* `BE`: Belgium
* `BF`: Burkina Faso
* `BG`: Bulgaria
* `BH`: Bahrain
* `BI`: Burundi
* `BJ`: Benin
* `BL`: Saint Barthélemy
* `BM`: Bermuda
* `BN`: Brunei Darussalam
* `BO`: Bolivia
* `BQ`: Bonaire, Sint Eustatius and Saba
* `BR`: Brazil
* `BS`: Bahamas
* `BT`: Bhutan
* `BV`: Bouvet Island
* `BW`: Botswana
* `BY`: Belarus
* `BZ`: Belize
* `CA`: Canada
* `CC`: Cocos (Keeling) Islands
* `CD`: Congo (Kinshasa)
* `CF`: Central African Republic
* `CG`: Congo (Brazzaville)
* `CH`: Switzerland
* `CI`: Côte d'Ivoire
* `CK`: Cook Islands
* `CL`: Chile
* `CM`: Cameroon
* `CN`: China
* `CO`: Colombia
* `CR`: Costa Rica
* `CV`: Cape Verde
* `CW`: Curaçao
* `CX`: Christmas Island
* `CY`: Cyprus
* `CZ`: Czech Republic
* `DE`: Germany
* `DJ`: Djibouti
* `DK`: Denmark
* `DM`: Dominica
* `DO`: Dominican Republic
* `DZ`: Algeria
* `EC`: Ecuador
* `EE`: Estonia
* `EG`: Egypt
* `EH`: Western Sahara
* `ER`: Eritrea
* `ES`: Spain
* `ET`: Ethiopia
* `FI`: Finland
* `FJ`: Fiji
* `FK`: Falkland Islands
* `FM`: Micronesia
* `FO`: Faroe Islands
* `FR`: France
* `GA`: Gabon
* `GB`: United Kingdom
* `GD`: Grenada
* `GE`: Georgia
* `GF`: French Guiana
* `GG`: Guernsey
* `GH`: Ghana
* `GI`: Gibraltar
* `GL`: Greenland
* `GM`: Gambia
* `GN`: Guinea
* `GP`: Guadeloupe
* `GQ`: Equatorial Guinea
* `GR`: Greece
* `GS`: South Georgia and South Sandwich Islands
* `GT`: Guatemala
* `GU`: Guam
* `GW`: Guinea-Bissau
* `GY`: Guyana
* `HK`: Hong Kong
* `HM`: Heard and McDonald Islands
* `HN`: Honduras
* `HR`: Croatia
* `HT`: Haiti
* `HU`: Hungary
* `ID`: Indonesia
* `IE`: Ireland
* `IL`: Israel
* `IM`: Isle of Man
* `IN`: India
* `IO`: British Indian Ocean Territory
* `IQ`: Iraq
* `IR`: Iran
* `IS`: Iceland
* `IT`: Italy
* `JE`: Jersey
* `JM`: Jamaica
* `JO`: Jordan
* `JP`: Japan
* `KE`: Kenya
* `KG`: Kyrgyzstan
* `KH`: Cambodia
* `KI`: Kiribati
* `KM`: Comoros
* `KN`: Saint Kitts and Nevis
* `KR`: Korea, South
* `KW`: Kuwait
* `KY`: Cayman Islands
* `KZ`: Kazakhstan
* `LA`: Laos
* `LB`: Lebanon
* `LC`: Saint Lucia
* `LI`: Liechtenstein
* `LK`: Sri Lanka
* `LR`: Liberia
* `LS`: Lesotho
* `LT`: Lithuania
* `LU`: Luxembourg
* `LV`: Latvia
* `LY`: Libya
* `MA`: Morocco
* `MC`: Monaco
* `MD`: Moldova
* `ME`: Montenegro
* `MF`: Saint Martin (French part)
* `MG`: Madagascar
* `MH`: Marshall Islands
* `MK`: Macedonia
* `ML`: Mali
* `MM`: Myanmar
* `MN`: Mongolia
* `MO`: Macau
* `MP`: Northern Mariana Islands
* `MQ`: Martinique
* `MR`: Mauritania
* `MS`: Montserrat
* `MT`: Malta
* `MU`: Mauritius
* `MV`: Maldives
* `MW`: Malawi
* `MX`: Mexico
* `MY`: Malaysia
* `MZ`: Mozambique
* `NA`: Namibia
* `NC`: New Caledonia
* `NE`: Niger
* `NF`: Norfolk Island
* `NG`: Nigeria
* `NI`: Nicaragua
* `NL`: Netherlands
* `NO`: Norway
* `NP`: Nepal
* `NR`: Nauru
* `NU`: Niue
* `NZ`: New Zealand
* `OM`: Oman
* `PA`: Panama
* `PE`: Peru
* `PF`: French Polynesia
* `PG`: Papua New Guinea
* `PH`: Philippines
* `PK`: Pakistan
* `PL`: Poland
* `PM`: Saint Pierre and Miquelon
* `PN`: Pitcairn
* `PR`: Puerto Rico
* `PS`: Palestine
* `PT`: Portugal
* `PW`: Palau
* `PY`: Paraguay
* `QA`: Qatar
* `RE`: Reunion
* `RO`: Romania
* `RS`: Serbia
* `RU`: Russian Federation
* `RW`: Rwanda
* `SA`: Saudi Arabia
* `SB`: Solomon Islands
* `SC`: Seychelles
* `SD`: Sudan
* `SE`: Sweden
* `SG`: Singapore
* `SH`: Saint Helena
* `SI`: Slovenia
* `SJ`: Svalbard and Jan Mayen Islands
* `SK`: Slovakia
* `SL`: Sierra Leone
* `SM`: San Marino
* `SN`: Senegal
* `SO`: Somalia
* `SR`: Suriname
* `SS`: South Sudan
* `ST`: Sao Tome and Principe
* `SV`: El Salvador
* `SX`: Sint Maarten (Dutch part)
* `SZ`: Swaziland
* `TC`: Turks and Caicos Islands
* `TD`: Chad
* `TF`: French Southern Lands
* `TG`: Togo
* `TH`: Thailand
* `TJ`: Tajikistan
* `TK`: Tokelau
* `TL`: Timor-Leste
* `TM`: Turkmenistan
* `TN`: Tunisia
* `TO`: Tonga
* `TR`: Turkey
* `TT`: Trinidad and Tobago
* `TV`: Tuvalu
* `TW`: Taiwan
* `TZ`: Tanzania
* `UA`: Ukraine
* `UG`: Uganda
* `UM`: United States Minor Outlying Islands
* `US`: United States of America
* `UY`: Uruguay
* `UZ`: Uzbekistan
* `VA`: Vatican City
* `VC`: Saint Vincent and the Grenadines
* `VE`: Venezuela
* `VG`: Virgin Islands, British
* `VI`: Virgin Islands, U.S.
* `VN`: Vietnam
* `VU`: Vanuatu
* `WF`: Wallis and Futuna Islands
* `WS`: Samoa
* `YE`: Yemen
* `YT`: Mayotte
* `ZA`: South Africa
* `ZM`: Zambia
* `ZW`: Zimbabwe
"""
__schema__ = github_schema
__choices__ = (
"AD",
"AE",
"AF",
"AG",
"AI",
"AL",
"AM",
"AO",
"AQ",
"AR",
"AS",
"AT",
"AU",
"AW",
"AX",
"AZ",
"BA",
"BB",
"BD",
"BE",
"BF",
"BG",
"BH",
"BI",
"BJ",
"BL",
"BM",
"BN",
"BO",
"BQ",
"BR",
"BS",
"BT",
"BV",
"BW",
"BY",
"BZ",
"CA",
"CC",
"CD",
"CF",
"CG",
"CH",
"CI",
"CK",
"CL",
"CM",
"CN",
"CO",
"CR",
"CV",
"CW",
"CX",
"CY",
"CZ",
"DE",
"DJ",
"DK",
"DM",
"DO",
"DZ",
"EC",
"EE",
"EG",
"EH",
"ER",
"ES",
"ET",
"FI",
"FJ",
"FK",
"FM",
"FO",
"FR",
"GA",
"GB",
"GD",
"GE",
"GF",
"GG",
"GH",
"GI",
"GL",
"GM",
"GN",
"GP",
"GQ",
"GR",
"GS",
"GT",
"GU",
"GW",
"GY",
"HK",
"HM",
"HN",
"HR",
"HT",
"HU",
"ID",
"IE",
"IL",
"IM",
"IN",
"IO",
"IQ",
"IR",
"IS",
"IT",
"JE",
"JM",
"JO",
"JP",
"KE",
"KG",
"KH",
"KI",
"KM",
"KN",
"KR",
"KW",
"KY",
"KZ",
"LA",
"LB",
"LC",
"LI",
"LK",
"LR",
"LS",
"LT",
"LU",
"LV",
"LY",
"MA",
"MC",
"MD",
"ME",
"MF",
"MG",
"MH",
"MK",
"ML",
"MM",
"MN",
"MO",
"MP",
"MQ",
"MR",
"MS",
"MT",
"MU",
"MV",
"MW",
"MX",
"MY",
"MZ",
"NA",
"NC",
"NE",
"NF",
"NG",
"NI",
"NL",
"NO",
"NP",
"NR",
"NU",
"NZ",
"OM",
"PA",
"PE",
"PF",
"PG",
"PH",
"PK",
"PL",
"PM",
"PN",
"PR",
"PS",
"PT",
"PW",
"PY",
"QA",
"RE",
"RO",
"RS",
"RU",
"RW",
"SA",
"SB",
"SC",
"SD",
"SE",
"SG",
"SH",
"SI",
"SJ",
"SK",
"SL",
"SM",
"SN",
"SO",
"SR",
"SS",
"ST",
"SV",
"SX",
"SZ",
"TC",
"TD",
"TF",
"TG",
"TH",
"TJ",
"TK",
"TL",
"TM",
"TN",
"TO",
"TR",
"TT",
"TV",
"TW",
"TZ",
"UA",
"UG",
"UM",
"US",
"UY",
"UZ",
"VA",
"VC",
"VE",
"VG",
"VI",
"VN",
"VU",
"WF",
"WS",
"YE",
"YT",
"ZA",
"ZM",
"ZW",
)
| SponsorsCountryOrRegionCode |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/ops_jobs_graphs/graphs/order_based_dependency.py | {
"start": 0,
"end": 543
} | class ____:
def execute(self, query: str):
pass
def get_database_connection():
return MockDatabase()
# start_marker
import dagster as dg
@dg.op
def create_table_1():
get_database_connection().execute(
"create table_1 as select * from some_source_table"
)
@dg.op(ins={"start": dg.In(dg.Nothing)})
def create_table_2():
get_database_connection().execute("create table_2 as select * from table_1")
@dg.graph
def nothing_dependency():
create_table_2(start=create_table_1())
# end_marker
| MockDatabase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/call3.py | {
"start": 1786,
"end": 1845
} | class ____(Protocol):
def f(self, x: Any):
...
| P2 |
python | walkccc__LeetCode | solutions/274. H-Index/274-2.py | {
"start": 0,
"end": 219
} | class ____:
def hIndex(self, citations: list[int]) -> int:
n = len(citations)
citations.sort()
for i, citation in enumerate(citations):
if citation >= n - i:
return n - i
return 0
| Solution |
python | tensorflow__tensorflow | tensorflow/compiler/tests/conv3d_test.py | {
"start": 21969,
"end": 28752
} | class ____(xla_test.XLATestCase):
def testConv3DTransposeSingleStride(self):
with self.session(), self.test_scope():
strides = [1, 1, 1, 1, 1]
# Input, output: [batch, depth, height, width, channel]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 5, 6, 4, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
# We count the number of cells being added at the locations in the output.
# At the center, #cells = kernel_depth * kernel_height * kernel_width
# At the corners, #cells = ceil(kernel_depth/2) * ceil(kernel_height/2)
# * ceil(kernel_width/2)
# At the edges, #cells =
# kernel_depth * ceil(kernel_height/2) * ceil(kernel_width/2) or
# ceil(kernel_depth/2) * kernel_height * ceil(kernel_width/2) or
# ceil(kernel_depth/2) * ceil(kernel_height/2) * kernel_width
# At the borders, #cells =
# ceil(kernel_depth/2) * kernel_height * kernel_width or
# kernel_depth * ceil(kernel_height/2) * kernel_width or
# kernel_depth * kernel_height * ceil(kernel_width/2)
for n in range(x_shape[0]):
for k in range(f_shape[3]):
for w in range(y_shape[3]):
for h in range(y_shape[2]):
for d in range(y_shape[1]):
d_in = d > 0 and d < y_shape[1] - 1
h_in = h > 0 and h < y_shape[2] - 1
w_in = w > 0 and w < y_shape[3] - 1
if d_in + h_in + w_in == 3:
target = 27 * 3.0
elif d_in + h_in + w_in == 2:
target = 18 * 3.0
elif d_in or h_in or w_in:
target = 12 * 3.0
else:
target = 8 * 3.0
self.assertAllClose(target, value[n, d, h, w, k])
def testConv3DTransposeSame(self):
with self.session(), self.test_scope():
strides = [1, 2, 2, 2, 1]
# Input, output: [batch, depth, height, width, depth]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 10, 12, 8, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
for n in range(x_shape[0]):
for k in range(f_shape[3]):
for w in range(y_shape[3]):
for h in range(y_shape[2]):
for d in range(y_shape[1]):
# We add a case for locations divisible by the stride.
d_in = d % strides[1] == 0 and 0 < d < y_shape[1] - 1
h_in = h % strides[2] == 0 and 0 < h < y_shape[2] - 1
w_in = w % strides[3] == 0 and 0 < w < y_shape[3] - 1
if d_in + h_in + w_in == 3:
target = 8 * 3.0
elif d_in + h_in + w_in == 2:
target = 4 * 3.0
elif d_in or h_in or w_in:
target = 2 * 3.0
else:
target = 3.0
self.assertAllClose(target, value[n, d, h, w, k])
def testConv3DTransposeValid(self):
with self.session(), self.test_scope():
strides = [1, 2, 2, 2, 1]
# Input, output: [batch, depth, height, width, depth]
x_shape = [2, 5, 6, 4, 3]
y_shape = [2, 11, 13, 9, 2]
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in range(x_shape[0]):
for k in range(f_shape[3]):
for w in range(y_shape[3]):
for h in range(y_shape[2]):
for d in range(y_shape[1]):
# We add a case for locations divisible by the stride.
d_in = d % strides[1] == 0 and pad < d < y_shape[1] - 1 - pad
h_in = h % strides[2] == 0 and pad < h < y_shape[2] - 1 - pad
w_in = w % strides[3] == 0 and pad < w < y_shape[3] - 1 - pad
if d_in + h_in + w_in == 3:
target = 8 * 3.0
elif d_in + h_in + w_in == 2:
target = 4 * 3.0
elif d_in or h_in or w_in:
target = 2 * 3.0
else:
target = 3.0
cache_values[n, d, h, w, k] = target
# copy values in the border
cache_values[n, :, :, 0, k] = cache_values[n, :, :, 1, k]
cache_values[n, :, :, -1, k] = cache_values[n, :, :, -2, k]
cache_values[n, :, 0, :, k] = cache_values[n, :, 1, :, k]
cache_values[n, :, -1, :, k] = cache_values[n, :, -2, :, k]
cache_values[n, 0, :, :, k] = cache_values[n, 1, :, :, k]
cache_values[n, -1, :, :, k] = cache_values[n, -2, :, :, k]
self.assertAllClose(cache_values, value)
def testGradient(self):
x_shape = [2, 3, 4, 3, 2]
f_shape = [3, 3, 3, 2, 2]
y_shape = [2, 6, 8, 6, 2]
strides = [1, 2, 2, 2, 1]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.session(), self.test_scope():
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv3d_transpose gradient err = %g " % err)
err_tolerance = 0.001
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
googletest.main()
| Conv3DTransposeTest |
python | spack__spack | lib/spack/spack/spec.py | {
"start": 213985,
"end": 219681
} | class ____(collections.defaultdict):
"""Cache for Specs that uses a spec_like as key, and computes lazily
the corresponding value ``Spec(spec_like``.
"""
def __init__(self):
super().__init__(Spec)
def __missing__(self, key):
value = self.default_factory(key)
self[key] = value
return value
def save_dependency_specfiles(root: Spec, output_directory: str, dependencies: List[Spec]):
"""Given a root spec (represented as a yaml object), index it with a subset
of its dependencies, and write each dependency to a separate yaml file
in the output directory. By default, all dependencies will be written
out. To choose a smaller subset of dependencies to be written, pass a
list of package names in the dependencies parameter. If the format of the
incoming spec is not json, that can be specified with the spec_format
parameter. This can be used to convert from yaml specfiles to the
json format."""
for spec in root.traverse():
if not any(spec.satisfies(dep) for dep in dependencies):
continue
json_path = os.path.join(output_directory, f"{spec.name}.json")
with open(json_path, "w", encoding="utf-8") as fd:
fd.write(spec.to_json(hash=ht.dag_hash))
def get_host_environment_metadata() -> Dict[str, str]:
"""Get the host environment, reduce to a subset that we can store in
the install directory, and add the spack version.
"""
environ = get_host_environment()
return {
"host_os": environ["os"],
"platform": environ["platform"],
"host_target": environ["target"],
"hostname": environ["hostname"],
"spack_version": spack.get_version(),
"kernel_version": platform.version(),
}
def get_host_environment() -> Dict[str, Any]:
"""Returns a dictionary with host information (not including the os.environ)."""
host_platform = spack.platforms.host()
host_target = host_platform.default_target()
host_os = host_platform.default_operating_system()
arch_fmt = "platform={0} os={1} target={2}"
arch_spec = Spec(arch_fmt.format(host_platform, host_os, host_target))
return {
"target": str(host_target),
"os": str(host_os),
"platform": str(host_platform),
"arch": arch_spec,
"architecture": arch_spec,
"arch_str": str(arch_spec),
"hostname": socket.gethostname(),
}
def eval_conditional(string):
"""Evaluate conditional definitions using restricted variable scope."""
valid_variables = get_host_environment()
valid_variables.update({"re": re, "env": os.environ})
return eval(string, valid_variables)
def _inject_patches_variant(root: Spec) -> None:
# This dictionary will store object IDs rather than Specs as keys
# since the Spec __hash__ will change as patches are added to them
spec_to_patches: Dict[int, Set[spack.patch.Patch]] = {}
for s in root.traverse():
# After concretizing, assign namespaces to anything left.
# Note that this doesn't count as a "change". The repository
# configuration is constant throughout a spack run, and
# normalize and concretize evaluate Packages using Repo.get(),
# which respects precedence. So, a namespace assignment isn't
# changing how a package name would have been interpreted and
# we can do it as late as possible to allow as much
# compatibility across repositories as possible.
if s.namespace is None:
s.namespace = spack.repo.PATH.repo_for_pkg(s.name).namespace
if s.concrete:
continue
# Add any patches from the package to the spec.
node_patches = {
patch
for cond, patch_list in spack.repo.PATH.get_pkg_class(s.fullname).patches.items()
if s.satisfies(cond)
for patch in patch_list
}
if node_patches:
spec_to_patches[id(s)] = node_patches
# Also record all patches required on dependencies by depends_on(..., patch=...)
for dspec in root.traverse_edges(deptype=dt.ALL, cover="edges", root=False):
if dspec.spec.concrete:
continue
pkg_deps = spack.repo.PATH.get_pkg_class(dspec.parent.fullname).dependencies
edge_patches: List[spack.patch.Patch] = []
for cond, deps_by_name in pkg_deps.items():
dependency = deps_by_name.get(dspec.spec.name)
if not dependency:
continue
if not dspec.parent.satisfies(cond):
continue
for pcond, patch_list in dependency.patches.items():
if dspec.spec.satisfies(pcond):
edge_patches.extend(patch_list)
if edge_patches:
spec_to_patches.setdefault(id(dspec.spec), set()).update(edge_patches)
for spec in root.traverse():
if id(spec) not in spec_to_patches:
continue
patches = list(spec_to_patches[id(spec)])
variant: vt.VariantValue = spec.variants.setdefault(
"patches", vt.MultiValuedVariant("patches", ())
)
variant.set(*(p.sha256 for p in patches))
# FIXME: Monkey patches variant to store patches order
ordered_hashes = [(*p.ordering_key, p.sha256) for p in patches if p.ordering_key]
ordered_hashes.sort()
tty.debug(
f"Ordered hashes [{spec.name}]: "
+ ", ".join("/".join(str(e) for e in t) for t in ordered_hashes)
)
setattr(
variant, "_patches_in_order_of_appearance", [sha256 for _, _, sha256 in ordered_hashes]
)
| LazySpecCache |
python | ray-project__ray | python/ray/serve/llm/openai_api_models.py | {
"start": 2720,
"end": 2961
} | class ____(_TranscriptionResponse):
"""TranscriptionResponse is the response body for the transcription API.
This model is compatible with vLLM's OpenAI API models.
"""
pass
@PublicAPI(stability="alpha")
| TranscriptionResponse |
python | getsentry__sentry-python | tests/test_dsc.py | {
"start": 486,
"end": 16581
} | class ____(Transport):
"""conftest.TestTransport does not pass in the options so we need this here"""
def __init__(self, options=None):
Transport.__init__(self, options)
def capture_envelope(self, _: Envelope) -> None:
"""No-op capture_envelope for tests"""
pass
def test_dsc_head_of_trace(sentry_init, capture_envelopes):
"""
Our service is the head of the trace (it starts a new trace)
and sends a transaction event to Sentry.
"""
sentry_init(
dsn="https://mysecret@o1234.ingest.sentry.io/12312012",
release="myapp@0.0.1",
environment="canary",
traces_sample_rate=1.0,
transport=TransportWithOptions,
)
envelopes = capture_envelopes()
# We start a new transaction
with sentry_sdk.start_transaction(name="foo"):
pass
assert len(envelopes) == 1
transaction_envelope = envelopes[0]
envelope_trace_header = transaction_envelope.headers["trace"]
assert "trace_id" in envelope_trace_header
assert type(envelope_trace_header["trace_id"]) == str
assert "public_key" in envelope_trace_header
assert type(envelope_trace_header["public_key"]) == str
assert envelope_trace_header["public_key"] == "mysecret"
assert "org_id" in envelope_trace_header
assert type(envelope_trace_header["org_id"]) == str
assert envelope_trace_header["org_id"] == "1234"
assert "sample_rate" in envelope_trace_header
assert type(envelope_trace_header["sample_rate"]) == str
assert envelope_trace_header["sample_rate"] == "1.0"
assert "sampled" in envelope_trace_header
assert type(envelope_trace_header["sampled"]) == str
assert envelope_trace_header["sampled"] == "true"
assert "release" in envelope_trace_header
assert type(envelope_trace_header["release"]) == str
assert envelope_trace_header["release"] == "myapp@0.0.1"
assert "environment" in envelope_trace_header
assert type(envelope_trace_header["environment"]) == str
assert envelope_trace_header["environment"] == "canary"
assert "transaction" in envelope_trace_header
assert type(envelope_trace_header["transaction"]) == str
assert envelope_trace_header["transaction"] == "foo"
def test_dsc_head_of_trace_uses_custom_org_id(sentry_init, capture_envelopes):
"""
Our service is the head of the trace (it starts a new trace)
and sends a transaction event to Sentry.
"""
sentry_init(
dsn="https://mysecret@o1234.ingest.sentry.io/12312012",
org_id="9999",
release="myapp@0.0.1",
environment="canary",
traces_sample_rate=1.0,
transport=TransportWithOptions,
)
envelopes = capture_envelopes()
# We start a new transaction
with sentry_sdk.start_transaction(name="foo"):
pass
assert len(envelopes) == 1
transaction_envelope = envelopes[0]
envelope_trace_header = transaction_envelope.headers["trace"]
assert "org_id" in envelope_trace_header
assert type(envelope_trace_header["org_id"]) == str
assert envelope_trace_header["org_id"] == "9999"
def test_dsc_continuation_of_trace(sentry_init, capture_envelopes):
"""
Another service calls our service and passes tracing information to us.
Our service is continuing the trace and sends a transaction event to Sentry.
"""
sentry_init(
dsn="https://mysecret@o1234.ingest.sentry.io/12312012",
release="myapp@0.0.1",
environment="canary",
traces_sample_rate=1.0,
transport=TransportWithOptions,
)
envelopes = capture_envelopes()
# This is what the upstream service sends us
sentry_trace = "771a43a4192642f0b136d5159a501700-1234567890abcdef-1"
baggage = (
"other-vendor-value-1=foo;bar;baz, "
"sentry-trace_id=771a43a4192642f0b136d5159a501700, "
"sentry-public_key=frontendpublickey, "
"sentry-sample_rate=0.01337, "
"sentry-sampled=true, "
"sentry-release=myfrontend@1.2.3, "
"sentry-environment=bird, "
"sentry-transaction=bar, "
"other-vendor-value-2=foo;bar;"
)
incoming_http_headers = {
"HTTP_SENTRY_TRACE": sentry_trace,
"HTTP_BAGGAGE": baggage,
}
# We continue the incoming trace and start a new transaction
transaction = sentry_sdk.continue_trace(incoming_http_headers)
with sentry_sdk.start_transaction(transaction, name="foo"):
pass
assert len(envelopes) == 1
transaction_envelope = envelopes[0]
envelope_trace_header = transaction_envelope.headers["trace"]
assert "trace_id" in envelope_trace_header
assert type(envelope_trace_header["trace_id"]) == str
assert envelope_trace_header["trace_id"] == "771a43a4192642f0b136d5159a501700"
assert "public_key" in envelope_trace_header
assert type(envelope_trace_header["public_key"]) == str
assert envelope_trace_header["public_key"] == "frontendpublickey"
assert "sample_rate" in envelope_trace_header
assert type(envelope_trace_header["sample_rate"]) == str
assert envelope_trace_header["sample_rate"] == "1.0"
assert "sampled" in envelope_trace_header
assert type(envelope_trace_header["sampled"]) == str
assert envelope_trace_header["sampled"] == "true"
assert "release" in envelope_trace_header
assert type(envelope_trace_header["release"]) == str
assert envelope_trace_header["release"] == "myfrontend@1.2.3"
assert "environment" in envelope_trace_header
assert type(envelope_trace_header["environment"]) == str
assert envelope_trace_header["environment"] == "bird"
assert "transaction" in envelope_trace_header
assert type(envelope_trace_header["transaction"]) == str
assert envelope_trace_header["transaction"] == "bar"
def test_dsc_continuation_of_trace_sample_rate_changed_in_traces_sampler(
sentry_init, capture_envelopes
):
"""
Another service calls our service and passes tracing information to us.
Our service is continuing the trace, but modifies the sample rate.
The DSC propagated further should contain the updated sample rate.
"""
def my_traces_sampler(sampling_context):
return 0.25
sentry_init(
dsn="https://mysecret@o1234.ingest.sentry.io/12312012",
release="myapp@0.0.1",
environment="canary",
traces_sampler=my_traces_sampler,
transport=TransportWithOptions,
)
envelopes = capture_envelopes()
# This is what the upstream service sends us
sentry_trace = "771a43a4192642f0b136d5159a501700-1234567890abcdef-1"
baggage = (
"other-vendor-value-1=foo;bar;baz, "
"sentry-trace_id=771a43a4192642f0b136d5159a501700, "
"sentry-public_key=frontendpublickey, "
"sentry-sample_rate=1.0, "
"sentry-sampled=true, "
"sentry-release=myfrontend@1.2.3, "
"sentry-environment=bird, "
"sentry-transaction=bar, "
"other-vendor-value-2=foo;bar;"
)
incoming_http_headers = {
"HTTP_SENTRY_TRACE": sentry_trace,
"HTTP_BAGGAGE": baggage,
}
# We continue the incoming trace and start a new transaction
with mock.patch("sentry_sdk.tracing_utils.Random.randrange", return_value=125000):
transaction = sentry_sdk.continue_trace(incoming_http_headers)
with sentry_sdk.start_transaction(transaction, name="foo"):
pass
assert len(envelopes) == 1
transaction_envelope = envelopes[0]
envelope_trace_header = transaction_envelope.headers["trace"]
assert "trace_id" in envelope_trace_header
assert type(envelope_trace_header["trace_id"]) == str
assert envelope_trace_header["trace_id"] == "771a43a4192642f0b136d5159a501700"
assert "public_key" in envelope_trace_header
assert type(envelope_trace_header["public_key"]) == str
assert envelope_trace_header["public_key"] == "frontendpublickey"
assert "sample_rate" in envelope_trace_header
assert type(envelope_trace_header["sample_rate"]) == str
assert envelope_trace_header["sample_rate"] == "0.25"
assert "sampled" in envelope_trace_header
assert type(envelope_trace_header["sampled"]) == str
assert envelope_trace_header["sampled"] == "true"
assert "release" in envelope_trace_header
assert type(envelope_trace_header["release"]) == str
assert envelope_trace_header["release"] == "myfrontend@1.2.3"
assert "environment" in envelope_trace_header
assert type(envelope_trace_header["environment"]) == str
assert envelope_trace_header["environment"] == "bird"
assert "transaction" in envelope_trace_header
assert type(envelope_trace_header["transaction"]) == str
assert envelope_trace_header["transaction"] == "bar"
def test_dsc_issue(sentry_init, capture_envelopes):
"""
Our service is a standalone service that does not have tracing enabled. Just uses Sentry for error reporting.
"""
sentry_init(
dsn="https://mysecret@o1234.ingest.sentry.io/12312012",
release="myapp@0.0.1",
environment="canary",
transport=TransportWithOptions,
)
envelopes = capture_envelopes()
# No transaction is started, just an error is captured
try:
1 / 0
except ZeroDivisionError as exp:
sentry_sdk.capture_exception(exp)
assert len(envelopes) == 1
error_envelope = envelopes[0]
envelope_trace_header = error_envelope.headers["trace"]
assert "trace_id" in envelope_trace_header
assert type(envelope_trace_header["trace_id"]) == str
assert "public_key" in envelope_trace_header
assert type(envelope_trace_header["public_key"]) == str
assert envelope_trace_header["public_key"] == "mysecret"
assert "org_id" in envelope_trace_header
assert type(envelope_trace_header["org_id"]) == str
assert envelope_trace_header["org_id"] == "1234"
assert "sample_rate" not in envelope_trace_header
assert "sampled" not in envelope_trace_header
assert "release" in envelope_trace_header
assert type(envelope_trace_header["release"]) == str
assert envelope_trace_header["release"] == "myapp@0.0.1"
assert "environment" in envelope_trace_header
assert type(envelope_trace_header["environment"]) == str
assert envelope_trace_header["environment"] == "canary"
assert "transaction" not in envelope_trace_header
def test_dsc_issue_with_tracing(sentry_init, capture_envelopes):
"""
Our service has tracing enabled and an error occurs in an transaction.
Envelopes containing errors also have the same DSC than the transaction envelopes.
"""
sentry_init(
dsn="https://mysecret@o1234.ingest.sentry.io/12312012",
release="myapp@0.0.1",
environment="canary",
traces_sample_rate=1.0,
transport=TransportWithOptions,
)
envelopes = capture_envelopes()
# We start a new transaction and an error occurs
with sentry_sdk.start_transaction(name="foo"):
try:
1 / 0
except ZeroDivisionError as exp:
sentry_sdk.capture_exception(exp)
assert len(envelopes) == 2
error_envelope, transaction_envelope = envelopes
assert error_envelope.headers["trace"] == transaction_envelope.headers["trace"]
envelope_trace_header = error_envelope.headers["trace"]
assert "trace_id" in envelope_trace_header
assert type(envelope_trace_header["trace_id"]) == str
assert "public_key" in envelope_trace_header
assert type(envelope_trace_header["public_key"]) == str
assert envelope_trace_header["public_key"] == "mysecret"
assert "org_id" in envelope_trace_header
assert type(envelope_trace_header["org_id"]) == str
assert envelope_trace_header["org_id"] == "1234"
assert "sample_rate" in envelope_trace_header
assert envelope_trace_header["sample_rate"] == "1.0"
assert type(envelope_trace_header["sample_rate"]) == str
assert "sampled" in envelope_trace_header
assert type(envelope_trace_header["sampled"]) == str
assert envelope_trace_header["sampled"] == "true"
assert "release" in envelope_trace_header
assert type(envelope_trace_header["release"]) == str
assert envelope_trace_header["release"] == "myapp@0.0.1"
assert "environment" in envelope_trace_header
assert type(envelope_trace_header["environment"]) == str
assert envelope_trace_header["environment"] == "canary"
assert "transaction" in envelope_trace_header
assert type(envelope_trace_header["transaction"]) == str
assert envelope_trace_header["transaction"] == "foo"
@pytest.mark.parametrize(
"traces_sample_rate",
[
0, # no traces will be started, but if incoming traces will be continued (by our instrumentations, not happening in this test)
None, # no tracing at all. This service will never create transactions.
],
)
def test_dsc_issue_twp(sentry_init, capture_envelopes, traces_sample_rate):
"""
Our service does not have tracing enabled, but we receive tracing information from an upstream service.
Error envelopes still contain a DCS. This is called "tracing without performance" or TWP for short.
This way if I have three services A, B, and C, and A and C have tracing enabled, but B does not,
we still can see the full trace in Sentry, and associate errors send by service B to Sentry.
(This test would be service B in this scenario)
"""
sentry_init(
dsn="https://mysecret@o1234.ingest.sentry.io/12312012",
release="myapp@0.0.1",
environment="canary",
traces_sample_rate=traces_sample_rate,
transport=TransportWithOptions,
)
envelopes = capture_envelopes()
# This is what the upstream service sends us
sentry_trace = "771a43a4192642f0b136d5159a501700-1234567890abcdef-1"
baggage = (
"other-vendor-value-1=foo;bar;baz, "
"sentry-trace_id=771a43a4192642f0b136d5159a501700, "
"sentry-public_key=frontendpublickey, "
"sentry-sample_rate=0.01337, "
"sentry-sampled=true, "
"sentry-release=myfrontend@1.2.3, "
"sentry-environment=bird, "
"sentry-transaction=bar, "
"other-vendor-value-2=foo;bar;"
)
incoming_http_headers = {
"HTTP_SENTRY_TRACE": sentry_trace,
"HTTP_BAGGAGE": baggage,
}
# We continue the trace (meaning: saving the incoming trace information on the scope)
# but in this test, we do not start a transaction.
sentry_sdk.continue_trace(incoming_http_headers)
# No transaction is started, just an error is captured
try:
1 / 0
except ZeroDivisionError as exp:
sentry_sdk.capture_exception(exp)
assert len(envelopes) == 1
error_envelope = envelopes[0]
envelope_trace_header = error_envelope.headers["trace"]
assert "trace_id" in envelope_trace_header
assert type(envelope_trace_header["trace_id"]) == str
assert envelope_trace_header["trace_id"] == "771a43a4192642f0b136d5159a501700"
assert "public_key" in envelope_trace_header
assert type(envelope_trace_header["public_key"]) == str
assert envelope_trace_header["public_key"] == "frontendpublickey"
assert "sample_rate" in envelope_trace_header
assert type(envelope_trace_header["sample_rate"]) == str
assert envelope_trace_header["sample_rate"] == "0.01337"
assert "sampled" in envelope_trace_header
assert type(envelope_trace_header["sampled"]) == str
assert envelope_trace_header["sampled"] == "true"
assert "release" in envelope_trace_header
assert type(envelope_trace_header["release"]) == str
assert envelope_trace_header["release"] == "myfrontend@1.2.3"
assert "environment" in envelope_trace_header
assert type(envelope_trace_header["environment"]) == str
assert envelope_trace_header["environment"] == "bird"
assert "transaction" in envelope_trace_header
assert type(envelope_trace_header["transaction"]) == str
assert envelope_trace_header["transaction"] == "bar"
| TransportWithOptions |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 9966,
"end": 12203
} | class ____(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
geom_param_pos = (0, 1)
spheroid = None
def __init__(self, expr1, expr2, spheroid=None, **extra):
expressions = [expr1, expr2]
if spheroid is not None:
self.spheroid = self._handle_param(spheroid, "spheroid", bool)
super().__init__(*expressions, **extra)
def as_postgresql(self, compiler, connection, **extra_context):
clone = self.copy()
function = None
expr2 = clone.source_expressions[1]
geography = self.source_is_geography()
if expr2.output_field.geography != geography:
if isinstance(expr2, Value):
expr2.output_field.geography = geography
else:
clone.source_expressions[1] = Cast(
expr2,
GeometryField(srid=expr2.output_field.srid, geography=geography),
)
if not geography and self.geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need special
# distance functions.
if self.spheroid:
# DistanceSpheroid is more accurate and resource intensive than
# DistanceSphere.
function = connection.ops.spatial_function_name("DistanceSpheroid")
# Replace boolean param by the real spheroid of the base field
clone.source_expressions.append(
Value(self.geo_field.spheroid(connection))
)
else:
function = connection.ops.spatial_function_name("DistanceSphere")
return super(Distance, clone).as_sql(
compiler, connection, function=function, **extra_context
)
def as_sqlite(self, compiler, connection, **extra_context):
if self.geo_field.geodetic(connection):
# SpatiaLite returns NULL instead of zero on geodetic coordinates
extra_context["template"] = (
"COALESCE(%(function)s(%(expressions)s, %(spheroid)s), 0)"
)
extra_context["spheroid"] = int(bool(self.spheroid))
return super().as_sql(compiler, connection, **extra_context)
| Distance |
python | coleifer__peewee | tests/shortcuts.py | {
"start": 19267,
"end": 22910
} | class ____(ModelTestCase):
database = get_in_memory_db()
requires = [User, Tweet, Tag, TweetTag]
def setUp(self):
super(TestDictToModel, self).setUp()
self.user = User.create(username='peewee')
def test_simple(self):
data = {'username': 'peewee', 'id': self.user.id}
inst = dict_to_model(User, data)
self.assertTrue(isinstance(inst, User))
self.assertEqual(inst.username, 'peewee')
self.assertEqual(inst.id, self.user.id)
def test_update_model_from_dict(self):
data = {'content': 'tweet', 'user': {'username': 'zaizee'}}
with self.assertQueryCount(0):
user = User(id=3, username='orig')
tweet = Tweet(id=4, content='orig', user=user)
obj = update_model_from_dict(tweet, data)
self.assertEqual(obj.id, 4)
self.assertEqual(obj.content, 'tweet')
self.assertEqual(obj.user.id, 3)
self.assertEqual(obj.user.username, 'zaizee')
def test_related(self):
data = {
'id': 2,
'content': 'tweet-1',
'user': {'id': self.user.id, 'username': 'peewee'}}
with self.assertQueryCount(0):
inst = dict_to_model(Tweet, data)
self.assertTrue(isinstance(inst, Tweet))
self.assertEqual(inst.id, 2)
self.assertEqual(inst.content, 'tweet-1')
self.assertTrue(isinstance(inst.user, User))
self.assertEqual(inst.user.id, self.user.id)
self.assertEqual(inst.user.username, 'peewee')
data['user'] = self.user.id
with self.assertQueryCount(0):
inst = dict_to_model(Tweet, data)
with self.assertQueryCount(1):
self.assertEqual(inst.user, self.user)
def test_backrefs(self):
data = {
'id': self.user.id,
'username': 'peewee',
'tweets': [
{'id': 1, 'content': 't1'},
{'id': 2, 'content': 't2'},
]}
with self.assertQueryCount(0):
inst = dict_to_model(User, data)
self.assertEqual(inst.id, self.user.id)
self.assertEqual(inst.username, 'peewee')
self.assertTrue(isinstance(inst.tweets, list))
t1, t2 = inst.tweets
self.assertEqual(t1.id, 1)
self.assertEqual(t1.content, 't1')
self.assertEqual(t1.user, self.user)
self.assertEqual(t2.id, 2)
self.assertEqual(t2.content, 't2')
self.assertEqual(t2.user, self.user)
def test_unknown_attributes(self):
data = {
'id': self.user.id,
'username': 'peewee',
'xx': 'does not exist'}
self.assertRaises(AttributeError, dict_to_model, User, data)
inst = dict_to_model(User, data, ignore_unknown=True)
self.assertEqual(inst.xx, 'does not exist')
def test_ignore_id_attribute(self):
class Register(Model):
key = CharField(primary_key=True)
data = {'id': 100, 'key': 'k1'}
self.assertRaises(AttributeError, dict_to_model, Register, data)
inst = dict_to_model(Register, data, ignore_unknown=True)
self.assertEqual(inst.__data__, {'key': 'k1'})
class Base(Model):
class Meta:
primary_key = False
class Register2(Model):
key = CharField(primary_key=True)
self.assertRaises(AttributeError, dict_to_model, Register2, data)
inst = dict_to_model(Register2, data, ignore_unknown=True)
self.assertEqual(inst.__data__, {'key': 'k1'})
| TestDictToModel |
python | chroma-core__chroma | chromadb/execution/expression/operator.py | {
"start": 36546,
"end": 40446
} | class ____(Rank):
"""Reciprocal Rank Fusion for combining multiple ranking strategies.
RRF formula: score = -sum(weight_i / (k + rank_i)) for each ranking strategy
The negative is used because RRF produces higher scores for better results,
but Chroma uses ascending order (lower scores = better results).
Args:
ranks: List of Rank expressions to fuse (must have at least one)
k: Smoothing constant (default: 60, standard in literature)
weights: Optional weights for each ranking strategy. If not provided,
all ranks are weighted equally (weight=1.0 each).
normalize: If True, normalize weights to sum to 1.0 (default: False).
When False, weights are used as-is for relative importance.
When True, weights are scaled so they sum to 1.0.
Examples:
# Note: metadata fields (like "sparse_embedding" below) are user-defined and can store any data.
# The field name is just an example - use whatever name matches your metadata structure.
# Basic RRF combining KNN rankings (equal weight)
Rrf([
Knn(query=[0.1, 0.2], return_rank=True),
Knn(query=another_vector, key="custom_embedding", return_rank=True) # Example metadata field
])
# Weighted RRF with relative weights (not normalized)
Rrf(
ranks=[
Knn(query=[0.1, 0.2], return_rank=True),
Knn(query=another_vector, key="custom_embedding", return_rank=True) # Example metadata field
weights=[2.0, 1.0], # First ranking is 2x more important
k=100
)
# Weighted RRF with normalized weights
Rrf(
ranks=[
Knn(query=[0.1, 0.2], return_rank=True),
Knn(query=another_vector, key="custom_embedding", return_rank=True) # Example metadata field
],
weights=[3.0, 1.0], # Will be normalized to [0.75, 0.25]
normalize=True,
k=100
)
"""
ranks: List[Rank]
k: int = 60
weights: Optional[List[float]] = None
normalize: bool = False
def to_dict(self) -> Dict[str, Any]:
"""Convert RRF to a composition of existing expression operators.
Builds: -sum(weight_i / (k + rank_i)) for each rank
Using Python's overloaded operators for cleaner code.
"""
# Validate RRF parameters
if not self.ranks:
raise ValueError("RRF requires at least one rank")
if self.k <= 0:
raise ValueError(f"k must be positive, got {self.k}")
# Validate weights if provided
if self.weights is not None:
if len(self.weights) != len(self.ranks):
raise ValueError(
f"Number of weights ({len(self.weights)}) must match number of ranks ({len(self.ranks)})"
)
if any(w < 0.0 for w in self.weights):
raise ValueError("All weights must be non-negative")
# Populate weights with 1.0 if not provided
weights = self.weights if self.weights else [1.0] * len(self.ranks)
# Normalize weights if requested
if self.normalize:
weight_sum = sum(weights)
if weight_sum == 0:
raise ValueError("Sum of weights must be positive when normalize=True")
weights = [w / weight_sum for w in weights]
# Zip weights with ranks and build terms: weight / (k + rank)
terms = [w / (self.k + rank) for w, rank in zip(weights, self.ranks)]
# Sum all terms - guaranteed to have at least one
rrf_sum: Rank = terms[0]
for term in terms[1:]:
rrf_sum = rrf_sum + term
# Negate (RRF gives higher scores for better, Chroma needs lower for better)
return (-rrf_sum).to_dict()
@dataclass
| Rrf |
python | django__django | tests/prefetch_related/models.py | {
"start": 2591,
"end": 2714
} | class ____(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ["id"]
| Qualification |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_quotes/docstring_singles_mixed_quotes_class_var_1.py | {
"start": 0,
"end": 336
} | class ____():
''"Start with empty string" ' and lint docstring safely'
''' Not a docstring '''
def foo(self, bar='''not a docstring'''):
''"Start with empty string" ' and lint docstring safely'
pass
class Nested(foo()[:]): ''"Start with empty string" ' and lint docstring safely'; pass
| SingleLineDocstrings |
python | tiangolo__fastapi | docs_src/body_nested_models/tutorial003.py | {
"start": 109,
"end": 414
} | class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
tags: Set[str] = set()
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item):
results = {"item_id": item_id, "item": item}
return results
| Item |
python | pypa__pip | src/pip/_internal/exceptions.py | {
"start": 13171,
"end": 13743
} | class ____(DiagnosticPipError, InstallationError):
reference = "metadata-generation-failed"
def __init__(
self,
*,
package_details: str,
) -> None:
super().__init__(
message="Encountered error while generating package metadata.",
context=escape(package_details),
hint_stmt="See above for details.",
note_stmt="This is an issue with the package mentioned above, not pip.",
)
def __str__(self) -> str:
return "metadata generation failed"
| MetadataGenerationFailed |
python | gevent__gevent | src/greentest/3.14/test_urllib2_localnet.py | {
"start": 2349,
"end": 6763
} | class ____:
"""Handler for performing digest authentication."""
def __init__(self):
self._request_num = 0
self._nonces = []
self._users = {}
self._realm_name = "Test Realm"
self._qop = "auth"
def set_qop(self, qop):
self._qop = qop
def set_users(self, users):
assert isinstance(users, dict)
self._users = users
def set_realm(self, realm):
self._realm_name = realm
def _generate_nonce(self):
self._request_num += 1
nonce = hashlib.md5(str(self._request_num).encode("ascii")).hexdigest()
self._nonces.append(nonce)
return nonce
def _create_auth_dict(self, auth_str):
first_space_index = auth_str.find(" ")
auth_str = auth_str[first_space_index+1:]
parts = auth_str.split(",")
auth_dict = {}
for part in parts:
name, value = part.split("=")
name = name.strip()
if value[0] == '"' and value[-1] == '"':
value = value[1:-1]
else:
value = value.strip()
auth_dict[name] = value
return auth_dict
def _validate_auth(self, auth_dict, password, method, uri):
final_dict = {}
final_dict.update(auth_dict)
final_dict["password"] = password
final_dict["method"] = method
final_dict["uri"] = uri
HA1_str = "%(username)s:%(realm)s:%(password)s" % final_dict
HA1 = hashlib.md5(HA1_str.encode("ascii")).hexdigest()
HA2_str = "%(method)s:%(uri)s" % final_dict
HA2 = hashlib.md5(HA2_str.encode("ascii")).hexdigest()
final_dict["HA1"] = HA1
final_dict["HA2"] = HA2
response_str = "%(HA1)s:%(nonce)s:%(nc)s:" \
"%(cnonce)s:%(qop)s:%(HA2)s" % final_dict
response = hashlib.md5(response_str.encode("ascii")).hexdigest()
return response == auth_dict["response"]
def _return_auth_challenge(self, request_handler):
request_handler.send_response(407, "Proxy Authentication Required")
request_handler.send_header("Content-Type", "text/html")
request_handler.send_header(
'Proxy-Authenticate', 'Digest realm="%s", '
'qop="%s",'
'nonce="%s", ' % \
(self._realm_name, self._qop, self._generate_nonce()))
# XXX: Not sure if we're supposed to add this next header or
# not.
#request_handler.send_header('Connection', 'close')
request_handler.end_headers()
request_handler.wfile.write(b"Proxy Authentication Required.")
return False
def handle_request(self, request_handler):
"""Performs digest authentication on the given HTTP request
handler. Returns True if authentication was successful, False
otherwise.
If no users have been set, then digest auth is effectively
disabled and this method will always return True.
"""
if len(self._users) == 0:
return True
if "Proxy-Authorization" not in request_handler.headers:
return self._return_auth_challenge(request_handler)
else:
auth_dict = self._create_auth_dict(
request_handler.headers["Proxy-Authorization"]
)
if auth_dict["username"] in self._users:
password = self._users[ auth_dict["username"] ]
else:
return self._return_auth_challenge(request_handler)
if not auth_dict.get("nonce") in self._nonces:
return self._return_auth_challenge(request_handler)
else:
self._nonces.remove(auth_dict["nonce"])
auth_validated = False
# MSIE uses short_path in its validation, but Python's
# urllib.request uses the full path, so we're going to see if
# either of them works here.
for path in [request_handler.path, request_handler.short_path]:
if self._validate_auth(auth_dict,
password,
request_handler.command,
path):
auth_validated = True
if not auth_validated:
return self._return_auth_challenge(request_handler)
return True
| DigestAuthHandler |
python | encode__django-rest-framework | rest_framework/test.py | {
"start": 13619,
"end": 13890
} | class ____(testcases.LiveServerTestCase):
client_class = APIClient
def cleanup_url_patterns(cls):
if hasattr(cls, '_module_urlpatterns'):
cls._module.urlpatterns = cls._module_urlpatterns
else:
del cls._module.urlpatterns
| APILiveServerTestCase |
python | pytorch__pytorch | test/inductor/test_cutlass_evt.py | {
"start": 3201,
"end": 19583
} | class ____(TestCase):
@unittest.skipIf(not SM90OrLater, "need sm_90")
@unittest.skipIf(not try_import_cutlass(), "requires cutlass")
def test_py_codegen_accumulator_return(self):
from torch._inductor.codegen.cuda.cutlass_python_evt import CutlassEVTCodegen
from torch._inductor.virtualized import V
size = (100, 300, 200)
buf0 = MockComputedBuffer("buf0", None, torch.float32, size)
buf1 = MockComputedBuffer("buf1", None, torch.float32, size)
buf2 = MockComputedBuffer("buf2", None, torch.float32, size)
# buf0 is acc
# buf1 is external
def inner_fn_buf3(index):
tmp0 = buf0.make_loader()(index)
tmp1 = buf1.make_loader()(index)
tmp2 = buf2.make_loader()(index)
return tmp0 * tmp1 + tmp2
def inner_fn_buf4(index):
tmp0 = buf0.make_loader()(index)
tmp3 = buf3.make_loader()(index)
return tmp0 + tmp3
buf3 = MockComputedBuffer("buf3", inner_fn_buf3, torch.float32, size)
buf4 = MockComputedBuffer("buf4", inner_fn_buf4, torch.float32, size)
with V.set_graph_handler(
MockGraphHandler(
{"buf0": buf0, "buf1": buf1, "buf2": buf2, "buf3": buf3, "buf4": buf4}
)
):
reads, writes, renames, code = CutlassEVTCodegen.ir_to_evt_python_code(
"buf0",
[
MockSchedulerNode(buf3),
MockSchedulerNode(buf4, last_usage=OrderedSet(["buf3"])),
],
OrderedSet([]),
)
self.assertExpectedInline(reads, """['buf1', 'buf2']""")
self.assertExpectedInline(writes, """['buf0', 'buf3', 'buf4']""")
self.assertExpectedInline(
renames,
"""{'accum': 'buf0', 'tmp_0': 'buf0', 'buf1': 'buf1', 'buf2': 'buf2', 'tmp_2': 'buf3', 'D': 'buf4'}""",
)
self.assertExpectedInline(
code,
"""\
def fn(accum, buf1, buf2):
tmp_0 = accum
tmp_1 = tmp_0 * buf1
tmp_2 = tmp_1 + buf2
D = tmp_0 + tmp_2
return tmp_0, tmp_2, D""",
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@unittest.skipIf(not try_import_cutlass(), "requires cutlass")
def test_py_codegen_disjoint_read_indexing(self):
from torch._inductor.codegen.cuda.cutlass_python_evt import CutlassEVTCodegen
from torch._inductor.virtualized import V
size = (100, 300, 200)
buf0 = MockComputedBuffer("buf0", None, torch.float32, size)
permuted_buf_0 = PermuteView.create(buf0, [1, 0, 2])
buf1 = MockComputedBuffer("buf1", None, torch.float32, size)
buf2 = MockComputedBuffer("buf2", None, torch.float32, size)
# buf0 is acc
# buf1 is external
def inner_fn_buf3(index):
tmp0 = permuted_buf_0.make_loader()(index)
tmp1 = buf1.make_loader()(index)
tmp2 = buf2.make_loader()(index)
return tmp0 * tmp1 + tmp2
def inner_fn_buf4(index):
tmp0 = buf0.make_loader()(index)
tmp3 = buf3.make_loader()(index)
return tmp0 + tmp3
buf3 = MockComputedBuffer("buf3", inner_fn_buf3, torch.float32, size)
buf4 = MockComputedBuffer("buf4", inner_fn_buf4, torch.float32, size)
with V.set_graph_handler(
MockGraphHandler(
{"buf0": buf0, "buf1": buf1, "buf2": buf2, "buf3": buf3, "buf4": buf4}
)
):
result = None
try:
CutlassEVTCodegen.ir_to_evt_python_code(
"buf0",
[MockSchedulerNode(buf3), MockSchedulerNode(buf4)],
OrderedSet([]),
)
except NotImplementedError as e:
result = e
self.assertExpectedInline(
str(result),
"""Unsupported indexing for buf0 with index 200*i0 + 60000*i1 + i2, \
index strides [200, 60000, 1], and layout stride [60000, 200, 1]""",
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@unittest.skipIf(not try_import_cutlass(), "requires cutlass")
def test_py_codegen_broadcasting(self):
from torch._inductor.codegen.cuda.cutlass_python_evt import CutlassEVTCodegen
from torch._inductor.virtualized import V
size = (100, 300, 200)
buf0 = MockComputedBuffer("buf0", None, torch.float32, size)
buf1 = MockComputedBuffer("buf1", None, torch.float32, size)
buf2 = MockComputedBuffer("buf2", None, torch.float32, size)
# buf0 is acc
# buf1 is external
def inner_fn_buf3(index):
tmp0 = buf0.make_loader()(index)
tmp1 = buf1.make_loader()(index)
tmp2 = buf2.make_loader()(index)
return tmp0 * tmp1 + tmp2
def inner_fn_buf4(index):
tmp0 = buf0.make_loader()(index)
tmp3 = buf3.make_loader()(index)
return tmp0 + tmp3 * tmp3
buf3 = MockComputedBuffer("buf3", inner_fn_buf3, torch.float32, size)
buf4 = MockComputedBuffer(
"buf4", inner_fn_buf4, torch.float32, (100, 300, 1)
) # broadcast
with V.set_graph_handler(
MockGraphHandler(
{"buf0": buf0, "buf1": buf1, "buf2": buf2, "buf3": buf3, "buf4": buf4}
)
):
reads, writes, renames, code = CutlassEVTCodegen.ir_to_evt_python_code(
"buf0",
[
MockSchedulerNode(buf3),
MockSchedulerNode(buf4, last_usage=OrderedSet(["buf0"])),
],
OrderedSet([]),
)
self.assertExpectedInline(reads, """['buf1', 'buf2']""")
self.assertExpectedInline(writes, """['buf0', 'buf3', 'buf4']""")
self.assertExpectedInline(
renames,
"""{'accum': 'buf0', 'tmp_0': 'buf0', 'buf1': 'buf1', 'buf2': 'buf2', 'tmp_2': 'buf3', 'D': 'buf4'}""",
)
self.assertExpectedInline(
code,
"""\
def fn(accum, buf1, buf2):
tmp_0 = accum
tmp_1 = tmp_0 * buf1
tmp_2 = tmp_1 + buf2
tmp_3 = tmp_2 * tmp_2
D = tmp_0 + tmp_3
return tmp_0, tmp_2, D""",
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@unittest.skipIf(not try_import_cutlass(), "requires cutlass")
def test_py_codegen(self):
from torch._inductor.codegen.cuda.cutlass_python_evt import CutlassEVTCodegen
from torch._inductor.virtualized import V
size = (100, 300, 200)
buf0 = MockComputedBuffer("buf0", None, torch.float32, size)
buf1 = MockComputedBuffer("buf1", None, torch.float32, size)
buf2 = MockComputedBuffer("buf2", None, torch.float32, size)
# buf0 is acc
# buf1 is external
def inner_fn_buf3(index):
tmp0 = buf0.make_loader()(index)
tmp1 = buf1.make_loader()(index)
tmp2 = buf2.make_loader()(index)
return tmp0 * tmp1 + tmp2
def inner_fn_buf4(index):
tmp0 = buf0.make_loader()(index)
tmp3 = buf3.make_loader()(index)
return tmp0 + tmp3
buf3 = MockComputedBuffer("buf3", inner_fn_buf3, torch.float32, size)
buf4 = MockComputedBuffer("buf4", inner_fn_buf4, torch.float32, size)
with V.set_graph_handler(
MockGraphHandler(
{"buf0": buf0, "buf1": buf1, "buf2": buf2, "buf3": buf3, "buf4": buf4}
)
):
reads, writes, renames, code = CutlassEVTCodegen.ir_to_evt_python_code(
"buf0",
[
MockSchedulerNode(buf3),
MockSchedulerNode(buf4),
],
OrderedSet(["buf0"]),
)
self.assertExpectedInline(reads, """['buf1', 'buf2']""")
self.assertExpectedInline(writes, """['buf3', 'buf4']""")
self.assertExpectedInline(
renames,
"""{'accum': 'buf0', 'buf1': 'buf1', 'buf2': 'buf2', 'tmp_1': 'buf3', 'D': 'buf4'}""",
)
self.assertExpectedInline(
code,
"""\
def fn(accum, buf1, buf2):
tmp_0 = accum * buf1
tmp_1 = tmp_0 + buf2
D = accum + tmp_1
return tmp_1, D""",
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@unittest.skipIf(not try_import_cutlass(), "requires cutlass")
def test_example_tensor_creation(self):
from torch._inductor.codegen.cuda.cutlass_lib_extensions.evt_extensions import (
create_example_tensors,
)
from torch._inductor.virtualized import V
with V.set_graph_handler(MockGraphHandler({})):
row_major_buf0 = MockComputedBuffer(
"buf0", None, torch.float32, (3, 4, 1), (4, 1, 0)
)
col_major_buf1 = MockComputedBuffer(
"buf1", None, torch.float32, (3, 2, 1), (1, 3, 0)
)
buffer_renames = {"buf0": "buf0", "buf1": "buf1", "acc": "buf0"}
name_to_buffer = {"buf0": row_major_buf0, "buf1": col_major_buf1}
result = create_example_tensors(
buffer_renames, name_to_buffer, lambda x: int(x)
)
self.assertEqual(result["acc"].shape, (3, 4, 1))
self.assertEqual(result["acc"].stride, (4, 1, 0))
self.assertEqual(
result["acc"].element, torch_dtype_to_cutlass_type(torch.float32)
)
self.assertEqual(result["buf1"].shape, (3, 2, 1))
self.assertEqual(result["buf1"].stride, (1, 3, 0))
self.assertEqual(
result["buf1"].element, torch_dtype_to_cutlass_type(torch.float32)
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@unittest.skipIf(not try_import_cutlass(), "requires cutlass")
def test_evt_argument_codegen(self):
from torch._inductor.codegen.cuda.cuda_env import get_cuda_arch
cuda_arch = int(get_cuda_arch()) # type: ignore[arg-type]
epilogue_functor = _trace(BIAS_CODE, EXAMPLE_TENSORS, cuda_arch)
self.assertExpectedInline(
_render_argument_type(
epilogue_functor,
_create_mock_buffer_name_map(EXAMPLE_TENSORS),
lambda x: int(x),
)[0],
"""\
{ /* thread */
{ /* F */
{ /* compute_1 */
{ /* compute_0 */
{}, /* accum */
{}, /* C */
{}, /* compute_0 */
},
{/* ptr_aux */ (float*) (ptr_0 + ptr_0_offset), /* null_default */ float(0), /* dAux */ {2048, _1{}, _0{}}}, /* aux */
{}, /* compute_1 */
},
{/* ptr_aux */ (float*) (ptr_1 + ptr_1_offset), /* dAux */ {2048, _1{}, _0{}}}, /* F */
},
{/* ptr_col */ (float*) (ptr_2 + ptr_2_offset), /* null_default */ float(0), /* dCol */ {}}, /* bias */
{}, /* compute_2 */
{}, /* compute_3 */
{}, /* compute_4 */
}
""",
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@unittest.skipIf(not try_import_cutlass(), "requires cutlass")
def test_evt_argument_codegen_return_accumulator(self):
from torch._inductor.codegen.cuda.cuda_env import get_cuda_arch
code = """
def fn(accum, bias):
E = accum
D = E + bias
return D, E
"""
example_tensors = {
"accum": CutlassTensor(
element=DataType.f32, shape=(M, N), layout_tag=LayoutType.RowMajor
),
"bias": BIAS,
# "beta": 0.5, TODO: mlazos support scalars
# "alpha": 0.5, TODO: mlazos support scalars
"D": CutlassTensor(
element=DataType.f32, shape=(M, N), layout_tag=LayoutType.RowMajor
),
"E": CutlassTensor(
element=DataType.f32, shape=(M, N), layout_tag=LayoutType.RowMajor
),
}
cuda_arch = int(get_cuda_arch()) # type: ignore[arg-type]
epilogue_functor = _trace(code, example_tensors, cuda_arch)
self.assertExpectedInline(
_render_argument_type(
epilogue_functor,
_create_mock_buffer_name_map(example_tensors),
lambda x: int(x),
)[0],
"""\
{ /* thread */
{ /* E */
{}, /* accum */
{/* ptr_aux */ (float*) (ptr_0 + ptr_0_offset), /* dAux */ {2048, _1{}, _0{}}}, /* E */
},
{/* ptr_col */ (float*) (ptr_1 + ptr_1_offset), /* null_default */ float(0), /* dCol */ {}}, /* bias */
{}, /* compute_0 */
}
""",
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@unittest.skipIf(not try_import_cutlass(), "requires cutlass")
def test_evt_codegen(self):
_, _, code, _ = trace(
BIAS_CODE,
EXAMPLE_TENSORS,
DataType.f32,
DataType.f32,
MockTileDescription(),
EpilogueScheduleType.ScheduleAuto,
_create_mock_buffer_name_map(EXAMPLE_TENSORS),
lambda x: x, # static shapes
)
self.assertExpectedInline(
code,
"""\
using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
cute::Shape<_128, _128, _8>, cutlass::epilogue::collective::EpilogueTileAuto,
float, float,
cutlass::epilogue::collective::EpilogueScheduleAuto
>;
using ElementC = float;
using StrideC = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>;
using TensorC = cutlass::epilogue::fusion::Sm90SrcFetch<float>;
using Accum = cutlass::epilogue::fusion::Sm90AccFetch;
using AuxDescriptor = cutlass::epilogue::collective::detail::AuxLoadDescriptor<EpilogueDescriptor, \
cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>, float>;
using Aux = cutlass::epilogue::fusion::Sm90AuxLoad<
AuxDescriptor::Stages, typename AuxDescriptor::EpilogueTile, float,
cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>, typename AuxDescriptor::SmemLayoutAtom, \
typename AuxDescriptor::CopyOpS2R
>;
using Bias = cutlass::epilogue::fusion::Sm90ColBroadcast<
0 /*Stages*/, typename EpilogueDescriptor::TileShape, float, float,
cute::Stride<cute::Int<1>, cute::Int<0>, cute::Int<0>>
>;
using Compute0 = cutlass::epilogue::fusion::Sm90Compute<
cutlass::plus, float, float,
cutlass::FloatRoundStyle::round_to_nearest
>;
using EVTCompute0 = cutlass::epilogue::fusion::Sm90EVT<
Compute0,
Accum,
TensorC>;
using Compute1 = cutlass::epilogue::fusion::Sm90Compute<
cutlass::plus, float, float,
cutlass::FloatRoundStyle::round_to_nearest
>;
using EVTCompute1 = cutlass::epilogue::fusion::Sm90EVT<
Compute1,
EVTCompute0,
Aux>;
using FDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
EpilogueDescriptor, cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>, float
>;
using F = cutlass::epilogue::fusion::Sm90AuxStore<
FDescriptor::Stages, typename FDescriptor::EpilogueTile, float,
cutlass::FloatRoundStyle::round_to_nearest, cute::Stride<int64_t, cute::Int<1>, \
cute::Int<0>>, typename FDescriptor::SmemLayoutAtom,
typename FDescriptor::CopyOpR2S
>;
using EVTF = cutlass::epilogue::fusion::Sm90EVT<
F,
EVTCompute1>;
using Compute2 = cutlass::epilogue::fusion::Sm90Compute<
cutlass::epilogue::thread::ReLu, float, float,
cutlass::FloatRoundStyle::round_to_nearest
>;
using Compute3 = cutlass::epilogue::fusion::Sm90Compute<
cutlass::plus, float, float,
cutlass::FloatRoundStyle::round_to_nearest
>;
using Compute4 = cutlass::epilogue::fusion::Sm90Compute<
cutlass::plus, float, float,
cutlass::FloatRoundStyle::round_to_nearest
>;
using DagCompute4 = cutlass::epilogue::fusion::Sm90TopologicalVisitor<
float,
cute::tuple<
cute::seq<>,
cute::seq<>,
cute::seq<0>,
cute::seq<2, 1>,
cute::seq<3, 0>,
>,
EVTF,
Bias,
Compute2,
Compute3,
Compute4
>;
using ElementD = float;
using StrideD = cute::Stride<int64_t, cute::Int<1>, cute::Int<0>>;
""",
)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
if HAS_CPU or HAS_CUDA_AND_TRITON:
run_tests(needs="filelock")
| TestCutlassEVT |
python | doocs__leetcode | lcci/03.05.Sort of Stacks/Solution.py | {
"start": 0,
"end": 683
} | class ____:
def __init__(self):
self.stk = []
def push(self, val: int) -> None:
t = []
while self.stk and self.stk[-1] < val:
t.append(self.stk.pop())
self.stk.append(val)
while t:
self.stk.append(t.pop())
def pop(self) -> None:
if not self.isEmpty():
self.stk.pop()
def peek(self) -> int:
return -1 if self.isEmpty() else self.stk[-1]
def isEmpty(self) -> bool:
return not self.stk
# Your SortedStack object will be instantiated and called as such:
# obj = SortedStack()
# obj.push(val)
# obj.pop()
# param_3 = obj.peek()
# param_4 = obj.isEmpty()
| SortedStack |
python | ray-project__ray | python/ray/data/_internal/issue_detection/issue_detector_configuration.py | {
"start": 448,
"end": 1138
} | class ____:
hanging_detector_config: HangingExecutionIssueDetectorConfig = field(
default_factory=HangingExecutionIssueDetectorConfig
)
hash_shuffle_detector_config: HashShuffleAggregatorIssueDetectorConfig = field(
default_factory=HashShuffleAggregatorIssueDetectorConfig
)
high_memory_detector_config: HighMemoryIssueDetectorConfig = field(
default_factory=HighMemoryIssueDetectorConfig
)
detectors: List[Type[IssueDetector]] = field(
default_factory=lambda: [
HangingExecutionIssueDetector,
HashShuffleAggregatorIssueDetector,
HighMemoryIssueDetector,
]
)
| IssueDetectorsConfiguration |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/random_gaussian_blur.py | {
"start": 285,
"end": 7713
} | class ____(BaseImagePreprocessingLayer):
"""Applies random Gaussian blur to images for data augmentation.
This layer performs a Gaussian blur operation on input images with a
randomly selected degree of blurring, controlled by the `factor` and
`sigma` arguments.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
factor: A single float or a tuple of two floats.
`factor` controls the extent to which the image hue is impacted.
`factor=0.0` makes this layer perform a no-op operation,
while a value of `1.0` performs the most aggressive
blurring available. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. Default is 1.0.
kernel_size: Integer. Size of the Gaussian kernel used for blurring.
Must be an odd integer. Default is 3.
sigma: Float or tuple of two floats. Standard deviation of the Gaussian
kernel. Controls the intensity of the blur. If a tuple is provided,
a value is sampled between the two for each image. Default is 1.0.
value_range: the range of values the incoming images will have.
Represented as a two-number tuple written `[low, high]`. This is
typically either `[0, 1]` or `[0, 255]` depending on how your
preprocessing pipeline is set up.
seed: Integer. Used to create a random seed.
"""
_USE_BASE_FACTOR = False
_FACTOR_BOUNDS = (0, 1)
def __init__(
self,
factor=1.0,
kernel_size=3,
sigma=1.0,
value_range=(0, 255),
data_format=None,
seed=None,
**kwargs,
):
super().__init__(data_format=data_format, **kwargs)
self._set_factor(factor)
self.kernel_size = self._set_kernel_size(kernel_size, "kernel_size")
self.sigma = self._set_factor_by_name(sigma, "sigma")
self.value_range = value_range
self.seed = seed
self.generator = SeedGenerator(seed)
def _set_kernel_size(self, factor, name):
error_msg = f"{name} must be an odd number. Received: {name}={factor}"
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
error_msg = (
f"The `{name}` argument should be a number "
"(or a list of two numbers) "
f"Received: {name}={factor}"
)
raise ValueError(error_msg)
if (factor[0] % 2 == 0) or (factor[1] % 2 == 0):
raise ValueError(error_msg)
lower, upper = factor
elif isinstance(factor, (int, float)):
if factor % 2 == 0:
raise ValueError(error_msg)
lower, upper = factor, factor
else:
raise ValueError(error_msg)
return lower, upper
def _set_factor_by_name(self, factor, name):
error_msg = (
f"The `{name}` argument should be a number "
"(or a list of two numbers) "
"in the range "
f"[{self._FACTOR_BOUNDS[0]}, {self._FACTOR_BOUNDS[1]}]. "
f"Received: factor={factor}"
)
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
raise ValueError(error_msg)
if (
factor[0] > self._FACTOR_BOUNDS[1]
or factor[1] < self._FACTOR_BOUNDS[0]
):
raise ValueError(error_msg)
lower, upper = sorted(factor)
elif isinstance(factor, (int, float)):
if (
factor < self._FACTOR_BOUNDS[0]
or factor > self._FACTOR_BOUNDS[1]
):
raise ValueError(error_msg)
factor = abs(factor)
lower, upper = [max(-factor, self._FACTOR_BOUNDS[0]), factor]
else:
raise ValueError(error_msg)
return lower, upper
def get_random_transformation(self, data, training=True, seed=None):
if not training:
return None
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
rank = len(images_shape)
if rank == 3:
batch_size = 1
elif rank == 4:
batch_size = images_shape[0]
else:
raise ValueError(
"Expected the input image to be rank 3 or 4. Received "
f"inputs.shape={images_shape}"
)
seed = seed or self._get_seed_generator(self.backend._backend)
blur_probability = self.backend.random.uniform(
shape=(batch_size,),
minval=self.factor[0],
maxval=self.factor[1],
seed=seed,
)
random_threshold = self.backend.random.uniform(
shape=(batch_size,),
minval=0.0,
maxval=1.0,
seed=seed,
)
should_apply_blur = random_threshold < blur_probability
blur_factor = (
self.backend.random.uniform(
shape=(2,),
minval=self.sigma[0],
maxval=self.sigma[1],
seed=seed,
dtype=self.compute_dtype,
)
+ 1e-6
)
return {
"should_apply_blur": should_apply_blur,
"blur_factor": blur_factor,
}
def transform_images(self, images, transformation=None, training=True):
images = self.backend.cast(images, self.compute_dtype)
if training and transformation is not None:
blur_factor = transformation["blur_factor"]
should_apply_blur = transformation["should_apply_blur"]
blur_images = self.backend.image.gaussian_blur(
images,
kernel_size=self.kernel_size,
sigma=blur_factor,
data_format=self.data_format,
)
images = self.backend.numpy.where(
should_apply_blur[:, None, None, None],
blur_images,
images,
)
images = self.backend.numpy.clip(
images, self.value_range[0], self.value_range[1]
)
images = self.backend.cast(images, dtype=self.compute_dtype)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update(
{
"factor": self.factor,
"kernel_size": self.kernel_size,
"sigma": self.sigma,
"value_range": self.value_range,
"seed": self.seed,
}
)
return config
| RandomGaussianBlur |
python | kamyu104__LeetCode-Solutions | Python/count-of-substrings-containing-every-vowel-and-k-consonants-i.py | {
"start": 1405,
"end": 2372
} | class ____(object):
def countOfSubstrings(self, word, k):
"""
:type word: str
:type k: int
:rtype: int
"""
VOWELS = set("aeiou")
def count(k):
def update(i, d):
if word[i] not in VOWELS:
curr2[0] += d
return
x = ord(word[i])-ord('a')
if cnt[x] == 0:
curr1[0] += 1
cnt[x] += d
if cnt[x] == 0:
curr1[0] -= 1
result = 0
cnt = [0]*26
curr1, curr2 = [0], [0]
left = 0
for right in xrange(len(word)):
update(right, +1)
while curr1[0] == len(VOWELS) and curr2[0] >= k:
result += len(word)-right
update(left, -1)
left += 1
return result
return count(k)-count(k+1)
| Solution2 |
python | GoogleCloudPlatform__python-docs-samples | appengine/flexible/django_cloudsql/polls/apps.py | {
"start": 612,
"end": 661
} | class ____(AppConfig):
name = "polls"
| PollsConfig |
python | numba__llvmlite | llvmlite/tests/test_refprune.py | {
"start": 6395,
"end": 8161
} | class ____(BaseTestByIR):
refprune_bitmask = llvm.RefPruneSubpasses.PER_BB
per_bb_ir_1 = r"""
define void @main(i8* %ptr) {
call void @NRT_incref(i8* %ptr)
call void @NRT_decref(i8* %ptr)
ret void
}
"""
def test_per_bb_1(self):
mod, stats = self.check(self.per_bb_ir_1)
self.assertEqual(stats.basicblock, 2)
per_bb_ir_2 = r"""
define void @main(i8* %ptr) {
call void @NRT_incref(i8* %ptr)
call void @NRT_incref(i8* %ptr)
call void @NRT_incref(i8* %ptr)
call void @NRT_decref(i8* %ptr)
call void @NRT_decref(i8* %ptr)
ret void
}
"""
def test_per_bb_2(self):
mod, stats = self.check(self.per_bb_ir_2)
self.assertEqual(stats.basicblock, 4)
# not pruned
self.assertIn("call void @NRT_incref(ptr %ptr)", str(mod))
per_bb_ir_3 = r"""
define void @main(ptr %ptr, ptr %other) {
call void @NRT_incref(ptr %ptr)
call void @NRT_incref(ptr %ptr)
call void @NRT_decref(ptr %ptr)
call void @NRT_decref(ptr %other)
ret void
}
"""
def test_per_bb_3(self):
mod, stats = self.check(self.per_bb_ir_3)
self.assertEqual(stats.basicblock, 2)
# not pruned
self.assertIn("call void @NRT_decref(ptr %other)", str(mod))
per_bb_ir_4 = r"""
; reordered
define void @main(ptr %ptr, ptr %other) {
call void @NRT_incref(ptr %ptr)
call void @NRT_decref(ptr %ptr)
call void @NRT_decref(ptr %ptr)
call void @NRT_decref(ptr %other)
call void @NRT_incref(ptr %ptr)
ret void
}
"""
def test_per_bb_4(self):
mod, stats = self.check(self.per_bb_ir_4)
self.assertEqual(stats.basicblock, 4)
# not pruned
self.assertIn("call void @NRT_decref(ptr %other)", str(mod))
| TestPerBB |
python | joke2k__faker | tests/providers/test_ssn.py | {
"start": 8794,
"end": 9031
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("el_CY")
Faker.seed(0)
def test_vat_id(self):
for _ in range(100):
assert re.search(r"^CY\d{9}\w$", self.fake.vat_id())
| TestElCY |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.