language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | aio-libs__aiohttp | tests/test_websocket_parser.py | {
"start": 764,
"end": 21437
} | class ____(WebSocketReader):
"""WebSocketReader subclass that allows for patching parse_frame."""
def parse_frame(
self, data: bytes
) -> list[tuple[bool, int, bytes | bytearray, int]]:
# This method is overridden to allow for patching in tests.
frames: list[tuple[bool, int, bytes | bytearray, int]] = []
def _handle_frame(
fin: bool,
opcode: int,
payload: bytes | bytearray,
compressed: int,
) -> None:
# This method is overridden to allow for patching in tests.
frames.append((fin, opcode, payload, compressed))
with mock.patch.object(self, "_handle_frame", _handle_frame):
self._feed_data(data)
return frames
def build_frame(
message: bytes,
opcode: int,
noheader: bool = False,
is_fin: bool = True,
ZLibBackend: ZLibBackendWrapper | None = None,
mask: bool = False,
) -> bytes:
# Send a frame over the websocket with message as its payload.
compress = False
if ZLibBackend:
compress = True
compressobj = ZLibBackend.compressobj(wbits=-9)
message = compressobj.compress(message)
message = message + compressobj.flush(ZLibBackend.Z_SYNC_FLUSH)
if message.endswith(WS_DEFLATE_TRAILING):
message = message[:-4]
msg_length = len(message)
if is_fin:
header_first_byte = 0x80 | opcode
else:
header_first_byte = opcode
if compress:
header_first_byte |= 0x40
mask_bit = 0x80 if mask else 0
if msg_length < 126:
header = PACK_LEN1(header_first_byte, msg_length | mask_bit)
elif msg_length < 65536:
header = PACK_LEN2(header_first_byte, 126 | mask_bit, msg_length)
else:
header = PACK_LEN3(header_first_byte, 127 | mask_bit, msg_length)
if mask:
assert not noheader
mask_bytes = PACK_RANDBITS(random.getrandbits(32))
message_arr = bytearray(message)
websocket_mask(mask_bytes, message_arr)
return header + mask_bytes + message_arr
if noheader:
return message
else:
return header + message
def build_close_frame(
code: int = 1000, message: bytes = b"", noheader: bool = False
) -> bytes:
# Close the websocket, sending the specified code and message.
return build_frame(
PACK_CLOSE_CODE(code) + message, opcode=WSMsgType.CLOSE, noheader=noheader
)
@pytest.fixture()
def protocol(loop: asyncio.AbstractEventLoop) -> BaseProtocol:
transport = mock.Mock(spec_set=asyncio.Transport)
protocol = BaseProtocol(loop)
protocol.connection_made(transport)
return protocol
@pytest.fixture()
def out(loop: asyncio.AbstractEventLoop) -> WebSocketDataQueue:
return WebSocketDataQueue(mock.Mock(_reading_paused=False), 2**16, loop=loop)
@pytest.fixture()
def out_low_limit(
loop: asyncio.AbstractEventLoop, protocol: BaseProtocol
) -> WebSocketDataQueue:
return WebSocketDataQueue(protocol, 16, loop=loop)
@pytest.fixture()
def parser_low_limit(
out_low_limit: WebSocketDataQueue,
) -> PatchableWebSocketReader:
return PatchableWebSocketReader(out_low_limit, 4 * 1024 * 1024)
@pytest.fixture()
def parser(out: WebSocketDataQueue) -> PatchableWebSocketReader:
return PatchableWebSocketReader(out, 4 * 1024 * 1024)
def test_feed_data_remembers_exception(parser: WebSocketReader) -> None:
"""Verify that feed_data remembers an exception was already raised internally."""
error, data = parser.feed_data(struct.pack("!BB", 0b01100000, 0b00000000))
assert error is True
assert data == b""
error, data = parser.feed_data(b"")
assert error is True
assert data == b""
def test_parse_frame(parser: PatchableWebSocketReader) -> None:
parser.parse_frame(struct.pack("!BB", 0b00000001, 0b00000001))
res = parser.parse_frame(b"1")
fin, opcode, payload, compress = res[0]
assert (0, 1, b"1", 0) == (fin, opcode, payload, not not compress)
def test_parse_frame_length0(parser: PatchableWebSocketReader) -> None:
fin, opcode, payload, compress = parser.parse_frame(
struct.pack("!BB", 0b00000001, 0b00000000)
)[0]
assert (0, 1, b"", 0) == (fin, opcode, payload, not not compress)
def test_parse_frame_length2(parser: PatchableWebSocketReader) -> None:
parser.parse_frame(struct.pack("!BB", 0b00000001, 126))
parser.parse_frame(struct.pack("!H", 4))
res = parser.parse_frame(b"1234")
fin, opcode, payload, compress = res[0]
assert (0, 1, b"1234", 0) == (fin, opcode, payload, not not compress)
def test_parse_frame_length2_multi_byte(parser: PatchableWebSocketReader) -> None:
"""Ensure a multi-byte length is parsed correctly."""
expected_payload = b"1" * 32768
parser.parse_frame(struct.pack("!BB", 0b00000001, 126))
parser.parse_frame(struct.pack("!H", 32768))
res = parser.parse_frame(b"1" * 32768)
fin, opcode, payload, compress = res[0]
assert (0, 1, expected_payload, 0) == (fin, opcode, payload, not not compress)
def test_parse_frame_length2_multi_byte_multi_packet(
parser: PatchableWebSocketReader,
) -> None:
"""Ensure a multi-byte length with multiple packets is parsed correctly."""
expected_payload = b"1" * 32768
assert parser.parse_frame(struct.pack("!BB", 0b00000001, 126)) == []
assert parser.parse_frame(struct.pack("!H", 32768)) == []
assert parser.parse_frame(b"1" * 8192) == []
assert parser.parse_frame(b"1" * 8192) == []
assert parser.parse_frame(b"1" * 8192) == []
res = parser.parse_frame(b"1" * 8192)
fin, opcode, payload, compress = res[0]
assert len(payload) == 32768
assert (0, 1, expected_payload, 0) == (fin, opcode, payload, not not compress)
def test_parse_frame_length4(parser: PatchableWebSocketReader) -> None:
parser.parse_frame(struct.pack("!BB", 0b00000001, 127))
parser.parse_frame(struct.pack("!Q", 4))
fin, opcode, payload, compress = parser.parse_frame(b"1234")[0]
assert (0, 1, b"1234", 0) == (fin, opcode, payload, compress)
def test_parse_frame_mask(parser: PatchableWebSocketReader) -> None:
parser.parse_frame(struct.pack("!BB", 0b00000001, 0b10000001))
parser.parse_frame(b"0001")
fin, opcode, payload, compress = parser.parse_frame(b"1")[0]
assert (0, 1, b"\x01", 0) == (fin, opcode, payload, compress)
def test_parse_frame_header_reversed_bits(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
with pytest.raises(WebSocketError):
parser.parse_frame(struct.pack("!BB", 0b01100000, 0b00000000))
def test_parse_frame_header_control_frame(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
with pytest.raises(WebSocketError):
parser.parse_frame(struct.pack("!BB", 0b00001000, 0b00000000))
@pytest.mark.xfail()
def test_parse_frame_header_new_data_err(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
with pytest.raises(WebSocketError):
parser.parse_frame(struct.pack("!BB", 0b000000000, 0b00000000))
def test_parse_frame_header_payload_size(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
with pytest.raises(WebSocketError):
parser.parse_frame(struct.pack("!BB", 0b10001000, 0b01111110))
# Protractor event loop will call feed_data with bytearray. Since
# asyncio technically supports memoryview as well, we should test that.
@pytest.mark.parametrize(
argnames="data",
argvalues=[b"", bytearray(b""), memoryview(b"")],
ids=["bytes", "bytearray", "memoryview"],
)
def test_ping_frame(
out: WebSocketDataQueue,
parser: PatchableWebSocketReader,
data: bytes | bytearray | memoryview,
) -> None:
parser._handle_frame(True, WSMsgType.PING, b"data", 0)
res = out._buffer[0]
assert res == WSMessagePing(data=b"data", size=4, extra="")
def test_pong_frame(out: WebSocketDataQueue, parser: PatchableWebSocketReader) -> None:
parser._handle_frame(True, WSMsgType.PONG, b"data", 0)
res = out._buffer[0]
assert res == WSMessagePong(data=b"data", size=4, extra="")
def test_close_frame(out: WebSocketDataQueue, parser: PatchableWebSocketReader) -> None:
parser._handle_frame(True, WSMsgType.CLOSE, b"", 0)
res = out._buffer[0]
assert res == WSMessageClose(data=0, size=0, extra="")
def test_close_frame_info(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
parser._handle_frame(True, WSMsgType.CLOSE, b"0112345", 0)
res = out._buffer[0]
assert res == WSMessageClose(data=12337, size=7, extra="12345")
def test_close_frame_invalid(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
with pytest.raises(WebSocketError) as ctx:
parser._handle_frame(True, WSMsgType.CLOSE, b"1", 0)
assert ctx.value.code == WSCloseCode.PROTOCOL_ERROR
def test_close_frame_invalid_2(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
data = build_close_frame(code=1)
with pytest.raises(WebSocketError) as ctx:
parser._feed_data(data)
assert ctx.value.code == WSCloseCode.PROTOCOL_ERROR
def test_close_frame_unicode_err(parser: PatchableWebSocketReader) -> None:
data = build_close_frame(code=1000, message=b"\xf4\x90\x80\x80")
with pytest.raises(WebSocketError) as ctx:
parser._feed_data(data)
assert ctx.value.code == WSCloseCode.INVALID_TEXT
def test_unknown_frame(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
with pytest.raises(WebSocketError):
parser._handle_frame(True, WSMsgType.CONTINUATION, b"", 0)
def test_simple_text(out: WebSocketDataQueue, parser: PatchableWebSocketReader) -> None:
data = build_frame(b"text", WSMsgType.TEXT)
parser._feed_data(data)
res = out._buffer[0]
assert res == WSMessageText(data="text", size=4, extra="")
def test_simple_text_unicode_err(parser: PatchableWebSocketReader) -> None:
data = build_frame(b"\xf4\x90\x80\x80", WSMsgType.TEXT)
with pytest.raises(WebSocketError) as ctx:
parser._feed_data(data)
assert ctx.value.code == WSCloseCode.INVALID_TEXT
def test_simple_binary(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
data = build_frame(b"binary", WSMsgType.BINARY)
parser._feed_data(data)
res = out._buffer[0]
assert res == WSMessageBinary(data=b"binary", size=6, extra="")
def test_one_byte_at_a_time(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
"""Send one byte at a time to the parser."""
data = build_frame(b"binary", WSMsgType.BINARY)
for i in range(len(data)):
parser._feed_data(data[i : i + 1])
res = out._buffer[0]
assert res == WSMessageBinary(data=b"binary", size=6, extra="")
def test_fragmentation_header(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
data = build_frame(b"a", WSMsgType.TEXT)
parser._feed_data(data[:1])
parser._feed_data(data[1:])
res = out._buffer[0]
assert res == WSMessageText(data="a", size=1, extra="")
def test_large_message(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
large_payload = b"b" * 131072
data = build_frame(large_payload, WSMsgType.BINARY)
parser._feed_data(data)
res = out._buffer[0]
assert res == WSMessageBinary(data=large_payload, size=131072, extra="")
def test_large_masked_message(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
large_payload = b"b" * 131072
data = build_frame(large_payload, WSMsgType.BINARY, mask=True)
parser._feed_data(data)
res = out._buffer[0]
assert res == WSMessageBinary(data=large_payload, size=131072, extra="")
def test_fragmented_masked_message(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
large_payload = b"b" * 100
data = build_frame(large_payload, WSMsgType.BINARY, mask=True)
for i in range(len(data)):
parser._feed_data(data[i : i + 1])
res = out._buffer[0]
assert res == WSMessageBinary(data=large_payload, size=100, extra="")
def test_large_fragmented_masked_message(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
large_payload = b"b" * 131072
data = build_frame(large_payload, WSMsgType.BINARY, mask=True)
for i in range(0, len(data), 16384):
parser._feed_data(data[i : i + 16384])
res = out._buffer[0]
assert res == WSMessageBinary(data=large_payload, size=131072, extra="")
def test_continuation(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
data1 = build_frame(b"line1", WSMsgType.TEXT, is_fin=False)
parser._feed_data(data1)
data2 = build_frame(b"line2", WSMsgType.CONTINUATION)
parser._feed_data(data2)
res = out._buffer[0]
assert res == WSMessageText(data="line1line2", size=10, extra="")
def test_continuation_with_ping(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
data1 = build_frame(b"line1", WSMsgType.TEXT, is_fin=False)
parser._feed_data(data1)
data2 = build_frame(b"", WSMsgType.PING)
parser._feed_data(data2)
data3 = build_frame(b"line2", WSMsgType.CONTINUATION)
parser._feed_data(data3)
res = out._buffer[0]
assert res == WSMessagePing(data=b"", size=0, extra="")
res = out._buffer[1]
assert res == WSMessageText(data="line1line2", size=10, extra="")
def test_continuation_err(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
parser._handle_frame(False, WSMsgType.TEXT, b"line1", 0)
with pytest.raises(WebSocketError):
parser._handle_frame(True, WSMsgType.TEXT, b"line2", 0)
def test_continuation_with_close(
out: WebSocketDataQueue, parser: WebSocketReader
) -> None:
parser._handle_frame(False, WSMsgType.TEXT, b"line1", 0)
parser._handle_frame(
False,
WSMsgType.CLOSE,
build_close_frame(1002, b"test", noheader=True),
False,
)
parser._handle_frame(True, WSMsgType.CONTINUATION, b"line2", 0)
res = out._buffer[0]
assert res == WSMessageClose(data=1002, size=6, extra="test")
res = out._buffer[1]
assert res == WSMessageText(data="line1line2", size=10, extra="")
def test_continuation_with_close_unicode_err(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
parser._handle_frame(False, WSMsgType.TEXT, b"line1", 0)
with pytest.raises(WebSocketError) as ctx:
parser._handle_frame(
False,
WSMsgType.CLOSE,
build_close_frame(1000, b"\xf4\x90\x80\x80", noheader=True),
0,
)
parser._handle_frame(True, WSMsgType.CONTINUATION, b"line2", 0)
assert ctx.value.code == WSCloseCode.INVALID_TEXT
def test_continuation_with_close_bad_code(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
parser._handle_frame(False, WSMsgType.TEXT, b"line1", 0)
with pytest.raises(WebSocketError) as ctx:
parser._handle_frame(
False, WSMsgType.CLOSE, build_close_frame(1, b"test", noheader=True), 0
)
assert ctx.value.code == WSCloseCode.PROTOCOL_ERROR
parser._handle_frame(True, WSMsgType.CONTINUATION, b"line2", 0)
def test_continuation_with_close_bad_payload(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
parser._handle_frame(False, WSMsgType.TEXT, b"line1", 0)
with pytest.raises(WebSocketError) as ctx:
parser._handle_frame(False, WSMsgType.CLOSE, b"1", 0)
assert ctx.value.code == WSCloseCode.PROTOCOL_ERROR
parser._handle_frame(True, WSMsgType.CONTINUATION, b"line2", 0)
def test_continuation_with_close_empty(
out: WebSocketDataQueue, parser: PatchableWebSocketReader
) -> None:
parser._handle_frame(False, WSMsgType.TEXT, b"line1", 0)
parser._handle_frame(False, WSMsgType.CLOSE, b"", 0)
parser._handle_frame(True, WSMsgType.CONTINUATION, b"line2", 0)
res = out._buffer[0]
assert res == WSMessageClose(data=0, size=0, extra="")
res = out._buffer[1]
assert res == WSMessageText(data="line1line2", size=10, extra="")
websocket_mask_data: bytes = b"some very long data for masking by websocket"
websocket_mask_mask: bytes = b"1234"
websocket_mask_masked: bytes = (
b"B]^Q\x11DVFH\x12_[_U\x13PPFR\x14W]A\x14\\S@_X\\T\x14SK\x13CTP@[RYV@"
)
def test_websocket_mask_python() -> None:
message = bytearray(websocket_mask_data)
_websocket_helpers._websocket_mask_python(websocket_mask_mask, message)
assert message == websocket_mask_masked
@pytest.mark.skipif(
not hasattr(_websocket_helpers, "_websocket_mask_cython"), reason="Requires Cython"
)
def test_websocket_mask_cython() -> None:
message = bytearray(websocket_mask_data)
_websocket_helpers._websocket_mask_cython(websocket_mask_mask, message) # type: ignore[attr-defined]
assert message == websocket_mask_masked
assert (
_websocket_helpers.websocket_mask is _websocket_helpers._websocket_mask_cython # type: ignore[attr-defined]
)
def test_websocket_mask_python_empty() -> None:
message = bytearray()
_websocket_helpers._websocket_mask_python(websocket_mask_mask, message)
assert message == bytearray()
@pytest.mark.skipif(
not hasattr(_websocket_helpers, "_websocket_mask_cython"), reason="Requires Cython"
)
def test_websocket_mask_cython_empty() -> None:
message = bytearray()
_websocket_helpers._websocket_mask_cython(websocket_mask_mask, message) # type: ignore[attr-defined]
assert message == bytearray()
def test_parse_compress_frame_single(parser: PatchableWebSocketReader) -> None:
parser.parse_frame(struct.pack("!BB", 0b11000001, 0b00000001))
res = parser.parse_frame(b"1")
fin, opcode, payload, compress = res[0]
assert (1, 1, b"1", True) == (fin, opcode, payload, not not compress)
def test_parse_compress_frame_multi(parser: PatchableWebSocketReader) -> None:
parser.parse_frame(struct.pack("!BB", 0b01000001, 126))
parser.parse_frame(struct.pack("!H", 4))
res = parser.parse_frame(b"1234")
fin, opcode, payload, compress = res[0]
assert (0, 1, b"1234", True) == (fin, opcode, payload, not not compress)
parser.parse_frame(struct.pack("!BB", 0b10000001, 126))
parser.parse_frame(struct.pack("!H", 4))
res = parser.parse_frame(b"1234")
fin, opcode, payload, compress = res[0]
assert (1, 1, b"1234", True) == (fin, opcode, payload, not not compress)
parser.parse_frame(struct.pack("!BB", 0b10000001, 126))
parser.parse_frame(struct.pack("!H", 4))
res = parser.parse_frame(b"1234")
fin, opcode, payload, compress = res[0]
assert (1, 1, b"1234", False) == (fin, opcode, payload, not not compress)
def test_parse_compress_error_frame(parser: PatchableWebSocketReader) -> None:
parser.parse_frame(struct.pack("!BB", 0b01000001, 0b00000001))
parser.parse_frame(b"1")
with pytest.raises(WebSocketError) as ctx:
parser.parse_frame(struct.pack("!BB", 0b11000001, 0b00000001))
parser.parse_frame(b"1")
assert ctx.value.code == WSCloseCode.PROTOCOL_ERROR
def test_parse_no_compress_frame_single(out: WebSocketDataQueue) -> None:
parser_no_compress = PatchableWebSocketReader(out, 0, compress=False)
with pytest.raises(WebSocketError) as ctx:
parser_no_compress.parse_frame(struct.pack("!BB", 0b11000001, 0b00000001))
parser_no_compress.parse_frame(b"1")
assert ctx.value.code == WSCloseCode.PROTOCOL_ERROR
def test_msg_too_large(out: WebSocketDataQueue) -> None:
parser = WebSocketReader(out, 256, compress=False)
data = build_frame(b"text" * 256, WSMsgType.TEXT)
with pytest.raises(WebSocketError) as ctx:
parser._feed_data(data)
assert ctx.value.code == WSCloseCode.MESSAGE_TOO_BIG
def test_msg_too_large_not_fin(out: WebSocketDataQueue) -> None:
parser = WebSocketReader(out, 256, compress=False)
data = build_frame(b"text" * 256, WSMsgType.TEXT, is_fin=False)
with pytest.raises(WebSocketError) as ctx:
parser._feed_data(data)
assert ctx.value.code == WSCloseCode.MESSAGE_TOO_BIG
@pytest.mark.usefixtures("parametrize_zlib_backend")
def test_compressed_msg_too_large(out: WebSocketDataQueue) -> None:
parser = WebSocketReader(out, 256, compress=True)
data = build_frame(b"aaa" * 256, WSMsgType.TEXT, ZLibBackend=ZLibBackend)
with pytest.raises(WebSocketError) as ctx:
parser._feed_data(data)
assert ctx.value.code == WSCloseCode.MESSAGE_TOO_BIG
| PatchableWebSocketReader |
python | run-llama__llama_index | llama-index-core/llama_index/core/extractors/metadata_extractors.py | {
"start": 9063,
"end": 11947
} | class ____(BaseExtractor):
"""
Questions answered extractor. Node-level extractor.
Extracts `questions_this_excerpt_can_answer` metadata field.
Args:
llm (Optional[LLM]): LLM
questions (int): number of questions to extract
prompt_template (str): template for question extraction,
embedding_only (bool): whether to use embedding only
"""
llm: SerializeAsAny[LLM] = Field(description="The LLM to use for generation.")
questions: int = Field(
default=5,
description="The number of questions to generate.",
gt=0,
)
prompt_template: str = Field(
default=DEFAULT_QUESTION_GEN_TMPL,
description="Prompt template to use when generating questions.",
)
embedding_only: bool = Field(
default=True, description="Whether to use metadata for emebddings only."
)
def __init__(
self,
llm: Optional[LLM] = None,
# TODO: llm_predictor arg is deprecated
llm_predictor: Optional[LLM] = None,
questions: int = 5,
prompt_template: str = DEFAULT_QUESTION_GEN_TMPL,
embedding_only: bool = True,
num_workers: int = DEFAULT_NUM_WORKERS,
**kwargs: Any,
) -> None:
"""Init params."""
if questions < 1:
raise ValueError("questions must be >= 1")
super().__init__(
llm=llm or llm_predictor or Settings.llm,
questions=questions,
prompt_template=prompt_template,
embedding_only=embedding_only,
num_workers=num_workers,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "QuestionsAnsweredExtractor"
async def _aextract_questions_from_node(self, node: BaseNode) -> Dict[str, str]:
"""Extract questions from a node and return it's metadata dict."""
if self.is_text_node_only and not isinstance(node, TextNode):
return {}
context_str = node.get_content(metadata_mode=self.metadata_mode)
prompt = PromptTemplate(template=self.prompt_template)
questions = await self.llm.apredict(
prompt, num_questions=self.questions, context_str=context_str
)
return {"questions_this_excerpt_can_answer": questions.strip()}
async def aextract(self, nodes: Sequence[BaseNode]) -> List[Dict]:
questions_jobs = []
for node in nodes:
questions_jobs.append(self._aextract_questions_from_node(node))
metadata_list: List[Dict] = await run_jobs(
questions_jobs, show_progress=self.show_progress, workers=self.num_workers
)
return metadata_list
DEFAULT_SUMMARY_EXTRACT_TEMPLATE = """\
Here is the content of the section:
{context_str}
Summarize the key topics and entities of the section. \
Summary: """
| QuestionsAnsweredExtractor |
python | scrapy__scrapy | tests/test_utils_defer.py | {
"start": 8299,
"end": 9506
} | class ____:
def test_deferred(self):
d = Deferred()
result = deferred_from_coro(d)
assert isinstance(result, Deferred)
assert result is d
def test_object(self):
result = deferred_from_coro(42)
assert result == 42
@inlineCallbacks
def test_coroutine(self):
async def coroutine() -> int:
return 42
result = deferred_from_coro(coroutine())
assert isinstance(result, Deferred)
coro_result = yield result
assert coro_result == 42
@pytest.mark.only_asyncio
@inlineCallbacks
def test_coroutine_asyncio(self):
async def coroutine() -> int:
await asyncio.sleep(0.01)
return 42
result = deferred_from_coro(coroutine())
assert isinstance(result, Deferred)
coro_result = yield result
assert coro_result == 42
@pytest.mark.only_asyncio
@inlineCallbacks
def test_future(self):
future = Future()
result = deferred_from_coro(future)
assert isinstance(result, Deferred)
future.set_result(42)
future_result = yield result
assert future_result == 42
| TestDeferredFromCoro |
python | ray-project__ray | rllib/algorithms/tests/test_env_runner_failures.py | {
"start": 1565,
"end": 5579
} | class ____(gym.Env):
"""Env that fails upon calling `step()`, but only for some remote EnvRunner indices.
The EnvRunner indices that should produce the failure (a ValueError) can be
provided by a list (of ints) under the "bad_indices" key in the env's
config.
.. testcode::
:skipif: True
from ray.rllib.env.env_context import EnvContext
# This env will fail for EnvRunners 1 and 2 (not for the local EnvRunner
# or any others with an index != [1|2]).
bad_env = FaultInjectEnv(
EnvContext(
{"bad_indices": [1, 2]},
worker_index=1,
num_workers=3,
)
)
from ray.rllib.env.env_context import EnvContext
# This env will fail only on the first evaluation EnvRunner, not on the first
# regular EnvRunner.
bad_env = FaultInjectEnv(
EnvContext(
{"bad_indices": [1], "eval_only": True},
worker_index=2,
num_workers=5,
)
)
"""
def __init__(self, config):
# Use RandomEnv to control episode length if needed.
self.env = RandomEnv(config)
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
self.config = config
# External counter service.
if "counter" in config:
self.counter = ray.get_actor(config["counter"])
else:
self.counter = None
if (
config.get("init_delay", 0) > 0.0
and (
not config.get("init_delay_indices", [])
or self.config.worker_index in config.get("init_delay_indices", [])
)
and
# constructor delay can only happen for recreated actors.
self._get_count() > 0
):
# Simulate an initialization delay.
time.sleep(config.get("init_delay"))
def _increment_count(self):
if self.counter:
eval = self.config.get("evaluation", False)
worker_index = self.config.worker_index
vector_index = self.config.vector_index
ray.wait([self.counter.increment.remote(eval, worker_index, vector_index)])
def _get_count(self):
if self.counter:
eval = self.config.get("evaluation", False)
worker_index = self.config.worker_index
vector_index = self.config.vector_index
return ray.get(self.counter.get.remote(eval, worker_index, vector_index))
return -1
def _maybe_raise_error(self):
# Do not raise simulated error if this EnvRunner is not bad.
if self.config.worker_index not in self.config.get("bad_indices", []):
return
if self.counter:
count = self._get_count()
if self.config.get(
"failure_start_count", -1
) >= 0 and count < self.config.get("failure_start_count"):
return
if self.config.get(
"failure_stop_count", -1
) >= 0 and count >= self.config.get("failure_stop_count"):
return
raise ValueError(
"This is a simulated error from "
f"{'eval-' if self.config.get('evaluation', False) else ''}"
f"env-runner-idx={self.config.worker_index}!"
)
def reset(self, *, seed=None, options=None):
self._increment_count()
self._maybe_raise_error()
return self.env.reset()
def step(self, action):
self._increment_count()
self._maybe_raise_error()
if self.config.get("step_delay", 0) > 0.0 and (
not self.config.get("init_delay_indices", [])
or self.config.worker_index in self.config.get("step_delay_indices", [])
):
# Simulate a step delay.
time.sleep(self.config.get("step_delay"))
return self.env.step(action)
| FaultInjectEnv |
python | pennersr__django-allauth | allauth/headless/mfa/inputs.py | {
"start": 497,
"end": 565
} | class ____(AuthenticateForm, inputs.Input):
pass
| AuthenticateInput |
python | apache__airflow | providers/sftp/tests/unit/sftp/decorators/sensors/test_sftp.py | {
"start": 1199,
"end": 4686
} | class ____:
@patch("airflow.providers.sftp.sensors.sftp.SFTPHook")
def test_decorator_with_file_path(self, sftp_hook_mock, dag_maker):
sftp_hook_mock.return_value.get_mod_time.return_value = "19700101000000"
file_path = "/path/to/file/2021-09-09.txt"
decorated_func_return = "decorated_func_returns"
expected_xcom_return = {"files_found": [file_path], "decorator_return_value": decorated_func_return}
@task.sftp_sensor(path=file_path)
def f():
return decorated_func_return
with dag_maker():
ret = f()
assert ret.operator.execute({}) == expected_xcom_return
@patch("airflow.providers.sftp.sensors.sftp.SFTPHook")
def test_decorator_with_file_path_with_args(self, sftp_hook_mock, dag_maker):
sftp_hook_mock.return_value.get_mod_time.return_value = "19700101000000"
file_path = "/path/to/file/1970-01-01.txt"
op_args = ("op_args_1",)
op_kwargs = {"key": "value"}
decorated_func_return = {"args": op_args, "kwargs": {**op_kwargs, "files_found": [file_path]}}
expected_xcom_return = {"files_found": [file_path], "decorator_return_value": decorated_func_return}
@task.sftp_sensor(path=file_path)
def f(*args, **kwargs):
return {"args": args, "kwargs": kwargs}
with dag_maker():
ret = f(*op_args, **op_kwargs)
assert ret.operator.execute({}) == expected_xcom_return
@patch("airflow.providers.sftp.sensors.sftp.SFTPHook")
def test_decorator_with_file_pattern(self, sftp_hook_mock, dag_maker):
sftp_hook_mock.return_value.get_mod_time.return_value = "19700101000000"
file_path_list = ["/path/to/file/text_file.txt", "/path/to/file/another_text_file.txt"]
sftp_hook_mock.return_value.get_files_by_pattern.return_value = [
"text_file.txt",
"another_text_file.txt",
]
decorated_func_return = "decorated_func_returns"
expected_xcom_return = {
"files_found": file_path_list,
"decorator_return_value": decorated_func_return,
}
@task.sftp_sensor(path="/path/to/file/", file_pattern=".txt")
def f():
return decorated_func_return
with dag_maker():
ret = f()
assert ret.operator.execute({}) == expected_xcom_return
@patch("airflow.providers.sftp.sensors.sftp.SFTPHook")
def test_decorator_with_file_pattern_with_args(self, sftp_hook_mock, dag_maker):
sftp_hook_mock.return_value.get_mod_time.return_value = "19700101000000"
file_path_list = ["/path/to/file/text_file.txt", "/path/to/file/another_text_file.txt"]
op_args = ("op_args_1",)
op_kwargs = {"key": "value"}
sftp_hook_mock.return_value.get_files_by_pattern.return_value = [
"text_file.txt",
"another_text_file.txt",
]
decorated_func_return = {"args": op_args, "kwargs": {**op_kwargs, "files_found": file_path_list}}
expected_xcom_return = {
"files_found": file_path_list,
"decorator_return_value": decorated_func_return,
}
@task.sftp_sensor(path="/path/to/file/", file_pattern=".txt")
def f(*args, **kwargs):
return {"args": args, "kwargs": kwargs}
with dag_maker():
ret = f(*op_args, **op_kwargs)
assert ret.operator.execute({}) == expected_xcom_return
| TestSFTPDecoratorSensor |
python | redis__redis-py | tests/test_connect.py | {
"start": 6829,
"end": 8287
} | class ____(socketserver.StreamRequestHandler):
def setup(self):
pass
def finish(self):
pass
def handle(self):
buffer = b""
command = None
command_ptr = None
fragment_length = None
while self.server.is_serving() or buffer:
try:
buffer += self.request.recv(1024)
except socket.timeout:
continue
if not buffer:
continue
parts = re.split(_CMD_SEP, buffer)
buffer = parts[-1]
for fragment in parts[:-1]:
fragment = fragment.decode()
if fragment.startswith("*") and command is None:
command = [None for _ in range(int(fragment[1:]))]
command_ptr = 0
fragment_length = None
continue
if fragment.startswith("$") and command[command_ptr] is None:
fragment_length = int(fragment[1:])
continue
assert len(fragment) == fragment_length
command[command_ptr] = fragment
command_ptr += 1
if command_ptr < len(command):
continue
command = " ".join(command)
resp = _SUPPORTED_CMDS.get(command, _ERROR_RESP)
self.request.sendall(resp)
command = None
| _RedisRequestHandler |
python | numba__numba | numba/core/types/containers.py | {
"start": 1255,
"end": 2053
} | class ____(SimpleIteratorType):
"""
Convenience base class for some container iterators.
Derived classes must implement the *container_class* attribute.
"""
def __init__(self, container):
assert isinstance(container, self.container_class), container
self.container = container
yield_type = container.dtype
name = "iter(%s)" % container
super(BaseContainerIterator, self).__init__(name, yield_type)
def unify(self, typingctx, other):
cls = type(self)
if isinstance(other, cls):
container = typingctx.unify_pairs(self.container, other.container)
if container is not None:
return cls(container)
@property
def key(self):
return self.container
| BaseContainerIterator |
python | scikit-learn__scikit-learn | doc/sphinxext/autoshortsummary.py | {
"start": 55,
"end": 1891
} | class ____(ModuleLevelDocumenter):
"""An autodocumenter that only renders the short summary of the object."""
# Defines the usage: .. autoshortsummary:: {{ object }}
objtype = "shortsummary"
# Disable content indentation
content_indent = ""
# Avoid being selected as the default documenter for some objects, because we are
# returning `can_document_member` as True for all objects
priority = -99
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
"""Allow documenting any object."""
return True
def get_object_members(self, want_all):
"""Document no members."""
return (False, [])
def add_directive_header(self, sig):
"""Override default behavior to add no directive header or options."""
pass
def add_content(self, more_content):
"""Override default behavior to add only the first line of the docstring.
Modified based on the part of processing docstrings in the original
implementation of this method.
https://github.com/sphinx-doc/sphinx/blob/faa33a53a389f6f8bc1f6ae97d6015fa92393c4a/sphinx/ext/autodoc/__init__.py#L609-L622
"""
sourcename = self.get_sourcename()
docstrings = self.get_doc()
if docstrings is not None:
if not docstrings:
docstrings.append([])
# Get the first non-empty line of the processed docstring; this could lead
# to unexpected results if the object does not have a short summary line.
short_summary = next(
(s for s in self.process_doc(docstrings) if s), "<no summary>"
)
self.add_line(short_summary, sourcename, 0)
def setup(app):
app.add_autodocumenter(ShortSummaryDocumenter)
| ShortSummaryDocumenter |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1151403,
"end": 1151824
} | class ____(ScaleInvalidDataShowAsyOffset):
"""
ScaleInvalidDataShowAsValueyOffset schema wrapper.
Parameters
----------
value : float
Offset for y-position.
"""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAsValue<"yOffset">'}
def __init__(self, value: Optional[float] = Undefined, **kwds):
super().__init__(value=value, **kwds)
| ScaleInvalidDataShowAsValueyOffset |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 152558,
"end": 157961
} | class ____(Request):
"""
Delete a task along with any information stored for it (statistics, frame updates etc.)
Unless Force flag is provided, operation will fail if task has objects associated with it - i.e. children tasks
and projects. Models that refer to the deleted task will be updated with a task ID indicating a deleted task.
:param move_to_trash: Move task to trash instead of deleting it. For internal
use only, tasks in the trash are not visible from the API and cannot be
restored!
:type move_to_trash: bool
:param force: If not true, call fails if the task status is 'in_progress'
:type force: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
:param return_file_urls: If set to 'true' then return the urls of the files
that were uploaded by this task. Default value is 'false'
:type return_file_urls: bool
"""
_service = "tasks"
_action = "delete"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "If not true, call fails if the task status is 'in_progress'",
"type": ["boolean", "null"],
},
"move_to_trash": {
"default": False,
"description": "Move task to trash instead of deleting it. For internal use only, tasks in the trash are not visible from the API and cannot be restored!",
"type": ["boolean", "null"],
},
"return_file_urls": {
"description": "If set to 'true' then return the urls of the files that were uploaded by this task. Default value is 'false'",
"type": "boolean",
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
move_to_trash: Optional[bool] = False,
force: Optional[bool] = False,
status_reason: Optional[str] = None,
status_message: Optional[str] = None,
return_file_urls: Optional[bool] = None,
**kwargs: Any
) -> None:
super(DeleteRequest, self).__init__(**kwargs)
self.move_to_trash = move_to_trash
self.force = force
self.task = task
self.status_reason = status_reason
self.status_message = status_message
self.return_file_urls = return_file_urls
@schema_property("move_to_trash")
def move_to_trash(self) -> Optional[bool]:
return self._property_move_to_trash
@move_to_trash.setter
def move_to_trash(self, value: Optional[bool]) -> None:
if value is None:
self._property_move_to_trash = None
return
self.assert_isinstance(value, "move_to_trash", (bool,))
self._property_move_to_trash = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
@schema_property("return_file_urls")
def return_file_urls(self) -> Optional[bool]:
return self._property_return_file_urls
@return_file_urls.setter
def return_file_urls(self, value: Optional[bool]) -> None:
if value is None:
self._property_return_file_urls = None
return
self.assert_isinstance(value, "return_file_urls", (bool,))
self._property_return_file_urls = value
| DeleteRequest |
python | numba__numba | numba/tests/test_parallel_backend.py | {
"start": 15628,
"end": 16921
} | class ____(ThreadLayerTestHelper):
"""
Checks that numba.threading_layer() reports correctly.
"""
_DEBUG = False
backends = {'tbb': skip_no_tbb,
'omp': skip_no_omp,
'workqueue': unittest.skipIf(False, '')}
@classmethod
def _inject(cls, backend, backend_guard):
def test_template(self):
body = """if 1:
X = np.arange(1000000.)
Y = np.arange(1000000.)
Z = busy_func(X, Y)
assert numba.threading_layer() == '%s'
"""
runme = self.template % (body % backend)
cmdline = [sys.executable, '-c', runme]
env = os.environ.copy()
env['NUMBA_THREADING_LAYER'] = str(backend)
out, err = self.run_cmd(cmdline, env=env)
if self._DEBUG:
print(out, err)
injected_test = "test_threading_layer_selector_%s" % backend
setattr(cls, injected_test,
tag("important")(backend_guard(test_template)))
@classmethod
def generate(cls):
for backend, backend_guard in cls.backends.items():
cls._inject(backend, backend_guard)
TestThreadingLayerSelection.generate()
@skip_parfors_unsupported
| TestThreadingLayerSelection |
python | numba__llvmlite | llvmlite/tests/test_ir.py | {
"start": 125244,
"end": 125840
} | class ____(TestBase):
def test_call_transform(self):
mod = ir.Module()
foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), ()), "foo")
bar = ir.Function(mod, ir.FunctionType(ir.VoidType(), ()), "bar")
builder = ir.IRBuilder()
builder.position_at_end(foo.append_basic_block())
call = builder.call(foo, ())
self.assertEqual(call.callee, foo)
modified = ir.replace_all_calls(mod, foo, bar)
self.assertIn(call, modified)
self.assertNotEqual(call.callee, foo)
self.assertEqual(call.callee, bar)
| TestTransforms |
python | walkccc__LeetCode | solutions/408. Valid Word Abbreviation/408.py | {
"start": 0,
"end": 501
} | class ____:
def validWordAbbreviation(self, word: str, abbr: str) -> bool:
i = 0 # word's index
j = 0 # abbr's index
while i < len(word) and j < len(abbr):
if word[i] == abbr[j]:
i += 1
j += 1
continue
if not abbr[j].isdigit() or abbr[j] == '0':
return False
num = 0
while j < len(abbr) and abbr[j].isdigit():
num = num * 10 + int(abbr[j])
j += 1
i += num
return i == len(word) and j == len(abbr)
| Solution |
python | getsentry__sentry-python | tests/test_ai_monitoring.py | {
"start": 12796,
"end": 17383
} | class ____:
def test_client_wraps_truncated_messages_in_annotated_value(self, large_messages):
"""Test that client.py properly wraps truncated messages in AnnotatedValue using scope data"""
from sentry_sdk._types import AnnotatedValue
from sentry_sdk.consts import SPANDATA
class MockSpan:
def __init__(self):
self.span_id = "test_span_123"
self.data = {}
def set_data(self, key, value):
self.data[key] = value
class MockScope:
def __init__(self):
self._gen_ai_original_message_count = {}
small_limit = 3000
span = MockSpan()
scope = MockScope()
original_count = len(large_messages)
# Simulate what integrations do
truncated_messages = truncate_and_annotate_messages(
large_messages, span, scope, max_bytes=small_limit
)
span.set_data(SPANDATA.GEN_AI_REQUEST_MESSAGES, truncated_messages)
# Verify metadata was set on scope
assert span.span_id in scope._gen_ai_original_message_count
assert scope._gen_ai_original_message_count[span.span_id] > 0
# Simulate what client.py does
event = {"spans": [{"span_id": span.span_id, "data": span.data.copy()}]}
# Mimic client.py logic - using scope to get the original length
for event_span in event["spans"]:
span_id = event_span.get("span_id")
span_data = event_span.get("data", {})
if (
span_id
and span_id in scope._gen_ai_original_message_count
and SPANDATA.GEN_AI_REQUEST_MESSAGES in span_data
):
messages = span_data[SPANDATA.GEN_AI_REQUEST_MESSAGES]
n_original_count = scope._gen_ai_original_message_count[span_id]
span_data[SPANDATA.GEN_AI_REQUEST_MESSAGES] = AnnotatedValue(
safe_serialize(messages),
{"len": n_original_count},
)
# Verify the annotation happened
messages_value = event["spans"][0]["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
assert isinstance(messages_value, AnnotatedValue)
assert messages_value.metadata["len"] == original_count
assert isinstance(messages_value.value, str)
def test_annotated_value_shows_correct_original_length(self, large_messages):
"""Test that the annotated value correctly shows the original message count before truncation"""
from sentry_sdk.consts import SPANDATA
class MockSpan:
def __init__(self):
self.span_id = "test_span_456"
self.data = {}
def set_data(self, key, value):
self.data[key] = value
class MockScope:
def __init__(self):
self._gen_ai_original_message_count = {}
small_limit = 3000
span = MockSpan()
scope = MockScope()
original_message_count = len(large_messages)
truncated_messages = truncate_and_annotate_messages(
large_messages, span, scope, max_bytes=small_limit
)
assert len(truncated_messages) < original_message_count
assert span.span_id in scope._gen_ai_original_message_count
stored_original_length = scope._gen_ai_original_message_count[span.span_id]
assert stored_original_length == original_message_count
event = {
"spans": [
{
"span_id": span.span_id,
"data": {SPANDATA.GEN_AI_REQUEST_MESSAGES: truncated_messages},
}
]
}
for event_span in event["spans"]:
span_id = event_span.get("span_id")
span_data = event_span.get("data", {})
if (
span_id
and span_id in scope._gen_ai_original_message_count
and SPANDATA.GEN_AI_REQUEST_MESSAGES in span_data
):
span_data[SPANDATA.GEN_AI_REQUEST_MESSAGES] = AnnotatedValue(
span_data[SPANDATA.GEN_AI_REQUEST_MESSAGES],
{"len": scope._gen_ai_original_message_count[span_id]},
)
messages_value = event["spans"][0]["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
assert isinstance(messages_value, AnnotatedValue)
assert messages_value.metadata["len"] == stored_original_length
assert len(messages_value.value) == len(truncated_messages)
| TestClientAnnotation |
python | apache__airflow | providers/exasol/tests/unit/exasol/operators/test_exasol.py | {
"start": 996,
"end": 2492
} | class ____:
@mock.patch("airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator.get_db_hook")
def test_overwrite_autocommit(self, mock_get_db_hook):
operator = ExasolOperator(task_id="TEST", sql="SELECT 1", autocommit=True)
operator.execute({})
mock_get_db_hook.return_value.run.assert_called_once_with(
sql="SELECT 1",
autocommit=True,
parameters=None,
handler=exasol_fetch_all_handler,
return_last=True,
)
@mock.patch("airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator.get_db_hook")
def test_pass_parameters(self, mock_get_db_hook):
operator = ExasolOperator(task_id="TEST", sql="SELECT {value!s}", parameters={"value": 1})
operator.execute({})
mock_get_db_hook.return_value.run.assert_called_once_with(
sql="SELECT {value!s}",
autocommit=False,
parameters={"value": 1},
handler=exasol_fetch_all_handler,
return_last=True,
)
@mock.patch("airflow.providers.common.sql.operators.sql.BaseSQLOperator.__init__")
def test_overwrite_schema(self, mock_base_op):
ExasolOperator(task_id="TEST", sql="SELECT 1", schema="dummy")
mock_base_op.assert_called_once_with(
conn_id="exasol_default",
database=None,
hook_params={"schema": "dummy"},
default_args={},
task_id="TEST",
)
| TestExasol |
python | astropy__astropy | astropy/utils/iers/tests/test_iers.py | {
"start": 796,
"end": 4689
} | class ____:
"""Basic tests that IERS_B returns correct values"""
@pytest.mark.parametrize("iers_cls", (iers.IERS_B, iers.IERS))
def test_simple(self, iers_cls):
"""Test the default behaviour for IERS_B and IERS."""
# Arguably, IERS itself should not be used at all, but it used to
# provide IERS_B by default so we check that it continues to do so.
# Eventually, IERS should probably be deprecated.
iers_cls.close()
assert iers_cls.iers_table is None
iers_tab = iers_cls.open()
assert iers_cls.iers_table is not None
assert iers_cls.iers_table is iers_tab
assert isinstance(iers_tab, QTable)
assert isinstance(iers_tab, iers.IERS_B)
assert (iers_tab["UT1_UTC"].unit / u.second).is_unity()
assert (iers_tab["PM_x"].unit / u.arcsecond).is_unity()
assert (iers_tab["PM_y"].unit / u.arcsecond).is_unity()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0.0, 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert (ut1_utc.unit / u.second).is_unity()
# IERS files change at the 0.1 ms level; see gh-6981
assert_quantity_allclose(
ut1_utc,
[-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895] * u.s,
atol=0.1 * u.ms,
)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.0)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0.0, return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format="jd", scale="utc")
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(
ut1_utc3,
[-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895] * u.s,
atol=0.1 * u.ms,
)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
def test_open_filename(self):
iers.IERS_B.close()
iers.IERS_B.open(iers.IERS_B_FILE)
assert iers.IERS_B.iers_table is not None
assert isinstance(iers.IERS_B.iers_table, QTable)
iers.IERS_B.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS_B.open("surely this does not exist")
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open(Path(IERS_A_EXCERPT).as_uri())
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
@pytest.mark.parametrize("path_transform", [os.fspath, Path])
def test_IERS_B_old_style_excerpt(path_transform):
"""Check that the instructions given in `IERS_B.read` actually work."""
# If this test is changed, be sure to also adjust the instructions.
#
# TODO: this test and the note can probably be removed after
# enough time has passed that old-style IERS_B files are simply
# not around any more, say in 2025. If so, also remove the excerpt
# and the ReadMe.eopc04_IAU2000 file.
old_style_file = path_transform(
get_pkg_data_filename(os.path.join("data", "iers_b_old_style_excerpt"))
)
excerpt = iers.IERS_B.read(
old_style_file,
readme=get_pkg_data_filename(
"data/ReadMe.eopc04_IAU2000", package="astropy.utils.iers"
),
data_start=14,
)
assert isinstance(excerpt, QTable)
assert "PM_x_dot" not in excerpt.colnames
| TestBasic |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/preview.py | {
"start": 98,
"end": 441
} | class ____:
# Black's `Preview.dummy_implementations`
def get_release_info(self): ...
def raw_docstring():
r"""Black's `Preview.accept_raw_docstrings`
a
b
"""
pass
def reference_docstring_newlines():
"""A regular docstring for comparison
a
b
"""
pass
| CachedRepository |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 59481,
"end": 61690
} | class ____(rv_continuous):
r"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is given by
.. math::
f(x, c) = c / 2 |x|^{c-1} \exp(-|x|^c)
for a real number :math:`x` and :math:`c > 0`.
`dweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _rvs(self, c, size=None, random_state=None):
u = random_state.uniform(size=size)
w = weibull_min.rvs(c, size=size, random_state=random_state)
return w * (np.where(u >= 0.5, 1, -1))
def _pdf(self, x, c):
# dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
ax = abs(x)
Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5 * np.exp(-abs(x)**c)
return np.where(x > 0, 1 - Cx1, Cx1)
def _ppf(self, q, c):
fac = 2. * np.where(q <= 0.5, q, 1. - q)
fac = np.power(-np.log(fac), 1.0 / c)
return np.where(q > 0.5, fac, -fac)
def _sf(self, x, c):
half_weibull_min_sf = 0.5 * stats.weibull_min._sf(np.abs(x), c)
return np.where(x > 0, half_weibull_min_sf, 1 - half_weibull_min_sf)
def _isf(self, q, c):
double_q = 2. * np.where(q <= 0.5, q, 1. - q)
weibull_min_isf = stats.weibull_min._isf(double_q, c)
return np.where(q > 0.5, -weibull_min_isf, weibull_min_isf)
def _munp(self, n, c):
return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)
# since we know that all odd moments are zeros, return them at once.
# returning Nones from _stats makes the public stats call _munp
# so overall we're saving one or two gamma function evaluations here.
def _stats(self, c):
return 0, None, 0, None
def _entropy(self, c):
h = stats.weibull_min._entropy(c) - np.log(0.5)
return h
dweibull = dweibull_gen(name='dweibull')
| dweibull_gen |
python | getsentry__sentry | src/sentry/integrations/discord/integration.py | {
"start": 11261,
"end": 12319
} | class ____:
def __init__(self, params):
self.params = params
super().__init__()
def dispatch(self, request: HttpRequest, pipeline: IntegrationPipeline) -> HttpResponseBase:
if "guild_id" not in request.GET or "code" not in request.GET:
state = pipeline.fetch_state(key=IntegrationProviderSlug.DISCORD.value) or {}
redirect_uri = (
absolute_uri("extensions/discord/configure/")
if state.get("use_configure") == "1"
else absolute_uri("extensions/discord/setup/")
)
params = urlencode(
{
"redirect_uri": redirect_uri,
**self.params,
}
)
redirect_uri = f"https://discord.com/api/oauth2/authorize?{params}"
return HttpResponseRedirect(redirect_uri)
pipeline.bind_state("guild_id", request.GET["guild_id"])
pipeline.bind_state("code", request.GET["code"])
return pipeline.next_step()
| DiscordInstallPipeline |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 57464,
"end": 58122
} | class ____(GenericFunction[str]):
"""The SQL CONCAT() function, which concatenates strings.
E.g.:
.. sourcecode:: pycon+sql
>>> print(select(func.concat("a", "b")))
{printsql}SELECT concat(:concat_2, :concat_3) AS concat_1
String concatenation in SQLAlchemy is more commonly available using the
Python ``+`` operator with string datatypes, which will render a
backend-specific concatenation operator, such as :
.. sourcecode:: pycon+sql
>>> print(select(literal("a") + "b"))
{printsql}SELECT :param_1 || :param_2 AS anon_1
"""
type = sqltypes.String()
inherit_cache = True
| concat |
python | redis__redis-py | redis/asyncio/multidb/event.py | {
"start": 266,
"end": 1042
} | class ____:
"""
Event fired when an async active database has been changed.
"""
def __init__(
self,
old_database: AsyncDatabase,
new_database: AsyncDatabase,
command_executor,
**kwargs,
):
self._old_database = old_database
self._new_database = new_database
self._command_executor = command_executor
self._kwargs = kwargs
@property
def old_database(self) -> AsyncDatabase:
return self._old_database
@property
def new_database(self) -> AsyncDatabase:
return self._new_database
@property
def command_executor(self):
return self._command_executor
@property
def kwargs(self):
return self._kwargs
| AsyncActiveDatabaseChanged |
python | walkccc__LeetCode | solutions/17. Letter Combinations of a Phone Number/17.py | {
"start": 0,
"end": 512
} | class ____:
def letterCombinations(self, digits: str) -> list[str]:
if not digits:
return []
digitToLetters = ['', '', 'abc', 'def', 'ghi',
'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']
ans = []
def dfs(i: int, path: list[str]) -> None:
if i == len(digits):
ans.append(''.join(path))
return
for letter in digitToLetters[int(digits[i])]:
path.append(letter)
dfs(i + 1, path)
path.pop()
dfs(0, [])
return ans
| Solution |
python | getsentry__sentry | tests/sentry/core/endpoints/test_team_projects.py | {
"start": 1585,
"end": 19476
} | class ____(APITestCase, TestCase):
endpoint = "sentry-api-0-team-project-index"
method = "post"
def setUp(self) -> None:
super().setUp()
self.team = self.create_team(members=[self.user])
self.data = {"name": "foo", "slug": "bar", "platform": "python"}
self.login_as(user=self.user)
def test_simple(self) -> None:
response = self.get_success_response(
self.organization.slug,
self.team.slug,
**self.data,
status_code=201,
)
# fetch from db to check project's team
project = Project.objects.get(id=response.data["id"])
assert project.name == "foo"
assert project.slug == "bar"
assert project.platform == "python"
assert project.teams.first() == self.team
assert response.data["teams"] is not None
assert response.data["teams"][0]["id"] == str(self.team.id)
def test_invalid_numeric_slug(self) -> None:
response = self.get_error_response(
self.organization.slug,
self.team.slug,
name="fake name",
slug="12345",
status_code=400,
)
assert response.data["slug"][0] == DEFAULT_SLUG_ERROR_MESSAGE
def test_generated_slug_not_entirely_numeric(self) -> None:
response = self.get_success_response(
self.organization.slug,
self.team.slug,
name="1234",
status_code=201,
)
slug = response.data["slug"]
assert slug.startswith("1234-")
assert not slug.isdecimal()
def test_invalid_platform(self) -> None:
response = self.get_error_response(
self.organization.slug,
self.team.slug,
name="fake name",
platform="fake platform",
status_code=400,
)
assert response.data["platform"][0] == "Invalid platform"
def test_invalid_name(self) -> None:
invalid_name = list(RESERVED_PROJECT_SLUGS)[0]
response = self.get_error_response(
self.organization.slug,
self.team.slug,
name=invalid_name,
platform="python",
status_code=400,
)
assert response.data["name"][0] == f'The name "{invalid_name}" is reserved and not allowed.'
def test_duplicate_slug(self) -> None:
self.create_project(slug="bar")
response = self.get_error_response(
self.organization.slug,
self.team.slug,
**self.data,
status_code=409,
)
assert response.data["detail"] == "A project with this slug already exists."
def test_default_rules(self) -> None:
signal_handler = Mock()
alert_rule_created.connect(signal_handler)
response = self.get_success_response(
self.organization.slug,
self.team.slug,
**self.data,
default_rules=True,
status_code=201,
)
project = Project.objects.get(id=response.data["id"])
rule = Rule.objects.get(project=project)
assert (
rule.data["actions"][0]["fallthroughType"] == FallthroughChoiceType.ACTIVE_MEMBERS.value
)
# Ensure that creating the default alert rule does trigger the
# alert_rule_created signal
assert signal_handler.call_count == 1
alert_rule_created.disconnect(signal_handler)
def test_without_default_rules_disable_member_project_creation(self) -> None:
response = self.get_success_response(
self.organization.slug,
self.team.slug,
**self.data,
default_rules=False,
status_code=201,
)
project = Project.objects.get(id=response.data["id"])
assert not Rule.objects.filter(project=project).exists()
def test_disable_member_project_creation(self) -> None:
test_org = self.create_organization(flags=256)
test_team = self.create_team(organization=test_org)
# org member cannot create project when they are not a team admin of the team
test_member = self.create_user(is_superuser=False)
self.create_member(
user=test_member,
organization=test_org,
role="member",
team_roles=[(test_team, "contributor")],
)
self.login_as(user=test_member)
self.get_error_response(
test_org.slug,
test_team.slug,
**self.data,
status_code=403,
)
# org member can create project when they are a team admin of the team
test_team_admin = self.create_user(is_superuser=False)
self.create_member(
user=test_team_admin,
organization=test_org,
role="member",
team_roles=[(test_team, "admin")],
)
self.login_as(user=test_team_admin)
self.get_success_response(
test_org.slug,
test_team.slug,
status_code=201,
name="test",
slug="test-1",
platform="python",
)
# org admin can create project
test_admin = self.create_user(is_superuser=False)
self.create_member(user=test_admin, organization=test_org, role="admin", teams=[test_team])
self.login_as(user=test_admin)
self.get_success_response(
test_org.slug,
test_team.slug,
status_code=201,
name="test",
slug="test-2",
platform="python",
)
# org manager can create project
test_manager = self.create_user(is_superuser=False)
self.create_member(user=test_manager, organization=test_org, role="manager", teams=[])
self.login_as(user=test_manager)
self.get_success_response(
test_org.slug,
test_team.slug,
status_code=201,
name="test",
slug="test-3",
platform="python",
)
def test_default_inbound_filters(self) -> None:
filters = ["browser-extensions", "legacy-browsers", "web-crawlers", "filtered-transaction"]
python_response = self.get_success_response(
self.organization.slug,
self.team.slug,
**self.data,
status_code=201,
)
python_project = Project.objects.get(id=python_response.data["id"])
python_filter_states = {
filter_id: inbound_filters.get_filter_state(filter_id, python_project)
for filter_id in filters
}
assert not python_filter_states["browser-extensions"]
assert not python_filter_states["legacy-browsers"]
assert not python_filter_states["web-crawlers"]
assert python_filter_states["filtered-transaction"]
project_data = {"name": "foo", "slug": "baz", "platform": "javascript-react"}
javascript_response = self.get_success_response(
self.organization.slug,
self.team.slug,
**project_data,
status_code=201,
)
javascript_project = Project.objects.get(id=javascript_response.data["id"])
javascript_filter_states = {
filter_id: inbound_filters.get_filter_state(filter_id, javascript_project)
for filter_id in filters
}
assert javascript_filter_states["browser-extensions"]
assert set(javascript_filter_states["legacy-browsers"]) == {
"ie",
"firefox",
"chrome",
"safari",
"opera",
"opera_mini",
"android",
"edge",
}
assert javascript_filter_states["web-crawlers"]
assert javascript_filter_states["filtered-transaction"]
@override_options({"similarity.new_project_seer_grouping.enabled": True})
def test_similarity_project_option_valid(self) -> None:
"""
Test that project option for similarity grouping is created when the project platform is
Seer-eligible.
"""
response = self.get_success_response(
self.organization.slug,
self.team.slug,
**self.data,
status_code=201,
)
project = Project.objects.get(id=response.data["id"])
assert project.name == "foo"
assert project.slug == "bar"
assert project.platform == "python"
assert project.teams.first() == self.team
assert (
ProjectOption.objects.get_value(
project=project, key="sentry:similarity_backfill_completed"
)
is not None
)
def test_similarity_project_option_invalid(self) -> None:
"""
Test that project option for similarity grouping is not created when the project platform
is not seer eligible.
"""
response = self.get_success_response(
self.organization.slug,
self.team.slug,
name="foo",
slug="bar",
platform="php",
status_code=201,
)
project = Project.objects.get(id=response.data["id"])
assert project.name == "foo"
assert project.slug == "bar"
assert project.platform == "php"
assert project.teams.first() == self.team
assert (
ProjectOption.objects.get_value(
project=project, key="sentry:similarity_backfill_completed"
)
is None
)
def test_builtin_symbol_sources_electron(self) -> None:
"""
Test that project option for builtin symbol sources contains ["electron"] when creating
an Electron project, but uses defaults for other platforms.
"""
# Test Electron project
response = self.get_success_response(
self.organization.slug,
self.team.slug,
name="electron-app",
slug="electron-app",
platform="electron",
status_code=201,
)
electron_project = Project.objects.get(id=response.data["id"])
assert electron_project.platform == "electron"
symbol_sources = ProjectOption.objects.get_value(
project=electron_project, key="sentry:builtin_symbol_sources"
)
assert symbol_sources == ["ios", "microsoft", "electron"]
def test_builtin_symbol_sources_not_electron(self) -> None:
# Test non-Electron project (e.g. Python)
response = self.get_success_response(
self.organization.slug,
self.team.slug,
name="python-app",
slug="python-app",
platform="python",
status_code=201,
)
python_project = Project.objects.get(id=response.data["id"])
assert python_project.platform == "python"
# Should use default value, not ["electron"]
symbol_sources = ProjectOption.objects.get_value(
project=python_project, key="sentry:builtin_symbol_sources"
)
assert "electron" not in symbol_sources
def test_builtin_symbol_sources_unity(self) -> None:
"""
Test that project option for builtin symbol sources contains relevant buckets
when creating a Unity project, but uses defaults for other platforms.
"""
response = self.get_success_response(
self.organization.slug,
self.team.slug,
name="unity-app",
slug="unity-app",
platform="unity",
status_code=201,
)
unity_project = Project.objects.get(id=response.data["id"])
assert unity_project.platform == "unity"
symbol_sources = ProjectOption.objects.get_value(
project=unity_project, key="sentry:builtin_symbol_sources"
)
assert symbol_sources == [
"ios",
"microsoft",
"android",
"nuget",
"unity",
"nvidia",
"ubuntu",
]
def test_builtin_symbol_sources_unreal(self) -> None:
"""
Test that project option for builtin symbol sources contains relevant buckets
when creating a Unreal project, but uses defaults for other platforms.
"""
response = self.get_success_response(
self.organization.slug,
self.team.slug,
name="unreal-app",
slug="unreal-app",
platform="unreal",
status_code=201,
)
unreal_project = Project.objects.get(id=response.data["id"])
assert unreal_project.platform == "unreal"
symbol_sources = ProjectOption.objects.get_value(
project=unreal_project, key="sentry:builtin_symbol_sources"
)
assert symbol_sources == ["ios", "microsoft", "android", "nvidia", "ubuntu"]
def test_builtin_symbol_sources_godot(self) -> None:
"""
Test that project option for builtin symbol sources contains relevant buckets
when creating a Godot project, but uses defaults for other platforms.
"""
response = self.get_success_response(
self.organization.slug,
self.team.slug,
name="godot-app",
slug="godot-app",
platform="godot",
status_code=201,
)
godot_project = Project.objects.get(id=response.data["id"])
assert godot_project.platform == "godot"
symbol_sources = ProjectOption.objects.get_value(
project=godot_project, key="sentry:builtin_symbol_sources"
)
assert symbol_sources == ["ios", "microsoft", "android", "nuget", "nvidia", "ubuntu"]
@patch("sentry.core.endpoints.team_projects.TeamProjectsEndpoint.create_audit_entry")
def test_create_project_with_origin(self, create_audit_entry: MagicMock) -> None:
signal_handler = Mock()
project_created.connect(signal_handler)
response = self.get_success_response(
self.organization.slug,
self.team.slug,
**self.data,
default_rules=False,
status_code=201,
origin="ui",
)
project = Project.objects.get(id=response.data["id"])
assert create_audit_entry.call_count == 1
# Verify audit log
create_audit_entry.assert_called_once_with(
request=mock.ANY,
organization=self.organization,
target_object=project.id,
event=1154,
data={
**project.get_audit_log_data(),
"origin": "ui",
},
)
# Verify origin is passed to project_created signal
assert signal_handler.call_count == 1
assert signal_handler.call_args[1]["origin"] == "ui"
project_created.disconnect(signal_handler)
def test_project_inherits_autofix_tuning_from_org_option_set(self) -> None:
self.organization.update_option("sentry:default_autofix_automation_tuning", "medium")
response = self.get_success_response(
self.organization.slug,
self.team.slug,
name="Project Medium Tuning",
slug="project-medium-tuning",
platform="python",
status_code=201,
)
project = Project.objects.get(id=response.data["id"])
autofix_tuning = ProjectOption.objects.get_value(
project=project, key="sentry:default_autofix_automation_tuning"
)
assert autofix_tuning == "medium"
def test_project_autofix_tuning_none_if_org_option_not_set_in_db(self) -> None:
# Ensure the option is not set for this specific organization,
self.organization.delete_option("sentry:default_autofix_automation_tuning")
response = self.get_success_response(
self.organization.slug,
self.team.slug,
name="Project Tuning Default",
slug="project-tuning-default",
platform="python",
status_code=201,
)
project = Project.objects.get(id=response.data["id"])
autofix_tuning = ProjectOption.objects.get_value(
project=project, key="sentry:default_autofix_automation_tuning"
)
assert autofix_tuning is None
def test_console_platform_not_enabled(self) -> None:
response = self.get_error_response(
self.organization.slug,
self.team.slug,
name="Nintendo Project",
platform="nintendo-switch",
status_code=400,
)
assert "Console platform 'nintendo-switch' is not enabled for this organization" in str(
response.data["platform"]
)
def test_console_platform_enabled(self) -> None:
self.organization.update_option("sentry:enabled_console_platforms", ["nintendo-switch"])
response = self.get_success_response(
self.organization.slug,
self.team.slug,
name="Nintendo Project",
slug="nintendo-project",
platform="nintendo-switch",
status_code=201,
)
project = Project.objects.get(id=response.data["id"])
assert project.name == "Nintendo Project"
assert project.platform == "nintendo-switch"
def test_console_platform_xbox_not_enabled(self) -> None:
self.organization.update_option("sentry:enabled_console_platforms", ["nintendo-switch"])
response = self.get_error_response(
self.organization.slug,
self.team.slug,
name="Xbox Project",
platform="xbox",
status_code=400,
)
assert "Console platform 'xbox' is not enabled for this organization" in str(
response.data["platform"]
)
| TeamProjectsCreateTest |
python | ray-project__ray | python/ray/serve/tests/test_multiplex.py | {
"start": 8294,
"end": 19058
} | class ____:
def test_decorator_validation(self):
@serve.multiplexed
async def get_model(model: str):
return
@serve.multiplexed(max_num_models_per_replica=1)
async def get_model2(model: str):
return
@serve.deployment
class MyModel:
@serve.multiplexed
async def get_model(model: str):
return
@serve.deployment
class MyModel2:
@serve.multiplexed(max_num_models_per_replica=1)
async def get_model(self, model: str):
return
# multiplex can only be used with func or method.
with pytest.raises(TypeError):
@serve.deployment
@serve.multiplexed
class BadDecorator:
pass
# max_num_models_per_replica must be an integer
with pytest.raises(TypeError):
@serve.multiplexed(max_num_models_per_replica="1")
async def get_model3(model: str):
pass
# max_num_models_per_replica must be positive
with pytest.raises(ValueError):
@serve.multiplexed(max_num_models_per_replica=0)
async def get_model4(model: str):
pass
# multiplexed function must be async def
with pytest.raises(TypeError):
@serve.multiplexed
def get_model5(model: str):
pass
with pytest.raises(TypeError):
@serve.deployment
class MyModel3:
@serve.multiplexed
def get_model(self, model: str):
return
# no model_id argument in multiplexed function
with pytest.raises(TypeError):
@serve.multiplexed
def get_model6():
pass
with pytest.raises(TypeError):
@serve.deployment
class MyModel4:
@serve.multiplexed
def get_model(self):
return
def test_get_multiplexed_model_id(self):
"""Test get_multiplexed_model_id() API"""
assert serve.get_multiplexed_model_id() == ""
ray.serve.context._serve_request_context.set(
ray.serve.context._RequestContext(multiplexed_model_id="1")
)
assert serve.get_multiplexed_model_id() == "1"
def test_request_routing_info(serve_instance):
"""Test RequestRoutingInfo is passed to the controller & router"""
@serve.deployment
class MyModel:
@serve.multiplexed(max_num_models_per_replica=2)
async def get_model(self, model_id: str):
return
async def __call__(self, model_id: str):
_ = await self.get_model(model_id)
return _get_internal_replica_context().replica_id
handle = serve.run(MyModel.bind())
replica_id = handle.remote("model1").result()
def check_replica_information(
model_ids: List[str],
):
if not handle.is_initialized:
handle._init()
request_router = _get_request_router(handle)
for replica in request_router.curr_replicas.values():
if (
replica.replica_id != replica_id
or model_ids != replica.multiplexed_model_ids
):
return False
return True
wait_for_condition(
check_replica_information,
model_ids={
"model1",
},
)
handle.remote("model2").result()
wait_for_condition(
check_replica_information,
model_ids={
"model1",
"model2",
},
)
# LRU remove the model1
handle.remote("model3").result()
wait_for_condition(
check_replica_information,
model_ids={
"model2",
"model3",
},
)
def check_model_id_in_replicas(handle: DeploymentHandle, model_id: str) -> bool:
if not handle.is_initialized:
handle._init()
request_router = _get_request_router(handle)
replica_to_model_ids = {
tag: replica.multiplexed_model_ids
for tag, replica in request_router.curr_replicas.items()
}
msg = (
f"Model ID '{model_id}' not found in replica_to_model_ids: "
f"{replica_to_model_ids}"
)
assert any(model_id in rep for rep in replica_to_model_ids.values()), msg
return True
def test_multiplexed_e2e(serve_instance):
"""Test multiplexed function end to end"""
@serve.deployment(num_replicas=2)
class Model:
@serve.multiplexed(max_num_models_per_replica=1)
async def get_model(self, tag):
return tag
async def __call__(self, request):
tag = serve.get_multiplexed_model_id()
await self.get_model(tag)
# return pid to check if the same model is used
return os.getpid()
model_id = "1"
handle = serve.run(Model.bind())
headers = {SERVE_MULTIPLEXED_MODEL_ID: model_id}
resp = httpx.get("http://localhost:8000", headers=headers)
initial_pid = resp.json()
wait_for_condition(check_model_id_in_replicas, handle=handle, model_id=model_id)
# Check that the same replica is used repeatedly for the same model_id.
for _ in range(10):
resp = httpx.get("http://localhost:8000", headers=headers)
assert resp.json() == initial_pid
for _ in range(10):
assert (
handle.options(multiplexed_model_id="1").remote("blabla").result()
== initial_pid
)
def test_multiplexed_lru_policy(serve_instance):
"""Test multiplexed function LRU policy"""
@serve.deployment
class Model:
@serve.multiplexed(max_num_models_per_replica=2)
async def get_model(self, tag):
return tag
async def __call__(self, request):
tag = serve.get_multiplexed_model_id()
await self.get_model(tag)
# return pid to check if the same model is used
return os.getpid()
handle = serve.run(Model.bind())
headers = {SERVE_MULTIPLEXED_MODEL_ID: "1"}
httpx.get("http://localhost:8000", headers=headers)
headers = {SERVE_MULTIPLEXED_MODEL_ID: "2"}
httpx.get("http://localhost:8000", headers=headers)
# Make sure model2 will be evicted
headers = {SERVE_MULTIPLEXED_MODEL_ID: "1"}
httpx.get("http://localhost:8000", headers=headers)
headers = {SERVE_MULTIPLEXED_MODEL_ID: "3"}
httpx.get("http://localhost:8000", headers=headers)
wait_for_condition(
(
lambda: check_model_id_in_replicas(handle, "1")
and check_model_id_in_replicas(handle, "3")
)
)
def test_multiplexed_multiple_replicas(serve_instance):
"""Test multiplexed traffic can be sent to multiple replicas"""
signal = SignalActor.remote()
@serve.deployment(num_replicas=2, max_ongoing_requests=1)
class Model:
@serve.multiplexed(max_num_models_per_replica=2)
async def get_model(self, tag):
return tag
async def __call__(self):
tag = serve.get_multiplexed_model_id()
await self.get_model(tag)
await signal.wait.remote()
# return pid to check if the same model is used
return os.getpid()
handle = serve.run(Model.bind()).options(multiplexed_model_id="1")
# Each request should go to different replicas.
pid1_ref = handle.remote()
pid2_ref = handle.remote()
wait_for_condition(lambda: ray.get(signal.cur_num_waiters.remote()) == 2)
# Unblock both requests to finish.
ray.get(signal.send.remote())
assert pid1_ref.result() != pid2_ref.result()
wait_for_condition(check_model_id_in_replicas, handle=handle, model_id="1")
def test_setting_model_id_on_handle_does_not_set_it_locally(serve_instance):
"""
Verify that `.options(multiplexed_model_id="foo")` on a ServeHandle sets it in the
downstream but does not update the model ID in the caller.
"""
@serve.deployment
class Downstream:
def __call__(self):
return serve.get_multiplexed_model_id()
@serve.deployment
class Upstream:
def __init__(self, downstream: DeploymentHandle):
self._h = downstream
async def __call__(self):
model_id_before = serve.get_multiplexed_model_id()
# Make a call with another model ID, verify it's set properly.
other_model_id = await self._h.options(multiplexed_model_id="bar").remote()
assert other_model_id == "bar"
# Model ID shouldn't change after the handle call.
model_id_after = serve.get_multiplexed_model_id()
assert model_id_before == model_id_after
return model_id_before
handle = serve.run(Upstream.bind(Downstream.bind()))
assert handle.options(multiplexed_model_id="foo").remote().result() == "foo"
def test_replica_upgrade_to_cleanup_resource(serve_instance):
"""When replica is upgraded, we need to make sure model resources are released."""
@serve.deployment
class Recorder:
def __init__(self):
self.call_record = set()
def add(self, model_id):
self.call_record.add(model_id)
def get_call_record(self):
return self.call_record
record_handle = serve.run(
Recorder.bind(), name="recorder", route_prefix="/recorder"
)
class MyModel:
def __init__(self, model_id, record_handle):
self.model_id = model_id
self.record_handle = record_handle
async def __del__(self):
await self.record_handle.add.remote(self.model_id)
def __eq__(self, model):
return model.model_id == self.model_id
@serve.deployment(num_replicas=1)
class Model:
def __init__(self, record_handle):
self.record_handle = record_handle
@serve.multiplexed(max_num_models_per_replica=1)
async def get_model(self, tag):
return MyModel(tag, self.record_handle)
async def __call__(self, request):
tag = serve.get_multiplexed_model_id()
await self.get_model(tag)
# return pid to check if the same model is used
return os.getpid()
serve.run(Model.bind(record_handle))
model_id = "1"
headers = {"serve_multiplexed_model_id": model_id}
httpx.get("http://localhost:8000", headers=headers)
assert record_handle.get_call_record.remote().result() == set()
serve.run(Model.bind(record_handle))
assert record_handle.get_call_record.remote().result() == {"1"}
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestBasicAPI |
python | walkccc__LeetCode | solutions/1698. Number of Distinct Substrings in a String/1698.py | {
"start": 0,
"end": 789
} | class ____:
def countDistinct(self, s: str) -> int:
BASE = 26
HASH = 1_000_000_007
n = len(s)
ans = 0
pow = [1] + [0] * n # pow[i] := BASE^i
hashes = [0] * (n + 1) # hashes[i] := the hash of s[0..i)
def val(c: str) -> int:
return ord(c) - ord('a')
for i in range(1, n + 1):
pow[i] = pow[i - 1] * BASE % HASH
hashes[i] = (hashes[i - 1] * BASE + val(s[i - 1])) % HASH
def getHash(l: int, r: int) -> int:
"""Returns the hash of s[l..r)."""
hash = (hashes[r] - hashes[l] * pow[r - l]) % HASH
return hash + HASH if hash < 0 else hash
for length in range(1, n + 1):
seen = set()
for i in range(n - length + 1):
seen.add(getHash(i, i + length))
ans += len(seen)
return ans
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol18.py | {
"start": 110,
"end": 174
} | class ____(Protocol): ...
# This should generate an error.
A()
| A |
python | doocs__leetcode | solution/1600-1699/1684.Count the Number of Consistent Strings/Solution2.py | {
"start": 0,
"end": 265
} | class ____:
def countConsistentStrings(self, allowed: str, words: List[str]) -> int:
def f(w):
return reduce(or_, (1 << (ord(c) - ord('a')) for c in w))
mask = f(allowed)
return sum((mask | f(w)) == mask for w in words)
| Solution |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_methods.py | {
"start": 3013,
"end": 4087
} | class ____:
def setup(self):
N = 10**3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.dict_idx = {k: k for k in self.idx}
self.df2 = DataFrame(
{
c: {
0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64),
}[np.random.randint(0, 4)]
for c in range(N)
}
)
def time_rename_single(self):
self.df.rename({0: 0})
def time_rename_axis0(self):
self.df.rename(self.dict_idx)
def time_rename_axis1(self):
self.df.rename(columns=self.dict_idx)
def time_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
def time_dict_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
| Rename |
python | numpy__numpy | numpy/lib/tests/test_index_tricks.py | {
"start": 426,
"end": 8475
} | class ____:
def test_basic(self):
assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
# test that new shape argument works properly
assert_equal(np.unravel_index(indices=2,
shape=(2, 2)),
(1, 0))
# test that an invalid second keyword argument
# is properly handled, including the old name `dims`.
with assert_raises(TypeError):
np.unravel_index(indices=2, hape=(2, 2))
with assert_raises(TypeError):
np.unravel_index(2, hape=(2, 2))
with assert_raises(TypeError):
np.unravel_index(254, ims=(17, 94))
with assert_raises(TypeError):
np.unravel_index(254, dims=(17, 94))
assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
assert_equal(np.unravel_index(254, (17, 94)), (2, 66))
assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)
assert_raises(ValueError, np.unravel_index, -1, (2, 2))
assert_raises(TypeError, np.unravel_index, 0.5, (2, 2))
assert_raises(ValueError, np.unravel_index, 4, (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2))
assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2))
assert_equal(np.unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)), [2, 1, 4])
assert_equal(
np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2 * 3 + 1) * 6 + 4)
arr = np.array([[3, 6, 6], [4, 5, 1]])
assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37])
assert_equal(
np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13])
assert_equal(
np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19])
assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')),
[12, 13, 13])
assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621)
assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)),
[[3, 6, 6], [4, 5, 1]])
assert_equal(
np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'),
[[3, 6, 6], [4, 5, 1]])
assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1])
def test_empty_indices(self):
msg1 = 'indices must be integral: the provided empty sequence was'
msg2 = 'only int indices permitted'
assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5))
assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5))
assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]),
(10, 3, 5))
assert_equal(np.unravel_index(np.array([], dtype=int), (10, 3, 5)),
[[], [], []])
assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []),
(10, 3))
assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']),
(10, 3))
assert_raises_regex(TypeError, msg2, np.ravel_multi_index,
(np.array([]), np.array([])), (5, 3))
assert_equal(np.ravel_multi_index(
(np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), [])
assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int),
(5, 3)), [])
def test_big_indices(self):
# ravel_multi_index for big indices (issue #7546)
if np.intp == np.int64:
arr = ([1, 29], [3, 5], [3, 117], [19, 2],
[2379, 1284], [2, 2], [0, 1])
assert_equal(
np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)),
[5627771580, 117259570957])
# test unravel_index for big indices (issue #9538)
assert_raises(ValueError, np.unravel_index, 1, (2**32 - 1, 2**31 + 1))
# test overflow checking for too big array (issue #7546)
dummy_arr = ([0], [0])
half_max = np.iinfo(np.intp).max // 2
assert_equal(
np.ravel_multi_index(dummy_arr, (half_max, 2)), [0])
assert_raises(ValueError,
np.ravel_multi_index, dummy_arr, (half_max + 1, 2))
assert_equal(
np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0])
assert_raises(ValueError,
np.ravel_multi_index, dummy_arr, (half_max + 1, 2), order='F')
def test_dtypes(self):
# Test with different data types
for dtype in [np.int16, np.uint16, np.int32,
np.uint32, np.int64, np.uint64]:
coords = np.array(
[[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype)
shape = (5, 8)
uncoords = 8 * coords[0] + coords[1]
assert_equal(np.ravel_multi_index(coords, shape), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape))
uncoords = coords[0] + 5 * coords[1]
assert_equal(
np.ravel_multi_index(coords, shape, order='F'), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
coords = np.array(
[[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],
dtype=dtype)
shape = (5, 8, 10)
uncoords = 10 * (8 * coords[0] + coords[1]) + coords[2]
assert_equal(np.ravel_multi_index(coords, shape), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape))
uncoords = coords[0] + 5 * (coords[1] + 8 * coords[2])
assert_equal(
np.ravel_multi_index(coords, shape, order='F'), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
def test_clipmodes(self):
# Test clipmodes
assert_equal(
np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'),
np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12)))
assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12),
mode=(
'wrap', 'raise', 'clip', 'raise')),
np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12)))
assert_raises(
ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12))
def test_writeability(self):
# gh-7269
x, y = np.unravel_index([1, 2, 3], (4, 5))
assert_(x.flags.writeable)
assert_(y.flags.writeable)
def test_0d(self):
# gh-580
x = np.unravel_index(0, ())
assert_equal(x, ())
assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ())
assert_raises_regex(
ValueError, "out of bounds", np.unravel_index, [1], ())
@pytest.mark.parametrize("mode", ["clip", "wrap", "raise"])
def test_empty_array_ravel(self, mode):
res = np.ravel_multi_index(
np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode)
assert res.shape == (0,)
with assert_raises(ValueError):
np.ravel_multi_index(
np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode)
def test_empty_array_unravel(self):
res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0))
# res is a tuple of three empty arrays
assert len(res) == 3
assert all(a.shape == (0,) for a in res)
with assert_raises(ValueError):
np.unravel_index([1], (2, 1, 0))
def test_regression_size_1_index(self):
# actually tests the nditer size one index tracking
# regression test for gh-29690
np.unravel_index(np.array([[1, 0, 1, 0]], dtype=np.uint32), (4,))
| TestRavelUnravelIndex |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 69622,
"end": 72272
} | class ____:
def test_year_full(self):
assert self.locale.year_full(2015) == "2558"
def test_year_abbreviation(self):
assert self.locale.year_abbreviation(2015) == "58"
def test_format_relative_now(self):
result = self.locale._format_relative("ດຽວນີ້", "now", 0)
assert result == "ດຽວນີ້"
def test_format_relative_past(self):
result = self.locale._format_relative("1 ຊົ່ວໂມງ", "hour", 1)
assert result == "ໃນ 1 ຊົ່ວໂມງ"
result = self.locale._format_relative("{0} ຊົ່ວໂມງ", "hours", 2)
assert result == "ໃນ {0} ຊົ່ວໂມງ"
result = self.locale._format_relative("ວິນາທີ", "seconds", 42)
assert result == "ໃນວິນາທີ"
def test_format_relative_future(self):
result = self.locale._format_relative("1 ຊົ່ວໂມງ", "hour", -1)
assert result == "1 ຊົ່ວໂມງ ກ່ອນຫນ້ານີ້"
def test_format_timeframe(self):
# minute(s)
assert self.locale._format_timeframe("minute", 1) == "ນາທີ"
assert self.locale._format_timeframe("minute", -1) == "ນາທີ"
assert self.locale._format_timeframe("minutes", 7) == "7 ນາທີ"
assert self.locale._format_timeframe("minutes", -20) == "20 ນາທີ"
# day(s)
assert self.locale._format_timeframe("day", 1) == "ມື້"
assert self.locale._format_timeframe("day", -1) == "ມື້"
assert self.locale._format_timeframe("days", 7) == "7 ມື້"
assert self.locale._format_timeframe("days", -20) == "20 ມື້"
# week(s)
assert self.locale._format_timeframe("week", 1) == "ອາທິດ"
assert self.locale._format_timeframe("week", -1) == "ອາທິດ"
assert self.locale._format_timeframe("weeks", 7) == "7 ອາທິດ"
assert self.locale._format_timeframe("weeks", -20) == "20 ອາທິດ"
# month(s)
assert self.locale._format_timeframe("month", 1) == "ເດືອນ"
assert self.locale._format_timeframe("month", -1) == "ເດືອນ"
assert self.locale._format_timeframe("months", 7) == "7 ເດືອນ"
assert self.locale._format_timeframe("months", -20) == "20 ເດືອນ"
# year(s)
assert self.locale._format_timeframe("year", 1) == "ປີ"
assert self.locale._format_timeframe("year", -1) == "ປີ"
assert self.locale._format_timeframe("years", 7) == "7 ປີ"
assert self.locale._format_timeframe("years", -20) == "20 ປີ"
def test_weekday(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
assert self.locale.day_name(dt.isoweekday()) == "ວັນເສົາ"
assert self.locale.day_abbreviation(dt.isoweekday()) == "ວັນເສົາ"
@pytest.mark.usefixtures("lang_locale")
| TestLaotianLocale |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofwork.py | {
"start": 53385,
"end": 64685
} | class ____(_fixtures.FixtureTest):
run_inserts = None
def test_basic(self):
User, users = self.classes.User, self.tables.users
m = self.mapper_registry.map_imperatively(User, users)
# save two users
u = User(name="savetester")
u2 = User(name="savetester2")
with fixture_session() as session:
session.add_all((u, u2))
session.flush()
# assert the first one retrieves the same from the identity map
nu = session.get(m, u.id)
assert u is nu
# clear out the identity map, so next get forces a SELECT
session.expunge_all()
# check it again, identity should be different but ids the same
nu = session.get(m, u.id)
assert u is not nu and u.id == nu.id and nu.name == "savetester"
session.commit()
# change first users name and save
with fixture_session() as session:
session.add(u)
u.name = "modifiedname"
assert u in session.dirty
session.flush()
# select both
userlist = (
session.query(User)
.filter(users.c.id.in_([u.id, u2.id]))
.order_by(users.c.name)
.all()
)
eq_(u.id, userlist[0].id)
eq_(userlist[0].name, "modifiedname")
eq_(u2.id, userlist[1].id)
eq_(userlist[1].name, "savetester2")
def test_synonym(self):
users = self.tables.users
class SUser(BasicEntity):
def _get_name(self):
return "User:" + self.name
def _set_name(self, name):
self.name = name + ":User"
syn_name = property(_get_name, _set_name)
self.mapper_registry.map_imperatively(
SUser, users, properties={"syn_name": sa.orm.synonym("name")}
)
u = SUser(syn_name="some name")
eq_(u.syn_name, "User:some name:User")
session = fixture_session()
session.add(u)
session.flush()
session.expunge_all()
u = session.query(SUser).first()
eq_(u.syn_name, "User:some name:User")
def test_lazyattr_commit(self):
"""Lazily loaded relationships.
When a lazy-loaded list is unloaded, and a commit occurs, that the
'passive' call on that list does not blow away its value
"""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses)
)
},
)
u = User(name="u1")
u.addresses.append(Address(email_address="u1@e1"))
u.addresses.append(Address(email_address="u1@e2"))
u.addresses.append(Address(email_address="u1@e3"))
u.addresses.append(Address(email_address="u1@e4"))
session = fixture_session()
session.add(u)
session.flush()
session.expunge_all()
u = session.query(User).one()
u.name = "newname"
session.flush()
eq_(len(u.addresses), 4)
def test_inherits(self):
"""a user object that also has the users mailing address."""
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
m1 = self.mapper_registry.map_imperatively(User, users)
class AddressUser(User):
pass
# define a mapper for AddressUser that inherits the User.mapper, and
# joins on the id column
self.mapper_registry.map_imperatively(
AddressUser,
addresses,
inherits=m1,
properties={"address_id": addresses.c.id},
)
au = AddressUser(name="u", email_address="u@e")
session = fixture_session()
session.add(au)
session.flush()
session.expunge_all()
rt = session.query(AddressUser).one()
eq_(au.user_id, rt.user_id)
eq_(rt.id, rt.id)
def test_deferred(self):
"""Deferred column operations"""
orders, Order = self.tables.orders, self.classes.Order
self.mapper_registry.map_imperatively(
Order,
orders,
properties={"description": sa.orm.deferred(orders.c.description)},
)
# don't set deferred attribute, commit session
o = Order(id=42)
session = fixture_session()
session.add(o)
session.commit()
# assert that changes get picked up
o.description = "foo"
session.commit()
eq_(
list(session.execute(orders.select())),
[(42, None, None, "foo", None)],
)
session.expunge_all()
# assert that a set operation doesn't trigger a load operation
o = session.query(Order).filter(Order.description == "foo").one()
def go():
o.description = "hoho"
self.sql_count_(0, go)
session.flush()
eq_(
list(
session.execute(
orders.select(),
)
),
[(42, None, None, "hoho", None)],
)
session.expunge_all()
# test assigning None to an unloaded deferred also works
o = session.query(Order).filter(Order.description == "hoho").one()
o.description = None
session.flush()
eq_(
list(
session.execute(
orders.select(),
)
),
[(42, None, None, None, None)],
)
session.close()
# why no support on oracle ? because oracle doesn't save
# "blank" strings; it saves a single space character.
@testing.fails_on("oracle", "FIXME: unknown")
def test_dont_update_blanks(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
u = User(name="")
session = fixture_session()
session.add(u)
session.flush()
session.expunge_all()
u = session.get(User, u.id)
u.name = ""
self.sql_count_(0, session.flush)
def test_multi_table_selectable(self):
"""Mapped selectables that span tables.
Also tests redefinition of the keynames for the column properties.
"""
addresses, users, User = (
self.tables.addresses,
self.tables.users,
self.classes.User,
)
usersaddresses = sa.join(
users, addresses, users.c.id == addresses.c.user_id
)
m = self.mapper_registry.map_imperatively(
User,
usersaddresses,
properties=dict(
email=addresses.c.email_address,
foo_id=[users.c.id, addresses.c.user_id],
),
)
u = User(name="multitester", email="multi@test.org")
session = fixture_session()
session.add(u)
session.flush()
session.expunge_all()
id_ = m.primary_key_from_instance(u)
u = session.get(User, id_)
assert u.name == "multitester"
conn = session.connection()
user_rows = conn.execute(
users.select().where(users.c.id.in_([u.foo_id]))
).fetchall()
eq_(list(user_rows[0]), [u.foo_id, "multitester"])
address_rows = conn.execute(
addresses.select().where(addresses.c.id.in_([u.id]))
).fetchall()
eq_(list(address_rows[0]), [u.id, u.foo_id, "multi@test.org"])
u.email = "lala@hey.com"
u.name = "imnew"
session.flush()
user_rows = conn.execute(
users.select().where(users.c.id.in_([u.foo_id]))
).fetchall()
eq_(list(user_rows[0]), [u.foo_id, "imnew"])
address_rows = conn.execute(
addresses.select().where(addresses.c.id.in_([u.id]))
).fetchall()
eq_(list(address_rows[0]), [u.id, u.foo_id, "lala@hey.com"])
session.expunge_all()
u = session.get(User, id_)
assert u.name == "imnew"
def test_history_get(self):
"""The history lazy-fetches data when it wasn't otherwise loaded."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, cascade="all, delete-orphan"
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
u = User(name="u1")
u.addresses.append(Address(email_address="u1@e1"))
u.addresses.append(Address(email_address="u1@e2"))
session = fixture_session()
session.add(u)
session.flush()
session.expunge_all()
u = session.get(User, u.id)
session.delete(u)
session.flush()
eq_(
session.connection().scalar(
select(func.count("*")).select_from(users)
),
0,
)
eq_(
session.connection().scalar(
select(func.count("*")).select_from(addresses)
),
0,
)
def test_batch_mode(self):
"""The 'batch=False' flag on mapper()"""
users, User = self.tables.users, self.classes.User
names = []
class Events:
def before_insert(self, mapper, connection, instance):
self.current_instance = instance
names.append(instance.name)
def after_insert(self, mapper, connection, instance):
assert instance is self.current_instance
self.mapper_registry.map_imperatively(User, users, batch=False)
evt = Events()
event.listen(User, "before_insert", evt.before_insert)
event.listen(User, "after_insert", evt.after_insert)
u1 = User(name="user1")
u2 = User(name="user2")
session = fixture_session()
session.add_all((u1, u2))
session.flush()
u3 = User(name="user3")
u4 = User(name="user4")
u5 = User(name="user5")
session.add_all([u4, u5, u3])
session.flush()
# test insert ordering is maintained
eq_(names, ["user1", "user2", "user4", "user5", "user3"])
session.expunge_all()
sa.orm.clear_mappers()
self.mapper_registry.map_imperatively(User, users)
evt = Events()
event.listen(User, "before_insert", evt.before_insert)
event.listen(User, "after_insert", evt.after_insert)
u1 = User(name="user1")
u2 = User(name="user2")
session.add_all((u1, u2))
assert_raises(AssertionError, session.flush)
| SaveTest |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/convolutional.py | {
"start": 94924,
"end": 104498
} | class ____(Conv2D):
"""Depthwise 2D convolution.
Depthwise convolution is a type of convolution in which a single convolutional
filter is apply to each input channel (i.e. in a depthwise way).
You can understand depthwise convolution as being
the first step in a depthwise separable convolution.
It is implemented via the following steps:
- Split the input into individual channels.
- Convolve each input with the layer's kernel (called a depthwise kernel).
- Stack the convolved outputs together (along the channels axis).
Unlike a regular 2D convolution, depthwise convolution does not mix
information across different input channels.
The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Args:
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `'valid'` or `'same'` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be 'channels_last'.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix (
see `keras.initializers`). If None, the default initializer (
'glorot_uniform') will be used.
bias_initializer: Initializer for the bias vector (
see `keras.initializers`). If None, the default initializer (
'zeros') will bs used.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its 'activation') (
see `keras.regularizers`).
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
4D tensor with shape:
`[batch_size, channels, rows, cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch_size, rows, cols, channels]` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`[batch_size, channels * depth_multiplier, new_rows, new_cols]` if
data_format='channels_first' or 4D tensor with shape:
`[batch_size, new_rows, new_cols, channels * depth_multiplier]` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to padding.
Returns:
A tensor of rank 4 representing
`activation(depthwiseconv2d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
depth_multiplier=1,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs):
super(DepthwiseConv2D, self).__init__(
filters=None,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
bias_constraint=bias_constraint,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
if len(input_shape) < 4:
raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
'Received input shape:', str(input_shape))
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs to '
'`DepthwiseConv2D` '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = (self.kernel_size[0],
self.kernel_size[1],
input_dim,
self.depth_multiplier)
self.depthwise_kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
name='depthwise_kernel',
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(input_dim * self.depth_multiplier,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
outputs = backend.depthwise_conv2d(
inputs,
self.depthwise_kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format)
if self.use_bias:
outputs = backend.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
out_filters = input_shape[1] * self.depth_multiplier
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
out_filters = input_shape[3] * self.depth_multiplier
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding,
self.strides[0],
self.dilation_rate[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding,
self.strides[1],
self.dilation_rate[1])
if self.data_format == 'channels_first':
return (input_shape[0], out_filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, out_filters)
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.pop('filters')
config.pop('kernel_initializer')
config.pop('kernel_regularizer')
config.pop('kernel_constraint')
config['depth_multiplier'] = self.depth_multiplier
config['depthwise_initializer'] = initializers.serialize(
self.depthwise_initializer)
config['depthwise_regularizer'] = regularizers.serialize(
self.depthwise_regularizer)
config['depthwise_constraint'] = constraints.serialize(
self.depthwise_constraint)
return config
| DepthwiseConv2D |
python | doocs__leetcode | solution/2400-2499/2437.Number of Valid Clock Times/Solution2.py | {
"start": 0,
"end": 367
} | class ____:
def countTime(self, time: str) -> int:
def f(s: str, m: int) -> int:
cnt = 0
for i in range(m):
a = s[0] == '?' or (int(s[0]) == i // 10)
b = s[1] == '?' or (int(s[1]) == i % 10)
cnt += a and b
return cnt
return f(time[:2], 24) * f(time[3:], 60)
| Solution |
python | facebookresearch__faiss | tests/test_binary_factory.py | {
"start": 277,
"end": 1653
} | class ____(unittest.TestCase):
def test_factory_IVF(self):
index = faiss.index_binary_factory(16, "BIVF10")
assert index.invlists is not None
assert index.nlist == 10
assert index.code_size == 2
def test_factory_Flat(self):
index = faiss.index_binary_factory(16, "BFlat")
assert index.code_size == 2
def test_factory_HNSW(self):
index = faiss.index_binary_factory(256, "BHNSW32")
assert index.code_size == 32
def test_factory_IVF_HNSW(self):
index = faiss.index_binary_factory(256, "BIVF1024_BHNSW32")
assert index.code_size == 32
assert index.nlist == 1024
def test_factory_Hash(self):
index = faiss.index_binary_factory(256, "BHash12")
assert index.b == 12
def test_factory_MultiHash(self):
index = faiss.index_binary_factory(256, "BHash5x6")
assert index.b == 6
assert index.nhash == 5
def test_factory_IDMap2_prefix(self):
index = faiss.index_binary_factory(16, "IDMap2,BFlat")
assert isinstance(index, faiss.IndexBinaryIDMap2)
assert index.index.code_size == 2
def test_factory_IDMap2_suffix(self):
index = faiss.index_binary_factory(16, "BFlat,IDMap2")
assert isinstance(index, faiss.IndexBinaryIDMap2)
assert index.index.code_size == 2
| TestBinaryFactory |
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 74052,
"end": 74883
} | class ____(Response):
"""
Response of dataviews.create endpoint.
:param id: New dataview's ID
:type id: str
"""
_service = "dataviews"
_action = "create"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"id": {"description": "New dataview's ID", "type": ["string", "null"]}
},
"type": "object",
}
def __init__(self, id=None, **kwargs):
super(CreateResponse, self).__init__(**kwargs)
self.id = id
@schema_property("id")
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
| CreateResponse |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 5537,
"end": 7346
} | class ____(ModelOutput):
r"""
last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the vision encoder.
last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the text encoder.
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each
layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the
output of each layer plus the initial embedding outputs.
text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of
each layer plus the initial embedding outputs.
"""
last_hidden_state_vision: Optional[torch.FloatTensor] = None
last_hidden_state_text: Optional[torch.FloatTensor] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
text_hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the Grounding DINO encoder-decoder model.
"""
)
| GroundingDinoEncoderOutput |
python | getsentry__sentry | src/sentry/users/api/endpoints/user_regions.py | {
"start": 1528,
"end": 2633
} | class ____(UserEndpoint):
owner = ApiOwner.HYBRID_CLOUD
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (UserRegionEndpointPermissions,)
def get(self, request: Request, user: RpcUser, **kwargs: Any) -> Response:
"""
Retrieve the Regions a User has membership in
`````````````````````````````````````````````
Returns a list of regions that the current user has membership in.
:auth: required
"""
organization_ids = OrganizationMemberMapping.objects.filter(user_id=user.id).values_list(
"organization_id", flat=True
)
org_mappings = (
OrganizationMapping.objects.filter(organization_id__in=organization_ids)
.distinct("region_name")
.order_by("region_name")
.values_list("region_name", flat=True)
)
regions = [get_region_by_name(region_name).api_serialize() for region_name in org_mappings]
payload = {
"regions": regions,
}
return Response(payload)
| UserRegionsEndpoint |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-siliconflow-rerank/llama_index/postprocessor/siliconflow_rerank/base.py | {
"start": 714,
"end": 4982
} | class ____(BaseNodePostprocessor):
model: str = Field(
default="BAAI/bge-reranker-v2-m3",
description="Specifies the model to be used.",
)
base_url: str = Field(
default=DEFAULT_SILICONFLOW_API_URL,
description="The URL of the SiliconFlow Rerank API.",
)
api_key: str = Field(default=None, description="The SiliconFlow API key.")
top_n: int = Field(
description="Number of most relevant documents or indices to return."
)
return_documents: bool = Field(
default=True,
description="Specify whether the response should include the document text.",
)
max_chunks_per_doc: int = Field(
default=1024,
description="""\
Maximum number of chunks generated from within a document.
Long documents are divided into multiple chunks for calculation,
and the highest score among the chunks is taken as the document's score.
""",
)
overlap_tokens: int = Field(
default=80,
description="Number of token overlaps between adjacent chunks when documents are chunked.",
)
_session: Any = PrivateAttr()
def __init__(
self,
model: str = "BAAI/bge-reranker-v2-m3",
base_url: str = DEFAULT_SILICONFLOW_API_URL,
api_key: Optional[str] = None,
top_n: int = 4,
return_documents: bool = True,
max_chunks_per_doc: int = 1024,
overlap_tokens: int = 80,
):
super().__init__(
model=model,
base_url=base_url,
api_key=api_key,
top_n=top_n,
return_documents=return_documents,
max_chunks_per_doc=max_chunks_per_doc,
overlap_tokens=overlap_tokens,
)
self._session: requests.Session = requests.Session()
self._session.headers.update(
{
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
)
@classmethod
def class_name(cls) -> str:
return "SiliconFlowRerank"
@property
def _model_kwargs(self) -> Dict[str, Any]:
return {
"return_documents": self.return_documents,
"max_chunks_per_doc": self.max_chunks_per_doc,
"overlap_tokens": self.overlap_tokens,
}
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.model,
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED)
for node in nodes
]
response = self._session.post(
self.base_url,
json={
"model": self.model,
"query": query_bundle.query_str,
"documents": texts,
"top_n": self.top_n,
**self._model_kwargs,
},
).json()
if "results" not in response:
raise RuntimeError(response)
new_nodes = []
for result in response["results"]:
new_node_with_score = NodeWithScore(
node=nodes[result["index"]].node, score=result["relevance_score"]
)
new_nodes.append(new_node_with_score)
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
| SiliconFlowRerank |
python | spyder-ide__spyder | spyder/plugins/completion/providers/languageserver/transport/main.py | {
"start": 3076,
"end": 5862
} | class ____:
"""Manage and intercept SIGTERM and SIGKILL signals."""
def __init__(self):
self.original_sigint = signal.getsignal(signal.SIGINT)
self.original_sigterm = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
if os.name == 'nt':
self.original_sigbreak = signal.getsignal(signal.SIGBREAK)
signal.signal(signal.SIGBREAK, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
"""Capture exit/kill signal and throw and exception."""
logger.info('Termination signal ({}) captured, '
'initiating exit sequence'.format(signum))
raise TerminateSignal("Exit process!")
def restore(self):
"""Restore signal handlers to their original settings."""
signal.signal(signal.SIGINT, self.original_sigint)
signal.signal(signal.SIGTERM, self.original_sigterm)
if os.name == 'nt':
signal.signal(signal.SIGBREAK, self.original_sigbreak)
if __name__ == '__main__':
logger_init(args.transport_debug)
extra_args = [x for x in extra_args if len(x) > 0]
extra_args = ' '.join(extra_args)
logger.debug(extra_args)
process = psutil.Process()
parent_pid = process.ppid()
sig_manager = SignalManager()
if args.stdio_server:
LanguageServerClient = partial(StdioLanguageServerClient,
server_args=extra_args,
log_file=args.server_log_file)
else:
LanguageServerClient = partial(TCPLanguageServerClient,
host=args.server_host,
port=args.server_port)
client = LanguageServerClient(zmq_in_port=args.zmq_in_port,
zmq_out_port=args.zmq_out_port)
client.start()
is_alive = True
def watch_parent_process(pid):
"""
Exit when the given pid is not alive.
Code taken from the Python Language Server project.
"""
global is_alive
if not psutil.pid_exists(pid):
logger.info("parent process %s is not alive, exiting!", pid)
is_alive = False
if is_alive:
threading.Timer(PARENT_PROCESS_WATCH_INTERVAL,
watch_parent_process, args=[pid]).start()
watching_thread = threading.Thread(
target=watch_parent_process, args=(parent_pid,))
watching_thread.daemon = True
watching_thread.start()
try:
while is_alive:
client.listen()
except TerminateSignal:
pass
client.stop()
process.terminate()
process.wait()
| SignalManager |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_block_schemas.py | {
"start": 7253,
"end": 13902
} | class ____:
async def test_read_all_block_schemas(self, session, client, block_schemas):
result = await client.post("/block_schemas/filter")
api_schemas = parse_obj_as(List[schemas.core.BlockSchema], result.json())
assert {s.id for s in api_schemas} == {
block_schemas[0].id,
block_schemas[2].id,
block_schemas[1].id,
}
async def test_read_all_block_schemas_filter_block_type_id_x(
self, session, client, block_schemas, block_type_x
):
result = await client.post(
"/block_schemas/filter",
json=dict(
block_schemas=dict(block_type_id=dict(any_=[str(block_type_x.id)]))
),
)
api_schemas = parse_obj_as(List[schemas.core.BlockSchema], result.json())
assert [s.id for s in api_schemas] == [block_schemas[i].id for i in (2, 0)]
async def test_read_all_block_schemas_filter_block_type_id_y(
self, session, client, block_schemas, block_type_y
):
result = await client.post(
"/block_schemas/filter",
json=dict(
block_schemas=dict(block_type_id=dict(any_=[str(block_type_y.id)]))
),
)
api_schemas = parse_obj_as(List[schemas.core.BlockSchema], result.json())
assert [s.id for s in api_schemas] == [block_schemas[1].id]
async def test_read_all_block_schemas_filter_block_type_id_x_and_y(
self, session, client, block_schemas, block_type_x, block_type_y
):
result = await client.post(
"/block_schemas/filter",
json=dict(
block_schemas=dict(
block_type_id=dict(
any_=[str(block_type_x.id), str(block_type_y.id)]
)
)
),
)
api_schemas = parse_obj_as(List[schemas.core.BlockSchema], result.json())
assert [s.id for s in api_schemas] == [block_schemas[i].id for i in (2, 1, 0)]
async def test_read_block_schema_by_id(self, session, client, block_schemas):
schema_id = block_schemas[0].id
response = await client.get(f"/block_schemas/{schema_id}")
assert response.status_code == status.HTTP_200_OK
block_schema_response = parse_obj_as(schemas.core.BlockSchema, response.json())
assert block_schema_response.id == schema_id
async def test_read_block_schema_by_checksum(self, session, client, block_schemas):
schema_checksum = block_schemas[0].checksum
response = await client.get(f"/block_schemas/checksum/{schema_checksum}")
assert response.status_code == status.HTTP_200_OK
block_schema_response = parse_obj_as(schemas.core.BlockSchema, response.json())
assert block_schema_response.id == block_schemas[0].id
assert block_schema_response.checksum == schema_checksum
async def test_read_block_schema_with_capability_filter(
self, client, block_schemas_with_capabilities
):
result = await client.post(
"/block_schemas/filter",
json=dict(
block_schemas=dict(block_capabilities=dict(all_=["fly", "swim"]))
),
)
assert result.status_code == status.HTTP_200_OK
block_schemas = parse_obj_as(List[schemas.core.BlockSchema], result.json())
assert len(block_schemas) == 1
assert block_schemas[0].id == block_schemas_with_capabilities[0].id
result = await client.post(
"/block_schemas/filter",
json=dict(block_schemas=dict(block_capabilities=dict(all_=["fly"]))),
)
assert result.status_code == status.HTTP_200_OK
block_schemas = parse_obj_as(List[schemas.core.BlockSchema], result.json())
assert len(block_schemas) == 2
assert [block_schema.id for block_schema in block_schemas] == [
block_schemas_with_capabilities[1].id,
block_schemas_with_capabilities[0].id,
]
result = await client.post(
"/block_schemas/filter",
json=dict(block_schemas=dict(block_capabilities=dict(all_=["swim"]))),
)
assert result.status_code == status.HTTP_200_OK
block_schemas = parse_obj_as(List[schemas.core.BlockSchema], result.json())
assert len(block_schemas) == 1
assert block_schemas[0].id == block_schemas_with_capabilities[0].id
async def test_read_block_schema_by_checksum_with_version(
self, session, client, block_type_x
):
# Create two block schemas with the same checksum, but different versions
block_schema_0 = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={}, block_type_id=block_type_x.id, version="1.0.1"
),
)
await session.commit()
block_schema_1 = await models.block_schemas.create_block_schema(
session=session,
block_schema=schemas.actions.BlockSchemaCreate(
fields={}, block_type_id=block_type_x.id, version="1.1.0"
),
)
await session.commit()
assert block_schema_0.checksum == block_schema_1.checksum
assert block_schema_0.id != block_schema_1.id
# Read first version with version query parameter
response_1 = await client.get(
f"/block_schemas/checksum/{block_schema_0.checksum}?version=1.0.1"
)
assert response_1.status_code == status.HTTP_200_OK
block_schema_response_1 = parse_obj_as(
schemas.core.BlockSchema, response_1.json()
)
assert block_schema_response_1.id == block_schema_0.id
# Read second version with version query parameter
response_2 = await client.get(
f"/block_schemas/checksum/{block_schema_1.checksum}?version=1.1.0"
)
assert response_2.status_code == status.HTTP_200_OK
block_schema_response_2 = parse_obj_as(
schemas.core.BlockSchema, response_2.json()
)
assert block_schema_response_2.id == block_schema_1.id
# Read without version. Should return most recently created block schema.
response_3 = await client.get(
f"/block_schemas/checksum/{block_schema_0.checksum}"
)
assert response_3.status_code == status.HTTP_200_OK
block_schema_response_3 = parse_obj_as(
schemas.core.BlockSchema, response_3.json()
)
assert block_schema_response_3.id == block_schema_1.id
| TestReadBlockSchema |
python | ansible__ansible | lib/ansible/galaxy/collection/gpg.py | {
"start": 5193,
"end": 5311
} | class ____(GpgBaseError):
"""No passphrase was supplied."""
@dataclass(frozen=True, slots=True)
| GpgMissingPassPhrase |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 11653,
"end": 11880
} | class ____(BatchDefinitionError):
def __init__(self, name: str) -> None:
super().__init__(
f"BatchDefinition '{name}' not found. Please check the name and try again."
)
| BatchDefinitionNotFoundError |
python | ansible__ansible | test/lib/ansible_test/_internal/host_configs.py | {
"start": 11286,
"end": 12945
} | class ____(RemoteConfig, ControllerHostConfig, PosixConfig):
"""Configuration for a POSIX remote host."""
become: t.Optional[str] = None
def get_defaults(self, context: HostContext) -> PosixRemoteCompletionConfig:
"""Return the default settings."""
# pylint: disable=unexpected-keyword-arg # see: https://github.com/PyCQA/pylint/issues/7434
return filter_completion(remote_completion()).get(self.name) or remote_completion().get(self.platform) or PosixRemoteCompletionConfig(
name=self.name,
placeholder=True,
)
def get_default_targets(self, context: HostContext) -> list[ControllerConfig]:
"""Return the default targets for this host config."""
if self.name in filter_completion(remote_completion()):
defaults = self.get_defaults(context)
pythons = {version: defaults.get_python_path(version) for version in defaults.supported_pythons}
else:
pythons = {context.controller_config.python.version: context.controller_config.python.path}
return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in pythons.items()]
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
assert isinstance(defaults, PosixRemoteCompletionConfig)
super().apply_defaults(context, defaults)
self.become = self.become or defaults.become
@property
def have_root(self) -> bool:
"""True if root is available, otherwise False."""
return True
@dataclasses.dataclass
| PosixRemoteConfig |
python | pypa__pip | src/pip/_internal/resolution/resolvelib/base.py | {
"start": 3515,
"end": 5047
} | class ____:
@property
def project_name(self) -> NormalizedName:
"""The "project name" of the candidate.
This is different from ``name`` if this candidate contains extras,
in which case ``name`` would contain the ``[...]`` part, while this
refers to the name of the project.
"""
raise NotImplementedError("Override in subclass")
@property
def name(self) -> str:
"""The name identifying this candidate in the resolver.
This is different from ``project_name`` if this candidate contains
extras, where ``project_name`` would not contain the ``[...]`` part.
"""
raise NotImplementedError("Override in subclass")
@property
def version(self) -> Version:
raise NotImplementedError("Override in subclass")
@property
def is_installed(self) -> bool:
raise NotImplementedError("Override in subclass")
@property
def is_editable(self) -> bool:
raise NotImplementedError("Override in subclass")
@property
def source_link(self) -> Link | None:
raise NotImplementedError("Override in subclass")
def iter_dependencies(self, with_requires: bool) -> Iterable[Requirement | None]:
raise NotImplementedError("Override in subclass")
def get_install_requirement(self) -> InstallRequirement | None:
raise NotImplementedError("Override in subclass")
def format_for_error(self) -> str:
raise NotImplementedError("Subclass should override")
| Candidate |
python | getsentry__sentry | tests/sentry/tasks/test_post_process.py | {
"start": 3763,
"end": 4231
} | class ____:
def __init__(self, expected, group=None):
self.expected = expected
self.expected_group = group
def __eq__(self, other):
matching_id = other.event_id == self.expected.event_id
if self.expected_group:
return (
matching_id
and self.expected_group == other.group
and self.expected_group.id == other.group_id
)
return matching_id
| EventMatcher |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 209654,
"end": 210444
} | class ____(TestCase):
def test_no_iterables(self):
actual = list(mi.filter_map(lambda _: None, []))
expected = []
self.assertEqual(actual, expected)
def test_filter(self):
actual = list(mi.filter_map(lambda _: None, [1, 2, 3]))
expected = []
self.assertEqual(actual, expected)
def test_map(self):
actual = list(mi.filter_map(lambda x: x + 1, [1, 2, 3]))
expected = [2, 3, 4]
self.assertEqual(actual, expected)
def test_filter_map(self):
actual = list(
mi.filter_map(
lambda x: int(x) if x.isnumeric() else None,
['1', 'a', '2', 'b', '3'],
)
)
expected = [1, 2, 3]
self.assertEqual(actual, expected)
| FilterMapTests |
python | bokeh__bokeh | src/bokeh/core/property/alias.py | {
"start": 1700,
"end": 2639
} | class ____(Property[T]):
"""
Alias another property of a model.
Example:
Consider the following class definitions:
.. code-block:: python
from bokeh.model import Model
from bokeh.properties import Alias, Int
class Parent(Model):
width = Int()
class Child(Parent):
plot_width = Alias("width")
"""
name: str
_help: str | None
# Alias is somewhat a quasi-property
readonly: ClassVar[bool] = False
serialized: ClassVar[bool] = False
_default = None
def __init__(self, aliased_name: str, *, help: str | None = None) -> None:
self.aliased_name = aliased_name
self._help = help
self.alternatives = []
self.assertions = []
def make_descriptors(self, base_name: str) -> list[PropertyDescriptor[T]]:
return [ AliasPropertyDescriptor(base_name, self) ]
| Alias |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/visitors.py | {
"start": 22320,
"end": 36065
} | class ____(CloningExternalTraversal):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.replacement_traverse` function.
Direct usage of the :func:`.visitors.replacement_traverse` function is
usually preferred.
"""
__slots__ = ()
def replace(
self, elem: ExternallyTraversible
) -> Optional[ExternallyTraversible]:
"""Receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
@overload
def traverse(self, obj: Literal[None]) -> None: ...
@overload
def traverse(
self, obj: ExternallyTraversible
) -> ExternallyTraversible: ...
def traverse(
self, obj: Optional[ExternallyTraversible]
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure."""
def replace(
element: ExternallyTraversible,
**kw: Any,
) -> Optional[ExternallyTraversible]:
for v in self.visitor_iterator:
e = cast(ReplacingExternalTraversal, v).replace(element)
if e is not None:
return e
return None
return replacement_traverse(obj, self.__traverse_options__, replace)
# backwards compatibility
Traversible = Visitable
ClauseVisitor = ExternalTraversal
CloningVisitor = CloningExternalTraversal
ReplacingCloningVisitor = ReplacingExternalTraversal
def iterate(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any] = util.EMPTY_DICT,
) -> Iterator[ExternallyTraversible]:
r"""Traverse the given expression structure, returning an iterator.
Traversal is configured to be breadth-first.
The central API feature used by the :func:`.visitors.iterate`
function is the
:meth:`_expression.ClauseElement.get_children` method of
:class:`_expression.ClauseElement` objects. This method should return all
the :class:`_expression.ClauseElement` objects which are associated with a
particular :class:`_expression.ClauseElement` object. For example, a
:class:`.Case` structure will refer to a series of
:class:`_expression.ColumnElement` objects within its "whens" and "else\_"
member variables.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
"""
if obj is None:
return
yield obj
children = obj.get_children(**opts)
if not children:
return
stack = deque([children])
while stack:
t_iterator = stack.popleft()
for t in t_iterator:
yield t
stack.append(t.get_children(**opts))
@overload
def traverse_using(
iterator: Iterable[ExternallyTraversible],
obj: Literal[None],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> None: ...
@overload
def traverse_using(
iterator: Iterable[ExternallyTraversible],
obj: ExternallyTraversible,
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> ExternallyTraversible: ...
def traverse_using(
iterator: Iterable[ExternallyTraversible],
obj: Optional[ExternallyTraversible],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> Optional[ExternallyTraversible]:
"""Visit the given expression structure using the given iterator of
objects.
:func:`.visitors.traverse_using` is usually called internally as the result
of the :func:`.visitors.traverse` function.
:param iterator: an iterable or sequence which will yield
:class:`_expression.ClauseElement`
structures; the iterator is assumed to be the
product of the :func:`.visitors.iterate` function.
:param obj: the :class:`_expression.ClauseElement`
that was used as the target of the
:func:`.iterate` function.
:param visitors: dictionary of visit functions. See :func:`.traverse`
for details on this dictionary.
.. seealso::
:func:`.traverse`
"""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
@overload
def traverse(
obj: Literal[None],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> None: ...
@overload
def traverse(
obj: ExternallyTraversible,
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> ExternallyTraversible: ...
def traverse(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure using the default
iterator.
e.g.::
from sqlalchemy.sql import visitors
stmt = select(some_table).where(some_table.c.foo == "bar")
def visit_bindparam(bind_param):
print("found bound value: %s" % bind_param.value)
visitors.traverse(stmt, {}, {"bindparam": visit_bindparam})
The iteration of objects uses the :func:`.visitors.iterate` function,
which does a breadth-first traversal using a stack.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
:param visitors: dictionary of visit functions. The dictionary should
have strings as keys, each of which would correspond to the
``__visit_name__`` of a particular kind of SQL expression object, and
callable functions as values, each of which represents a visitor function
for that kind of object.
"""
return traverse_using(iterate(obj, opts), obj, visitors)
@overload
def cloned_traverse(
obj: Literal[None],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> None: ...
# a bit of controversy here, as the clone of the lead element
# *could* in theory replace with an entirely different kind of element.
# however this is really not how cloned_traverse is ever used internally
# at least.
@overload
def cloned_traverse(
obj: _ET,
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> _ET: ...
def cloned_traverse(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> Optional[ExternallyTraversible]:
"""Clone the given expression structure, allowing modifications by
visitors for mutable objects.
Traversal usage is the same as that of :func:`.visitors.traverse`.
The visitor functions present in the ``visitors`` dictionary may also
modify the internals of the given structure as the traversal proceeds.
The :func:`.cloned_traverse` function does **not** provide objects that are
part of the :class:`.Immutable` interface to the visit methods (this
primarily includes :class:`.ColumnClause`, :class:`.Column`,
:class:`.TableClause` and :class:`.Table` objects). As this traversal is
only intended to allow in-place mutation of objects, :class:`.Immutable`
objects are skipped. The :meth:`.Immutable._clone` method is still called
on each object to allow for objects to replace themselves with a different
object based on a clone of their sub-internals (e.g. a
:class:`.ColumnClause` that clones its subquery to return a new
:class:`.ColumnClause`).
.. versionchanged:: 2.0 The :func:`.cloned_traverse` function omits
objects that are part of the :class:`.Immutable` interface.
The central API feature used by the :func:`.visitors.cloned_traverse`
and :func:`.visitors.replacement_traverse` functions, in addition to the
:meth:`_expression.ClauseElement.get_children`
function that is used to achieve
the iteration, is the :meth:`_expression.ClauseElement._copy_internals`
method.
For a :class:`_expression.ClauseElement`
structure to support cloning and replacement
traversals correctly, it needs to be able to pass a cloning function into
its internal members in order to make copies of them.
.. seealso::
:func:`.visitors.traverse`
:func:`.visitors.replacement_traverse`
"""
cloned: Dict[int, ExternallyTraversible] = {}
stop_on = set(opts.get("stop_on", []))
def deferred_copy_internals(
obj: ExternallyTraversible,
) -> ExternallyTraversible:
return cloned_traverse(obj, opts, visitors)
def clone(elem: ExternallyTraversible, **kw: Any) -> ExternallyTraversible:
if elem in stop_on:
return elem
else:
if id(elem) not in cloned:
if "replace" in kw:
newelem = cast(
Optional[ExternallyTraversible], kw["replace"](elem)
)
if newelem is not None:
cloned[id(elem)] = newelem
return newelem
# the _clone method for immutable normally returns "self".
# however, the method is still allowed to return a
# different object altogether; ColumnClause._clone() will
# based on options clone the subquery to which it is associated
# and return the new corresponding column.
cloned[id(elem)] = newelem = elem._clone(clone=clone, **kw)
newelem._copy_internals(clone=clone, **kw)
# however, visit methods which are tasked with in-place
# mutation of the object should not get access to the immutable
# object.
if not elem._is_immutable:
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if obj is not None:
obj = clone(
obj, deferred_copy_internals=deferred_copy_internals, **opts
)
clone = None # type: ignore[assignment] # remove gc cycles
return obj
@overload
def replacement_traverse(
obj: Literal[None],
opts: Mapping[str, Any],
replace: _TraverseTransformCallableType[Any],
) -> None: ...
@overload
def replacement_traverse(
obj: _CE,
opts: Mapping[str, Any],
replace: _TraverseTransformCallableType[Any],
) -> _CE: ...
@overload
def replacement_traverse(
obj: ExternallyTraversible,
opts: Mapping[str, Any],
replace: _TraverseTransformCallableType[Any],
) -> ExternallyTraversible: ...
def replacement_traverse(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any],
replace: _TraverseTransformCallableType[Any],
) -> Optional[ExternallyTraversible]:
"""Clone the given expression structure, allowing element
replacement by a given replacement function.
This function is very similar to the :func:`.visitors.cloned_traverse`
function, except instead of being passed a dictionary of visitors, all
elements are unconditionally passed into the given replace function.
The replace function then has the option to return an entirely new object
which will replace the one given. If it returns ``None``, then the object
is kept in place.
The difference in usage between :func:`.visitors.cloned_traverse` and
:func:`.visitors.replacement_traverse` is that in the former case, an
already-cloned object is passed to the visitor function, and the visitor
function can then manipulate the internal state of the object.
In the case of the latter, the visitor function should only return an
entirely different object, or do nothing.
The use case for :func:`.visitors.replacement_traverse` is that of
replacing a FROM clause inside of a SQL structure with a different one,
as is a common use case within the ORM.
"""
cloned = {}
stop_on = {id(x) for x in opts.get("stop_on", [])}
def deferred_copy_internals(
obj: ExternallyTraversible,
) -> ExternallyTraversible:
return replacement_traverse(obj, opts, replace)
def clone(elem: ExternallyTraversible, **kw: Any) -> ExternallyTraversible:
if (
id(elem) in stop_on
or "no_replacement_traverse" in elem._annotations
):
return elem
else:
newelem = replace(elem)
if newelem is not None:
stop_on.add(id(newelem))
return newelem # type: ignore
else:
# base "already seen" on id(), not hash, so that we don't
# replace an Annotated element with its non-annotated one, and
# vice versa
id_elem = id(elem)
if id_elem not in cloned:
if "replace" in kw:
newelem = kw["replace"](elem)
if newelem is not None:
cloned[id_elem] = newelem
return newelem # type: ignore
cloned[id_elem] = newelem = elem._clone(**kw)
newelem._copy_internals(clone=clone, **kw)
return cloned[id_elem] # type: ignore
if obj is not None:
obj = clone(
obj, deferred_copy_internals=deferred_copy_internals, **opts
)
clone = None # type: ignore[assignment] # remove gc cycles
return obj
| ReplacingExternalTraversal |
python | python__mypy | mypy/test/meta/test_update_data.py | {
"start": 703,
"end": 4814
} | class ____(Suite):
def test_update_data(self) -> None:
# Note: We test multiple testcases rather than 'test case per test case'
# so we could also exercise rewriting multiple testcases at once.
result = _run_pytest_update_data(
"""
[case testCorrect]
s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
[case testWrong]
s: str = 42 # E: wrong error
[case testXfail-xfail]
s: str = 42 # E: wrong error
[case testWrongMultiline]
s: str = 42 # E: foo \
# N: bar
[case testMissingMultiline]
s: str = 42; i: int = 'foo'
[case testExtraneous]
s: str = 'foo' # E: wrong error
[case testExtraneousMultiline]
s: str = 'foo' # E: foo \
# E: bar
[case testExtraneousMultilineNonError]
s: str = 'foo' # W: foo \
# N: bar
[case testOutCorrect]
s: str = 42
[out]
main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")
[case testOutWrong]
s: str = 42
[out]
main:1: error: foobar
[case testOutWrongIncremental]
s: str = 42
[out]
main:1: error: foobar
[out2]
main:1: error: foobar
[case testWrongMultipleFiles]
import a, b
s: str = 42 # E: foo
[file a.py]
s1: str = 42 # E: bar
[file b.py]
s2: str = 43 # E: baz
[builtins fixtures/list.pyi]
"""
)
# Assert
expected = dedent_docstring(
"""
[case testCorrect]
s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
[case testWrong]
s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
[case testXfail-xfail]
s: str = 42 # E: wrong error
[case testWrongMultiline]
s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
[case testMissingMultiline]
s: str = 42; i: int = 'foo' # E: Incompatible types in assignment (expression has type "int", variable has type "str") \\
# E: Incompatible types in assignment (expression has type "str", variable has type "int")
[case testExtraneous]
s: str = 'foo'
[case testExtraneousMultiline]
s: str = 'foo'
[case testExtraneousMultilineNonError]
s: str = 'foo'
[case testOutCorrect]
s: str = 42
[out]
main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")
[case testOutWrong]
s: str = 42
[out]
main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")
[case testOutWrongIncremental]
s: str = 42
[out]
main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")
[out2]
main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")
[case testWrongMultipleFiles]
import a, b
s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
[file a.py]
s1: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
[file b.py]
s2: str = 43 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
[builtins fixtures/list.pyi]
"""
)
assert result.input_updated == expected
| UpdateDataSuite |
python | pandas-dev__pandas | pandas/tests/plotting/test_style.py | {
"start": 197,
"end": 5000
} | class ____:
@pytest.mark.parametrize(
"num_colors, expected",
[
(3, ["red", "green", "blue"]),
(5, ["red", "green", "blue", "red", "green"]),
(7, ["red", "green", "blue", "red", "green", "blue", "red"]),
(2, ["red", "green"]),
(1, ["red"]),
],
)
def test_default_colors_named_from_prop_cycle(self, num_colors, expected):
mpl_params = {
"axes.prop_cycle": plt.cycler(color=["red", "green", "blue"]),
}
with mpl.rc_context(rc=mpl_params):
result = get_standard_colors(num_colors=num_colors)
assert result == expected
@pytest.mark.parametrize(
"num_colors, expected",
[
(1, ["b"]),
(3, ["b", "g", "r"]),
(4, ["b", "g", "r", "y"]),
(5, ["b", "g", "r", "y", "b"]),
(7, ["b", "g", "r", "y", "b", "g", "r"]),
],
)
def test_default_colors_named_from_prop_cycle_string(self, num_colors, expected):
mpl_params = {
"axes.prop_cycle": plt.cycler(color="bgry"),
}
with mpl.rc_context(rc=mpl_params):
result = get_standard_colors(num_colors=num_colors)
assert result == expected
@pytest.mark.parametrize(
"num_colors, expected_name",
[
(1, ["C0"]),
(3, ["C0", "C1", "C2"]),
(
12,
[
"C0",
"C1",
"C2",
"C3",
"C4",
"C5",
"C6",
"C7",
"C8",
"C9",
"C0",
"C1",
],
),
],
)
def test_default_colors_named_undefined_prop_cycle(self, num_colors, expected_name):
with mpl.rc_context(rc={}):
expected = [mpl.colors.to_hex(x) for x in expected_name]
result = get_standard_colors(num_colors=num_colors)
assert result == expected
@pytest.mark.parametrize(
"num_colors, expected",
[
(1, ["red", "green", (0.1, 0.2, 0.3)]),
(2, ["red", "green", (0.1, 0.2, 0.3)]),
(3, ["red", "green", (0.1, 0.2, 0.3)]),
(4, ["red", "green", (0.1, 0.2, 0.3), "red"]),
],
)
def test_user_input_color_sequence(self, num_colors, expected):
color = ["red", "green", (0.1, 0.2, 0.3)]
result = get_standard_colors(color=color, num_colors=num_colors)
assert result == expected
@pytest.mark.parametrize(
"num_colors, expected",
[
(1, ["r", "g", "b", "k"]),
(2, ["r", "g", "b", "k"]),
(3, ["r", "g", "b", "k"]),
(4, ["r", "g", "b", "k"]),
(5, ["r", "g", "b", "k", "r"]),
(6, ["r", "g", "b", "k", "r", "g"]),
],
)
def test_user_input_color_string(self, num_colors, expected):
color = "rgbk"
result = get_standard_colors(color=color, num_colors=num_colors)
assert result == expected
@pytest.mark.parametrize(
"num_colors, expected",
[
(1, [(0.1, 0.2, 0.3)]),
(2, [(0.1, 0.2, 0.3), (0.1, 0.2, 0.3)]),
(3, [(0.1, 0.2, 0.3), (0.1, 0.2, 0.3), (0.1, 0.2, 0.3)]),
],
)
def test_user_input_color_floats(self, num_colors, expected):
color = (0.1, 0.2, 0.3)
result = get_standard_colors(color=color, num_colors=num_colors)
assert result == expected
@pytest.mark.parametrize(
"color, num_colors, expected",
[
("Crimson", 1, ["Crimson"]),
("DodgerBlue", 2, ["DodgerBlue", "DodgerBlue"]),
("firebrick", 3, ["firebrick", "firebrick", "firebrick"]),
],
)
def test_user_input_named_color_string(self, color, num_colors, expected):
result = get_standard_colors(color=color, num_colors=num_colors)
assert result == expected
@pytest.mark.parametrize("color", ["", [], (), Series([], dtype="object")])
def test_empty_color_raises(self, color):
with pytest.raises(ValueError, match="Invalid color argument"):
get_standard_colors(color=color, num_colors=1)
@pytest.mark.parametrize(
"color",
[
"bad_color",
("red", "green", "bad_color"),
(0.1,),
(0.1, 0.2),
(0.1, 0.2, 0.3, 0.4, 0.5), # must be either 3 or 4 floats
],
)
def test_bad_color_raises(self, color):
with pytest.raises(ValueError, match="Invalid color"):
get_standard_colors(color=color, num_colors=5)
| TestGetStandardColors |
python | bokeh__bokeh | src/bokeh/models/plots.py | {
"start": 3198,
"end": 32624
} | class ____(LayoutDOM):
''' Model representing a plot, containing glyphs, guides, annotations.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def select(self, *args, **kwargs):
''' Query this object and all of its references for objects that
match the given selector.
There are a few different ways to call the ``select`` method.
The most general is to supply a JSON-like query dictionary as the
single argument or as keyword arguments:
Args:
selector (JSON-like) : some sample text
Keyword Arguments:
kwargs : query dict key/values as keyword arguments
Additionally, for compatibility with ``Model.select``, a selector
dict may be passed as ``selector`` keyword argument, in which case
the value of ``kwargs['selector']`` is used for the query.
For convenience, queries on just names can be made by supplying
the ``name`` string as the single parameter:
Args:
name (str) : the name to query on
Also queries on just type can be made simply by supplying the
``Model`` subclass as the single parameter:
Args:
type (Model) : the type to query on
Returns:
seq[Model]
Examples:
.. code-block:: python
# These three are equivalent
p.select(selector={"type": HoverTool})
p.select({"type": HoverTool})
p.select(HoverTool)
# These two are also equivalent
p.select({"name": "mycircle"})
p.select("mycircle")
# Keyword arguments can be supplied in place of selector dict
p.select({"name": "foo", "type": HoverTool})
p.select(name="foo", type=HoverTool)
'''
selector = _select_helper(args, kwargs)
# Want to pass selector that is a dictionary
return _list_attr_splat(find(self.references(), selector))
def row(self, row, gridplot):
''' Return whether this plot is in a given row of a GridPlot.
Args:
row (int) : index of the row to test
gridplot (GridPlot) : the GridPlot to check
Returns:
bool
'''
return self in gridplot.row(row)
def column(self, col, gridplot):
''' Return whether this plot is in a given column of a GridPlot.
Args:
col (int) : index of the column to test
gridplot (GridPlot) : the GridPlot to check
Returns:
bool
'''
return self in gridplot.column(col)
def _axis(self, *sides: PlaceType):
objs: list[Model] = []
for side in sides:
objs.extend(getattr(self, side, []))
axis = [obj for obj in objs if isinstance(obj, Axis)]
return _list_attr_splat(axis)
@property
def xaxis(self):
''' Splattable list of :class:`~bokeh.models.axes.Axis` objects for the x dimension.
'''
return self._axis("above", "below")
@property
def yaxis(self):
''' Splattable list of :class:`~bokeh.models.axes.Axis` objects for the y dimension.
'''
return self._axis("left", "right")
@property
def axis(self):
''' Splattable list of :class:`~bokeh.models.axes.Axis` objects.
'''
return _list_attr_splat(self.xaxis + self.yaxis)
@property
def legend(self):
''' Splattable list of |Legend| objects.
'''
panels = self.above + self.below + self.left + self.right + self.center
legends = [obj for obj in panels if isinstance(obj, Legend)]
return _legend_attr_splat(legends)
@property
def hover(self):
''' Splattable list of :class:`~bokeh.models.tools.HoverTool` objects.
'''
hovers = [obj for obj in self.tools if isinstance(obj, HoverTool)]
return _list_attr_splat(hovers)
def _grid(self, dimension: Literal[0, 1]):
grid = [obj for obj in self.center if isinstance(obj, Grid) and obj.dimension == dimension]
return _list_attr_splat(grid)
@property
def xgrid(self):
''' Splattable list of :class:`~bokeh.models.grids.Grid` objects for the x dimension.
'''
return self._grid(0)
@property
def ygrid(self):
''' Splattable list of :class:`~bokeh.models.grids.Grid` objects for the y dimension.
'''
return self._grid(1)
@property
def grid(self):
''' Splattable list of :class:`~bokeh.models.grids.Grid` objects.
'''
return _list_attr_splat(self.xgrid + self.ygrid)
@property
def tools(self) -> list[Tool]:
return self.toolbar.tools
@tools.setter
def tools(self, tools: list[Tool]):
self.toolbar.tools = tools
def add_layout(self, obj: Renderer | StyledElement, place: PlaceType = "center") -> None:
''' Adds an object to the plot in the specified place.
If the renderer is already a part of a plot, this operation will move
it to the new location. If you need finer control than this, you can
manipulate ``left``, ``right``, ``above``, ``below`` or ``center``
Plot's properties manually.
Args:
obj (Renderer) : the object to add to the Plot
place (str, optional) : where to add the object (default: 'center')
Valid places are: 'left', 'right', 'above', 'below', 'center'.
Returns:
None
'''
if place not in Place:
from ..util.strings import nice_join
raise ValueError(
f"Invalid place '{place}' specified. Valid place values are: {nice_join(Place)}",
)
for name in Place:
panel = getattr(self, name)
while obj in panel:
panel.remove(obj)
getattr(self, place).append(obj)
def add_tools(self, *tools: Tool | str) -> None:
''' Adds tools to the plot.
Args:
*tools (Tool) : the tools to add to the Plot
Returns:
None
'''
for tool in tools:
if isinstance(tool, str):
tool_obj = Tool.from_string(tool)
elif isinstance(tool, Tool):
tool_obj = tool
else:
raise ValueError(f"expected a string or Tool instance, got {tool!r}")
self.toolbar.tools.append(tool_obj)
def remove_tools(self, *tools: Tool) -> None:
''' Removes tools from the plot.
Args:
*tools (Tool) : the tools to remove from the Plot
Returns:
None
'''
for tool in tools:
if not isinstance(tool, Tool):
raise ValueError("All arguments to remove_tool must be Tool subclasses.")
elif tool not in self.toolbar.tools:
from ..util.strings import nice_join
raise ValueError(f"Invalid tool {tool} specified. Available tools are {nice_join(self.toolbar.tools)}")
self.toolbar.tools.remove(tool)
@overload
def add_glyph(self, glyph: Glyph, **kwargs: Any) -> GlyphRenderer: ...
@overload
def add_glyph(self, source: ColumnarDataSource, glyph: Glyph, **kwargs: Any) -> GlyphRenderer: ...
def add_glyph(self, source_or_glyph: Glyph | ColumnarDataSource, glyph: Glyph | None = None, **kwargs: Any) -> GlyphRenderer:
''' Adds a glyph to the plot with associated data sources and ranges.
This function will take care of creating and configuring a Glyph object,
and then add it to the plot's list of renderers.
Args:
source (DataSource) : a data source for the glyphs to all use
glyph (Glyph) : the glyph to add to the Plot
Keyword Arguments:
Any additional keyword arguments are passed on as-is to the
Glyph initializer.
Returns:
GlyphRenderer
'''
if isinstance(source_or_glyph, ColumnarDataSource):
source = source_or_glyph
else:
source, glyph = ColumnDataSource(), source_or_glyph
if not isinstance(source, DataSource):
raise ValueError("'source' argument to add_glyph() must be DataSource subclass")
if not isinstance(glyph, Glyph):
raise ValueError("'glyph' argument to add_glyph() must be Glyph subclass")
g = GlyphRenderer(data_source=source, glyph=glyph, **kwargs)
self.renderers.append(g)
return g
def add_tile(self, tile_source: TileSource | xyzservices.TileProvider | str, retina: bool = False, **kwargs: Any) -> TileRenderer:
''' Adds new ``TileRenderer`` into ``Plot.renderers``
Args:
tile_source (TileSource, xyzservices.TileProvider, str) :
A tile source instance which contain tileset configuration
retina (bool) :
Whether to use retina version of tiles (if available)
Keyword Arguments:
Additional keyword arguments are passed on as-is to the tile renderer
Returns:
TileRenderer : TileRenderer
'''
if not isinstance(tile_source, TileSource):
import xyzservices
if isinstance(tile_source, xyzservices.TileProvider):
selected_provider = tile_source
# allow the same string input you can now pass to get_provider
elif isinstance(tile_source, str):
# Mapping of custom keys to those used in xyzservices
tile_source = tile_source.lower()
if tile_source == "esri_imagery":
tile_source = "esri_worldimagery"
if tile_source == "osm":
tile_source = "openstreetmap_mapnik"
if tile_source.startswith("stamen"):
tile_source = f"stadia.{tile_source}"
if "retina" in tile_source:
tile_source = tile_source.replace("retina", "")
retina = True
selected_provider = xyzservices.providers.query_name(tile_source)
scale_factor = "@2x" if retina else None
tile_source = WMTSTileSource(
url=selected_provider.build_url(scale_factor=scale_factor),
attribution=selected_provider.html_attribution,
min_zoom=selected_provider.get("min_zoom", 0),
max_zoom=selected_provider.get("max_zoom", 30),
)
tile_renderer = TileRenderer(tile_source=tile_source, **kwargs)
self.renderers.append(tile_renderer)
return tile_renderer
@contextmanager
def hold(self, *, render: bool) -> Generator[None, None, None]:
''' Takes care of turning a property on and off within a scope.
Args:
render (bool) :
Turns the property hold_render on and off.
'''
if render:
self.hold_render = True
yield
self.hold_render = False
@error(REQUIRED_RANGE)
def _check_required_range(self) -> str | None:
missing: list[str] = []
if not self.x_range: missing.append('x_range')
if not self.y_range: missing.append('y_range')
if missing:
return ", ".join(missing) + f" [{self}]"
@error(REQUIRED_SCALE)
def _check_required_scale(self) -> str | None:
missing: list[str] = []
if not self.x_scale: missing.append('x_scale')
if not self.y_scale: missing.append('y_scale')
if missing:
return ", ".join(missing) + f" [{self}]"
@error(INCOMPATIBLE_SCALE_AND_RANGE)
def _check_compatible_scale_and_ranges(self) -> str | None:
incompatible: list[str] = []
x_ranges = list(self.extra_x_ranges.values())
if self.x_range: x_ranges.append(self.x_range)
y_ranges = list(self.extra_y_ranges.values())
if self.y_range: y_ranges.append(self.y_range)
if self.x_scale is not None:
for rng in x_ranges:
if isinstance(rng, (DataRange1d, Range1d)) and not isinstance(self.x_scale, (LinearScale, LogScale)):
incompatible.append(f"incompatibility on x-dimension: {rng}, {self.x_scale}")
elif isinstance(rng, FactorRange) and not isinstance(self.x_scale, CategoricalScale):
incompatible.append(f"incompatibility on x-dimension: {rng}, {self.x_scale}")
if self.y_scale is not None:
for rng in y_ranges:
if isinstance(rng, (DataRange1d, Range1d)) and not isinstance(self.y_scale, (LinearScale, LogScale)):
incompatible.append(f"incompatibility on y-dimension: {rng}, {self.y_scale}")
elif isinstance(rng, FactorRange) and not isinstance(self.y_scale, CategoricalScale):
incompatible.append(f"incompatibility on y-dimension: {rng}, {self.y_scale}")
if incompatible:
return ", ".join(incompatible) + f" [{self}]"
@warning(MISSING_RENDERERS)
def _check_missing_renderers(self) -> str | None:
if len(self.renderers) == 0 and len([x for x in self.center if isinstance(x, Annotation)]) == 0:
return str(self)
@error(BAD_EXTRA_RANGE_NAME)
def _check_bad_extra_range_name(self) -> str | None:
msg: str = ""
valid = {
f'{axis}_name': {'default', *getattr(self, f"extra_{axis}s")}
for axis in ("x_range", "y_range")
}
for place in [*list(Place), 'renderers']:
for ref in getattr(self, place):
bad = ', '.join(
f"{axis}='{getattr(ref, axis)}'"
for axis, keys in valid.items()
if getattr(ref, axis, 'default') not in keys
)
if bad:
msg += (", " if msg else "") + f"{bad} [{ref}]"
if msg:
return msg
x_range = Instance(Range, default=InstanceDefault(DataRange1d), help="""
The (default) data range of the horizontal dimension of the plot.
""")
y_range = Instance(Range, default=InstanceDefault(DataRange1d), help="""
The (default) data range of the vertical dimension of the plot.
""")
x_scale = Instance(Scale, default=InstanceDefault(LinearScale), help="""
What kind of scale to use to convert x-coordinates in data space
into x-coordinates in screen space.
""")
y_scale = Instance(Scale, default=InstanceDefault(LinearScale), help="""
What kind of scale to use to convert y-coordinates in data space
into y-coordinates in screen space.
""")
extra_x_ranges = Dict(String, Instance(Range), help="""
Additional named ranges to make available for mapping x-coordinates.
This is useful for adding additional axes.
""")
extra_y_ranges = Dict(String, Instance(Range), help="""
Additional named ranges to make available for mapping y-coordinates.
This is useful for adding additional axes.
""")
extra_x_scales = Dict(String, Instance(Scale), help="""
Additional named scales to make available for mapping x-coordinates.
This is useful for adding additional axes.
.. note:: This feature is experimental and may change in the short term.
""")
extra_y_scales = Dict(String, Instance(Scale), help="""
Additional named scales to make available for mapping y-coordinates.
This is useful for adding additional axes.
.. note:: This feature is experimental and may change in the short term.
""")
window_axis = Enum(WindowAxis, default="none", help="""
An axis to use for windowed auto-ranging when there are data ranges
present on the plot. For example, if ``window_axis`` is set to the
value ``"x"`` then any data ranges in the y-dimension will compute their
auto-ranged extents using only data inside the range bounds for the
x-axis as configured in the current viewport.
If set to "none" (the default) then auto-ranging will use all available
data, regardless of viewport.
""")
hidpi = Bool(default=True, help="""
Whether to use HiDPI mode when available.
""")
title = Either(Null, Instance(Title), default=InstanceDefault(Title, text=""), help="""
A title for the plot. Can be a text string or a Title annotation.
""").accepts(String, lambda text: Title(text=text))
title_location = Nullable(Enum(Location), default="above", help="""
Where the title will be located. Titles on the left or right side
will be rotated.
""")
outline_props = Include(ScalarLineProps, prefix="outline", help="""
The {prop} for the plot border outline.
""")
outline_line_color = Override(default="#e5e5e5")
renderers = List(Instance(Renderer), help="""
A list of all glyph renderers for this plot.
This property can be manipulated by hand, but the ``add_glyph`` is
recommended to help make sure all necessary setup is performed.
""")
toolbar = Instance(Toolbar, default=InstanceDefault(Toolbar), help="""
The toolbar associated with this plot which holds all the tools. It is
automatically created with the plot if necessary.
""")
toolbar_location = Nullable(Enum(Location), default="right", help="""
Where the toolbar will be located. If set to None, no toolbar
will be attached to the plot.
""")
toolbar_sticky = Bool(default=True, help="""
Stick the toolbar to the edge of the plot. Default: True. If False,
the toolbar will be outside of the axes, titles etc.
""")
toolbar_inner = Bool(default=False, help="""
Locate the toolbar inside the frame. Setting this property to ``True``
makes most sense with auto-hidden toolbars.
""")
left = List(Either(Instance(Renderer), Instance(StyledElement)), help="""
A list of renderers to occupy the area to the left of the plot.
""")
right = List(Either(Instance(Renderer), Instance(StyledElement)), help="""
A list of renderers to occupy the area to the right of the plot.
""")
above = List(Either(Instance(Renderer), Instance(StyledElement)), help="""
A list of renderers to occupy the area above of the plot.
""")
below = List(Either(Instance(Renderer), Instance(StyledElement)), help="""
A list of renderers to occupy the area below of the plot.
""")
center = List(Either(Instance(Renderer), Instance(StyledElement)), help="""
A list of renderers to occupy the center area (frame) of the plot.
""")
width: int | None = Override(default=600)
height: int | None = Override(default=600)
frame_width = Nullable(Int, help="""
The width of a plot frame or the inner width of a plot, excluding any
axes, titles, border padding, etc.
""")
frame_height = Nullable(Int, help="""
The height of a plot frame or the inner height of a plot, excluding any
axes, titles, border padding, etc.
""")
frame_align = Either(Bool, LRTB(Optional(Bool)), default=True, help="""
Allows to specify which frame edges to align in multiple-plot layouts.
The default is to align all edges, but users can opt-out from alignment
of each individual edge or all edges. Note also that other properties
may disable alignment of certain edges, especially when using fixed frame
size (``frame_width`` and ``frame_height`` properties).
""")
inner_width = Readonly(Int, help="""
This is the exact width of the plotting canvas, i.e. the width of
the actual plot, without toolbars etc. Note this is computed in a
web browser, so this property will work only in backends capable of
bidirectional communication (server, notebook).
.. note::
This is an experimental feature and the API may change in near future.
""")
inner_height = Readonly(Int, help="""
This is the exact height of the plotting canvas, i.e. the height of
the actual plot, without toolbars etc. Note this is computed in a
web browser, so this property will work only in backends capable of
bidirectional communication (server, notebook).
.. note::
This is an experimental feature and the API may change in near future.
""")
outer_width = Readonly(Int, help="""
This is the exact width of the layout, i.e. the height of
the actual plot, with toolbars etc. Note this is computed in a
web browser, so this property will work only in backends capable of
bidirectional communication (server, notebook).
.. note::
This is an experimental feature and the API may change in near future.
""")
outer_height = Readonly(Int, help="""
This is the exact height of the layout, i.e. the height of
the actual plot, with toolbars etc. Note this is computed in a
web browser, so this property will work only in backends capable of
bidirectional communication (server, notebook).
.. note::
This is an experimental feature and the API may change in near future.
""")
background_fill_props = Include(ScalarFillProps, prefix="background", help="""
The {prop} for the plot background style.
""")
background_hatch_props = Include(ScalarHatchProps, prefix="background", help="""
The {prop} for the plot background style.
""")
background_fill_color = Override(default='#ffffff')
border_line_props = Include(ScalarLineProps, prefix="border", help="""
The {prop} for the plot border style.
""")
border_fill_props = Include(ScalarFillProps, prefix="border", help="""
The {prop} for the plot border style.
""")
border_hatch_props = Include(ScalarHatchProps, prefix="border", help="""
The {prop} for the plot border style.
""")
border_line_color = Override(default=None)
border_fill_color = Override(default='#ffffff')
min_border_top = Nullable(Int, help="""
Minimum size in pixels of the padding region above the top of the
central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border_bottom = Nullable(Int, help="""
Minimum size in pixels of the padding region below the bottom of
the central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border_left = Nullable(Int, help="""
Minimum size in pixels of the padding region to the left of
the central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border_right = Nullable(Int, help="""
Minimum size in pixels of the padding region to the right of
the central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border = Nullable(Int, default=5, help="""
A convenience property to set all all the ``min_border_X`` properties
to the same value. If an individual border property is explicitly set,
it will override ``min_border``.
""")
lod_factor = Int(10, help="""
Decimation factor to use when applying level-of-detail mode.
A ``lod_factor`` of N means that only every Nth point in the data source
will be drawn while interactive events are active. For example, if
``lod_factor=200`` then only every 200th point will be drawn.
The level-of-detail mode is intended to preserve interactive response
times on HTML canvas plots when there are a large number of data points.
Note that a possible alternative to level-of-detail mode is using the
WebGL ``output_backend``. WebGL rendering may allow very large data sets
to remain interactive without any level-of-detail downsampling. When
WebGL output is enabled, level-of-detail mode is not used.
""")
lod_threshold = Nullable(Int, default=2000, help="""
A number of data points, above which level-of-detail downsampling may
be performed by glyph renderers. For example, if ``lod_threshold=10000``
then level-of-detail mode will not be activated if there are fewer than
10000 points in the data source.
Set to ``None`` to disable any level-of-detail downsampling at all.
""")
lod_interval = Int(300, help="""
Interval (in ms) during which an interactive tool event will enable
level-of-detail downsampling.
If a plot needs to be re-drawn within ``lod_interval`` milliseconds
of the last interactive event starting, then level-of-detail mode will
be activated. Larger values mean the level-of-detail mode will be
"easier" to turn on.
""")
lod_timeout = Int(500, help="""
Timeout (in ms) for checking whether interactive tool events are still
occurring. Once level-of-detail mode is enabled, a check is made every
``lod_timeout`` ms. If no interactive tool events have happened,
level-of-detail mode is disabled. Larger values mean the level-of-detail
mode will be "slower" to turn off.
""")
output_backend = Enum(OutputBackend, default="canvas", help="""
Specify the output backend for the plot area. Default is HTML5 Canvas.
.. note::
When set to ``webgl``, glyphs without a WebGL rendering implementation
will fall back to rendering onto 2D canvas.
""")
match_aspect = Bool(default=False, help="""
Specify the aspect ratio behavior of the plot. Aspect ratio is defined as
the ratio of width over height. This property controls whether Bokeh should
attempt to match the (width/height) of *data space* to the (width/height)
in pixels of *screen space*.
Default is ``False`` which indicates that the *data* aspect ratio and the
*screen* aspect ratio vary independently. ``True`` indicates that the plot
aspect ratio of the axes will match the aspect ratio of the pixel extent
the axes. The end result is that a 1x1 area in data space is a square in
pixels, and conversely that a 1x1 pixel is a square in data units.
.. note::
This setting only takes effect when there are two dataranges. This
setting only sets the initial plot draw and subsequent resets. It is
possible for tools (single axis zoom, unconstrained box zoom) to
change the aspect ratio.
.. warning::
This setting is incompatible with linking dataranges across multiple
plots. Doing so may result in undefined behavior.
""")
aspect_scale = Float(default=1, help="""
A value to be given for increased aspect ratio control. This value is added
multiplicatively to the calculated value required for ``match_aspect``.
``aspect_scale`` is defined as the ratio of width over height of the figure.
For example, a plot with ``aspect_scale`` value of 2 will result in a
square in *data units* to be drawn on the screen as a rectangle with a
pixel width twice as long as its pixel height.
.. note::
This setting only takes effect if ``match_aspect`` is set to ``True``.
""")
reset_policy = Enum(ResetPolicy, default="standard", help="""
How a plot should respond to being reset. By default, the standard actions
are to clear any tool state history, return plot ranges to their original
values, undo all selections, and emit a ``Reset`` event. If customization
is desired, this property may be set to ``"event_only"``, which will
suppress all of the actions except the Reset event.
""")
hold_render = Bool(default=False, help="""
When set to True all requests to repaint the plot will be hold off.
This is useful when periodically updating many glyphs. For example, let's
assume we have 10 lines on a plot, each with its own datasource. We stream
to all of them every second in a for loop like so:
.. code:: python
for line in lines:
line.stream(new_points())
The problem with this code is that every stream triggers a re-rendering of
the plot. Even tough repainting only on the last stream would produce almost
identical visual effect. Especially for lines with many points this becomes
computationally expensive and can freeze your browser. Using a convenience
method `hold`, we can control when rendering is initiated like so:
.. code:: python
with plot.hold(render=True):
for line in lines:
line.stream(new_points())
In this case we render newly appended points only after the last stream.
""")
attribution = List(Either(Instance(HTML), String), default=[], help="""
Allows to acknowledge or give credit to data, tile, etc. providers.
This can be in either HTML or plain text forms. Renderers, like
tile renderers, can provide additional attributions which will
be added after attributions provided here.
.. note::
This feature is experimental and may change in the short term.
""")
context_menu = Override(default="auto")
| Plot |
python | kamyu104__LeetCode-Solutions | Python/shortest-impossible-sequence-of-rolls.py | {
"start": 55,
"end": 427
} | class ____(object):
def shortestSequence(self, rolls, k):
"""
:type rolls: List[int]
:type k: int
:rtype: int
"""
l = 0
lookup = set()
for x in rolls:
lookup.add(x)
if len(lookup) != k:
continue
lookup.clear()
l += 1
return l+1
| Solution |
python | doocs__leetcode | solution/0000-0099/0054.Spiral Matrix/Solution.py | {
"start": 0,
"end": 555
} | class ____:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
m, n = len(matrix), len(matrix[0])
dirs = (0, 1, 0, -1, 0)
vis = [[False] * n for _ in range(m)]
i = j = k = 0
ans = []
for _ in range(m * n):
ans.append(matrix[i][j])
vis[i][j] = True
x, y = i + dirs[k], j + dirs[k + 1]
if x < 0 or x >= m or y < 0 or y >= n or vis[x][y]:
k = (k + 1) % 4
i += dirs[k]
j += dirs[k + 1]
return ans
| Solution |
python | pydata__xarray | xarray/core/indexing.py | {
"start": 61715,
"end": 62087
} | class ____(NumpyIndexingAdapter):
__slots__ = ("array",)
def __init__(self, array):
if not hasattr(array, "__array_function__"):
raise TypeError(
"NdArrayLikeIndexingAdapter must wrap an object that "
"implements the __array_function__ protocol"
)
self.array = array
| NdArrayLikeIndexingAdapter |
python | pandas-dev__pandas | pandas/io/formats/xml.py | {
"start": 9541,
"end": 12100
} | class ____(_BaseXMLFormatter):
"""
Class for formatting data in xml using Python standard library
modules: `xml.etree.ElementTree` and `xml.dom.minidom`.
"""
def _build_tree(self) -> bytes:
from xml.etree.ElementTree import (
Element,
SubElement,
tostring,
)
self.root = Element(
f"{self.prefix_uri}{self.root_name}", attrib=self._other_namespaces()
)
for d in self.frame_dicts.values():
elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}")
if not self.attr_cols and not self.elem_cols:
self.elem_cols = list(d.keys())
self._build_elems(d, elem_row)
else:
elem_row = self._build_attribs(d, elem_row)
self._build_elems(d, elem_row)
self.out_xml = tostring(
self.root,
method="xml",
encoding=self.encoding,
xml_declaration=self.xml_declaration,
)
if self.pretty_print:
self.out_xml = self._prettify_tree()
if self.stylesheet is not None:
raise ValueError(
"To use stylesheet, you need lxml installed and selected as parser."
)
return self.out_xml
def _get_prefix_uri(self) -> str:
from xml.etree.ElementTree import register_namespace
uri = ""
if self.namespaces:
for p, n in self.namespaces.items():
if isinstance(p, str) and isinstance(n, str):
register_namespace(p, n)
if self.prefix:
try:
uri = f"{{{self.namespaces[self.prefix]}}}"
except KeyError as err:
raise KeyError(
f"{self.prefix} is not included in namespaces"
) from err
elif "" in self.namespaces:
uri = f"{{{self.namespaces['']}}}"
else:
uri = ""
return uri
@cache_readonly
def _sub_element_cls(self):
from xml.etree.ElementTree import SubElement
return SubElement
def _prettify_tree(self) -> bytes:
"""
Output tree for pretty print format.
This method will pretty print xml with line breaks and indentation.
"""
from xml.dom.minidom import parseString
dom = parseString(self.out_xml)
return dom.toprettyxml(indent=" ", encoding=self.encoding)
| EtreeXMLFormatter |
python | gevent__gevent | src/greentest/3.13/test_queue.py | {
"start": 22429,
"end": 22523
} | class ____(LifoQueueTest, unittest.TestCase):
queue = py_queue
@need_c_queue
| PyLifoQueueTest |
python | facebookresearch__faiss | tests/external_module_test.py | {
"start": 295,
"end": 566
} | class ____(unittest.TestCase):
"""test if we can construct a custom IDSelector"""
def test_IDSelector(self):
ids = external_module.IDSelectorModulo(3)
self.assertFalse(ids.is_member(1))
self.assertTrue(ids.is_member(3))
| TestCustomIDSelector |
python | huggingface__transformers | src/transformers/models/markuplm/feature_extraction_markuplm.py | {
"start": 924,
"end": 6443
} | class ____(FeatureExtractionMixin):
r"""
Constructs a MarkupLM feature extractor. This can be used to get a list of nodes and corresponding xpaths from HTML
strings.
This feature extractor inherits from [`~feature_extraction_utils.PreTrainedFeatureExtractor`] which contains most
of the main methods. Users should refer to this superclass for more information regarding those methods.
"""
def __init__(self, **kwargs):
requires_backends(self, ["bs4"])
super().__init__(**kwargs)
def xpath_soup(self, element):
xpath_tags = []
xpath_subscripts = []
child = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
siblings = parent.find_all(child.name, recursive=False)
xpath_tags.append(child.name)
xpath_subscripts.append(
0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
)
child = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def get_three_from_single(self, html_string):
html_code = BeautifulSoup(html_string, "html.parser")
all_doc_strings = []
string2xtag_seq = []
string2xsubs_seq = []
for element in html_code.descendants:
if isinstance(element, bs4.element.NavigableString):
if type(element.parent) is not bs4.element.Tag:
continue
text_in_this_tag = html.unescape(element).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(text_in_this_tag)
xpath_tags, xpath_subscripts = self.xpath_soup(element)
string2xtag_seq.append(xpath_tags)
string2xsubs_seq.append(xpath_subscripts)
if len(all_doc_strings) != len(string2xtag_seq):
raise ValueError("Number of doc strings and xtags does not correspond")
if len(all_doc_strings) != len(string2xsubs_seq):
raise ValueError("Number of doc strings and xsubs does not correspond")
return all_doc_strings, string2xtag_seq, string2xsubs_seq
def construct_xpath(self, xpath_tags, xpath_subscripts):
xpath = ""
for tagname, subs in zip(xpath_tags, xpath_subscripts):
xpath += f"/{tagname}"
if subs != 0:
xpath += f"[{subs}]"
return xpath
def __call__(self, html_strings) -> BatchFeature:
"""
Main method to prepare for the model one or several HTML strings.
Args:
html_strings (`str`, `list[str]`):
The HTML string or batch of HTML strings from which to extract nodes and corresponding xpaths.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **nodes** -- Nodes.
- **xpaths** -- Corresponding xpaths.
Examples:
```python
>>> from transformers import MarkupLMFeatureExtractor
>>> page_name_1 = "page1.html"
>>> page_name_2 = "page2.html"
>>> page_name_3 = "page3.html"
>>> with open(page_name_1) as f:
... single_html_string = f.read()
>>> feature_extractor = MarkupLMFeatureExtractor()
>>> # single example
>>> encoding = feature_extractor(single_html_string)
>>> print(encoding.keys())
>>> # dict_keys(['nodes', 'xpaths'])
>>> # batched example
>>> multi_html_strings = []
>>> with open(page_name_2) as f:
... multi_html_strings.append(f.read())
>>> with open(page_name_3) as f:
... multi_html_strings.append(f.read())
>>> encoding = feature_extractor(multi_html_strings)
>>> print(encoding.keys())
>>> # dict_keys(['nodes', 'xpaths'])
```"""
# Input type checking for clearer error
valid_strings = False
# Check that strings has a valid type
if isinstance(html_strings, str):
valid_strings = True
elif isinstance(html_strings, (list, tuple)):
if len(html_strings) == 0 or isinstance(html_strings[0], str):
valid_strings = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `list[str]` (batch of examples), "
f"but is of type {type(html_strings)}."
)
is_batched = isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str))
if not is_batched:
html_strings = [html_strings]
# Get nodes + xpaths
nodes = []
xpaths = []
for html_string in html_strings:
all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
nodes.append(all_doc_strings)
xpath_strings = []
for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
xpath_string = self.construct_xpath(tag_list, sub_list)
xpath_strings.append(xpath_string)
xpaths.append(xpath_strings)
# return as Dict
data = {"nodes": nodes, "xpaths": xpaths}
encoded_inputs = BatchFeature(data=data, tensor_type=None)
return encoded_inputs
__all__ = ["MarkupLMFeatureExtractor"]
| MarkupLMFeatureExtractor |
python | django__django | tests/sites_tests/tests.py | {
"start": 904,
"end": 8807
} | class ____(TestCase):
databases = {"default", "other"}
@classmethod
def setUpTestData(cls):
cls.site = Site(id=settings.SITE_ID, domain="example.com", name="example.com")
cls.site.save()
def setUp(self):
Site.objects.clear_cache()
self.addCleanup(Site.objects.clear_cache)
def test_site_manager(self):
# Make sure that get_current() does not return a deleted Site object.
s = Site.objects.get_current()
self.assertIsInstance(s, Site)
s.delete()
with self.assertRaises(ObjectDoesNotExist):
Site.objects.get_current()
def test_site_cache(self):
# After updating a Site object (e.g. via the admin), we shouldn't
# return a bogus value from the SITE_CACHE.
site = Site.objects.get_current()
self.assertEqual("example.com", site.name)
s2 = Site.objects.get(id=settings.SITE_ID)
s2.name = "Example site"
s2.save()
site = Site.objects.get_current()
self.assertEqual("Example site", site.name)
def test_delete_all_sites_clears_cache(self):
# When all site objects are deleted the cache should also
# be cleared and get_current() should raise a DoesNotExist.
self.assertIsInstance(Site.objects.get_current(), Site)
Site.objects.all().delete()
with self.assertRaises(Site.DoesNotExist):
Site.objects.get_current()
@override_settings(ALLOWED_HOSTS=["example.com"])
def test_get_current_site(self):
# The correct Site object is returned
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
site = get_current_site(request)
self.assertIsInstance(site, Site)
self.assertEqual(site.id, settings.SITE_ID)
# An exception is raised if the sites framework is installed
# but there is no matching Site
site.delete()
with self.assertRaises(ObjectDoesNotExist):
get_current_site(request)
# A RequestSite is returned if the sites framework is not installed
with self.modify_settings(INSTALLED_APPS={"remove": "django.contrib.sites"}):
site = get_current_site(request)
self.assertIsInstance(site, RequestSite)
self.assertEqual(site.name, "example.com")
@override_settings(SITE_ID=None, ALLOWED_HOSTS=["example.com"])
def test_get_current_site_no_site_id(self):
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
del settings.SITE_ID
site = get_current_site(request)
self.assertEqual(site.name, "example.com")
@override_settings(SITE_ID=None, ALLOWED_HOSTS=["example.com"])
def test_get_current_site_host_with_trailing_dot(self):
"""
The site is matched if the name in the request has a trailing dot.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com.",
"SERVER_PORT": "80",
}
site = get_current_site(request)
self.assertEqual(site.name, "example.com")
@override_settings(SITE_ID=None, ALLOWED_HOSTS=["example.com", "example.net"])
def test_get_current_site_no_site_id_and_handle_port_fallback(self):
request = HttpRequest()
s1 = self.site
s2 = Site.objects.create(domain="example.com:80", name="example.com:80")
# Host header without port
request.META = {"HTTP_HOST": "example.com"}
site = get_current_site(request)
self.assertEqual(site, s1)
# Host header with port - match, no fallback without port
request.META = {"HTTP_HOST": "example.com:80"}
site = get_current_site(request)
self.assertEqual(site, s2)
# Host header with port - no match, fallback without port
request.META = {"HTTP_HOST": "example.com:81"}
site = get_current_site(request)
self.assertEqual(site, s1)
# Host header with non-matching domain
request.META = {"HTTP_HOST": "example.net"}
with self.assertRaises(ObjectDoesNotExist):
get_current_site(request)
# Ensure domain for RequestSite always matches host header
with self.modify_settings(INSTALLED_APPS={"remove": "django.contrib.sites"}):
request.META = {"HTTP_HOST": "example.com"}
site = get_current_site(request)
self.assertEqual(site.name, "example.com")
request.META = {"HTTP_HOST": "example.com:80"}
site = get_current_site(request)
self.assertEqual(site.name, "example.com:80")
def test_domain_name_with_whitespaces(self):
# Regression for #17320
# Domain names are not allowed contain whitespace characters
site = Site(name="test name", domain="test test")
with self.assertRaises(ValidationError):
site.full_clean()
site.domain = "test\ttest"
with self.assertRaises(ValidationError):
site.full_clean()
site.domain = "test\ntest"
with self.assertRaises(ValidationError):
site.full_clean()
@override_settings(ALLOWED_HOSTS=["example.com"])
def test_clear_site_cache(self):
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
self.assertEqual(models.SITE_CACHE, {})
get_current_site(request)
expected_cache = {self.site.id: self.site}
self.assertEqual(models.SITE_CACHE, expected_cache)
with self.settings(SITE_ID=None):
get_current_site(request)
expected_cache.update({self.site.domain: self.site})
self.assertEqual(models.SITE_CACHE, expected_cache)
clear_site_cache(Site, instance=self.site, using="default")
self.assertEqual(models.SITE_CACHE, {})
@override_settings(SITE_ID=None, ALLOWED_HOSTS=["example2.com"])
def test_clear_site_cache_domain(self):
site = Site.objects.create(name="example2.com", domain="example2.com")
request = HttpRequest()
request.META = {
"SERVER_NAME": "example2.com",
"SERVER_PORT": "80",
}
get_current_site(request) # prime the models.SITE_CACHE
expected_cache = {site.domain: site}
self.assertEqual(models.SITE_CACHE, expected_cache)
# Site exists in 'default' database so using='other' shouldn't clear.
clear_site_cache(Site, instance=site, using="other")
self.assertEqual(models.SITE_CACHE, expected_cache)
# using='default' should clear.
clear_site_cache(Site, instance=site, using="default")
self.assertEqual(models.SITE_CACHE, {})
def test_unique_domain(self):
site = Site(domain=self.site.domain)
msg = "Site with this Domain name already exists."
with self.assertRaisesMessage(ValidationError, msg):
site.validate_unique()
def test_site_natural_key(self):
self.assertEqual(Site.objects.get_by_natural_key(self.site.domain), self.site)
self.assertEqual(self.site.natural_key(), (self.site.domain,))
@override_settings(SITE_ID="1")
def test_check_site_id(self):
self.assertEqual(
check_site_id(None),
[
checks.Error(
msg="The SITE_ID setting must be an integer",
id="sites.E101",
),
],
)
def test_valid_site_id(self):
for site_id in [1, None]:
with self.subTest(site_id=site_id), self.settings(SITE_ID=site_id):
self.assertEqual(check_site_id(None), [])
@override_settings(ALLOWED_HOSTS=["example.com"])
| SitesFrameworkTests |
python | weaviate__weaviate-python-client | weaviate/auth.py | {
"start": 971,
"end": 1832
} | class ____:
"""Using username and password for authentication with Resource Owner Password flow.
For some providers the scope needs to contain "offline_access" (and "openid" which is automatically added) to return
a refresh token. Without a refresh token the authentication will expire once the lifetime of the access token is up.
Scopes can be given as:
- List of strings: ["scope1", "scope2"]
- space separated string: "scope1 scope2"
"""
username: str
password: str
scope: Optional[SCOPES] = None
def __post_init__(self) -> None:
if self.scope is None:
self.scope_list: List[str] = []
elif isinstance(self.scope, str):
self.scope_list = self.scope.split(" ")
elif isinstance(self.scope, list):
self.scope_list = self.scope
@dataclass
| _ClientPassword |
python | huggingface__transformers | examples/pytorch/speech-recognition/run_speech_recognition_ctc.py | {
"start": 10925,
"end": 32867
} | class ____:
"""
Data collator that will dynamically pad the inputs received.
Args:
processor (:class:`~transformers.AutoProcessor`)
The processor used for processing the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
max_length_labels (:obj:`int`, `optional`):
Maximum length of the ``labels`` returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
processor: AutoProcessor
padding: Union[bool, str] = "longest"
pad_to_multiple_of: Optional[int] = None
pad_to_multiple_of_labels: Optional[int] = None
feature_extractor_input_name: Optional[str] = "input_values"
def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
input_features = [
{self.feature_extractor_input_name: feature[self.feature_extractor_input_name]} for feature in features
]
label_features = [{"input_ids": feature["labels"]} for feature in features]
batch = self.processor.pad(
input_features,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
labels_batch = self.processor.pad(
labels=label_features,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of_labels,
return_tensors="pt",
)
# replace padding with -100 to ignore loss correctly
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
batch["labels"] = labels
if "attention_mask" in batch:
batch["attention_mask"] = batch["attention_mask"].to(torch.long)
return batch
def create_vocabulary_from_data(
datasets: DatasetDict,
word_delimiter_token: Optional[str] = None,
unk_token: Optional[str] = None,
pad_token: Optional[str] = None,
):
# Given training and test labels create vocabulary
def extract_all_chars(batch):
all_text = " ".join(batch["target_text"])
vocab = list(set(all_text))
return {"vocab": [vocab], "all_text": [all_text]}
vocabs = datasets.map(
extract_all_chars,
batched=True,
batch_size=-1,
keep_in_memory=True,
remove_columns=datasets["train"].column_names,
)
# take union of all unique characters in each dataset
vocab_set = functools.reduce(
lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]),
vocabs.values(),
)
vocab_dict = {v: k for k, v in enumerate(sorted(vocab_set))}
# replace white space with delimiter token
if word_delimiter_token is not None:
vocab_dict[word_delimiter_token] = vocab_dict[" "]
del vocab_dict[" "]
# add unk and pad token
if unk_token is not None:
vocab_dict[unk_token] = len(vocab_dict)
if pad_token is not None:
vocab_dict[pad_token] = len(vocab_dict)
return vocab_dict
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_process_index) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_process_index):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# 1. First, let's load the dataset
raw_datasets = DatasetDict()
if training_args.do_train:
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.train_split_name,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'."
" Make sure to set `--audio_column_name` to the correct audio column - one of"
f" {', '.join(raw_datasets['train'].column_names)}."
)
if data_args.text_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--text_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets['train'].column_names)}."
)
if data_args.max_train_samples is not None:
raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
if training_args.do_eval:
raw_datasets["eval"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.eval_split_name,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
)
if data_args.max_eval_samples is not None:
raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
# 2. We remove some special characters from the datasets
# that make training complicated and do not help in transcribing the speech
# E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
# that could be easily picked up by the model
chars_to_ignore_regex = (
f"[{''.join(data_args.chars_to_ignore)}]" if data_args.chars_to_ignore is not None else None
)
text_column_name = data_args.text_column_name
def remove_special_characters(batch):
if chars_to_ignore_regex is not None:
batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
else:
batch["target_text"] = batch[text_column_name].lower() + " "
return batch
with training_args.main_process_first(desc="dataset map special characters removal"):
raw_datasets = raw_datasets.map(
remove_special_characters,
remove_columns=[text_column_name],
desc="remove special characters from datasets",
)
# save special tokens for tokenizer
word_delimiter_token = data_args.word_delimiter_token
unk_token = data_args.unk_token
pad_token = data_args.pad_token
# 3. Next, let's load the config as we might need it to create
# the tokenizer
# load config
config = AutoConfig.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
)
# 4. Next, if no tokenizer file is defined,
# we create the vocabulary of the model by extracting all unique characters from
# the training and evaluation datasets
# We need to make sure that only first rank saves vocabulary
# make sure all processes wait until vocab is created
tokenizer_name_or_path = model_args.tokenizer_name_or_path
tokenizer_kwargs = {}
if tokenizer_name_or_path is None:
# save vocab in training output dir
tokenizer_name_or_path = training_args.output_dir
vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
with training_args.main_process_first():
if os.path.isfile(vocab_file):
try:
os.remove(vocab_file)
except OSError:
# in shared file-systems it might be the case that
# two processes try to delete the vocab file at the some time
pass
with training_args.main_process_first(desc="dataset map vocabulary creation"):
if not os.path.isfile(vocab_file):
os.makedirs(tokenizer_name_or_path, exist_ok=True)
vocab_dict = create_vocabulary_from_data(
raw_datasets,
word_delimiter_token=word_delimiter_token,
unk_token=unk_token,
pad_token=pad_token,
)
# save vocab dict to be loaded into tokenizer
with open(vocab_file, "w") as file:
json.dump(vocab_dict, file)
# if tokenizer has just been created
# it is defined by `tokenizer_class` if present in config else by `model_type`
tokenizer_kwargs = {
"config": config if config.tokenizer_class is not None else None,
"tokenizer_type": (config.model_type if config.tokenizer_class is None else None),
"unk_token": unk_token,
"pad_token": pad_token,
"word_delimiter_token": word_delimiter_token,
}
# 5. Now we can instantiate the feature extractor, tokenizer and model
# Note for distributed training, the .from_pretrained methods guarantee that only
# one local process can concurrently download model & vocab.
# load feature_extractor and tokenizer
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_name_or_path,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
**tokenizer_kwargs,
)
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
)
# adapt config
config.update(
{
"feat_proj_dropout": model_args.feat_proj_dropout,
"attention_dropout": model_args.attention_dropout,
"hidden_dropout": model_args.hidden_dropout,
"final_dropout": model_args.final_dropout,
"mask_time_prob": model_args.mask_time_prob,
"mask_time_length": model_args.mask_time_length,
"mask_feature_prob": model_args.mask_feature_prob,
"mask_feature_length": model_args.mask_feature_length,
"gradient_checkpointing": training_args.gradient_checkpointing,
"layerdrop": model_args.layerdrop,
"ctc_loss_reduction": model_args.ctc_loss_reduction,
"ctc_zero_infinity": model_args.ctc_zero_infinity,
"pad_token_id": tokenizer.pad_token_id,
"vocab_size": len(tokenizer),
"activation_dropout": model_args.activation_dropout,
"add_adapter": model_args.add_adapter,
}
)
# create model
model = AutoModelForCTC.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
config=config,
token=data_args.token,
trust_remote_code=data_args.trust_remote_code,
)
# freeze encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
# 6. Now we preprocess the datasets including loading the audio, resampling and normalization
# Thankfully, `datasets` takes care of automatically loading and resampling the audio,
# so that we just need to set the correct target sampling rate and normalize the input
# via the `feature_extractor`
# make sure that dataset decodes audio with correct sampling rate
dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
if dataset_sampling_rate != feature_extractor.sampling_rate:
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name,
datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate),
)
# derive max & min input length for sample rate & max duration
max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
audio_column_name = data_args.audio_column_name
num_workers = data_args.preprocessing_num_workers
feature_extractor_input_name = feature_extractor.model_input_names[0]
# `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
phoneme_language = data_args.phoneme_language
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def prepare_dataset(batch):
# load audio
sample = batch[audio_column_name]
inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
batch[feature_extractor_input_name] = getattr(inputs, feature_extractor_input_name)[0]
# take length of raw audio waveform
batch["input_length"] = len(sample["array"].squeeze())
# encode targets
additional_kwargs = {}
if phoneme_language is not None:
additional_kwargs["phonemizer_lang"] = phoneme_language
batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
return batch
with training_args.main_process_first(desc="dataset map preprocessing"):
vectorized_datasets = raw_datasets.map(
prepare_dataset,
remove_columns=next(iter(raw_datasets.values())).column_names,
num_proc=num_workers,
desc="preprocess datasets",
)
def is_audio_in_length_range(length):
return length > min_input_length and length < max_input_length
# filter data that is shorter than min_input_length
vectorized_datasets = vectorized_datasets.filter(
is_audio_in_length_range,
num_proc=num_workers,
input_columns=["input_length"],
)
# 7. Next, we can prepare the training.
# Let's use word error rate (WER) as our evaluation metric,
# instantiate a data collator and the trainer
# Define evaluation metrics during training, *i.e.* word error rate, character error rate
eval_metrics = {metric: evaluate.load(metric, cache_dir=model_args.cache_dir) for metric in data_args.eval_metrics}
# for large datasets it is advised to run the preprocessing on a
# single machine first with ``args.preprocessing_only`` since there will mostly likely
# be a timeout when running the script in distributed mode.
# In a second step ``args.preprocessing_only`` can then be set to `False` to load the
# cached dataset
if data_args.preprocessing_only:
logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
return
# For languages like Chinese with large vocabulary size, we need to discard logits
# and only keep the argmax, otherwise we run out of memory during evaluation.
def preprocess_logits_for_metrics(logits, labels):
pred_ids = torch.argmax(logits, dim=-1)
return pred_ids, labels
def compute_metrics(pred):
pred_ids = pred.predictions[0]
pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
pred_str = tokenizer.batch_decode(pred_ids)
# we do not want to group tokens when computing the metrics
label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
return metrics
# Now save everything to be able to create a single processor later
# make sure all processes wait until data is saved
with training_args.main_process_first():
# only the main process saves them
if is_main_process(training_args.local_process_index):
# save feature extractor, tokenizer and config
feature_extractor.save_pretrained(training_args.output_dir)
tokenizer.save_pretrained(training_args.output_dir)
config.save_pretrained(training_args.output_dir)
try:
processor = AutoProcessor.from_pretrained(training_args.output_dir)
except (OSError, KeyError):
warnings.warn(
"Loading a processor from a feature extractor config that does not"
" include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
" attribute to your `preprocessor_config.json` file to suppress this warning: "
" `'processor_class': 'Wav2Vec2Processor'`",
FutureWarning,
)
processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
# Instantiate custom data collator
data_collator = DataCollatorCTCWithPadding(
processor=processor, feature_extractor_input_name=feature_extractor_input_name
)
# Initialize Trainer
trainer = Trainer(
model=model,
data_collator=data_collator,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
processing_class=processor,
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
)
# 8. Finally, we can start training
# Training
if training_args.do_train:
# use last checkpoint if exist
if os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples
if data_args.max_train_samples is not None
else len(vectorized_datasets["train"])
)
metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = (
data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
)
metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "automatic-speech-recognition",
"tags": ["automatic-speech-recognition", data_args.dataset_name],
"dataset_args": (
f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split:"
f" {data_args.eval_split_name}"
),
"dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
}
if "common_voice" in data_args.dataset_name:
kwargs["language"] = config_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results
if __name__ == "__main__":
main()
| DataCollatorCTCWithPadding |
python | huggingface__transformers | tests/models/qwen2_vl/test_modeling_qwen2_vl.py | {
"start": 5544,
"end": 14840
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Model tester for `Qwen2VLForConditionalGeneration`.
"""
all_model_classes = (
(
Qwen2VLModel,
Qwen2VLForConditionalGeneration,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {
"image-text-to-text": Qwen2VLForConditionalGeneration,
"any-to-any": Qwen2VLForConditionalGeneration,
}
_is_composite = True
def setUp(self):
self.model_tester = Qwen2VLVisionText2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Qwen2VLConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs through an error with explicit message saying what is wrong
when number of images don't match number of image tokens in the text.
Also we need to test multi-image cases when one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
model.eval()
curr_input_dict = copy.deepcopy(input_dict)
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
patch_size = config.vision_config.patch_size
one_img_length = (self.model_tester.image_size**2) // (patch_size**2)
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-one_img_length:, ...]
curr_input_dict["image_grid_thw"] = curr_input_dict["image_grid_thw"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:one_img_length]
image_grid_thw = curr_input_dict["image_grid_thw"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw)
def test_forward_with_rope_deltas_cached(self):
"""
Tests that Qwen2-VL computes new rope deltas every forward pass with new set of inputs.
Rope deltas are cached when we generate and re-used for decoding phase, byt are not reset
automatically after generation ends. See https://github.com/huggingface/transformers/pull/36013 for more
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_generative_model_classes:
model = model_class(config).to(torch_device)
# Generate and make sure rope_deltas are not `None`
self.assertTrue(model.model.rope_deltas is None)
generation_output = model.generate(
**input_dict, max_new_tokens=4, return_dict_in_generate=True, output_logits=True
)
self.assertTrue(model.model.rope_deltas is not None)
# Now if we try to do forward pass, we should get new rope logits, because cache is not passed
forward_output = model(**input_dict)
torch.testing.assert_close(
generation_output.logits[0], forward_output.logits[:, -1, :], rtol=1e-4, atol=1e-4
)
def attention_mask_padding_matches_padding_free_with_position_ids(
self, attn_implementation: str, fa_kwargs: bool = False
):
max_new_tokens = 30
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
dummy_input = inputs_dict[model_class.main_input_name]
if dummy_input.dtype in [torch.float32, torch.float16]:
dummy_input = dummy_input.to(torch.bfloat16)
# make sure that all models have enough positions for generation
if hasattr(config, "max_position_embeddings"):
config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
if 0 in inputs_dict["attention_mask"][:, -1]:
inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1)
dummy_attention_mask = inputs_dict["attention_mask"]
inputs_dict["input_ids"][~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id
model = (
model_class.from_pretrained(
tmpdirname,
dtype=torch.bfloat16,
attn_implementation=attn_implementation,
)
.to(torch_device)
.eval()
)
# flatten
padfree_inputs_dict = {
"pixel_values": inputs_dict["pixel_values"],
"image_grid_thw": inputs_dict["image_grid_thw"],
"input_ids": inputs_dict["input_ids"][dummy_attention_mask.bool()].unsqueeze(0),
}
# add position_ids
vision_position_ids, deltas = model.model.get_rope_index(
input_ids=inputs_dict["input_ids"],
image_grid_thw=inputs_dict["image_grid_thw"],
attention_mask=inputs_dict["attention_mask"],
) # [3, bs, padded-seq-len]
vision_padfree_positions = vision_position_ids[:, dummy_attention_mask.bool()].view(
3, -1
) # [3, bs*padfree-len]
text_padfree_positions = torch.cat(
[torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()]
) # [1, bs*padfree-len]
text_padfree_positions = text_padfree_positions.long().unsqueeze(0).to(torch_device)
padfree_inputs_dict["position_ids"] = torch.cat([text_padfree_positions, vision_padfree_positions])[
:, None, :
]
if fa_kwargs:
cu_seq_lens = [0] + dummy_attention_mask.sum(1).tolist()
cu_seq_lens = torch.tensor(cu_seq_lens, device=torch_device)
max_length = cu_seq_lens.diff().max().item()
padfree_inputs_dict.update(
{
"cu_seq_lens_q": cu_seq_lens.cumsum(-1).to(dtype=torch.int32),
"cu_seq_lens_k": cu_seq_lens.cumsum(-1).to(dtype=torch.int32),
"max_length_q": max_length,
"max_length_k": max_length,
}
)
# We need to do simple forward without cache in roder to trigger packed SDPA/FLEX/EAGER path
res_padded = model(**inputs_dict, use_cache=False)
res_padfree = model(**padfree_inputs_dict, use_cache=False)
logits_padded = res_padded.logits[inputs_dict["attention_mask"].bool()]
logits_padfree = res_padfree.logits[0]
# acceptable numerical instability
tol = torch.finfo(torch.bfloat16).eps
torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol)
@unittest.skip(reason="Feedforward chunking is not yet supported")
def test_feed_forward_chunking(self):
pass
@unittest.skip(reason="CPU offload is not yet supported")
def test_cpu_offload(self):
pass
@unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
def test_disk_offload_bin(self):
pass
@unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
def test_disk_offload_safetensors(self):
pass
@unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
def test_model_parallelism(self):
pass
@unittest.skip(reason="Compile not yet supported because in Qwen2VL models")
def test_sdpa_can_dispatch_on_flash(self):
pass
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
def test_multi_gpu_data_parallel_forward(self):
pass
@require_torch
| Qwen2VLModelTest |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/envs/unity_aec_env.py | {
"start": 211,
"end": 2494
} | class ____(UnityPettingzooBaseEnv, AECEnv):
"""
Unity AEC (PettingZoo) environment wrapper.
"""
def __init__(self, env: BaseEnv, seed: Optional[int] = None):
"""
Initializes a Unity AEC environment wrapper.
:param env: The UnityEnvironment that is being wrapped.
:param seed: The seed for the action spaces of the agents.
"""
super().__init__(env, seed)
def step(self, action: Any) -> None:
"""
Sets the action of the active agent and get the observation, reward, done
and info of the next agent.
:param action: The action for the active agent
"""
self._assert_loaded()
if len(self._live_agents) <= 0:
raise error.Error(
"You must reset the environment before you can perform a step"
)
# Process action
current_agent = self._agents[self._agent_index]
self._process_action(current_agent, action)
self._agent_index += 1
# Reset reward
for k in self._rewards.keys():
self._rewards[k] = 0
if self._agent_index >= len(self._agents) and self.num_agents > 0:
# The index is too high, time to set the action for the agents we have
self._step()
self._live_agents.sort() # unnecessary, only for passing API test
def observe(self, agent_id):
"""
Returns the observation an agent currently can make. `last()` calls this function.
"""
return (
self._observations[agent_id],
self._cumm_rewards[agent_id],
self._dones[agent_id],
self._infos[agent_id],
)
def last(self, observe=True):
"""
returns observation, cumulative reward, done, info for the current agent (specified by self.agent_selection)
"""
obs, reward, done, info = self.observe(self._agents[self._agent_index])
return obs if observe else None, reward, done, info
@property
def agent_selection(self):
if not self._live_agents:
# If we had an agent finish then return that agent even though it isn't alive.
return self._agents[0]
return self._agents[self._agent_index]
| UnityAECEnv |
python | scipy__scipy | scipy/special/tests/test_iv_ratio.py | {
"start": 5612,
"end": 10108
} | class ____:
@pytest.mark.parametrize('v,x,r', [
(0.5, 0.16666666666666666, 0.8348595870753707),
(0.5, 0.3333333333333333, 0.6784872624683657),
(0.5, 0.5, 0.5378828427399902),
(0.5, 0.6666666666666666, 0.4172170546520899),
(0.5, 0.8333333333333335, 0.3177382097618302),
(1, 0.3380952380952381, 0.8333226950829686),
(1, 0.7083333333333333, 0.6663355641301008),
(1, 1.1666666666666667, 0.4976644768462577),
(1, 1.8666666666666665, 0.325383427747836),
(1, 3.560606060606061, 0.155792340496837),
(2.34, 0.7975238095238094, 0.8329509691844672),
(2.34, 1.7133333333333334, 0.6639784068731155),
(2.34, 2.953333333333333, 0.49318090682197),
(2.34, 5.0826666666666656, 0.3244747301199321),
(2.34, 10.869696969696973, 0.16206488955012377),
(56.789, 19.46575238095238, 0.8332979494608591),
(56.789, 42.55008333333333, 0.6664619000306697),
(56.789, 75.552, 0.4996067618822174),
(56.789, 135.76026666666667, 0.3329471778053873),
(56.789, 307.8642424242425, 0.16650005585392025),
])
def test_against_reference_values(self, v, x, r):
"""The reference values are one minus those of TestIvRatio."""
assert_allclose(iv_ratio_c(v, x), r, rtol=1e-15, atol=0)
@pytest.mark.parametrize('v,x,r', [
(1, np.inf, 0),
(np.inf, 1, 1),
])
def test_inf(self, v, x, r):
"""If exactly one of v or x is inf and the other is within domain,
should return 0 or 1 accordingly."""
assert_equal(iv_ratio_c(v, x), r)
@pytest.mark.parametrize('v', [0.49, -np.inf, np.nan, np.inf])
@pytest.mark.parametrize('x', [-np.finfo(float).smallest_normal,
-np.finfo(float).smallest_subnormal,
-np.inf, np.nan, np.inf])
def test_nan(self, v, x):
"""If at least one argument is out of domain, or if v = x = inf,
the function should return nan."""
assert_equal(iv_ratio_c(v, x), np.nan)
@pytest.mark.parametrize('v', [0.5, 1, np.finfo(float).max, np.inf])
def test_zero_x(self, v):
"""If x is +/-0.0, return 1."""
assert_equal(iv_ratio_c(v, 0.0), 1.0)
assert_equal(iv_ratio_c(v, -0.0), 1.0)
@pytest.mark.parametrize('v,x', [
(1, np.finfo(float).smallest_normal),
(1, np.finfo(float).smallest_subnormal),
(1, np.finfo(float).smallest_subnormal*2),
(1e20, 123),
(np.finfo(float).max, 1),
(np.finfo(float).max, np.sqrt(np.finfo(float).max)),
])
def test_tiny_x(self, v, x):
"""If x is much less than v, the bounds
x x
--------------------------- <= R <= -----------------------
v-0.5+sqrt(x**2+(v+0.5)**2) v-1+sqrt(x**2+(v+1)**2)
collapses to 1-R ~= 1-x/2v. Test against this asymptotic expression.
"""
assert_equal(iv_ratio_c(v, x), 1.0-(0.5*x)/v)
@pytest.mark.parametrize('v,x', [
(1, 1e16),
(1e20, 1e40),
(np.sqrt(np.finfo(float).max), np.finfo(float).max),
])
def test_huge_x(self, v, x):
"""If x is much greater than v, the bounds
x x
--------------------------- <= R <= ---------------------------
v-0.5+sqrt(x**2+(v+0.5)**2) v-0.5+sqrt(x**2+(v-0.5)**2)
collapses to 1-R ~= (v-0.5)/x. Test against this asymptotic expression.
"""
assert_allclose(iv_ratio_c(v, x), (v-0.5)/x, rtol=1e-15, atol=0)
@pytest.mark.parametrize('v,x', [
(np.finfo(float).max, np.finfo(float).max),
(np.finfo(float).max / 3, np.finfo(float).max),
(np.finfo(float).max, np.finfo(float).max / 3),
])
def test_huge_v_x(self, v, x):
"""If both x and v are very large, the bounds
x x
--------------------------- <= R <= -----------------------
v-0.5+sqrt(x**2+(v+0.5)**2) v-1+sqrt(x**2+(v+1)**2)
collapses to 1 - R ~= 1 - x/(v+sqrt(x**2+v**2). Test against this
asymptotic expression, and in particular that no numerical overflow
occurs during intermediate calculations.
"""
t = x / v
expected = 1 - t / (1 + np.hypot(1, t))
assert_allclose(iv_ratio_c(v, x), expected, rtol=4e-16, atol=0)
| TestIvRatioC |
python | huggingface__transformers | src/transformers/models/tvp/processing_tvp.py | {
"start": 1030,
"end": 2488
} | class ____(ProcessorMixin):
r"""
Constructs an TVP processor which wraps a TVP image processor and a Bert tokenizer into a single processor.
[`TvpProcessor`] offers all the functionalities of [`TvpImageProcessor`] and [`BertTokenizerFast`]. See the
[`~TvpProcessor.__call__`] and [`~TvpProcessor.decode`] for more information.
Args:
image_processor ([`TvpImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`BertTokenizerFast`], *optional*):
The tokenizer is a required input.
"""
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
super().__init__(image_processor, tokenizer)
self.video_processor = image_processor
def post_process_video_grounding(self, logits, video_durations):
"""
Compute the time of the video.
Args:
logits (`torch.Tensor`):
The logits output of TvpForVideoGrounding.
video_durations (`float`):
The video's duration.
Returns:
start (`float`):
The start time of the video.
end (`float`):
The end time of the video.
"""
start, end = (
round(logits.tolist()[0][0] * video_durations, 1),
round(logits.tolist()[0][1] * video_durations, 1),
)
return start, end
__all__ = ["TvpProcessor"]
| TvpProcessor |
python | scipy__scipy | scipy/linalg/tests/test_decomp.py | {
"start": 48554,
"end": 49383
} | class ____(TestSVD_GESDD):
lapack_driver = 'gesvd'
# Allocating an array of such a size leads to _ArrayMemoryError(s)
# since the maximum memory that can be in 32-bit (WASM) is 4GB
@pytest.mark.skipif(IS_WASM, reason="out of memory in WASM")
@pytest.mark.xfail_on_32bit("out of memory in 32-bit CI workflow")
@pytest.mark.parallel_threads_limit(2) # 1.9 GiB per thread RAM usage
@pytest.mark.fail_slow(10)
def test_svd_gesdd_nofegfault():
# svd(a) with {U,VT}.size > INT_MAX does not segfault
# cf https://github.com/scipy/scipy/issues/14001
df=np.ones((4799, 53130), dtype=np.float64)
with assert_raises(ValueError):
svd(df)
def test_gesdd_nan_error_message():
A = np.eye(2)
A[0, 0] = np.nan
with pytest.raises(ValueError, match="NaN"):
svd(A, check_finite=False)
| TestSVD_GESVD |
python | sqlalchemy__sqlalchemy | test/sql/test_metadata.py | {
"start": 58903,
"end": 60783
} | class ____(fixtures.TestBase):
def test_metadata_info(self):
m1 = MetaData()
eq_(m1.info, {})
m1 = MetaData(info={"foo": "bar"})
eq_(m1.info, {"foo": "bar"})
def test_foreignkey_constraint_info(self):
fkc = ForeignKeyConstraint(["a"], ["b"], name="bar")
eq_(fkc.info, {})
fkc = ForeignKeyConstraint(
["a"], ["b"], name="bar", info={"foo": "bar"}
)
eq_(fkc.info, {"foo": "bar"})
def test_foreignkey_info(self):
fkc = ForeignKey("a")
eq_(fkc.info, {})
fkc = ForeignKey("a", info={"foo": "bar"})
eq_(fkc.info, {"foo": "bar"})
def test_primarykey_constraint_info(self):
pkc = PrimaryKeyConstraint("a", name="x")
eq_(pkc.info, {})
pkc = PrimaryKeyConstraint("a", name="x", info={"foo": "bar"})
eq_(pkc.info, {"foo": "bar"})
def test_unique_constraint_info(self):
uc = UniqueConstraint("a", name="x")
eq_(uc.info, {})
uc = UniqueConstraint("a", name="x", info={"foo": "bar"})
eq_(uc.info, {"foo": "bar"})
def test_check_constraint_info(self):
cc = CheckConstraint("foo=bar", name="x")
eq_(cc.info, {})
cc = CheckConstraint("foo=bar", name="x", info={"foo": "bar"})
eq_(cc.info, {"foo": "bar"})
def test_index_info(self):
ix = Index("x", "a")
eq_(ix.info, {})
ix = Index("x", "a", info={"foo": "bar"})
eq_(ix.info, {"foo": "bar"})
def test_column_info(self):
c = Column("x", Integer)
eq_(c.info, {})
c = Column("x", Integer, info={"foo": "bar"})
eq_(c.info, {"foo": "bar"})
def test_table_info(self):
t = Table("x", MetaData())
eq_(t.info, {})
t = Table("x", MetaData(), info={"foo": "bar"})
eq_(t.info, {"foo": "bar"})
| InfoTest |
python | encode__httpx | httpx/_auth.py | {
"start": 3600,
"end": 4316
} | class ____(Auth):
"""
Allows the 'auth' argument to be passed as a (username, password) pair,
and uses HTTP Basic authentication.
"""
def __init__(self, username: str | bytes, password: str | bytes) -> None:
self._auth_header = self._build_auth_header(username, password)
def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
request.headers["Authorization"] = self._auth_header
yield request
def _build_auth_header(self, username: str | bytes, password: str | bytes) -> str:
userpass = b":".join((to_bytes(username), to_bytes(password)))
token = b64encode(userpass).decode()
return f"Basic {token}"
| BasicAuth |
python | tiangolo__fastapi | fastapi/responses.py | {
"start": 722,
"end": 1216
} | class ____(JSONResponse):
"""
JSON response using the high-performance ujson library to serialize data to JSON.
Read more about it in the
[FastAPI docs for Custom Response - HTML, Stream, File, others](https://fastapi.tiangolo.com/advanced/custom-response/).
"""
def render(self, content: Any) -> bytes:
assert ujson is not None, "ujson must be installed to use UJSONResponse"
return ujson.dumps(content, ensure_ascii=False).encode("utf-8")
| UJSONResponse |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/output/base.py | {
"start": 365,
"end": 6087
} | class ____(metaclass=ABCMeta):
"""
Base class defining the output interface for a
:class:`~prompt_toolkit.renderer.Renderer`.
Actual implementations are
:class:`~prompt_toolkit.output.vt100.Vt100_Output` and
:class:`~prompt_toolkit.output.win32.Win32Output`.
"""
stdout: TextIO | None = None
@abstractmethod
def fileno(self) -> int:
"Return the file descriptor to which we can write for the output."
@abstractmethod
def encoding(self) -> str:
"""
Return the encoding for this output, e.g. 'utf-8'.
(This is used mainly to know which characters are supported by the
output the data, so that the UI can provide alternatives, when
required.)
"""
@abstractmethod
def write(self, data: str) -> None:
"Write text (Terminal escape sequences will be removed/escaped.)"
@abstractmethod
def write_raw(self, data: str) -> None:
"Write text."
@abstractmethod
def set_title(self, title: str) -> None:
"Set terminal title."
@abstractmethod
def clear_title(self) -> None:
"Clear title again. (or restore previous title.)"
@abstractmethod
def flush(self) -> None:
"Write to output stream and flush."
@abstractmethod
def erase_screen(self) -> None:
"""
Erases the screen with the background color and moves the cursor to
home.
"""
@abstractmethod
def enter_alternate_screen(self) -> None:
"Go to the alternate screen buffer. (For full screen applications)."
@abstractmethod
def quit_alternate_screen(self) -> None:
"Leave the alternate screen buffer."
@abstractmethod
def enable_mouse_support(self) -> None:
"Enable mouse."
@abstractmethod
def disable_mouse_support(self) -> None:
"Disable mouse."
@abstractmethod
def erase_end_of_line(self) -> None:
"""
Erases from the current cursor position to the end of the current line.
"""
@abstractmethod
def erase_down(self) -> None:
"""
Erases the screen from the current line down to the bottom of the
screen.
"""
@abstractmethod
def reset_attributes(self) -> None:
"Reset color and styling attributes."
@abstractmethod
def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None:
"Set new color and styling attributes."
@abstractmethod
def disable_autowrap(self) -> None:
"Disable auto line wrapping."
@abstractmethod
def enable_autowrap(self) -> None:
"Enable auto line wrapping."
@abstractmethod
def cursor_goto(self, row: int = 0, column: int = 0) -> None:
"Move cursor position."
@abstractmethod
def cursor_up(self, amount: int) -> None:
"Move cursor `amount` place up."
@abstractmethod
def cursor_down(self, amount: int) -> None:
"Move cursor `amount` place down."
@abstractmethod
def cursor_forward(self, amount: int) -> None:
"Move cursor `amount` place forward."
@abstractmethod
def cursor_backward(self, amount: int) -> None:
"Move cursor `amount` place backward."
@abstractmethod
def hide_cursor(self) -> None:
"Hide cursor."
@abstractmethod
def show_cursor(self) -> None:
"Show cursor."
@abstractmethod
def set_cursor_shape(self, cursor_shape: CursorShape) -> None:
"Set cursor shape to block, beam or underline."
@abstractmethod
def reset_cursor_shape(self) -> None:
"Reset cursor shape."
def ask_for_cpr(self) -> None:
"""
Asks for a cursor position report (CPR).
(VT100 only.)
"""
@property
def responds_to_cpr(self) -> bool:
"""
`True` if the `Application` can expect to receive a CPR response after
calling `ask_for_cpr` (this will come back through the corresponding
`Input`).
This is used to determine the amount of available rows we have below
the cursor position. In the first place, we have this so that the drop
down autocompletion menus are sized according to the available space.
On Windows, we don't need this, there we have
`get_rows_below_cursor_position`.
"""
return False
@abstractmethod
def get_size(self) -> Size:
"Return the size of the output window."
def bell(self) -> None:
"Sound bell."
def enable_bracketed_paste(self) -> None:
"For vt100 only."
def disable_bracketed_paste(self) -> None:
"For vt100 only."
def reset_cursor_key_mode(self) -> None:
"""
For vt100 only.
Put the terminal in normal cursor mode (instead of application mode).
See: https://vt100.net/docs/vt100-ug/chapter3.html
"""
def scroll_buffer_to_prompt(self) -> None:
"For Win32 only."
def get_rows_below_cursor_position(self) -> int:
"For Windows only."
raise NotImplementedError
@abstractmethod
def get_default_color_depth(self) -> ColorDepth:
"""
Get default color depth for this output.
This value will be used if no color depth was explicitly passed to the
`Application`.
.. note::
If the `$PROMPT_TOOLKIT_COLOR_DEPTH` environment variable has been
set, then `outputs.defaults.create_output` will pass this value to
the implementation as the default_color_depth, which is returned
here. (This is not used when the output corresponds to a
prompt_toolkit SSH/Telnet session.)
"""
| Output |
python | wandb__wandb | wandb/sdk/artifacts/artifact.py | {
"start": 3606,
"end": 106891
} | class ____:
"""Flexible and lightweight building block for dataset and model versioning.
Construct an empty W&B Artifact. Populate an artifacts contents with methods that
begin with `add`. Once the artifact has all the desired files, you can call
`run.log_artifact()` to log it.
Args:
name (str): A human-readable name for the artifact. Use the name to identify
a specific artifact in the W&B App UI or programmatically. You can
interactively reference an artifact with the `use_artifact` Public API.
A name can contain letters, numbers, underscores, hyphens, and dots.
The name must be unique across a project.
type (str): The artifact's type. Use the type of an artifact to both organize
and differentiate artifacts. You can use any string that contains letters,
numbers, underscores, hyphens, and dots. Common types include `dataset` or
`model`. Include `model` within your type string if you want to link the
artifact to the W&B Model Registry.
Note that some types reserved for internal use and cannot be set by users.
Such types include `job` and types that start with `wandb-`.
description (str | None) = None: A description of the artifact. For Model or
Dataset Artifacts, add documentation for your standardized team model or
dataset card. View an artifact's description programmatically with the
`Artifact.description` attribute or programmatically with the W&B App UI.
W&B renders the description as markdown in the W&B App.
metadata (dict[str, Any] | None) = None: Additional information about an artifact.
Specify metadata as a dictionary of key-value pairs. You can specify no more
than 100 total keys.
incremental: Use `Artifact.new_draft()` method instead to modify an
existing artifact.
use_as: Deprecated.
Returns:
An `Artifact` object.
"""
_TMP_DIR = tempfile.TemporaryDirectory("wandb-artifacts")
atexit.register(_TMP_DIR.cleanup)
def __init__(
self,
name: str,
type: str,
description: str | None = None,
metadata: dict[str, Any] | None = None,
incremental: bool = False,
use_as: str | None = None,
storage_region: str | None = None,
) -> None:
from wandb.sdk.artifacts._internal_artifact import InternalArtifact
from ._validators import (
validate_artifact_name,
validate_artifact_type,
validate_metadata,
)
if not re.match(r"^[a-zA-Z0-9_\-.]+$", name):
raise ValueError(
f"Artifact name may only contain alphanumeric characters, dashes, "
f"underscores, and dots. Invalid name: {name!r}"
)
if incremental and not isinstance(self, InternalArtifact):
termwarn("Using experimental arg `incremental`")
# Internal.
self._client: RetryingClient | None = None
self._tmp_dir: tempfile.TemporaryDirectory | None = None
self._added_objs: dict[int, tuple[WBValue, ArtifactManifestEntry]] = {}
self._added_local_paths: dict[str, ArtifactManifestEntry] = {}
self._save_handle: MailboxHandle[pb.Result] | None = None
self._download_roots: set[str] = set()
# Set by new_draft(), otherwise the latest artifact will be used as the base.
self._base_id: str | None = None
# Properties.
self._id: str | None = None
# Client IDs don't need cryptographic strength, so use a faster implementation.
self._client_id: str = generate_fast_id(128)
self._sequence_client_id: str = generate_fast_id(128)
self._entity: str | None = None
self._project: str | None = None
self._name: str = validate_artifact_name(name) # includes version after saving
self._version: str | None = None
self._source_entity: str | None = None
self._source_project: str | None = None
self._source_name: str = name # includes version after saving
self._source_version: str | None = None
self._source_artifact: Artifact | None = None
self._is_link: bool = False
self._type: str = validate_artifact_type(type, name)
self._description: str | None = description
self._metadata: dict[str, Any] = validate_metadata(metadata)
self._ttl_duration_seconds: int | None = None
self._ttl_is_inherited: bool = True
self._ttl_changed: bool = False
self._aliases: list[str] = []
self._saved_aliases: list[str] = []
self._tags: list[str] = []
self._saved_tags: list[str] = []
self._distributed_id: str | None = None
self._incremental: bool = incremental
if use_as is not None:
warn_and_record_deprecation(
feature=Deprecated(artifact__init_use_as=True),
message=(
"`use_as` argument is deprecated and does not affect the behaviour of `wandb.Artifact()`"
),
)
self._use_as: str | None = None
self._state: ArtifactState = ArtifactState.PENDING
# NOTE: These fields only reflect the last fetched response from the
# server, if any. If the ArtifactManifest has already been fetched and/or
# populated locally, it should take priority when determining these values.
self._size: NonNegativeInt | None = None
self._digest: str | None = None
self._manifest: ArtifactManifest | None = ArtifactManifestV1(
storage_policy=make_storage_policy(region=storage_region)
)
self._commit_hash: str | None = None
self._file_count: int | None = None
self._created_at: str | None = None
self._updated_at: str | None = None
self._final: bool = False
self._history_step: int | None = None
self._linked_artifacts: list[Artifact] = []
self._fetch_file_urls_decorated: Callable[..., Any] | None = None
# Cache.
artifact_instance_cache_by_client_id[self._client_id] = self
def __repr__(self) -> str:
return f"<Artifact {self.id or self.name}>"
@classmethod
def _from_id(cls, artifact_id: str, client: RetryingClient) -> Artifact | None:
from ._generated import ARTIFACT_BY_ID_GQL, ArtifactByID
from ._validators import FullArtifactPath
if cached_artifact := artifact_instance_cache.get(artifact_id):
return cached_artifact
query = gql_compat(ARTIFACT_BY_ID_GQL, omit_fields=omit_artifact_fields(client))
data = client.execute(query, variable_values={"id": artifact_id})
result = ArtifactByID.model_validate(data)
if (artifact := result.artifact) is None:
return None
src_collection = artifact.artifact_sequence
src_project = src_collection.project
entity_name = src_project.entity.name if src_project else ""
project_name = src_project.name if src_project else ""
name = f"{src_collection.name}:v{artifact.version_index}"
path = FullArtifactPath(prefix=entity_name, project=project_name, name=name)
return cls._from_attrs(path, artifact, client)
@classmethod
def _membership_from_name(
cls, *, path: FullArtifactPath, client: RetryingClient
) -> Artifact:
from ._generated import (
ARTIFACT_MEMBERSHIP_BY_NAME_GQL,
ArtifactMembershipByName,
)
if not server_supports(client, pb.PROJECT_ARTIFACT_COLLECTION_MEMBERSHIP):
raise UnsupportedError(
"Querying for the artifact collection membership is not supported "
"by this version of wandb server. Consider updating to the latest version."
)
query = gql_compat(
ARTIFACT_MEMBERSHIP_BY_NAME_GQL,
omit_fields=omit_artifact_fields(client),
)
gql_vars = {"entity": path.prefix, "project": path.project, "name": path.name}
data = client.execute(query, variable_values=gql_vars)
result = ArtifactMembershipByName.model_validate(data)
if not (project := result.project):
raise ValueError(
f"project {path.project!r} not found under entity {path.prefix!r}"
)
if not (membership := project.artifact_collection_membership):
entity_project = f"{path.prefix}/{path.project}"
raise ValueError(
f"artifact membership {path.name!r} not found in {entity_project!r}"
)
return cls._from_membership(membership, target=path, client=client)
@classmethod
def _from_name(
cls,
*,
path: FullArtifactPath,
client: RetryingClient,
enable_tracking: bool = False,
) -> Artifact:
from ._generated import ARTIFACT_BY_NAME_GQL, ArtifactByName
if server_supports(client, pb.PROJECT_ARTIFACT_COLLECTION_MEMBERSHIP):
return cls._membership_from_name(path=path, client=client)
omit_vars = None if supports_enable_tracking_var(client) else {"enableTracking"}
gql_vars = {
"entity": path.prefix,
"project": path.project,
"name": path.name,
"enableTracking": enable_tracking,
}
query = gql_compat(
ARTIFACT_BY_NAME_GQL,
omit_variables=omit_vars,
omit_fields=omit_artifact_fields(client),
)
data = client.execute(query, variable_values=gql_vars)
result = ArtifactByName.model_validate(data)
if not (project := result.project):
raise ValueError(
f"project {path.project!r} not found under entity {path.prefix!r}"
)
if not (artifact := project.artifact):
entity_project = f"{path.prefix}/{path.project}"
raise ValueError(f"artifact {path.name!r} not found in {entity_project!r}")
return cls._from_attrs(path, artifact, client)
@classmethod
def _from_membership(
cls,
membership: ArtifactMembershipFragment,
target: FullArtifactPath,
client: RetryingClient,
) -> Artifact:
from ._validators import is_artifact_registry_project
if not (
(collection := membership.artifact_collection)
and (name := collection.name)
and (proj := collection.project)
):
raise ValueError("Missing artifact collection project in GraphQL response")
if is_artifact_registry_project(proj.name) and (
target.project == "model-registry"
):
wandb.termwarn(
"This model registry has been migrated and will be discontinued. "
f"Your request was redirected to the corresponding artifact {name!r} in the new registry. "
f"Please update your paths to point to the migrated registry directly, '{proj.name}/{name}'."
)
new_target = replace(target, prefix=proj.entity.name, project=proj.name)
else:
new_target = copy(target)
if not (artifact := membership.artifact):
raise ValueError(f"Artifact {target.to_str()!r} not found in response")
aliases = [a.alias for a in membership.aliases]
return cls._from_attrs(new_target, artifact, client, aliases=aliases)
@classmethod
def _from_attrs(
cls,
path: FullArtifactPath,
attrs: ArtifactFragment,
client: RetryingClient,
aliases: list[str] | None = None,
) -> Artifact:
# Placeholder is required to skip validation.
artifact = cls("placeholder", type="placeholder")
artifact._client = client
artifact._entity = path.prefix
artifact._project = path.project
artifact._name = path.name
artifact._assign_attrs(attrs, aliases)
artifact.finalize()
# Cache.
assert artifact.id is not None
artifact_instance_cache[artifact.id] = artifact
return artifact
# TODO: Eventually factor out is_link. Have to currently use it since some forms of fetching the artifact
# doesn't make it clear if the artifact is a link or not and have to manually set it.
def _assign_attrs(
self,
art: ArtifactFragment,
aliases: list[str] | None = None,
is_link: bool | None = None,
) -> None:
"""Update this Artifact's attributes using the server response."""
from ._validators import validate_metadata, validate_ttl_duration_seconds
self._id = art.id
src_collection = art.artifact_sequence
src_project = src_collection.project
self._source_entity = src_project.entity.name if src_project else ""
self._source_project = src_project.name if src_project else ""
self._source_name = f"{src_collection.name}:v{art.version_index}"
self._source_version = f"v{art.version_index}"
self._entity = self._entity or self._source_entity
self._project = self._project or self._source_project
self._name = self._name or self._source_name
# TODO: Refactor artifact query to fetch artifact via membership instead
# and get the collection type
if is_link is None:
self._is_link = (
self._entity != self._source_entity
or self._project != self._source_project
or self._name.split(":")[0] != self._source_name.split(":")[0]
)
else:
self._is_link = is_link
self._type = art.artifact_type.name
self._description = art.description
# The future of aliases is to move all alias fetches to the membership level
# so we don't have to do the collection fetches below
if aliases:
processed_aliases = aliases
elif art.aliases:
entity = self._entity
project = self._project
collection = self._name.split(":")[0]
processed_aliases = [
art_alias.alias
for art_alias in art.aliases
if (
(coll := art_alias.artifact_collection)
and (proj := coll.project)
and proj.entity.name == entity
and proj.name == project
and coll.name == collection
)
]
else:
processed_aliases = []
version_aliases = list(filter(alias_is_version_index, processed_aliases))
other_aliases = list(filterfalse(alias_is_version_index, processed_aliases))
try:
version = one(
version_aliases, too_short=TooFewItemsError, too_long=TooManyItemsError
)
except TooFewItemsError:
version = f"v{art.version_index}" # default to the source version
except TooManyItemsError:
msg = f"Expected at most one version alias, got {len(version_aliases)}: {version_aliases!r}"
raise ValueError(msg) from None
self._version = version
self._name = self._name if (":" in self._name) else f"{self._name}:{version}"
self._aliases = other_aliases
self._saved_aliases = copy(self._aliases)
self._tags = [tag.name for tag in (art.tags or [])]
self._saved_tags = copy(self._tags)
self._metadata = validate_metadata(art.metadata)
self._ttl_duration_seconds = validate_ttl_duration_seconds(
art.ttl_duration_seconds
)
self._ttl_is_inherited = (
True if (art.ttl_is_inherited is None) else art.ttl_is_inherited
)
self._state = ArtifactState(art.state)
self._size = art.size
self._digest = art.digest
self._manifest = None
self._commit_hash = art.commit_hash
self._file_count = art.file_count
self._created_at = art.created_at
self._updated_at = art.updated_at
self._history_step = art.history_step
@ensure_logged
def new_draft(self) -> Artifact:
"""Create a new draft artifact with the same content as this committed artifact.
Modifying an existing artifact creates a new artifact version known
as an "incremental artifact". The artifact returned can be extended or
modified and logged as a new version.
Returns:
An `Artifact` object.
Raises:
ArtifactNotLoggedError: If the artifact is not logged.
"""
# Name, _entity and _project are set to the *source* name/entity/project:
# if this artifact is saved it must be saved to the source sequence.
artifact = Artifact(self.source_name.split(":")[0], self.type)
artifact._entity = self._source_entity
artifact._project = self._source_project
artifact._source_entity = self._source_entity
artifact._source_project = self._source_project
# This artifact's parent is the one we are making a draft from.
artifact._base_id = self.id
# We can reuse the client, and copy over all the attributes that aren't
# version-dependent and don't depend on having been logged.
artifact._client = self._client
artifact._description = self.description
artifact._metadata = self.metadata
artifact._manifest = ArtifactManifest.from_manifest_json(
self.manifest.to_manifest_json()
)
return artifact
# Properties (Python Class managed attributes).
@property
def id(self) -> str | None:
"""The artifact's ID."""
if self.is_draft():
return None
assert self._id is not None
return self._id
@property
@ensure_logged
def entity(self) -> str:
"""The name of the entity that the artifact collection belongs to.
If the artifact is a link, the entity will be the entity of the linked artifact.
"""
assert self._entity is not None
return self._entity
@property
@ensure_logged
def project(self) -> str:
"""The name of the project that the artifact collection belongs to.
If the artifact is a link, the project will be the project of the linked artifact.
"""
assert self._project is not None
return self._project
@property
def name(self) -> str:
"""The artifact name and version of the artifact.
A string with the format `{collection}:{alias}`. If fetched before an artifact is
logged/saved, the name won't contain the alias.
If the artifact is a link, the name will be the name of the linked artifact.
"""
return self._name
@property
def qualified_name(self) -> str:
"""The entity/project/name of the artifact.
If the artifact is a link, the qualified name will be the qualified name of the
linked artifact path.
"""
return f"{self.entity}/{self.project}/{self.name}"
@property
@ensure_logged
def version(self) -> str:
"""The artifact's version.
A string with the format `v{number}`.
If this is a link artifact, the version will be from the linked collection.
"""
assert self._version is not None
return self._version
@property
@ensure_logged
def collection(self) -> ArtifactCollection:
"""The collection this artifact is retrieved from.
A collection is an ordered group of artifact versions.
If this artifact is retrieved from a collection that it is linked to,
return that collection. Otherwise, return the collection
that the artifact version originates from.
The collection that an artifact originates from is known as
the source sequence.
"""
base_name = self.name.split(":")[0]
return ArtifactCollection(
self._client, self.entity, self.project, base_name, self.type
)
@property
@ensure_logged
def source_entity(self) -> str:
"""The name of the entity of the source artifact."""
assert self._source_entity is not None
return self._source_entity
@property
@ensure_logged
def source_project(self) -> str:
"""The name of the project of the source artifact."""
assert self._source_project is not None
return self._source_project
@property
def source_name(self) -> str:
"""The artifact name and version of the source artifact.
A string with the format `{source_collection}:{alias}`. Before the artifact
is saved, contains only the name since the version is not yet known.
"""
return self._source_name
@property
def source_qualified_name(self) -> str:
"""The source_entity/source_project/source_name of the source artifact."""
return f"{self.source_entity}/{self.source_project}/{self.source_name}"
@property
@ensure_logged
def source_version(self) -> str:
"""The source artifact's version.
A string with the format `v{number}`.
"""
assert self._source_version is not None
return self._source_version
@property
@ensure_logged
def source_collection(self) -> ArtifactCollection:
"""The artifact's source collection.
The source collection is the collection that the artifact was logged from.
"""
base_name = self.source_name.split(":")[0]
return ArtifactCollection(
self._client, self.source_entity, self.source_project, base_name, self.type
)
@property
def is_link(self) -> bool:
"""Boolean flag indicating if the artifact is a link artifact.
True: The artifact is a link artifact to a source artifact.
False: The artifact is a source artifact.
"""
return self._is_link
@property
@ensure_logged
def linked_artifacts(self) -> list[Artifact]:
"""Returns a list of all the linked artifacts of a source artifact.
If this artifact is a link artifact (`artifact.is_link == True`),
it will return an empty list.
Limited to 500 results.
"""
if not self.is_link:
self._linked_artifacts = self._fetch_linked_artifacts()
return self._linked_artifacts
@property
@ensure_logged
def source_artifact(self) -> Artifact:
"""Returns the source artifact, which is the original logged artifact.
If this artifact is a source artifact (`artifact.is_link == False`),
it will return itself.
"""
from ._validators import FullArtifactPath
if not self.is_link:
return self
if self._source_artifact is None:
if (client := self._client) is None:
raise ValueError("Client is not initialized")
try:
path = FullArtifactPath(
prefix=self.source_entity,
project=self.source_project,
name=self.source_name,
)
self._source_artifact = self._from_name(path=path, client=client)
except Exception as e:
raise ValueError(
f"Unable to fetch source artifact for linked artifact {self.name}"
) from e
return self._source_artifact
@property
def type(self) -> str:
"""The artifact's type. Common types include `dataset` or `model`."""
return self._type
@property
@ensure_logged
def url(self) -> str:
"""
Constructs the URL of the artifact.
Returns:
str: The URL of the artifact.
"""
from ._validators import is_artifact_registry_project
try:
base_url = self._client.app_url # type: ignore[union-attr]
except AttributeError:
return ""
if not self.is_link:
return self._construct_standard_url(base_url)
if is_artifact_registry_project(self.project):
return self._construct_registry_url(base_url)
if self._type == "model" or self.project == "model-registry":
return self._construct_model_registry_url(base_url)
return self._construct_standard_url(base_url)
def _construct_standard_url(self, base_url: str) -> str:
if not all(
[
base_url,
self.entity,
self.project,
self._type,
self.collection.name,
self._version,
]
):
return ""
return urljoin(
base_url,
f"{self.entity}/{self.project}/artifacts/{quote(self._type)}/{quote(self.collection.name)}/{self._version}",
)
def _construct_registry_url(self, base_url: str) -> str:
from ._validators import remove_registry_prefix
if not all(
[
base_url,
self.entity,
self.project,
self.collection.name,
self._version,
]
):
return ""
try:
org_name = org_info_from_entity(self._client, self.entity).organization.name # type: ignore[union-attr]
except (AttributeError, ValueError):
return ""
selection_path = quote(
f"{self.entity}/{self.project}/{self.collection.name}", safe=""
)
return urljoin(
base_url,
f"orgs/{org_name}/registry/{remove_registry_prefix(self.project)}?selectionPath={selection_path}&view=membership&version={self.version}",
)
def _construct_model_registry_url(self, base_url: str) -> str:
if not all(
[
base_url,
self.entity,
self.project,
self.collection.name,
self._version,
]
):
return ""
selection_path = quote(
f"{self.entity}/{self.project}/{self.collection.name}", safe=""
)
return urljoin(
base_url,
f"{self.entity}/registry/model?selectionPath={selection_path}&view=membership&version={self._version}",
)
@property
def description(self) -> str | None:
"""A description of the artifact."""
return self._description
@description.setter
def description(self, description: str | None) -> None:
"""Set the description of the artifact.
For model or dataset Artifacts, add documentation for your
standardized team model or dataset card. In the W&B UI the
description is rendered as markdown.
Editing the description will apply the changes to the source artifact
and all linked artifacts associated with it.
Args:
description: Free text that offers a description of the artifact.
"""
if self.is_link:
wandb.termwarn(
"Editing the description of this linked artifact will edit the description for the source artifact and it's linked artifacts as well."
)
self._description = description
@property
def metadata(self) -> dict:
"""User-defined artifact metadata.
Structured data associated with the artifact.
"""
return self._metadata
@metadata.setter
def metadata(self, metadata: dict) -> None:
"""User-defined artifact metadata.
Metadata set this way will eventually be queryable and plottable in the UI; e.g.
the class distribution of a dataset.
Note: There is currently a limit of 100 total keys.
Editing the metadata will apply the changes to the source artifact
and all linked artifacts associated with it.
Args:
metadata: Structured data associated with the artifact.
"""
from ._validators import validate_metadata
if self.is_link:
wandb.termwarn(
"Editing the metadata of this linked artifact will edit the metadata for the source artifact and it's linked artifacts as well."
)
self._metadata = validate_metadata(metadata)
@property
def ttl(self) -> timedelta | None:
"""The time-to-live (TTL) policy of an artifact.
Artifacts are deleted shortly after a TTL policy's duration passes.
If set to `None`, the artifact deactivates TTL policies and will be not
scheduled for deletion, even if there is a team default TTL.
An artifact inherits a TTL policy from
the team default if the team administrator defines a default
TTL and there is no custom policy set on an artifact.
Raises:
ArtifactNotLoggedError: Unable to fetch inherited TTL if the
artifact has not been logged or saved.
"""
if self._ttl_is_inherited and (self.is_draft() or self._ttl_changed):
raise ArtifactNotLoggedError(f"{nameof(type(self))}.ttl", self)
if self._ttl_duration_seconds is None:
return None
return timedelta(seconds=self._ttl_duration_seconds)
@ttl.setter
def ttl(self, ttl: timedelta | ArtifactTTL | None) -> None:
    """Set the time-to-live (TTL) policy of an artifact.

    Artifacts are deleted shortly after a TTL policy's duration passes.
    Passing `None` clears any TTL policy so the artifact is not scheduled
    for deletion. An artifact inherits the team default TTL when an
    administrator defines one and no custom policy is set on the artifact.

    Args:
        ttl: The duration as a positive `datetime.timedelta` that represents
            how long the artifact will remain active from its creation.
    """
    # Guard clauses: history artifacts and linked artifacts cannot carry TTLs.
    if self.type == "wandb-history":
        raise ValueError("Cannot set artifact TTL for type wandb-history")
    if self.is_link:
        raise ValueError(
            "Cannot set TTL for link artifact. "
            "Unlink the artifact first then set the TTL for the source artifact"
        )
    self._ttl_changed = True
    if isinstance(ttl, ArtifactTTL):
        if ttl != ArtifactTTL.INHERIT:
            raise ValueError(f"Unhandled ArtifactTTL enum {ttl}")
        self._ttl_is_inherited = True
        return
    self._ttl_is_inherited = False
    if ttl is None:
        self._ttl_duration_seconds = None
        return
    total = ttl.total_seconds()
    if total <= 0:
        raise ValueError(f"Artifact TTL Duration has to be positive. ttl: {total}")
    self._ttl_duration_seconds = int(total)
@property
@ensure_logged
def aliases(self) -> list[str]:
    """Semantically-friendly references or identifying "nicknames" for an artifact version.

    Aliases are mutable references that can be changed programmatically or in
    the W&B App UI.
    See [Create new artifact versions](https://docs.wandb.ai/guides/artifacts/create-a-new-artifact-version)
    for more information.
    """
    return self._aliases
@aliases.setter
@ensure_logged
def aliases(self, aliases: list[str]) -> None:
    """Replace the aliases on this logged artifact after validating them."""
    from ._validators import validate_aliases

    self._aliases = validate_aliases(aliases)
@property
@ensure_logged
def tags(self) -> list[str]:
    """The tags assigned to this artifact version."""
    return self._tags
@tags.setter
@ensure_logged
def tags(self, tags: list[str]) -> None:
    """Replace the tags on this artifact.

    Tag edits propagate to the source artifact and every artifact linked
    to it.
    """
    from ._validators import validate_tags

    if self.is_link:
        wandb.termwarn(
            "Editing tags will apply the changes to the source artifact and all linked artifacts associated with it."
        )
    self._tags = validate_tags(tags)
@property
def distributed_id(self) -> str | None:
    """Identifier used to coordinate distributed writes to one artifact.

    <!-- lazydoc-ignore: internal -->
    """
    return self._distributed_id
@distributed_id.setter
def distributed_id(self, distributed_id: str | None) -> None:
    """Set the distributed ID; `None` clears it.

    <!-- lazydoc-ignore: internal -->
    """
    self._distributed_id = distributed_id
@property
def incremental(self) -> bool:
    """Whether this artifact was created as an incremental artifact.

    <!-- lazydoc-ignore: internal -->
    """
    return self._incremental
@property
def use_as(self) -> str | None:
    """Deprecated."""
    # Emits a one-time deprecation warning and records the usage for telemetry.
    warn_and_record_deprecation(
        feature=Deprecated(artifact__use_as=True),
        message=("The use_as property of Artifact is deprecated."),
    )
    return self._use_as
@property
def state(self) -> str:
    """Current lifecycle state: "PENDING", "COMMITTED", or "DELETED"."""
    return self._state.value
@property
def manifest(self) -> ArtifactManifest:
    """The artifact's manifest, fetched lazily on first access.

    The manifest lists all of the artifact's contents and cannot be changed
    once the artifact has been logged.
    """
    if (manifest := self._manifest) is not None:
        return manifest
    self._manifest = self._fetch_manifest()
    return self._manifest
def _fetch_manifest(self) -> ArtifactManifest:
    """Fetch, parse, and load the full ArtifactManifest.

    Returns:
        The parsed `ArtifactManifest`.

    Raises:
        RuntimeError: If no client is available for the GraphQL query.
        ValueError: If the backend response lacks a current manifest.
        requests.HTTPError: If downloading the manifest contents fails.
    """
    import requests

    from ._generated import FETCH_ARTIFACT_MANIFEST_GQL, FetchArtifactManifest

    if (client := self._client) is None:
        raise RuntimeError("Client not initialized for artifact queries")
    # From the GraphQL API, get the (expiring) directUrl for downloading the manifest.
    gql_op = gql(FETCH_ARTIFACT_MANIFEST_GQL)
    gql_vars = {"id": self.id}
    data = client.execute(gql_op, variable_values=gql_vars)
    result = FetchArtifactManifest.model_validate(data)
    # Now fetch the actual manifest contents from the directUrl.
    if (artifact := result.artifact) and (manifest := artifact.current_manifest):
        # FIXME: For successive/repeated calls to `manifest`, figure out how to reuse a single
        #   `requests.Session` within the constraints of the current artifacts API.
        #   Right now, `requests.get()` creates a new session for _each_ fetch.
        #   This is wasteful and introduces a noticeable perf overhead when e.g.
        #   downloading many artifacts sequentially or concurrently.
        response = requests.get(manifest.file.direct_url)
        # Fix: surface HTTP failures explicitly instead of attempting to parse
        # an error payload as manifest JSON.
        response.raise_for_status()
        return ArtifactManifest.from_manifest_json(from_json(response.content))
    raise ValueError("Failed to fetch artifact manifest")
@property
def digest(self) -> str:
    """The logical digest (checksum of contents) of the artifact.

    If an artifact has the same digest as the current `latest` version,
    then `log_artifact` is a no-op.
    """
    # Trust the server-provided digest only while the manifest has not been
    # fetched/populated locally; a locally-present manifest may have been
    # modified, so recompute from it instead.
    if self._manifest is None and self._digest is not None:
        return self._digest
    return self.manifest.digest()
@property
def size(self) -> int:
    """The total size of the artifact in bytes.

    Includes any references tracked by this artifact.
    """
    # Trust the server-provided size only while the manifest has not been
    # fetched/populated locally; a locally-present manifest may have been
    # modified, so recompute from it instead.
    #
    # NOTE on choice of GQL field: `Artifact.size` counts references, while
    # `Artifact.storageBytes` does not.
    if self._manifest is None and self._size is not None:
        return self._size
    return self.manifest.size()
@property
@ensure_logged
def commit_hash(self) -> str:
    """Commit hash returned when this artifact was committed."""
    commit_hash = self._commit_hash
    assert commit_hash is not None
    return commit_hash
@property
@ensure_logged
def file_count(self) -> int:
    """Number of files in the artifact, including references."""
    count = self._file_count
    assert count is not None
    return count
@property
@ensure_logged
def created_at(self) -> str:
    """Timestamp at which the artifact was created."""
    created = self._created_at
    assert created is not None
    return created
@property
@ensure_logged
def updated_at(self) -> str:
    """Timestamp of the artifact's most recent update.

    Falls back to the creation time if the artifact was never updated.
    """
    created = self._created_at
    assert created is not None
    return self._updated_at or created
@property
@ensure_logged
def history_step(self) -> int | None:
    """The nearest step which logged history metrics for this artifact's source run.

    Examples:
    ```python
    run = artifact.logged_by()
    if run and (artifact.history_step is not None):
        history = run.sample_history(
            min_step=artifact.history_step,
            max_step=artifact.history_step + 1,
            keys=["my_metric"],
        )
    ```
    """
    step = self._history_step
    if step is None:
        return None
    # Steps are 1-based on the backend; clamp at zero after converting.
    return max(0, step - 1)
# State management.
def finalize(self) -> None:
    """Mark this artifact version as final.

    A finalized version can no longer be modified, because it is logged as
    a specific artifact version; create a new version to log more data.
    `log_artifact` finalizes an artifact automatically.
    """
    self._final = True
def is_draft(self) -> bool:
    """Report whether the artifact has not yet been saved.

    Returns:
        `True` while the artifact is still pending, `False` once saved.
    """
    return self._state is ArtifactState.PENDING
def _is_draft_save_started(self) -> bool:
    """Return True if a save of this draft has already been initiated."""
    return self._save_handle is not None
def save(
    self,
    project: str | None = None,
    settings: wandb.Settings | None = None,
) -> None:
    """Persist any changes made to the artifact.

    If currently in a run, that run will log this artifact. If not currently in a
    run, a run of type "auto" is created to track this artifact.

    Args:
        project: A project to use for the artifact in the case that a run is not
            already in context.
        settings: A settings object to use when initializing an automatic run. Most
            commonly used in testing harness.
    """
    # An already-logged (non-pending) artifact only needs its metadata pushed.
    if self._state is not ArtifactState.PENDING:
        return self._update()
    # Record incremental-artifact usage in telemetry before logging.
    if self._incremental:
        with telemetry.context() as tel:
            tel.feature.artifact_incremental = True
    if run := wandb_setup.singleton().most_recent_active_run:
        # TODO: Deprecate and encourage explicit log_artifact().
        run.log_artifact(self)
    else:
        # No active run: spin up a short-lived "auto" run to own the log call.
        if settings is None:
            settings = wandb.Settings(silent="true")
        with wandb.init(  # type: ignore
            entity=self._source_entity,
            project=project or self._source_project,
            job_type="auto",
            settings=settings,
        ) as run:
            # redoing this here because in this branch we know we didn't
            # have the run at the beginning of the method
            if self._incremental:
                with telemetry.context(run=run) as tel:
                    tel.feature.artifact_incremental = True
            run.log_artifact(self)
def _set_save_handle(
    self,
    save_handle: MailboxHandle[pb.Result],
    client: RetryingClient,
) -> None:
    """Record the in-flight save handle and the client used to query results."""
    self._save_handle = save_handle
    self._client = client
def wait(self, timeout: int | None = None) -> Artifact:
    """Block until this artifact finishes logging, if it is pending.

    Args:
        timeout: The time, in seconds, to wait.

    Returns:
        This `Artifact` instance.
    """
    # Nothing to wait for once the artifact is saved.
    if not self.is_draft():
        return self
    if (handle := self._save_handle) is None:
        raise ArtifactNotLoggedError(nameof(self.wait), self)
    try:
        result = handle.wait_or(timeout=timeout)
    except TimeoutError as e:
        raise WaitTimeoutError(
            "Artifact upload wait timed out, failed to fetch Artifact response"
        ) from e
    response = result.response.log_artifact_response
    if response.error_message:
        raise ValueError(response.error_message)
    self._populate_after_save(response.artifact_id)
    return self
def _populate_after_save(self, artifact_id: str) -> None:
    """Refresh this artifact's attributes from the backend after a save."""
    from ._generated import ARTIFACT_BY_ID_GQL, ArtifactByID

    client = self._client
    if client is None:
        raise RuntimeError("Client not initialized for artifact queries")
    gql_op = gql_compat(ARTIFACT_BY_ID_GQL, omit_fields=omit_artifact_fields(client))
    data = client.execute(gql_op, variable_values={"id": artifact_id})
    fetched = ArtifactByID.model_validate(data).artifact
    if not fetched:
        raise ValueError(f"Unable to fetch artifact with id: {artifact_id!r}")
    # This method only runs for source artifacts, never linked ones, so
    # is_link is set explicitly: the collection (which would determine link
    # status) is not fetched here. Refactoring commitArtifact to return the
    # artifact collection type would remove the need for this.
    self._assign_attrs(fetched, is_link=False)
@normalize_exceptions
def _update(self) -> None:
    """Persists artifact changes to the wandb backend.

    Diffs aliases and tags against their last-saved values, applies the
    alias changes via dedicated mutations when the server supports them,
    and sends the remaining changes in a single updateArtifact mutation.
    """
    from ._generated import UPDATE_ARTIFACT_GQL, UpdateArtifact, UpdateArtifactInput
    from ._validators import FullArtifactPath, validate_tags

    if (client := self._client) is None:
        raise RuntimeError("Client not initialized for artifact mutations")
    collection = self.name.split(":")[0]
    update_alias_inputs = None
    if type_info(client, "AddAliasesInput") is not None:
        # wandb backend version >= 0.13.0
        # Newer servers take alias additions/deletions as separate mutations,
        # so compute the set difference against the last-saved aliases.
        old_aliases, new_aliases = set(self._saved_aliases), set(self.aliases)
        target = FullArtifactPath(
            prefix=self.entity, project=self.project, name=collection
        )
        if added_aliases := (new_aliases - old_aliases):
            self._add_aliases(added_aliases, target=target)
        if deleted_aliases := (old_aliases - new_aliases):
            self._delete_aliases(deleted_aliases, target=target)
        self._saved_aliases = copy(self.aliases)
    else:
        # wandb backend version < 0.13.0
        # Older servers accept the full alias list inline on updateArtifact.
        update_alias_inputs = [
            {"artifactCollectionName": collection, "alias": alias}
            for alias in self.aliases
        ]
    # Fields/variables the server schema doesn't support must be omitted
    # from the mutation, warning the user if they tried to use them.
    omit_fields = omit_artifact_fields(client)
    omit_variables = set()
    if {"ttlIsInherited", "ttlDurationSeconds"} & omit_fields:
        if self._ttl_changed:
            termwarn(
                "Server not compatible with setting Artifact TTLs, please upgrade the server to use Artifact TTL"
            )
        omit_variables |= {"ttlDurationSeconds"}
    added_tags = validate_tags(set(self.tags) - set(self._saved_tags))
    deleted_tags = validate_tags(set(self._saved_tags) - set(self.tags))
    if {"tags"} & omit_fields:
        if added_tags or deleted_tags:
            termwarn(
                "Server not compatible with Artifact tags. "
                "To use Artifact tags, please upgrade the server to v0.85 or higher."
            )
        omit_variables |= {"tagsToAdd", "tagsToDelete"}
    gql_op = gql_compat(UPDATE_ARTIFACT_GQL, omit_fields=omit_fields)
    gql_input = UpdateArtifactInput(
        artifact_id=self.id,
        description=self.description,
        metadata=json_dumps_safer(self.metadata),
        ttl_duration_seconds=self._ttl_duration_seconds_to_gql(),
        aliases=update_alias_inputs,
        tags_to_add=[{"tagName": t} for t in added_tags],
        tags_to_delete=[{"tagName": t} for t in deleted_tags],
    )
    gql_vars = {"input": gql_input.model_dump(exclude=omit_variables)}
    data = client.execute(gql_op, variable_values=gql_vars)
    result = UpdateArtifact.model_validate(data).result
    if not (result and (artifact := result.artifact)):
        raise ValueError("Unable to parse updateArtifact response")
    # Re-sync local attributes from the server's authoritative response.
    self._assign_attrs(artifact)
    self._ttl_changed = False  # Reset after updating artifact
def _add_aliases(self, alias_names: set[str], target: FullArtifactPath) -> None:
    """Add the given aliases to this artifact via the backend API.

    Raises:
        RuntimeError: If no client is available.
        CommError: If the caller lacks permission to add an alias.
    """
    from ._generated import ADD_ALIASES_GQL, AddAliasesInput

    client = self._client
    if client is None:
        raise RuntimeError("Client not initialized for artifact mutations")
    base = {
        "entityName": target.prefix,
        "projectName": target.project,
        "artifactCollectionName": target.name,
    }
    gql_input = AddAliasesInput(
        artifact_id=self.id,
        aliases=[{**base, "alias": alias} for alias in alias_names],
    )
    try:
        client.execute(
            gql(ADD_ALIASES_GQL),
            variable_values={"input": gql_input.model_dump()},
        )
    except CommError as e:
        raise CommError(
            "You do not have permission to add"
            f" {'at least one of the following aliases' if len(alias_names) > 1 else 'the following alias'}"
            f" to this artifact: {alias_names!r}"
        ) from e
def _delete_aliases(self, alias_names: set[str], target: FullArtifactPath) -> None:
    """Remove the given aliases from this artifact via the backend API.

    Raises:
        RuntimeError: If no client is available.
        CommError: If the caller lacks permission to delete an alias.
    """
    from ._generated import DELETE_ALIASES_GQL, DeleteAliasesInput

    client = self._client
    if client is None:
        raise RuntimeError("Client not initialized for artifact mutations")
    base = {
        "entityName": target.prefix,
        "projectName": target.project,
        "artifactCollectionName": target.name,
    }
    gql_input = DeleteAliasesInput(
        artifact_id=self.id,
        aliases=[{**base, "alias": alias} for alias in alias_names],
    )
    try:
        client.execute(
            gql(DELETE_ALIASES_GQL),
            variable_values={"input": gql_input.model_dump()},
        )
    except CommError as e:
        raise CommError(
            "You do not have permission to delete"
            f" {'at least one of the following aliases' if len(alias_names) > 1 else 'the following alias'}"
            f" from this artifact: {alias_names!r}"
        ) from e
# Adding, removing, getting entries.
def __getitem__(self, name: str) -> WBValue | None:
    """Return the WBValue stored at the artifact-relative path `name`.

    Equivalent to `self.get(name)`.

    Args:
        name: The artifact relative name to get.

    Returns:
        W&B object that can be logged with `run.log()` and visualized in the W&B UI.

    Raises:
        ArtifactNotLoggedError: If the artifact isn't logged or the run is offline.
    """
    return self.get(name)
def __setitem__(self, name: str, item: WBValue) -> ArtifactManifestEntry:
    """Store `item` in the artifact under the path `name`.

    Equivalent to `self.add(item, name)`.

    Args:
        name: The path within the artifact to add the object.
        item: The object to add.

    Returns:
        The added manifest entry.

    Raises:
        ArtifactFinalizedError: The artifact version is finalized and can no
            longer be modified; log a new artifact version instead.
    """
    return self.add(item, name)
@contextlib.contextmanager
@ensure_not_finalized
def new_file(
    self, name: str, mode: str = "x", encoding: str | None = None
) -> Iterator[IO]:
    """Open a new temporary file and add it to the artifact.

    Args:
        name: The name of the new file to add to the artifact.
        mode: The file access mode to use to open the new file.
        encoding: The encoding used to open the new file.

    Returns:
        A new file object that can be written to. Upon closing, the file
        is automatically added to the artifact.

    Raises:
        ArtifactFinalizedError: You cannot make changes to the current
            artifact version because it is finalized. Log a new artifact
            version instead.
    """
    # Exclusive-create ("x") modes must not replace an existing entry;
    # any other mode implies the caller wants to overwrite.
    overwrite: bool = "x" not in mode
    if self._tmp_dir is None:
        self._tmp_dir = tempfile.TemporaryDirectory()
    # Strip a leading "/" so the name stays relative to the temp dir.
    path = os.path.join(self._tmp_dir.name, name.lstrip("/"))
    Path(path).parent.mkdir(parents=True, exist_ok=True)
    try:
        with fsync_open(path, mode, encoding) as f:
            yield f
    except FileExistsError:
        raise ValueError(f"File with name {name!r} already exists at {path!r}")
    except UnicodeEncodeError as e:
        termerror(
            f"Failed to open the provided file ({nameof(type(e))}: {e}). Please "
            f"provide the proper encoding."
        )
        raise
    # Only reached when the caller's `with` body exited cleanly; the file is
    # then recorded in the manifest.
    self.add_file(
        path, name=name, policy="immutable", skip_cache=True, overwrite=overwrite
    )
@ensure_not_finalized
def add_file(
    self,
    local_path: str,
    name: str | None = None,
    is_tmp: bool | None = False,
    skip_cache: bool | None = False,
    policy: Literal["mutable", "immutable"] | None = "mutable",
    overwrite: bool = False,
) -> ArtifactManifestEntry:
    """Add a local file to the artifact.

    Args:
        local_path: The path to the file being added.
        name: The path within the artifact to use for the file being added.
            Defaults to the basename of the file.
        is_tmp: If true, then the file is renamed deterministically to avoid
            collisions.
        skip_cache: If `True`, do not copy files to the cache
            after uploading.
        policy: By default, set to "mutable". If set to "mutable",
            create a temporary copy of the file to prevent corruption
            during upload. If set to "immutable", disable
            protection and rely on the user not to delete or change the
            file.
        overwrite: If `True`, overwrite the file if it already exists.

    Returns:
        The added manifest entry.

    Raises:
        ArtifactFinalizedError: You cannot make changes to the current
            artifact version because it is finalized. Log a new artifact
            version instead.
        ValueError: Policy must be "mutable" or "immutable"
    """
    if not os.path.isfile(local_path):
        raise ValueError(f"Path is not a file: {local_path!r}")
    name = LogicalPath(name or os.path.basename(local_path))
    digest = md5_file_b64(local_path)
    if is_tmp:
        # Derive a deterministic, collision-resistant stem from the digest.
        dirname, basename = os.path.split(name)
        parts = basename.split(".")
        parts[0] = b64_to_hex_id(digest)[:20]
        name = os.path.join(dirname, ".".join(parts))
    return self._add_local_file(
        name,
        local_path,
        digest=digest,
        skip_cache=skip_cache,
        policy=policy,
        overwrite=overwrite,
    )
@ensure_not_finalized
def add_dir(
    self,
    local_path: str,
    name: str | None = None,
    skip_cache: bool | None = False,
    policy: Literal["mutable", "immutable"] | None = "mutable",
    merge: bool = False,
) -> None:
    """Add a local directory to the artifact.

    Args:
        local_path: The path of the local directory.
        name: The subdirectory name within an artifact. The name you
            specify appears in the W&B App UI nested by artifact's `type`.
            Defaults to the root of the artifact.
        skip_cache: If set to `True`, W&B will not copy/move files to
            the cache while uploading
        policy: By default, "mutable".
            - mutable: Create a temporary copy of the file to prevent
              corruption during upload.
            - immutable: Disable protection, rely on the user not to delete
              or change the file.
        merge: If `False` (default), throws ValueError if a file was already added
            in a previous add_dir call and its content has changed. If `True`,
            overwrites existing files with changed content. Always adds new files
            and never removes files. To replace an entire directory, pass a name
            when adding the directory using `add_dir(local_path, name=my_prefix)`
            and call `remove(my_prefix)` to remove the directory, then add it again.

    Raises:
        ArtifactFinalizedError: You cannot make changes to the current
            artifact version because it is finalized. Log a new artifact
            version instead.
        ValueError: Policy must be "mutable" or "immutable"
    """
    if not os.path.isdir(local_path):
        raise ValueError(f"Path is not a directory: {local_path!r}")
    termlog(
        f"Adding directory to artifact ({Path('.', local_path)})... ",
        newline=False,
    )
    start_time = time.monotonic()
    # Collect (logical, physical) path pairs up front so the work can be
    # fanned out to a thread pool below.
    paths: deque[tuple[str, str]] = deque()
    logical_root = name or ""  # shared prefix, if any, for logical paths
    for dirpath, _, filenames in os.walk(local_path, followlinks=True):
        for fname in filenames:
            physical_path = os.path.join(dirpath, fname)
            # Logical path = position relative to local_path, under the prefix.
            logical_path = os.path.relpath(physical_path, start=local_path)
            logical_path = os.path.join(logical_root, logical_path)
            paths.append((logical_path, physical_path))

    def add_manifest_file(logical_pth: str, physical_pth: str) -> None:
        # `merge` maps onto per-file overwrite behavior in the manifest.
        self._add_local_file(
            name=logical_pth,
            path=physical_pth,
            skip_cache=skip_cache,
            policy=policy,
            overwrite=merge,
        )

    # Thread (not process) pool: the work is I/O-bound file hashing/copying.
    num_threads = 8
    pool = multiprocessing.dummy.Pool(num_threads)
    pool.starmap(add_manifest_file, paths)
    pool.close()
    pool.join()
    termlog("Done. %.1fs" % (time.monotonic() - start_time), prefix=False)
@ensure_not_finalized
def add_reference(
    self,
    uri: ArtifactManifestEntry | str,
    name: StrPath | None = None,
    checksum: bool = True,
    max_objects: int | None = None,
) -> Sequence[ArtifactManifestEntry]:
    """Add a reference denoted by a URI to the artifact.

    Unlike files or directories that you add to an artifact, references are not
    uploaded to W&B. For more information,
    see [Track external files](https://docs.wandb.ai/guides/artifacts/track-external-files).

    By default, the following schemes are supported:

    - http(s): The size and digest of the file will be inferred by the
      `Content-Length` and the `ETag` response headers returned by the server.
    - s3: The checksum and size are pulled from the object metadata.
      If bucket versioning is enabled, then the version ID is also tracked.
    - gs: The checksum and size are pulled from the object metadata. If bucket
      versioning is enabled, then the version ID is also tracked.
    - https, domain matching `*.blob.core.windows.net`
      - Azure: The checksum and size are be pulled from the blob metadata.
        If storage account versioning is enabled, then the version ID is
        also tracked.
    - file: The checksum and size are pulled from the file system. This scheme
      is useful if you have an NFS share or other externally mounted volume
      containing files you wish to track but not necessarily upload.

    For any other scheme, the digest is just a hash of the URI and the size is left
    blank.

    Args:
        uri: The URI path of the reference to add. The URI path can be an object
            returned from `Artifact.get_entry` to store a reference to another
            artifact's entry.
        name: The path within the artifact to place the contents of this reference.
        checksum: Whether or not to checksum the resource(s) located at the
            reference URI. Checksumming is strongly recommended as it enables
            automatic integrity validation. Disabling checksumming will speed up
            artifact creation but reference directories will not iterated through so
            the objects in the directory will not be saved to the artifact.
            We recommend setting `checksum=False` when adding reference objects,
            in which case a new version will only be created if the reference URI
            changes.
        max_objects: The maximum number of objects to consider when adding a
            reference that points to directory or bucket store prefix.
            By default, the maximum number of objects allowed for Amazon S3,
            GCS, Azure, and local files is 10,000,000. Other URI schemas
            do not have a maximum.

    Returns:
        The added manifest entries.

    Raises:
        ArtifactFinalizedError: You cannot make changes to the current
            artifact version because it is finalized. Log a new artifact
            version instead.
        TypeError: If `uri` is neither a string nor an `ArtifactManifestEntry`.
        ValueError: If the URI has no scheme.
    """
    if name is not None:
        name = LogicalPath(name)
    # This is a bit of a hack, we want to check if the uri is a of the type
    # ArtifactManifestEntry. If so, then recover the reference URL.
    if isinstance(uri, ArtifactManifestEntry):
        uri_str = uri.ref_url()
    elif isinstance(uri, str):
        uri_str = uri
    else:
        # Fix: previously an unsupported type fell through and triggered a
        # confusing UnboundLocalError below; fail fast with a clear message.
        raise TypeError(
            f"uri must be a str or ArtifactManifestEntry, got {type(uri).__name__}"
        )
    url = urlparse(str(uri_str))
    if not url.scheme:
        raise ValueError(
            "References must be URIs. To reference a local file, use file://"
        )
    manifest_entries = self.manifest.storage_policy.store_reference(
        self,
        URIStr(uri_str),
        name=name,
        checksum=checksum,
        max_objects=max_objects,
    )
    for entry in manifest_entries:
        self.manifest.add_entry(entry)
    return manifest_entries
@ensure_not_finalized
def add(
    self, obj: WBValue, name: StrPath, overwrite: bool = False
) -> ArtifactManifestEntry:
    """Add wandb.WBValue `obj` to the artifact.

    Args:
        obj: The object to add. Currently support one of Bokeh, JoinedTable,
            PartitionedTable, Table, Classes, ImageMask, BoundingBoxes2D,
            Audio, Image, Video, Html, Object3D
        name: The path within the artifact to add the object.
        overwrite: If True, overwrite existing objects with the same file
            path if applicable.

    Returns:
        The added manifest entry

    Raises:
        ArtifactFinalizedError: You cannot make changes to the current
            artifact version because it is finalized. Log a new artifact
            version instead.
    """
    name = LogicalPath(name)
    # This is a "hack" to automatically rename tables added to
    # the wandb /media/tables directory to their sha-based name.
    # TODO: figure out a more appropriate convention.
    is_tmp_name = name.startswith("media/tables")
    # Validate that the object is one of the correct wandb.Media types
    # TODO: move this to checking subclass of wandb.Media once all are
    # generally supported
    allowed_types = (
        data_types.Bokeh,
        data_types.JoinedTable,
        data_types.PartitionedTable,
        data_types.Table,
        data_types.Classes,
        data_types.ImageMask,
        data_types.BoundingBoxes2D,
        data_types.Audio,
        data_types.Image,
        data_types.Video,
        data_types.Html,
        data_types.Object3D,
        data_types.Molecule,
        data_types._SavedModel,
    )
    if not isinstance(obj, allowed_types):
        raise TypeError(
            f"Found object of type {obj.__class__}, expected one of:"
            f" {allowed_types}"
        )
    # De-dupe: if this exact object instance was added before, reuse its entry.
    obj_id = id(obj)
    if obj_id in self._added_objs:
        return self._added_objs[obj_id][1]
    # If the object is coming from another artifact, save it as a reference
    ref_path = obj._get_artifact_entry_ref_url()
    if ref_path is not None:
        return self.add_reference(ref_path, type(obj).with_suffix(name))[0]
    val = obj.to_json(self)
    name = obj.with_suffix(name)
    entry = self.manifest.get_entry_by_path(name)
    # Without overwrite, an existing entry at this path wins.
    if (not overwrite) and (entry is not None):
        return entry
    if is_tmp_name:
        # Write to the class-level temp dir, namespaced by this instance's id.
        file_path = os.path.join(self._TMP_DIR.name, str(id(self)), name)
        folder_path, _ = os.path.split(file_path)
        os.makedirs(folder_path, exist_ok=True)
        with open(file_path, "w", encoding="utf-8") as tmp_f:
            json.dump(val, tmp_f, sort_keys=True)
    else:
        filemode = "w" if overwrite else "x"
        with self.new_file(name, mode=filemode, encoding="utf-8") as f:
            json.dump(val, f, sort_keys=True)
        file_path = f.name
    # Note, we add the file from our temp directory.
    # It will be added again later on finalize, but succeed since
    # the checksum should match
    entry = self.add_file(file_path, name, is_tmp_name)
    # We store a reference to the obj so that its id doesn't get reused.
    self._added_objs[obj_id] = (obj, entry)
    if obj._artifact_target is None:
        obj._set_artifact_target(self, entry.path)
    # The temp JSON file is no longer needed once recorded in the manifest.
    if is_tmp_name:
        with contextlib.suppress(FileNotFoundError):
            os.remove(file_path)
    return entry
def _add_local_file(
    self,
    name: StrPath,
    path: StrPath,
    digest: B64MD5 | None = None,
    skip_cache: bool | None = False,
    policy: Literal["mutable", "immutable"] | None = "mutable",
    overwrite: bool = False,
) -> ArtifactManifestEntry:
    """Record a local file in the manifest, optionally staging a copy first.

    With the "mutable" policy the file is copied into a read-only staging
    location so later edits to the original cannot corrupt the upload.

    Raises:
        ValueError: If `policy` is neither "mutable" nor "immutable".
    """
    policy = policy or "mutable"
    if policy not in ("mutable", "immutable"):
        raise ValueError(
            f"Invalid policy {policy!r}. Policy may only be `mutable` or `immutable`."
        )
    upload_path = path
    if policy == "mutable":
        with tempfile.NamedTemporaryFile(dir=get_staging_dir(), delete=False) as tmp:
            staging_path = tmp.name
            shutil.copyfile(path, staging_path)
            # Read-only guards the staged copy against modification mid-upload.
            os.chmod(staging_path, stat.S_IRUSR)
        upload_path = staging_path
    new_entry = ArtifactManifestEntry(
        path=name,
        digest=digest or md5_file_b64(upload_path),
        size=os.path.getsize(upload_path),
        local_path=upload_path,
        skip_cache=skip_cache,
    )
    self.manifest.add_entry(new_entry, overwrite=overwrite)
    self._added_local_paths[os.fspath(path)] = new_entry
    return new_entry
@ensure_not_finalized
def remove(self, item: StrPath | ArtifactManifestEntry) -> None:
    """Remove an item from the artifact.

    Args:
        item: A specific manifest entry, or an artifact-relative path. A
            path matching a directory removes everything in that directory.

    Raises:
        ArtifactFinalizedError: The artifact version is finalized and can no
            longer be modified; log a new artifact version instead.
        FileNotFoundError: If the item isn't found in the artifact.
    """
    # A concrete entry can be removed directly.
    if isinstance(item, ArtifactManifestEntry):
        self.manifest.remove_entry(item)
        return
    path = str(PurePosixPath(item))
    # Prefer an exact file match before falling back to a directory match.
    if file_entry := self.manifest.get_entry_by_path(path):
        self.manifest.remove_entry(file_entry)
        return
    dir_entries = self.manifest.get_entries_in_directory(path)
    if not dir_entries:
        raise FileNotFoundError(f"No such file or directory: {path}")
    for dir_entry in dir_entries:
        self.manifest.remove_entry(dir_entry)
def get_path(self, name: StrPath) -> ArtifactManifestEntry:
    """Deprecated. Use `get_entry(name)`."""
    # Emit the deprecation warning, then forward to the supported accessor.
    warn_and_record_deprecation(
        feature=Deprecated(artifact__get_path=True),
        message="Artifact.get_path(name) is deprecated, use Artifact.get_entry(name) instead.",
    )
    return self.get_entry(name)
@ensure_logged
def get_entry(self, name: StrPath) -> ArtifactManifestEntry:
    """Get the manifest entry with the given name.

    Args:
        name: The artifact relative name to get

    Returns:
        A `W&B` object.

    Raises:
        ArtifactNotLoggedError: if the artifact isn't logged or the run is offline.
        KeyError: if the artifact doesn't contain an entry with the given name.
    """
    name = LogicalPath(name)
    # Try a direct manifest lookup first, then the type-suffixed object lookup.
    entry = self.manifest.entries.get(name)
    if not entry:
        entry = self._get_obj_entry(name)[0]
    if entry is None:
        raise KeyError(f"Path not contained in artifact: {name}")
    entry._parent_artifact = self
    return entry
@ensure_logged
def get(self, name: str) -> WBValue | None:
    """Get the WBValue object located at the artifact relative `name`.

    Args:
        name: The artifact relative name to retrieve.

    Returns:
        W&B object that can be logged with `run.log()` and
        visualized in the W&B UI, or `None` if no entry matches `name`.

    Raises:
        ArtifactNotLoggedError: if the artifact isn't logged or the
            run is offline.
    """
    entry, wb_class = self._get_obj_entry(name)
    if entry is None or wb_class is None:
        return None
    # If the entry is a reference from another artifact, then get it directly from
    # that artifact.
    if referenced_id := entry._referenced_artifact_id():
        assert self._client is not None
        artifact = self._from_id(referenced_id, client=self._client)
        assert artifact is not None
        return artifact.get(uri_from_path(entry.ref))
    # Special case for wandb.Table. This is intended to be a short term
    # optimization. Since tables are likely to download many other assets in
    # artifact(s), we eagerly download the artifact using the parallelized
    # `artifact.download`. In the future, we should refactor the deserialization
    # pattern such that this special case is not needed.
    if wb_class == wandb.Table:
        self.download()
    # Get the ArtifactManifestEntry
    item = self.get_entry(entry.path)
    item_path = item.download()
    # Load the object from the JSON blob
    with open(item_path) as file:
        json_obj = json.load(file)
    result = wb_class.from_json(json_obj, self)
    # Remember which artifact/name this object came from so it can later be
    # referenced instead of re-uploaded.
    result._set_artifact_source(self, name)
    return result
def get_added_local_path_name(self, local_path: str) -> str | None:
"""Get the artifact relative name of a file added by a local filesystem path.
Args:
local_path: The local path to resolve into an artifact relative name.
Returns:
The artifact relative name.
"""
if entry := self._added_local_paths.get(local_path):
return entry.path
return None
def _get_obj_entry(
    self, name: str
) -> tuple[ArtifactManifestEntry, Type[WBValue]] | tuple[None, None]:  # noqa: UP006 # `type` shadows `Artifact.type`
    """Return an object entry by name, handling any type suffixes.

    When objects are added with `.add(obj, name)`, the name is typically changed to
    include the suffix of the object type when serializing to JSON. So we need to be
    able to resolve a name, without tasking the user with appending .THING.json.
    This method returns an entry if it exists by a suffixed name.

    Args:
        name: name used when adding
    """
    entries = self.manifest.entries
    # Try the name suffixed for every registered WBValue media type; the
    # first suffixed name present in the manifest wins.
    for candidate_cls in WBValue.type_mapping().values():
        suffixed_name = candidate_cls.with_suffix(name)
        entry = entries.get(suffixed_name)
        if entry:
            return entry, candidate_cls
    return None, None
# Downloading.
@ensure_logged
def download(
    self,
    root: StrPath | None = None,
    allow_missing_references: bool = False,
    skip_cache: bool | None = None,
    path_prefix: StrPath | None = None,
    multipart: bool | None = None,
) -> FilePathStr:
    """Download the contents of the artifact to the specified root directory.

    Existing files located within `root` are not modified. Explicitly delete `root`
    before you call `download` if you want the contents of `root` to exactly match
    the artifact.

    Args:
        root: The directory W&B stores the artifact's files.
        allow_missing_references: If set to `True`, any invalid reference paths
            will be ignored while downloading referenced files.
        skip_cache: If set to `True`, the artifact cache will be skipped when
            downloading and W&B will download each file into the default root or
            specified download directory.
        path_prefix: If specified, only files with a path that starts with the given
            prefix will be downloaded. Uses unix format (forward slashes).
        multipart: If set to `None` (default), the artifact will be downloaded
            in parallel using multipart download if individual file size is greater
            than 2GB. If set to `True` or `False`, the artifact will be downloaded in
            parallel or serially regardless of the file size.

    Returns:
        The path to the downloaded contents.

    Raises:
        ArtifactNotLoggedError: If the artifact is not logged.
    """
    # Resolve (and register) the destination directory, then delegate.
    resolved_root = self._add_download_root(root)
    # TODO: route through wandb-core (`_download_using_core`) once artifact
    # downloads are implemented there.
    return self._download(
        root=resolved_root,
        allow_missing_references=allow_missing_references,
        skip_cache=skip_cache,
        path_prefix=path_prefix,
        multipart=multipart,
    )
def _download_using_core(
    self,
    root: str,
    allow_missing_references: bool = False,
    skip_cache: bool = False,
    path_prefix: StrPath | None = None,
) -> FilePathStr:
    """Download the artifact via the wandb-core backend service.

    Delegates the bulk of the download to the service process, while also
    running `_download` locally (reference entries are handled there).

    Args:
        root: Destination directory for the artifact's files.
        allow_missing_references: Ignore invalid reference paths when True.
        skip_cache: Bypass the artifact cache when True.
        path_prefix: Only download files whose path starts with this prefix.

    Returns:
        The path to the downloaded contents (`root`).

    Raises:
        ValueError: If the backend reports an error downloading the artifact.
    """
    import pathlib

    from wandb.sdk.backend.backend import Backend

    # TODO: Create a special stream instead of relying on an existing run.
    if wandb.run is None:
        # No active run: spin up a throwaway stream + backend so the core
        # service has a place to handle the download request.
        wl = wandb_setup.singleton()

        stream_id = generate_id()

        settings = wl.settings.to_proto()
        # TODO: remove this
        tmp_dir = pathlib.Path(tempfile.mkdtemp())

        # Point the throwaway stream's files at the temp directory.
        settings.sync_dir.value = str(tmp_dir)
        settings.sync_file.value = str(tmp_dir / f"{stream_id}.wandb")
        settings.files_dir.value = str(tmp_dir / "files")
        settings.run_id.value = stream_id

        service = wl.ensure_service()
        service.inform_init(settings=settings, run_id=stream_id)

        backend = Backend(settings=wl.settings, service=service)
        backend.ensure_launched()

        assert backend.interface
        backend.interface._stream_id = stream_id  # type: ignore
    else:
        # Reuse the active run's backend/interface.
        assert wandb.run._backend
        backend = wandb.run._backend

    assert backend.interface
    # Ask the service to perform the download asynchronously.
    handle = backend.interface.deliver_download_artifact(
        self.id,  # type: ignore
        root,
        allow_missing_references,
        skip_cache,
        path_prefix,  # type: ignore
    )
    # TODO: Start the download process in the user process too, to handle reference downloads
    self._download(
        root=root,
        allow_missing_references=allow_missing_references,
        skip_cache=skip_cache,
        path_prefix=path_prefix,
    )
    # Block until the service finishes, then surface any reported error.
    result = handle.wait_or(timeout=None)
    response = result.response.download_artifact_response
    if response.error_message:
        raise ValueError(f"Error downloading artifact: {response.error_message}")
    return FilePathStr(root)
def _download(
    self,
    root: str,
    allow_missing_references: bool = False,
    skip_cache: bool | None = None,
    path_prefix: StrPath | None = None,
    multipart: bool | None = None,
) -> FilePathStr:
    """Download all manifest entries into `root` using a shared thread pool.

    Signed file URLs are fetched from the backend in pages; each entry is
    downloaded on a worker thread, and large files may reuse the same pool
    for multipart chunk downloads.

    Args:
        root: Destination directory for the artifact's files.
        allow_missing_references: If True, warn instead of raising when a
            reference entry's target is missing.
        skip_cache: If True, bypass the artifact cache for each file.
        path_prefix: If given, only download entries whose path starts with it.
        multipart: Force multipart download on/off; None = auto by file size.

    Returns:
        The path to the downloaded contents (`root`).
    """
    nfiles = len(self.manifest.entries)
    size_mb = self.size / _MB
    # Only emit progress/timing log lines for "large" downloads.
    if log := (nfiles > 5000 or size_mb > 50):
        termlog(
            f"Downloading large artifact {self.name!r}, {size_mb:.2f}MB. {nfiles!r} files...",
        )
        start_time = time.monotonic()
    download_logger = ArtifactDownloadLogger(nfiles=nfiles)

    def _download_entry(entry: ArtifactManifestEntry, executor: Executor) -> None:
        """Download one manifest entry, skipping expected/ignorable failures."""
        # Reuse the shared executor for multipart chunks only when the entry
        # qualifies (by size, or because the caller forced multipart on).
        multipart_executor = (
            executor
            if should_multipart_download(entry.size, override=multipart)
            else None
        )
        try:
            entry.download(root, skip_cache=skip_cache, executor=multipart_executor)
        except FileNotFoundError as e:
            if allow_missing_references:
                wandb.termwarn(str(e))
                return
            raise
        except _GCSIsADirectoryError as e:
            # GCS "directory" placeholder objects are not real files; skip.
            logger.debug(str(e))
            return
        except IsADirectoryError:
            wandb.termwarn(
                f"Unable to download file {entry.path!r} as there is a directory with the same path, skipping."
            )
            return
        except NotADirectoryError:
            wandb.termwarn(
                f"Unable to download file {entry.path!r} as there is a file with the same path as a directory this file is expected to be in, skipping."
            )
            return
        download_logger.notify_downloaded()

    def _init_thread(
        api_key: str | None, cookies: dict | None, headers: dict | None
    ) -> None:
        """Initialize the thread-local API settings in the CURRENT thread."""
        _thread_local_api_settings.api_key = api_key
        _thread_local_api_settings.cookies = cookies
        _thread_local_api_settings.headers = headers

    # Each worker thread inherits the current thread's API credentials via
    # the initializer, since `_thread_local_api_settings` is thread-local.
    with ThreadPoolExecutor(
        max_workers=64,
        initializer=_init_thread,
        initargs=(
            _thread_local_api_settings.api_key,
            _thread_local_api_settings.cookies,
            _thread_local_api_settings.headers,
        ),
    ) as executor:
        batch_size = env.get_artifact_fetch_file_url_batch_size()
        active_futures = set()
        cursor, has_more = None, True
        while has_more:
            # Fetch the next page of signed download URLs.
            files_page = self._fetch_file_urls(cursor=cursor, per_page=batch_size)
            has_more = files_page.page_info.has_next_page
            cursor = files_page.page_info.end_cursor

            # `File` nodes are formally nullable, so filter them out just in case.
            file_nodes = (e.node for e in files_page.edges if e.node)
            for node in file_nodes:
                entry = self.get_entry(node.name)
                # TODO: uncomment once artifact downloads are supported in core
                # if require_core and entry.ref is None:
                #     # Handled by core
                #     continue
                entry._download_url = node.direct_url
                if (not path_prefix) or entry.path.startswith(str(path_prefix)):
                    active_futures.add(
                        executor.submit(_download_entry, entry, executor=executor)
                    )
            # Wait for download threads to catch up.
            #
            # Extra context and observations (tonyyli):
            # - Even though the ThreadPoolExecutor limits the number of
            #   concurrently-executed tasks, its internal task queue is unbounded.
            #   The code below seems intended to ensure that at most `batch_size`
            #   "backlogged" futures are held in memory at any given time. This seems
            #   like a reasonable safeguard against unbounded memory consumption.
            #
            # - We should probably use a builtin bounded Queue or Semaphore instead.
            #   Consider this for a future change, or (depending on appetite for risk)
            #   managing this logic via asyncio instead, if viable.
            if len(active_futures) > batch_size:
                for future in as_completed(active_futures):
                    future.result()  # check for errors
                    active_futures.remove(future)
                    if len(active_futures) <= batch_size:
                        break
        # Check for errors.
        for future in as_completed(active_futures):
            future.result()

    if log:
        # If you're wondering if we can display a `timedelta`, note that it
        # doesn't really support custom string format specifiers (compared to
        # e.g. `datetime` objs). To truncate the number of decimal places for
        # the seconds part, we manually convert/format each part below.
        dt_secs = abs(time.monotonic() - start_time)
        hrs, mins = divmod(dt_secs, 3600)
        mins, secs = divmod(mins, 60)
        termlog(
            f"Done. {int(hrs):02d}:{int(mins):02d}:{secs:04.1f} ({size_mb / dt_secs:.1f}MB/s)",
            prefix=False,
        )
    return FilePathStr(root)
def _build_fetch_file_urls_wrapper(self) -> Callable[..., Any]:
    """Build the retry-wrapped implementation behind `_fetch_file_urls`.

    Returns:
        A callable `(cursor, per_page) -> FileWithUrlConnection` that retries
        transient HTTP failures for up to 3 minutes.
    """
    import requests

    @retry.retriable(
        retry_timedelta=timedelta(minutes=3),
        # BUGFIX: must be a tuple. The previous `(requests.RequestException)`
        # had no trailing comma, so the parentheses were a no-op and the bare
        # class was passed where a tuple of exception types is intended.
        retryable_exceptions=(requests.RequestException,),
    )
    def _impl(cursor: str | None, per_page: int = 5000) -> FileWithUrlConnection:
        from ._generated import (
            ARTIFACT_COLLECTION_MEMBERSHIP_FILE_URLS_GQL,
            ARTIFACT_FILE_URLS_GQL,
            ArtifactCollectionMembershipFileUrls,
            ArtifactFileUrls,
        )
        from ._models.pagination import FileWithUrlConnection

        if self._client is None:
            raise RuntimeError("Client not initialized")

        if server_supports(self._client, pb.ARTIFACT_COLLECTION_MEMBERSHIP_FILES):
            # Preferred path: page through the collection membership's files.
            query = gql(ARTIFACT_COLLECTION_MEMBERSHIP_FILE_URLS_GQL)
            gql_vars = {
                "entity": self.entity,
                "project": self.project,
                "collection": self.name.split(":")[0],
                "alias": self.version,
                "cursor": cursor,
                "perPage": per_page,
            }
            data = self._client.execute(query, variable_values=gql_vars, timeout=60)
            result = ArtifactCollectionMembershipFileUrls.model_validate(data)
            if not (
                (project := result.project)
                and (collection := project.artifact_collection)
                and (membership := collection.artifact_membership)
                and (files := membership.files)
            ):
                raise ValueError(
                    f"Unable to fetch files for artifact: {self.name!r}"
                )
            return FileWithUrlConnection.model_validate(files)
        else:
            # Fallback for older servers: query files directly by artifact id.
            query = gql(ARTIFACT_FILE_URLS_GQL)
            gql_vars = {"id": self.id, "cursor": cursor, "perPage": per_page}
            data = self._client.execute(query, variable_values=gql_vars, timeout=60)
            result = ArtifactFileUrls.model_validate(data)
            if not ((artifact := result.artifact) and (files := artifact.files)):
                raise ValueError(
                    f"Unable to fetch files for artifact: {self.name!r}"
                )
            return FileWithUrlConnection.model_validate(files)

    return _impl
def _fetch_file_urls(
self, cursor: str | None, per_page: int = 5000
) -> FileWithUrlConnection:
if self._fetch_file_urls_decorated is None:
self._fetch_file_urls_decorated = self._build_fetch_file_urls_wrapper()
return self._fetch_file_urls_decorated(cursor, per_page)
@ensure_logged
def checkout(self, root: str | None = None) -> str:
    """Replace the specified root directory with the contents of the artifact.

    WARNING: This will delete all files in `root` that are not included in the
    artifact.

    Args:
        root: The directory to replace with this artifact's files.

    Returns:
        The path of the checked out contents.

    Raises:
        ArtifactNotLoggedError: If the artifact is not logged.
    """
    target = root or self._default_root(include_version=False)

    # Remove anything on disk that is not a member of this artifact, so the
    # directory ends up mirroring the artifact exactly.
    for dirpath, _, filenames in os.walk(target):
        for filename in filenames:
            absolute = os.path.join(dirpath, filename)
            relative = os.path.relpath(absolute, start=target)
            try:
                self.get_entry(relative)
            except KeyError:
                # File is not part of the artifact, remove it.
                os.remove(absolute)

    return self.download(root=target)
@ensure_logged
def verify(self, root: str | None = None) -> None:
    """Verify that the contents of an artifact match the manifest.

    All files in the directory are checksummed and the checksums are then
    cross-referenced against the artifact's manifest. References are not verified.

    Args:
        root: The directory to verify. If None artifact will be downloaded to
            './artifacts/self.name/'.

    Raises:
        ArtifactNotLoggedError: If the artifact is not logged.
        ValueError: If the verification fails.
    """
    target = root or self._default_root()

    # Pass 1: every file on disk must correspond to a manifest entry.
    for dirpath, _, filenames in os.walk(target):
        for filename in filenames:
            absolute = os.path.join(dirpath, filename)
            relative = os.path.relpath(absolute, start=target)
            try:
                self.get_entry(relative)
            except KeyError:
                raise ValueError(
                    f"Found file {absolute} which is not a member of artifact {self.name}"
                )

    # Pass 2: every non-reference manifest entry must match its digest.
    ref_count = 0
    for entry in self.manifest.entries.values():
        if entry.ref is not None:
            ref_count += 1
            continue
        if md5_file_b64(os.path.join(target, entry.path)) != entry.digest:
            raise ValueError(f"Digest mismatch for file: {entry.path}")
    if ref_count > 0:
        termwarn(f"skipped verification of {ref_count} refs")
@ensure_logged
def file(self, root: str | None = None) -> StrPath:
    """Download a single file artifact to the directory you specify with `root`.

    Args:
        root: The root directory to store the file. Defaults to
            `./artifacts/self.name/`.

    Returns:
        The full path of the downloaded file.

    Raises:
        ArtifactNotLoggedError: If the artifact is not logged.
        ValueError: If the artifact contains more than one file, or no files.
    """
    if root is None:
        root = os.path.join(".", "artifacts", self.name)

    entries = self.manifest.entries
    if len(entries) > 1:
        raise ValueError(
            "This artifact contains more than one file, call `.download()` to get "
            'all files or call .get_entry("filename").download()'
        )
    if not entries:
        # Previously an empty manifest crashed with an opaque IndexError on
        # `list(entries)[0]`; fail with a clear message instead.
        raise ValueError("This artifact contains no files.")

    # Exactly one entry: download it and return its local path.
    return self.get_entry(next(iter(entries))).download(root)
@ensure_logged
def files(
    self, names: list[str] | None = None, per_page: int = 50
) -> ArtifactFiles:
    """Iterate over all files stored in this artifact.

    Args:
        names: The filename paths relative to the root of the artifact you wish to
            list.
        per_page: The number of files to return per request.

    Returns:
        An iterator containing `File` objects.

    Raises:
        ArtifactNotLoggedError: If the artifact is not logged.
    """
    # Pagination and lazy fetching are handled inside the ArtifactFiles iterator.
    return ArtifactFiles(self._client, self, names, per_page)
def _default_root(self, include_version: bool = True) -> FilePathStr:
    """Return the default local directory for this artifact's files.

    Args:
        include_version: If False, strip the ':vN' suffix from the name.
    """
    if include_version:
        name = self.source_name
    else:
        name = self.source_name.split(":")[0]
    root = os.path.join(env.get_artifact_dir(), name)
    # In case we're on a system where the artifact dir has a name corresponding
    # to an unexpected filesystem, prefer an already-existing alternate root;
    # otherwise fall back to the system-preferred path.
    return FilePathStr(check_exists(root) or system_preferred_path(root))
def _add_download_root(self, dir_path: StrPath | None) -> FilePathStr:
    """Register `dir_path` (or the default root) as a download root and return it.

    Recording the absolute path in `_download_roots` lets `_local_path_to_name`
    later map downloaded files back to artifact-relative names.
    """
    root = str(dir_path or self._default_root())
    self._download_roots.add(os.path.abspath(root))
    # BUGFIX: wrap in FilePathStr so the returned value matches the annotated
    # return type (previously a plain `str` was returned).
    return FilePathStr(root)
def _local_path_to_name(self, file_path: str) -> str | None:
"""Convert a local file path to a path entry in the artifact."""
abs_file_path = os.path.abspath(file_path)
abs_file_parts = abs_file_path.split(os.sep)
for i in range(len(abs_file_parts) + 1):
if os.path.join(os.sep, *abs_file_parts[:i]) in self._download_roots:
return os.path.join(*abs_file_parts[i:])
return None
# Others.
@ensure_logged
def delete(self, delete_aliases: bool = False) -> None:
    """Delete an artifact and its files.

    If called on a linked artifact, only the link is deleted, and the
    source artifact is unaffected.

    Use `Artifact.unlink()` instead of `Artifact.delete()` to remove a
    link between a source artifact and a collection.

    Args:
        delete_aliases: If set to `True`, delete all aliases associated
            with the artifact. If `False`, raise an exception if
            the artifact has existing aliases. This parameter is ignored
            if the artifact is retrieved from a collection it is linked to.

    Raises:
        ArtifactNotLoggedError: If the artifact is not logged.
    """
    if not self.is_link:
        self._delete(delete_aliases)
        return
    # Linked artifact: deleting only removes the link, never the source data.
    wandb.termwarn(
        "Deleting a link artifact will only unlink the artifact from the source artifact and not delete the source artifact and the data of the source artifact."
    )
    self._unlink()
@normalize_exceptions
def _delete(self, delete_aliases: bool = False) -> None:
    """Issue the DeleteArtifact GraphQL mutation for this artifact."""
    from ._generated import DELETE_ARTIFACT_GQL, DeleteArtifactInput

    if self._client is None:
        raise RuntimeError("Client not initialized for artifact mutations")

    payload = DeleteArtifactInput(
        artifact_id=self.id,
        delete_aliases=delete_aliases,
    )
    self._client.execute(
        gql(DELETE_ARTIFACT_GQL),
        variable_values={"input": payload.model_dump()},
    )
@normalize_exceptions
def link(self, target_path: str, aliases: Iterable[str] | None = None) -> Artifact:
    """Link this artifact to a collection.

    Args:
        target_path: The path of the collection. Path consists of the prefix
            "wandb-registry-" along with the registry name and the
            collection name `wandb-registry-{REGISTRY_NAME}/{COLLECTION_NAME}`.
        aliases: Add one or more aliases to the linked artifact. The
            "latest" alias is automatically applied to the most recent artifact
            you link.

    Raises:
        ArtifactNotLoggedError: If the artifact is not logged.

    Returns:
        The linked artifact.
    """
    from wandb import Api
    from wandb.sdk.internal.internal_api import Api as InternalApi

    from ._generated import LINK_ARTIFACT_GQL, LinkArtifact, LinkArtifactInput
    from ._validators import ArtifactPath, FullArtifactPath, validate_aliases

    if self.is_link:
        wandb.termwarn(
            "Linking to a link artifact will result in directly linking to the source artifact of that link artifact."
        )

    # Save the artifact first if necessary
    if self.is_draft():
        if not self._is_draft_save_started():
            # Avoiding public `.source_project` property here,
            # as it requires the artifact is logged first.
            self.save(project=self._source_project)

    # Wait until the artifact is committed before trying to link it.
    self.wait()

    if (client := self._client) is None:
        raise RuntimeError("Client not initialized for artifact mutations")

    # FIXME: Find a way to avoid using InternalApi here, due to the perf overhead
    settings = InternalApi().settings()
    # Fill in a default project when the target path omitted one.
    target = ArtifactPath.from_str(target_path).with_defaults(
        project=settings.get("project") or "uncategorized",
    )

    # Parse the entity (first part of the path) appropriately,
    # depending on whether we're linking to a registry
    if target.is_registry_path():
        # In a Registry linking, the entity is used to fetch the organization of the
        # artifact, therefore the source artifact's entity is passed to the backend
        org = target.prefix or settings.get("organization") or None
        target.prefix = resolve_org_entity_name(client, self.source_entity, org)
    else:
        target = target.with_defaults(prefix=self.source_entity)

    # Explicitly convert to FullArtifactPath to ensure all fields are present
    target = FullArtifactPath(**asdict(target))

    # Prepare the validated GQL input, send it
    alias_inputs = [
        {"artifactCollectionName": target.name, "alias": a}
        for a in validate_aliases(aliases or [])
    ]
    gql_input = LinkArtifactInput(
        artifact_id=self.id,
        artifact_portfolio_name=target.name,
        entity_name=target.prefix,
        project_name=target.project,
        aliases=alias_inputs,
    )
    gql_vars = {"input": gql_input.model_dump()}

    # Newer server versions can return `artifactMembership` directly in the response,
    # avoiding the need to re-fetch the linked artifact at the end.  For older
    # servers, strip the unsupported variables/fields from the operation.
    omit_variables = omit_fields = None
    if not server_supports(
        client, pb.ARTIFACT_MEMBERSHIP_IN_LINK_ARTIFACT_RESPONSE
    ):
        omit_variables = {"includeAliases"}
        omit_fields = {"artifactMembership"}
    gql_op = gql_compat(
        LINK_ARTIFACT_GQL, omit_variables=omit_variables, omit_fields=omit_fields
    )
    data = client.execute(gql_op, variable_values=gql_vars)
    result = LinkArtifact.model_validate(data).result

    # Newer server versions can return artifactMembership directly in the response
    if result and (membership := result.artifact_membership):
        return self._from_membership(membership, target=target, client=client)

    # Old behavior, which requires re-fetching the linked artifact to return it
    if not (result and (version_idx := result.version_index) is not None):
        raise ValueError("Unable to parse linked artifact version from response")
    link_name = f"{target.to_str()}:v{version_idx}"
    return Api(overrides={"entity": self.source_entity})._artifact(link_name)
@ensure_logged
def unlink(self) -> None:
    """Unlink this artifact if it is a linked member of an artifact collection.

    Raises:
        ArtifactNotLoggedError: If the artifact is not logged.
        ValueError: If the artifact is not linked to any collection.
    """
    if self.is_link:
        self._unlink()
        return
    # Fail early if this isn't a linked artifact to begin with
    raise ValueError(
        f"Artifact {self.qualified_name!r} is not a linked artifact and cannot be unlinked. "
        f"To delete it, use {nameof(self.delete)!r} instead."
    )
@normalize_exceptions
def _unlink(self) -> None:
    """Issue the UnlinkArtifact mutation, translating permission errors."""
    from ._generated import UNLINK_ARTIFACT_GQL, UnlinkArtifactInput

    if self._client is None:
        raise RuntimeError("Client not initialized for artifact mutations")

    gql_input = UnlinkArtifactInput(
        artifact_id=self.id,
        artifact_portfolio_id=self.collection.id,
    )
    try:
        self._client.execute(
            gql(UNLINK_ARTIFACT_GQL),
            variable_values={"input": gql_input.model_dump()},
        )
    except CommError as e:
        # Surface a friendlier message while preserving the original cause.
        raise CommError(
            f"You do not have permission to unlink the artifact {self.qualified_name!r}"
        ) from e
@ensure_logged
def used_by(self) -> list[Run]:
    """Get a list of the runs that have used this artifact and its linked artifacts.

    Returns:
        A list of `Run` objects.

    Raises:
        ArtifactNotLoggedError: If the artifact is not logged.
    """
    from ._generated import ARTIFACT_USED_BY_GQL, ArtifactUsedBy

    client = self._client
    if client is None:
        raise RuntimeError("Client not initialized for artifact queries")

    data = client.execute(gql(ARTIFACT_USED_BY_GQL), variable_values={"id": self.id})
    parsed = ArtifactUsedBy.model_validate(data)

    artifact = parsed.artifact
    used_by = artifact.used_by if artifact else None
    edges = used_by.edges if used_by else None
    if not edges:
        return []

    runs: list[Run] = []
    for edge in edges:
        run = edge.node
        # Only runs with a resolvable project can be constructed.
        if proj := run.project:
            runs.append(Run(client, proj.entity.name, proj.name, run.name))
    return runs
@ensure_logged
def logged_by(self) -> Run | None:
    """Get the W&B run that originally logged the artifact.

    Returns:
        The name of the W&B run that originally logged the artifact, or None
        if it cannot be determined.

    Raises:
        ArtifactNotLoggedError: If the artifact is not logged.
    """
    from ._generated import ARTIFACT_CREATED_BY_GQL, ArtifactCreatedBy

    client = self._client
    if client is None:
        raise RuntimeError("Client not initialized for artifact queries")

    data = client.execute(gql(ARTIFACT_CREATED_BY_GQL), variable_values={"id": self.id})
    parsed = ArtifactCreatedBy.model_validate(data)

    artifact = parsed.artifact
    creator = artifact.created_by if artifact else None
    # A Run can only be built when the creator has both a name and a project.
    if creator and creator.name and creator.project:
        project = creator.project
        return Run(client, project.entity.name, project.name, creator.name)
    return None
@ensure_logged
def json_encode(self) -> dict[str, Any]:
    """Returns the artifact encoded to the JSON format.

    Returns:
        A `dict` with `string` keys representing attributes of the artifact.

    Raises:
        ArtifactNotLoggedError: If the artifact is not logged.
    """
    # Serialization itself is delegated to the shared helper.
    return artifact_to_json(self)
@staticmethod
def _expected_type(
    entity_name: str, project_name: str, name: str, client: RetryingClient
) -> str | None:
    """Returns the expected type for a given artifact name and project."""
    from ._generated import ARTIFACT_TYPE_GQL, ArtifactType

    # Default to the ":latest" alias when no version/alias was specified.
    qualified_name = name if ":" in name else f"{name}:latest"
    data = client.execute(
        gql(ARTIFACT_TYPE_GQL),
        variable_values={
            "entity": entity_name,
            "project": project_name,
            "name": qualified_name,
        },
    )
    parsed = ArtifactType.model_validate(data)
    project = parsed.project
    artifact = project.artifact if project else None
    if artifact:
        return artifact.artifact_type.name
    return None
def _ttl_duration_seconds_to_gql(self) -> int | None:
# Set the artifact TTL to `ttl_duration_seconds` if the user provided a value.
# Otherwise, use `ttl_status` to indicate backend values INHERIT (-1) or
# DISABLED (-2) when the TTL is None.
# When `ttl_change is None`, nothing changed and this is a no-op.
INHERIT = -1 # noqa: N806
DISABLED = -2 # noqa: N806
if not self._ttl_changed:
return None
if self._ttl_is_inherited:
return INHERIT
return self._ttl_duration_seconds or DISABLED
def _fetch_linked_artifacts(self) -> list[Artifact]:
    """Fetches all linked artifacts from the server."""
    from wandb._pydantic import gql_typename

    from ._generated import (
        FETCH_LINKED_ARTIFACTS_GQL,
        ArtifactPortfolioTypeFields,
        FetchLinkedArtifacts,
    )
    from ._validators import LinkArtifactFields

    if self.id is None:
        raise ValueError(
            "Unable to find any artifact memberships for artifact without an ID"
        )
    if (client := self._client) is None:
        raise ValueError("Client is not initialized")

    gql_op = gql_compat(FETCH_LINKED_ARTIFACTS_GQL)
    data = client.execute(gql_op, variable_values={"artifactID": self.id})
    result = FetchLinkedArtifacts.model_validate(data)

    if not (
        (artifact := result.artifact)
        and (memberships := artifact.artifact_memberships)
        and (membership_edges := memberships.edges)
    ):
        raise ValueError("Unable to find any artifact memberships for artifact")

    linked_artifacts: deque[Artifact] = deque()
    # Keep only memberships whose collection is a portfolio (i.e. a link
    # target), as opposed to the artifact's own source sequence.
    linked_nodes = (
        node
        for edge in membership_edges
        if (
            (node := edge.node)
            and (col := node.artifact_collection)
            and (col.typename__ == gql_typename(ArtifactPortfolioTypeFields))
        )
    )
    for node in linked_nodes:
        # Ensure the version alias (e.g. "v3") is always in the alias list.
        alias_names = unique_list(a.alias for a in node.aliases)
        version = f"v{node.version_index}"
        aliases = (
            [*alias_names, version]
            if version not in alias_names
            else [*alias_names]
        )
        # NOTE(review): `node` is already truthy here (filtered above); this
        # check re-binds `col` and validates the collection's project fields.
        if not (
            node
            and (col := node.artifact_collection)
            and (proj := col.project)
            and (proj.entity.name and proj.name)
        ):
            raise ValueError("Unable to fetch fields for linked artifact")

        link_fields = LinkArtifactFields(
            entity_name=proj.entity.name,
            project_name=proj.name,
            name=f"{col.name}:{version}",
            version=version,
            aliases=aliases,
        )
        # Materialize a lightweight Artifact object representing the link.
        link = self._create_linked_artifact_using_source_artifact(link_fields)
        linked_artifacts.append(link)
    return list(linked_artifacts)
def _create_linked_artifact_using_source_artifact(
self,
link_fields: LinkArtifactFields,
) -> Artifact:
"""Copies the source artifact to a linked artifact."""
linked_artifact = copy(self)
linked_artifact._version = link_fields.version
linked_artifact._aliases = link_fields.aliases
linked_artifact._saved_aliases = copy(link_fields.aliases)
linked_artifact._name = link_fields.name
linked_artifact._entity = link_fields.entity_name
linked_artifact._project = link_fields.project_name
linked_artifact._is_link = link_fields.is_link
linked_artifact._linked_artifacts = link_fields.linked_artifacts
return linked_artifact
| Artifact |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/utils/compare.py | {
"start": 2415,
"end": 2468
} | class ____(HashMixin, dict):
pass
| DictWithHashMixin |
python | django-import-export__django-import-export | tests/core/tests/test_resources/test_relationships.py | {
"start": 279,
"end": 1416
} | class ____(TestCase):
def setUp(self):
self.user = User.objects.create(username="foo")
self.role = Role.objects.create(user=self.user)
self.person = Person.objects.create(role=self.role)
def test_export(self):
class MyPersonResource(resources.ModelResource):
role = fields.Field(
column_name="role",
attribute="role",
widget=widgets.ForeignKeyWidget(Role, field="user__username"),
)
class Meta:
model = Person
fields = ["id", "role"]
resource = MyPersonResource()
dataset = resource.export(Person.objects.all())
self.assertEqual(len(dataset), 1)
self.assertEqual("1", dataset[0][0])
self.assertEqual("foo", dataset[0][1])
self.role.user = None
self.role.save()
resource = MyPersonResource()
dataset = resource.export(Person.objects.all())
self.assertEqual(len(dataset), 1)
self.assertEqual("1", dataset[0][0])
self.assertEqual(None, dataset[0][1])
| ForeignKeyWidgetFollowRelationship |
python | mlflow__mlflow | mlflow/projects/_project_spec.py | {
"start": 7386,
"end": 9661
} | class ____:
"""A project specification loaded from an MLproject file in the passed-in directory."""
def __init__(
self,
name,
env_type=None,
env_config_path=None,
entry_points=None,
docker_env=None,
databricks_spark_job_spec=None,
):
self.env_type = env_type
self.env_config_path = env_config_path
self._entry_points = entry_points
self.docker_env = docker_env
self.name = name
self.databricks_spark_job_spec = databricks_spark_job_spec
def get_entry_point(self, entry_point):
if self.databricks_spark_job_spec:
if self.databricks_spark_job_spec.python_file is not None:
# If Databricks Spark job is configured with python_file field,
# it does not need to configure entry_point section
# and the 'entry_point' param in 'mlflow run' command is ignored
return None
if self._entry_points is None or entry_point not in self._entry_points:
raise MlflowException(
f"The entry point '{entry_point}' is not defined in the Databricks spark job "
f"MLproject file."
)
if entry_point in self._entry_points:
return self._entry_points[entry_point]
_, file_extension = os.path.splitext(entry_point)
ext_to_cmd = {".py": "python", ".sh": os.environ.get("SHELL", "bash")}
if file_extension in ext_to_cmd:
command = f"{ext_to_cmd[file_extension]} {quote(entry_point)}"
if not is_string_type(command):
command = command.encode("utf-8")
return EntryPoint(name=entry_point, parameters={}, command=command)
elif file_extension == ".R":
command = f"Rscript -e \"mlflow::mlflow_source('{quote(entry_point)}')\" --args"
return EntryPoint(name=entry_point, parameters={}, command=command)
raise ExecutionException(
"Could not find {0} among entry points {1} or interpret {0} as a "
"runnable script. Supported script file extensions: "
"{2}".format(entry_point, list(self._entry_points.keys()), list(ext_to_cmd.keys()))
)
| Project |
python | getsentry__sentry | tests/sentry/backup/test_imports.py | {
"start": 33038,
"end": 39059
} | class ____(ImportTestCase):
"""
Ensures that only models with the allowed relocation scopes are actually imported.
"""
@staticmethod
def verify_model_inclusion(scope: ImportScope):
"""
Ensure all in-scope models are included, and that no out-of-scope models are included.
Additionally, we verify that each such model had an appropriate `*ImportChunk` written out
atomically alongside it.
"""
included_models = get_matching_exportable_models(
lambda mr: len(mr.get_possible_relocation_scopes() & scope.value) > 0
)
excluded_models = get_matching_exportable_models(
lambda mr: mr.get_possible_relocation_scopes() != {RelocationScope.Excluded}
and not (mr.get_possible_relocation_scopes() & scope.value)
)
for model in included_models:
model_name_str = str(get_model_name(model))
if is_control_model(model):
replica = ControlImportChunkReplica.objects.filter(model=model_name_str).first()
assert replica is not None
with assume_test_silo_mode(SiloMode.CONTROL):
assert model.objects.count() > 0
control = ControlImportChunk.objects.filter(model=model_name_str).first()
assert control is not None
# Ensure that the region-silo replica and the control-silo original are
# identical.
common_fields = {f.name for f in ControlImportChunk._meta.get_fields()} - {
"id",
"date_added",
"date_updated",
}
for field in common_fields:
assert getattr(replica, field, None) == getattr(control, field, None)
else:
assert model.objects.count() > 0
assert RegionImportChunk.objects.filter(model=model_name_str).count() == 1
for model in excluded_models:
if is_control_model(model):
with assume_test_silo_mode(SiloMode.CONTROL):
assert model.objects.count() == 0
else:
assert model.objects.count() == 0
def test_user_import_scoping(self) -> None:
self.create_exhaustive_instance(is_superadmin=True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir)
with open(tmp_path, "rb") as tmp_file:
import_in_user_scope(tmp_file, printer=NOOP_PRINTER)
self.verify_model_inclusion(ImportScope.User)
# Test that the import UUID is auto-assigned properly.
with assume_test_silo_mode(SiloMode.CONTROL):
assert ControlImportChunk.objects.values("import_uuid").distinct().count() == 1
assert ControlImportChunkReplica.objects.values("import_uuid").distinct().count() == 1
def test_organization_import_scoping(self) -> None:
self.create_exhaustive_instance(is_superadmin=True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir)
with open(tmp_path, "rb") as tmp_file:
import_in_organization_scope(tmp_file, printer=NOOP_PRINTER)
self.verify_model_inclusion(ImportScope.Organization)
# Test that the import UUID is auto-assigned properly.
with assume_test_silo_mode(SiloMode.CONTROL):
assert ControlImportChunk.objects.values("import_uuid").distinct().count() == 1
assert ControlImportChunkReplica.objects.values("import_uuid").distinct().count() == 1
assert RegionImportChunk.objects.values("import_uuid").distinct().count() == 1
assert (
ControlImportChunkReplica.objects.values("import_uuid").first()
== RegionImportChunk.objects.values("import_uuid").first()
)
def test_config_import_scoping(self) -> None:
self.create_exhaustive_instance(is_superadmin=True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir)
with open(tmp_path, "rb") as tmp_file:
import_in_config_scope(tmp_file, printer=NOOP_PRINTER)
self.verify_model_inclusion(ImportScope.Config)
# Test that the import UUID is auto-assigned properly.
with assume_test_silo_mode(SiloMode.CONTROL):
assert ControlImportChunk.objects.values("import_uuid").distinct().count() == 1
assert ControlImportChunkReplica.objects.values("import_uuid").distinct().count() == 1
assert RegionImportChunk.objects.values("import_uuid").distinct().count() == 1
assert (
ControlImportChunkReplica.objects.values("import_uuid").first()
== RegionImportChunk.objects.values("import_uuid").first()
)
def test_global_import_scoping(self) -> None:
self.create_exhaustive_instance(is_superadmin=True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = self.export_to_tmp_file_and_clear_database(tmp_dir)
with open(tmp_path, "rb") as tmp_file:
import_in_global_scope(tmp_file, printer=NOOP_PRINTER)
self.verify_model_inclusion(ImportScope.Global)
# Test that the import UUID is auto-assigned properly.
with assume_test_silo_mode(SiloMode.CONTROL):
assert ControlImportChunk.objects.values("import_uuid").distinct().count() == 1
assert ControlImportChunkReplica.objects.values("import_uuid").distinct().count() == 1
assert RegionImportChunk.objects.values("import_uuid").distinct().count() == 1
assert (
ControlImportChunkReplica.objects.values("import_uuid").first()
== RegionImportChunk.objects.values("import_uuid").first()
)
| ScopingTests |
python | walkccc__LeetCode | solutions/2653. Sliding Subarray Beauty/2653.py | {
"start": 0,
"end": 607
} | class ____:
def getSubarrayBeauty(self, nums: list[int], k: int, x: int) -> list[int]:
ans = []
count = [0] * 50 # count[i] := the frequency of (i + 50)
for i, num in enumerate(nums):
if num < 0:
count[num + 50] += 1
if i - k >= 0 and nums[i - k] < 0:
count[nums[i - k] + 50] -= 1
if i + 1 >= k:
ans.append(self._getXthSmallestNum(count, x))
return ans
def _getXthSmallestNum(self, count: list[int], x: int) -> int:
prefix = 0
for i in range(50):
prefix += count[i]
if prefix >= x:
return i - 50
return 0
| Solution |
python | doocs__leetcode | lcof/面试题44. 数字序列中某一位的数字/Solution2.py | {
"start": 0,
"end": 289
} | class ____:
def findNthDigit(self, n: int) -> int:
if n < 10:
return n
n -= 10
k, p = 2, 10
while n >= 9 * k * p:
n -= 9 * k * p
k += 1
p *= 10
x = p + n // k
return int(str(x)[n % k])
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/pickleable.py | {
"start": 614,
"end": 648
} | class ____(User):
pass
| EmailUser |
python | pytorch__pytorch | torch/_dynamo/variables/higher_order_ops.py | {
"start": 106671,
"end": 109265
} | class ____(TorchHigherOrderOperatorVariable):
def _call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
from .builder import wrap_fx_proxy
# This is operator for delegation within Executorch which calls a
# specific function in the given lowered module with the given
# operators. The actual operator is defined in the Executorch codebase.
# This is a bad hierarchical violation since
# executorch_call_delegate sits at a higher level than dynamo, but
# there's no real solution to this issue yet.
if len(kwargs) > 0:
unimplemented(
gb_type="executorch_call_delegate: kwargs not supported",
context=f"args: {args}, kwargs: {kwargs}",
explanation=f"executorch_call_delegate expects no keyword arguments (got {len(kwargs)})",
hints=[],
)
if isinstance(args[0], variables.NNModuleVariable):
lowered_module = tx.output.get_submodule(args[0].module_key)
lowered_node = make_attr(tx, args[0].module_key)
elif isinstance(args[0], variables.UnspecializedNNModuleVariable):
# This nn module is special sa delegated by executorch. Just
# install it as a attr in the graph.
lowered_module = args[0].value
lowered_node = tx.output.register_static_attr_and_return_proxy(
"delegate", lowered_module
)
p_args = tuple(arg.as_proxy() for arg in args[1:])
real_sub_args = pytree.tree_map_only(
torch.fx.Proxy, lambda a: get_fake_value(a.node, tx), p_args
)
with tx.fake_mode:
example_value = lowered_module.original_module.module()(*real_sub_args)
# NOTE [Guaranteeing the 1-1 correspondence of FakeTensors and real tensors]:
# executorch modules promise not to alias inputs and outputs.
# Thus, output FakeTensors will correctly not alias input FakeTensors.
_assert_tensors_nonaliasing(real_sub_args, example_value)
p_args = (lowered_node,) + p_args
# Store the invocation as a call
return wrap_fx_proxy(
tx=tx,
proxy=tx.output.create_proxy(
"call_function",
self.value,
args=tuple(p_args),
kwargs={},
),
example_value=example_value,
)
| ExecutorchCallDelegateHigherOrderVariable |
python | huggingface__transformers | src/transformers/models/unispeech/modular_unispeech.py | {
"start": 5439,
"end": 8968
} | class ____(PreTrainedModel):
config: UniSpeechConfig
base_model_prefix = "unispeech"
main_input_name = "input_values"
input_modalities = "audio"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
# gumbel softmax requires special init
if isinstance(module, UniSpeechGumbelVectorQuantizer):
init.normal_(module.weight_proj.weight, mean=0.0, std=1)
init.zeros_(module.weight_proj.bias)
init.uniform_(module.codevectors)
elif isinstance(module, UniSpeechPositionalConvEmbedding):
init.normal_(
module.conv.weight,
mean=0,
std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
)
init.constant_(module.conv.bias, 0)
elif isinstance(module, UniSpeechFeatureProjection):
k = math.sqrt(1 / module.projection.in_features)
init.uniform_(module.projection.weight, a=-k, b=k)
init.uniform_(module.projection.bias, a=-k, b=k)
elif isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, nn.Conv1d):
init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
init.uniform_(module.bias, a=-k, b=k)
def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
# Effectively attention_mask.sum(-1), but not inplace to be able to run
# on inference mode.
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros(
(batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
# these two operations makes sure that all values before the output lengths idxs are attended to
attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
return attention_mask
UniSpeechBaseModelOutput = Wav2Vec2BaseModelOutput
| UniSpeechPreTrainedModel |
python | pandas-dev__pandas | pandas/tests/indexing/test_chaining_and_caching.py | {
"start": 2280,
"end": 12646
} | class ____:
def test_setitem_chained_setfault(self):
# GH6026
data = ["right", "left", "left", "left", "right", "left", "timeout"]
df = DataFrame({"response": np.array(data)})
mask = df.response == "timeout"
with tm.raises_chained_assignment_error():
df.response[mask] = "none"
tm.assert_frame_equal(df, DataFrame({"response": data}))
recarray = np.rec.fromarrays([data], names=["response"])
df = DataFrame(recarray)
mask = df.response == "timeout"
with tm.raises_chained_assignment_error():
df.response[mask] = "none"
tm.assert_frame_equal(df, DataFrame({"response": data}))
df = DataFrame({"response": data, "response1": data})
df_original = df.copy()
mask = df.response == "timeout"
with tm.raises_chained_assignment_error():
df.response[mask] = "none"
tm.assert_frame_equal(df, df_original)
# GH 6056
expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]})
df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
with tm.raises_chained_assignment_error():
df["A"].iloc[0] = np.nan
expected = DataFrame({"A": ["foo", "bar", "bah", "foo", "bar"]})
result = df.head()
tm.assert_frame_equal(result, expected)
df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
with tm.raises_chained_assignment_error():
df.A.iloc[0] = np.nan
result = df.head()
tm.assert_frame_equal(result, expected)
@pytest.mark.arm_slow
def test_detect_chained_assignment(self):
with option_context("chained_assignment", "raise"):
# work with the chain
df = DataFrame(
np.arange(4).reshape(2, 2), columns=list("AB"), dtype="int64"
)
df_original = df.copy()
with tm.raises_chained_assignment_error():
df["A"][0] = -5
with tm.raises_chained_assignment_error():
df["A"][1] = -6
tm.assert_frame_equal(df, df_original)
@pytest.mark.arm_slow
def test_detect_chained_assignment_raises(self):
# test with the chaining
df = DataFrame(
{
"A": Series(range(2), dtype="int64"),
"B": np.array(np.arange(2, 4), dtype=np.float64),
}
)
df_original = df.copy()
with tm.raises_chained_assignment_error():
df["A"][0] = -5
with tm.raises_chained_assignment_error():
df["A"][1] = -6
tm.assert_frame_equal(df, df_original)
@pytest.mark.arm_slow
def test_detect_chained_assignment_fails(self):
# Using a copy (the chain), fails
df = DataFrame(
{
"A": Series(range(2), dtype="int64"),
"B": np.array(np.arange(2, 4), dtype=np.float64),
}
)
with tm.raises_chained_assignment_error():
df.loc[0]["A"] = -5
@pytest.mark.arm_slow
def test_detect_chained_assignment_doc_example(self):
# Doc example
df = DataFrame(
{
"a": ["one", "one", "two", "three", "two", "one", "six"],
"c": Series(range(7), dtype="int64"),
}
)
indexer = df.a.str.startswith("o")
with tm.raises_chained_assignment_error():
df[indexer]["c"] = 42
@pytest.mark.arm_slow
def test_detect_chained_assignment_object_dtype(self):
df = DataFrame(
{"A": Series(["aaa", "bbb", "ccc"], dtype=object), "B": [1, 2, 3]}
)
df_original = df.copy()
with tm.raises_chained_assignment_error():
df["A"][0] = 111
tm.assert_frame_equal(df, df_original)
@pytest.mark.arm_slow
def test_detect_chained_assignment_is_copy_pickle(self, temp_file):
# gh-5475: Make sure that is_copy is picked up reconstruction
df = DataFrame({"A": [1, 2]})
path = str(temp_file)
df.to_pickle(path)
df2 = pd.read_pickle(path)
df2["B"] = df2["A"]
df2["B"] = df2["A"]
@pytest.mark.arm_slow
def test_detect_chained_assignment_str(self):
idxs = np.random.default_rng(2).integers(len(ascii_letters), size=(100, 2))
idxs.sort(axis=1)
strings = [ascii_letters[x[0] : x[1]] for x in idxs]
df = DataFrame(strings, columns=["letters"])
indexer = df.letters.apply(lambda x: len(x) > 10)
df.loc[indexer, "letters"] = df.loc[indexer, "letters"].apply(str.lower)
@pytest.mark.arm_slow
def test_detect_chained_assignment_sorting(self):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
ser = df.iloc[:, 0].sort_values()
tm.assert_series_equal(ser, df.iloc[:, 0].sort_values())
tm.assert_series_equal(ser, df[0].sort_values())
@pytest.mark.arm_slow
def test_detect_chained_assignment_false_positives(self):
# see gh-6025: false positives
df = DataFrame({"column1": ["a", "a", "a"], "column2": [4, 8, 9]})
str(df)
df["column1"] = df["column1"] + "b"
str(df)
df = df[df["column2"] != 8]
str(df)
df["column1"] = df["column1"] + "c"
str(df)
@pytest.mark.arm_slow
def test_detect_chained_assignment_undefined_column(self):
# from SO:
# https://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc
df = DataFrame(np.arange(0, 9), columns=["count"])
df["group"] = "b"
df_original = df.copy()
with tm.raises_chained_assignment_error():
df.iloc[0:5]["group"] = "a"
tm.assert_frame_equal(df, df_original)
@pytest.mark.arm_slow
def test_detect_chained_assignment_changing_dtype(self):
# Mixed type setting but same dtype & changing dtype
df = DataFrame(
{
"A": date_range("20130101", periods=5),
"B": np.random.default_rng(2).standard_normal(5),
"C": np.arange(5, dtype="int64"),
"D": ["a", "b", "c", "d", "e"],
}
)
df_original = df.copy()
with tm.raises_chained_assignment_error():
df.loc[2]["D"] = "foo"
with tm.raises_chained_assignment_error():
df.loc[2]["C"] = "foo"
tm.assert_frame_equal(df, df_original)
# TODO: Use tm.raises_chained_assignment_error() when PDEP-6 is enforced
with pytest.raises(TypeError, match="Invalid value"):
with tm.raises_chained_assignment_error():
df["C"][2] = "foo"
def test_setting_with_copy_bug(self):
# operating on a copy
df = DataFrame(
{"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]}
)
df_original = df.copy()
mask = pd.isna(df.c)
with tm.raises_chained_assignment_error():
df[["c"]][mask] = df[["b"]][mask]
tm.assert_frame_equal(df, df_original)
def test_setting_with_copy_bug_no_warning(self):
# invalid warning as we are returning a new object
# GH 8730
df1 = DataFrame({"x": Series(["a", "b", "c"]), "y": Series(["d", "e", "f"])})
df2 = df1[["x"]]
# this should not raise
df2["y"] = ["g", "h", "i"]
def test_detect_chained_assignment_warnings_errors(self):
df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]})
with tm.raises_chained_assignment_error():
df.loc[0]["A"] = 111
@pytest.mark.parametrize("rhs", [3, DataFrame({0: [1, 2, 3, 4]})])
def test_detect_chained_assignment_warning_stacklevel(self, rhs):
# GH#42570
df = DataFrame(np.arange(25).reshape(5, 5))
df_original = df.copy()
chained = df.loc[:3]
chained[2] = rhs
tm.assert_frame_equal(df, df_original)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
df = DataFrame({"A": 5 * [np.zeros(3)], "B": 5 * [np.ones(3)]})
expected = df["A"].iloc[2]
result = df.loc[2, "A"]
tm.assert_numpy_array_equal(result, expected)
result2 = df.iloc[2]["A"]
tm.assert_numpy_array_equal(result2, expected)
result3 = df["A"].loc[2]
tm.assert_numpy_array_equal(result3, expected)
result4 = df["A"].iloc[2]
tm.assert_numpy_array_equal(result4, expected)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
df = DataFrame(
np.zeros((10, 4)),
columns=Index(list("ABCD"), dtype=object),
)
df["A"] # cache series
df.loc["Hello Friend"] = df.iloc[0]
assert "Hello Friend" in df["A"].index
assert "Hello Friend" in df["B"].index
def test_cache_updating2(self):
# 10264
df = DataFrame(
np.zeros((5, 5), dtype="int64"),
columns=["a", "b", "c", "d", "e"],
index=range(5),
)
df["f"] = 0
df_orig = df.copy()
with pytest.raises(ValueError, match="read-only"):
df.f.values[3] = 1
tm.assert_frame_equal(df, df_orig)
def test_iloc_setitem_chained_assignment(self):
# GH#3970
with option_context("chained_assignment", None):
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
with tm.raises_chained_assignment_error():
df["bb"].iloc[0] = 0.13
# GH#3970 this lookup used to break the chained setting to 0.15
df.iloc[ck]
with tm.raises_chained_assignment_error():
df["bb"].iloc[0] = 0.15
assert df["bb"].iloc[0] == 2.2
def test_getitem_loc_assignment_slice_state(self):
# GH 13569
df = DataFrame({"a": [10, 20, 30]})
with tm.raises_chained_assignment_error():
df["a"].loc[4] = 40
tm.assert_frame_equal(df, DataFrame({"a": [10, 20, 30]}))
tm.assert_series_equal(df["a"], Series([10, 20, 30], name="a"))
| TestChaining |
python | pandas-dev__pandas | pandas/tests/indexes/period/test_indexing.py | {
"start": 7452,
"end": 11785
} | class ____:
def test_get_loc_msg(self):
idx = period_range("2000-1-1", freq="Y", periods=10)
bad_period = Period("2012", "Y")
with pytest.raises(KeyError, match=r"^Period\('2012', 'Y-DEC'\)$"):
idx.get_loc(bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"])
pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M")
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float("nan")) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = Period("2017-09-01")
p1 = Period("2017-09-02")
p2 = Period("2017-09-03")
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
idx0 = PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
assert idx0.get_loc(p1) == expected_idx1_p1
assert idx0.get_loc(str(p1)) == expected_idx1_p1
assert idx0.get_loc(p2) == expected_idx1_p2
assert idx0.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx0.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx0.get_loc(1.1)
with pytest.raises(InvalidIndexError, match=re.escape(str(idx0))):
idx0.get_loc(idx0)
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
idx1 = PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
assert idx1.get_loc(p1) == expected_idx1_p1
assert idx1.get_loc(str(p1)) == expected_idx1_p1
assert idx1.get_loc(p2) == expected_idx1_p2
assert idx1.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx1.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx1.get_loc(1.1)
with pytest.raises(InvalidIndexError, match=re.escape(str(idx1))):
idx1.get_loc(idx1)
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
idx2 = PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
assert idx2.get_loc(p1) == expected_idx2_p1
assert idx2.get_loc(str(p1)) == expected_idx2_p1
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_get_loc_integer(self):
dti = date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
with pytest.raises(KeyError, match="16801"):
pi.get_loc(16801)
pi2 = dti.to_period("Y") # duplicates, ordinals are all 46
with pytest.raises(KeyError, match="46"):
pi2.get_loc(46)
def test_get_loc_invalid_string_raises_keyerror(self):
# GH#34240
pi = period_range("2000", periods=3, name="A")
with pytest.raises(KeyError, match="A"):
pi.get_loc("A")
ser = Series([1, 2, 3], index=pi)
with pytest.raises(KeyError, match="A"):
ser.loc["A"]
with pytest.raises(KeyError, match="A"):
ser["A"]
assert "A" not in ser
assert "A" not in pi
def test_get_loc_mismatched_freq(self):
# see also test_get_indexer_mismatched_dtype testing we get analogous
# behavior for get_loc
dti = date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
pi2 = dti.to_period("W")
pi3 = pi.view(pi2.dtype) # i.e. matching i8 representations
with pytest.raises(KeyError, match="W-SUN"):
pi.get_loc(pi2[0])
with pytest.raises(KeyError, match="W-SUN"):
# even though we have matching i8 values
pi.get_loc(pi3[0])
| TestGetLoc |
python | numpy__numpy | numpy/_core/tests/test_numeric.py | {
"start": 142747,
"end": 145402
} | class ____:
# expected shape indexed by (axis, start) for array of
# shape (1, 2, 3, 4)
tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
(0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
(0, 4): (2, 3, 4, 1),
(1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
(1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
(1, 4): (1, 3, 4, 2),
(2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
(2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
(2, 4): (1, 2, 4, 3),
(3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
(3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
(3, 4): (1, 2, 3, 4)}
def test_exceptions(self):
a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4)
assert_raises(AxisError, np.rollaxis, a, -5, 0)
assert_raises(AxisError, np.rollaxis, a, 0, -5)
assert_raises(AxisError, np.rollaxis, a, 4, 0)
assert_raises(AxisError, np.rollaxis, a, 0, 5)
def test_results(self):
a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy()
aind = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
for (i, j) in self.tgtshape:
# positive axis, positive start
res = np.rollaxis(a, axis=i, start=j)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(i, j)], str((i, j)))
assert_(not res.flags['OWNDATA'])
# negative axis, positive start
ip = i + 1
res = np.rollaxis(a, axis=-ip, start=j)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(4 - ip, j)])
assert_(not res.flags['OWNDATA'])
# positive axis, negative start
jp = j + 1 if j < 4 else j
res = np.rollaxis(a, axis=i, start=-jp)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(i, 4 - jp)])
assert_(not res.flags['OWNDATA'])
# negative axis, negative start
ip = i + 1
jp = j + 1 if j < 4 else j
res = np.rollaxis(a, axis=-ip, start=-jp)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
assert_(not res.flags['OWNDATA'])
| TestRollaxis |
python | scikit-learn__scikit-learn | sklearn/tree/_export.py | {
"start": 6214,
"end": 13999
} | class ____:
def __init__(
self,
max_depth=None,
feature_names=None,
class_names=None,
label="all",
filled=False,
impurity=True,
node_ids=False,
proportion=False,
rounded=False,
precision=3,
fontsize=None,
):
self.max_depth = max_depth
self.feature_names = feature_names
self.class_names = class_names
self.label = label
self.filled = filled
self.impurity = impurity
self.node_ids = node_ids
self.proportion = proportion
self.rounded = rounded
self.precision = precision
self.fontsize = fontsize
def get_color(self, value):
# Find the appropriate color & intensity for a node
if self.colors["bounds"] is None:
# Classification tree
color = list(self.colors["rgb"][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0.0
else:
alpha = (sorted_values[0] - sorted_values[1]) / (1 - sorted_values[1])
else:
# Regression tree or multi-output
color = list(self.colors["rgb"][0])
alpha = (value - self.colors["bounds"][0]) / (
self.colors["bounds"][1] - self.colors["bounds"][0]
)
# compute the color as alpha against white
color = [int(round(alpha * c + (1 - alpha) * 255, 0)) for c in color]
# Return html color code in #RRGGBB format
return "#%2x%2x%2x" % tuple(color)
def get_fill_color(self, tree, node_id):
# Fetch appropriate color for node
if "rgb" not in self.colors:
# Initialize colors and bounds if required
self.colors["rgb"] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
# The next line uses -max(impurity) instead of min(-impurity)
# and -min(impurity) instead of max(-impurity) on purpose, in
# order to avoid what looks like an issue with SIMD on non
# memory aligned arrays on 32bit OS. For more details see
# https://github.com/scikit-learn/scikit-learn/issues/27506.
self.colors["bounds"] = (-np.max(tree.impurity), -np.min(tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
# Find max and min values in leaf nodes for regression
self.colors["bounds"] = (np.min(tree.value), np.max(tree.value))
if tree.n_outputs == 1:
node_val = tree.value[node_id][0, :]
if (
tree.n_classes[0] == 1
and isinstance(node_val, Iterable)
and self.colors["bounds"] is not None
):
# Unpack the float only for the regression tree case.
# Classification tree requires an Iterable in `get_color`.
node_val = node_val.item()
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
return self.get_color(node_val)
def node_to_str(self, tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (self.label == "root" and node_id == 0) or self.label == "all"
characters = self.characters
node_string = characters[-1]
# Write node ID
if self.node_ids:
if labels:
node_string += "node "
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if self.feature_names is not None:
feature = self.feature_names[tree.feature[node_id]]
feature = self.str_escape(feature)
else:
feature = "x%s%s%s" % (
characters[1],
tree.feature[node_id],
characters[2],
)
node_string += "%s %s %s%s" % (
feature,
characters[3],
round(tree.threshold[node_id], self.precision),
characters[4],
)
# Write impurity
if self.impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif isinstance(criterion, _criterion.MSE) or criterion == "squared_error":
criterion = "squared_error"
elif not isinstance(criterion, str):
criterion = "impurity"
if labels:
node_string += "%s = " % criterion
node_string += (
str(round(tree.impurity[node_id], self.precision)) + characters[4]
)
# Write node sample count
if labels:
node_string += "samples = "
if self.proportion:
percent = (
100.0 * tree.n_node_samples[node_id] / float(tree.n_node_samples[0])
)
node_string += str(round(percent, 1)) + "%" + characters[4]
else:
node_string += str(tree.n_node_samples[node_id]) + characters[4]
# Write node class distribution / regression value
if not self.proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value * tree.weighted_n_node_samples[node_id]
if labels:
node_string += "value = "
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, self.precision)
elif self.proportion:
# Classification
value_text = np.around(value, self.precision)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, self.precision)
# Strip whitespace
value_text = str(value_text.astype("S32")).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (
self.class_names is not None
and tree.n_classes[0] != 1
and tree.n_outputs == 1
):
# Only done for single-output classification trees
if labels:
node_string += "class = "
if self.class_names is not True:
class_name = self.class_names[np.argmax(value)]
class_name = self.str_escape(class_name)
else:
class_name = "y%s%s%s" % (
characters[1],
np.argmax(value),
characters[2],
)
node_string += class_name
# Clean up any trailing newlines
if node_string.endswith(characters[4]):
node_string = node_string[: -len(characters[4])]
return node_string + characters[5]
def str_escape(self, string):
return string
| _BaseTreeExporter |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 197293,
"end": 201061
} | class ____(Response):
"""
Response of tasks.dequeue_many endpoint.
:param succeeded:
:type succeeded: Sequence[dict]
:param failed:
:type failed: Sequence[dict]
"""
_service = "tasks"
_action = "dequeue_many"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"failed": {
"items": {
"properties": {
"error": {
"description": "Error info",
"properties": {
"codes": {
"items": {"type": "integer"},
"type": "array",
},
"data": {
"additionalProperties": True,
"type": "object",
},
"msg": {"type": "string"},
},
"type": "object",
},
"id": {
"description": "ID of the failed entity",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
"succeeded": {
"items": {
"properties": {
"dequeued": {
"description": "Indicates whether the task was dequeued",
"type": "boolean",
},
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": "object",
},
"id": {
"description": "ID of the succeeded entity",
"type": "string",
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": "integer",
},
},
"type": "object",
},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self, succeeded: Optional[List[dict]] = None, failed: Optional[List[dict]] = None, **kwargs: Any
) -> None:
super(DequeueManyResponse, self).__init__(**kwargs)
self.succeeded = succeeded
self.failed = failed
@schema_property("succeeded")
def succeeded(self) -> Optional[List[dict]]:
return self._property_succeeded
@succeeded.setter
def succeeded(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_succeeded = None
return
self.assert_isinstance(value, "succeeded", (list, tuple))
self.assert_isinstance(value, "succeeded", (dict,), is_array=True)
self._property_succeeded = value
@schema_property("failed")
def failed(self) -> Optional[List[dict]]:
return self._property_failed
@failed.setter
def failed(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_failed = None
return
self.assert_isinstance(value, "failed", (list, tuple))
self.assert_isinstance(value, "failed", (dict,), is_array=True)
self._property_failed = value
| DequeueManyResponse |
python | huggingface__transformers | src/transformers/models/evolla/modeling_evolla.py | {
"start": 21046,
"end": 21612
} | class ____(PreTrainedModel):
config: SaProtConfig
_no_split_modules = ["EvollaSaProtLayer"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": EvollaSaProtLayer,
"attentions": [OutputRecorder(EvollaSaProtSelfAttention, index=1, layer_name="attention")],
"cross_attentions": [
OutputRecorder(EvollaSaProtSelfAttention, index=1, layer_name="crossattention"),
],
}
| EvollaSaProtPreTrainedModel |
python | pytorch__pytorch | torch/jit/_trace.py | {
"start": 49932,
"end": 53296
} | class ____(ScriptModule):
_disable_script_meta = True
def __init__(self, orig, id_set=None, _compilation_unit=None):
# XXX: orig can be a nn.Module or a function!
super().__init__()
assert isinstance(orig, torch.nn.Module)
# Copy a subset of `orig` to a temporary nn.Module.
# This is a way to customize what will actually get compiled by create_script_module
id_set = set()
# This allows us to preserve the original module's qualified name by defining a new
# type with the attribute _jit_override_qualname. In torch._jit_internal._qualified_name
# we have a special case that will look up this attribute to override whatever qualname
# we would get from the python type system
class QualnameWrapper(torch.nn.Module):
pass
QualnameWrapper._jit_override_qualname = torch._jit_internal._qualified_name( # type: ignore[attr-defined]
type(orig)
)
tmp_module = QualnameWrapper()
def check_unique(param):
if param in id_set:
raise ValueError(
"TracedModules don't support parameter sharing between modules"
)
id_set.add(param)
tmp_module.training = orig.training
for name, param in orig._parameters.items():
if param is not None:
tmp_module._parameters[name] = param
check_unique(param)
for name, buf in orig._buffers.items():
if buf is not None:
tmp_module._buffers[name] = buf
check_unique(buf)
for name, val in orig.__dict__.items():
if (
torch._C._jit_is_script_object(val)
and name not in orig._parameters
and name not in orig._buffers
):
setattr(tmp_module, name, val)
if orig._backward_hooks:
raise ValueError(
"Modules that have backward hooks assigned can't be compiled: "
+ str(orig)
)
for name, submodule in orig._modules.items():
if submodule is None:
continue
tmp_module._modules[name] = make_module(
submodule, TracedModule, _compilation_unit=None
)
script_module = torch.jit._recursive.create_script_module(
tmp_module, lambda module: (), share_types=False, is_tracing=True
)
self.__dict__["_name"] = type(orig).__name__
self.__dict__["_actual_script_module"] = script_module
for name in ("_parameters", "_buffers", "_modules", "training"):
delattr(self, name)
def forward(self, *args, **kwargs):
raise RuntimeError("Trace submodules cannot be called.")
def __getattr__(self, attr):
if "_actual_script_module" not in self.__dict__:
return super().__getattr__(attr)
return getattr(self._actual_script_module, attr)
def __setattr__(self, attr, value):
if "_actual_script_module" not in self.__dict__:
return super().__setattr__(attr, value)
setattr(self._actual_script_module, attr, value)
def _get_name(self):
return self._name
def extra_repr(self):
return f"original_name={self._name}"
| TracedModule |
python | wandb__wandb | wandb/vendor/pygments/lexers/archetype.py | {
"start": 6344,
"end": 8732
} | class ____(AtomsLexer):
"""
Lexer for cADL syntax.
.. versionadded:: 2.1
"""
name = 'cADL'
aliases = ['cadl']
filenames = ['*.cadl']
tokens = {
'path': [
# attribute name
(r'[a-z_]\w*', Name.Class),
(r'/', Punctuation),
(r'\[', Punctuation, 'any_code'),
(r'\s+', Punctuation, '#pop'),
],
'root': [
include('whitespace'),
(r'(cardinality|existence|occurrences|group|include|exclude|'
r'allow_archetype|use_archetype|use_node)\W', Keyword.Type),
(r'(and|or|not|there_exists|xor|implies|for_all)\W', Keyword.Type),
(r'(after|before|closed)\W', Keyword.Type),
(r'(not)\W', Operator),
(r'(matches|is_in)\W', Operator),
# is_in / not is_in char
(u'(\u2208|\u2209)', Operator),
# there_exists / not there_exists / for_all / and / or
(u'(\u2203|\u2204|\u2200|\u2227|\u2228|\u22BB|\223C)',
Operator),
# regex in slot or as string constraint
(r'(\{)(\s*/[^}]+/\s*)(\})',
bygroups(Punctuation, String.Regex, Punctuation)),
# regex in slot or as string constraint
(r'(\{)(\s*\^[^}]+\^\s*)(\})',
bygroups(Punctuation, String.Regex, Punctuation)),
(r'/', Punctuation, 'path'),
# for cardinality etc
(r'(\{)((?:\d+\.\.)?(?:\d+|\*))'
r'((?:\s*;\s*(?:ordered|unordered|unique)){,2})(\})',
bygroups(Punctuation, Number, Number, Punctuation)),
# [{ is start of a tuple value
(r'\[\{', Punctuation),
(r'\}\]', Punctuation),
(r'\{', Punctuation),
(r'\}', Punctuation),
include('constraint_values'),
# type name
(r'[A-Z]\w+(<[A-Z]\w+([A-Za-z_<>]*)>)?', Name.Class),
# attribute name
(r'[a-z_]\w*', Name.Class),
(r'\[', Punctuation, 'any_code'),
(r'(~|//|\\\\|\+|-|/|\*|\^|!=|=|<=|>=|<|>]?)', Operator),
(r'\(', Punctuation),
(r'\)', Punctuation),
# for lists of values
(r',', Punctuation),
(r'"', String, 'string'),
# for assumed value
(r';', Punctuation),
],
}
| CadlLexer |
python | sqlalchemy__sqlalchemy | test/sql/test_types.py | {
"start": 93691,
"end": 98239
} | class ____(fixtures.TablesTest, AssertsExecutionResults):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
global MyPickleType
class MyPickleType(types.TypeDecorator):
impl = PickleType
cache_ok = True
def process_bind_param(self, value, dialect):
if value:
value.stuff = "this is modified stuff"
return value
def process_result_value(self, value, dialect):
if value:
value.stuff = "this is the right stuff"
return value
Table(
"binary_table",
metadata,
Column(
"primary_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("data", LargeBinary),
Column("data_slice", LargeBinary(100)),
Column("misc", String(30)),
Column("pickled", PickleType),
Column("mypickle", MyPickleType),
)
@testing.requires.non_broken_binary
def test_round_trip(self, connection):
binary_table = self.tables.binary_table
testobj1 = pickleable.Foo("im foo 1")
testobj2 = pickleable.Foo("im foo 2")
testobj3 = pickleable.Foo("im foo 3")
stream1 = self.load_stream("binary_data_one.dat")
stream2 = self.load_stream("binary_data_two.dat")
connection.execute(
binary_table.insert(),
dict(
primary_id=1,
misc="binary_data_one.dat",
data=stream1,
data_slice=stream1[0:100],
pickled=testobj1,
mypickle=testobj3,
),
)
connection.execute(
binary_table.insert(),
dict(
primary_id=2,
misc="binary_data_two.dat",
data=stream2,
data_slice=stream2[0:99],
pickled=testobj2,
),
)
connection.execute(
binary_table.insert(),
dict(
primary_id=3,
misc="binary_data_two.dat",
data=None,
data_slice=stream2[0:99],
pickled=None,
),
)
for stmt in (
binary_table.select().order_by(binary_table.c.primary_id),
text(
"select * from binary_table order by binary_table.primary_id",
).columns(
**{
"pickled": PickleType,
"mypickle": MyPickleType,
"data": LargeBinary,
"data_slice": LargeBinary,
}
),
):
result = connection.execute(stmt).fetchall()
eq_(stream1, result[0]._mapping["data"])
eq_(stream1[0:100], result[0]._mapping["data_slice"])
eq_(stream2, result[1]._mapping["data"])
eq_(testobj1, result[0]._mapping["pickled"])
eq_(testobj2, result[1]._mapping["pickled"])
eq_(testobj3.moredata, result[0]._mapping["mypickle"].moredata)
eq_(
result[0]._mapping["mypickle"].stuff, "this is the right stuff"
)
@testing.requires.binary_comparisons
def test_comparison(self, connection):
"""test that type coercion occurs on comparison for binary"""
binary_table = self.tables.binary_table
expr = binary_table.c.data == "foo"
assert isinstance(expr.right.type, LargeBinary)
data = os.urandom(32)
connection.execute(binary_table.insert(), dict(data=data))
eq_(
connection.scalar(
select(func.count("*"))
.select_from(binary_table)
.where(binary_table.c.data == data)
),
1,
)
@testing.requires.binary_literals
def test_literal_roundtrip(self, connection):
result = connection.execute(
select(cast(literal(b"foo", literal_execute=True), LargeBinary))
)
eq_(result.scalar(), util.b("foo"))
def test_bind_processor_no_dbapi(self):
b = LargeBinary()
eq_(b.bind_processor(default.DefaultDialect()), None)
def load_stream(self, name):
f = os.path.join(os.path.dirname(__file__), "..", name)
with open(f, mode="rb") as o:
return o.read()
| BinaryTest |
python | kamyu104__LeetCode-Solutions | Python/minimum-array-sum.py | {
"start": 151,
"end": 1681
} | class ____(object):
def minArraySum(self, nums, k, op1, op2):
"""
:type nums: List[int]
:type k: int
:type op1: int
:type op2: int
:rtype: int
"""
nums.sort()
left = next((i for i in xrange(len(nums)) if nums[i] >= k), len(nums))
right = next((i for i in xrange(len(nums)) if nums[i] >= 2*k-1), len(nums))
lookup, cnt = [False]*len(nums), 0
for j in reversed(xrange(right, len(nums))):
if not op1:
break
op1 -= 1
nums[j] = (nums[j]+1)//2
if op2:
op2 -= 1
nums[j] -= k
else:
j = right-1
for i in xrange(left, j+1):
if not op2:
break
op2 -= 1
if k%2 == 1 and nums[i]%2 == 0:
lookup[i] = True
nums[i] -= k
else:
i = j+1
for j in reversed(xrange(i, j+1)):
if not op1:
break
op1 -= 1
if k%2 == 1 and nums[j]%2 == 1:
cnt += 1
nums[j] = (nums[j]+1)//2
else:
j = i-1
arr = sorted((nums[idx], idx) for idx in xrange(i))
for _ in xrange(op1):
x, idx = arr.pop()
nums[idx] = (x+1)//2
if cnt and lookup[idx]:
cnt -= 1
nums[idx] -= 1
return sum(nums)
# Time: O(n * op1 * op2)
# Space: O(op1 * op2)
# dp
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context/system.py | {
"start": 9728,
"end": 10480
} | class ____(PlanOrchestrationContext, IStepContext):
"""Context for the orchestration of a step.
This context assumes inability to run user code directly. Thus, it does not include any resource
information.
"""
def __init__(
self,
plan_data: PlanData,
log_manager: DagsterLogManager,
executor: Executor,
step: ExecutionStep,
output_capture: Optional[dict[StepOutputHandle, Any]],
):
super().__init__(plan_data, log_manager, executor, output_capture)
self._step = step
@property
def step(self) -> ExecutionStep:
return self._step
@property
def node_handle(self) -> "NodeHandle":
return self.step.node_handle
| StepOrchestrationContext |
python | google__pytype | pytype/datatypes_test.py | {
"start": 129,
"end": 1086
} | class ____(unittest.TestCase):
"""Test AccessTrackingDict."""
def setUp(self):
super().setUp()
self.d = datatypes.AccessTrackingDict({"a": 1, "b": 2})
def test_get(self):
v = self.d["a"]
(item,) = self.d.accessed_subset.items()
self.assertEqual(item, ("a", 1))
self.assertEqual(v, 1)
def test_set(self):
self.d["a"] = 3
(item,) = self.d.accessed_subset.items()
self.assertEqual(item, ("a", 1))
self.assertEqual(self.d["a"], 3)
def test_set_new(self):
self.d["c"] = 3
self.assertFalse(self.d.accessed_subset)
def test_del(self):
del self.d["a"]
(item,) = self.d.accessed_subset.items()
self.assertEqual(item, ("a", 1))
with self.assertRaises(KeyError):
_ = self.d["a"]
def test_repeat_access(self):
self.d["a"] = 3
v = self.d["a"]
(item,) = self.d.accessed_subset.items()
self.assertEqual(item, ("a", 1))
self.assertEqual(v, 3)
| AccessTrackingDictTest |
python | huggingface__transformers | src/transformers/models/glm4v/modeling_glm4v.py | {
"start": 16756,
"end": 23546
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Glm4vTextConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Glm4vTextConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
dim = int(head_dim * partial_rotary_factor)
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
# In contrast to other models, GLM4V different position ids for the grids
# So we expand the inv_freq to shape (3, ...)
inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions)
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half_llm(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., 0::2]
x2 = x[..., 1::2]
return torch.stack((-x2, x1), dim=-1).flatten(-2)
def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1):
"""Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/).
Explanation:
Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding
sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For
vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately.
Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding.
For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal,
height and width) of text embedding is always the same, so the text embedding rotary position embedding has no
difference with modern LLMs.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
mrope_section(`List(int)`):
Multimodal rope section is for channel dimension of temporal, height and width in rope calculation.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
mrope_section = mrope_section * 2
cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
unsqueeze_dim
)
sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
unsqueeze_dim
)
# Interleave them instead of usual shape
cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)
# Keep half or full tensor for later concatenation
rotary_dim = cos.shape[-1]
q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
# Apply rotary embeddings on the first half or full tensor
q_embed = (q_rot * cos) + (rotate_half_llm(q_rot) * sin)
k_embed = (k_rot * cos) + (rotate_half_llm(k_rot) * sin)
# Concatenate back to full shape
q_embed = torch.cat([q_embed, q_pass], dim=-1)
k_embed = torch.cat([k_embed, k_pass], dim=-1)
return q_embed, k_embed
| Glm4vTextRotaryEmbedding |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 124923,
"end": 126783
} | class ____(Response):
"""
Response of models.update endpoint.
:param updated: Number of models updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "models"
_action = "update"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of models updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(UpdateResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| UpdateResponse |
python | pytorch__pytorch | torch/nn/parallel/distributed.py | {
"start": 7997,
"end": 9235
} | class ____(Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, ddp_weakref, *inputs):
# set_materialize_grads(False) will ensure that None gradients stay as
# None and are not filled with zeros.
ctx.set_materialize_grads(False)
ctx.ddp_weakref = ddp_weakref
ret = inputs
if ddp_weakref()._ddp_sink_clone:
ret = tuple(
inp.clone() if isinstance(inp, torch.Tensor) else inp for inp in inputs
)
return ret
@staticmethod
def backward(ctx, *grad_outputs):
# Enqueue delay allreduce for static graph training on the first
# iteration.
ddp_weakref = ctx.ddp_weakref()
reducer = ddp_weakref.reducer
static_graph = ddp_weakref.static_graph
delay_ar_enqueued = (
static_graph and ddp_weakref._static_graph_delay_allreduce_enqueued
)
if static_graph and not delay_ar_enqueued:
Variable._execution_engine.queue_callback( # type: ignore[call-arg,misc]
reducer._delay_all_reduce
)
ddp_weakref._static_graph_delay_allreduce_enqueued = True
return (None, *grad_outputs)
| _DDPSink |
python | py-pdf__pypdf | pypdf/generic/_data_structures.py | {
"start": 3110,
"end": 8414
} | class ____(list[Any], PdfObject):
def replicate(
self,
pdf_dest: PdfWriterProtocol,
) -> "ArrayObject":
arr = cast(
"ArrayObject",
self._reference_clone(ArrayObject(), pdf_dest, False),
)
for data in self:
if hasattr(data, "replicate"):
arr.append(data.replicate(pdf_dest))
else:
arr.append(data)
return arr
def clone(
self,
pdf_dest: PdfWriterProtocol,
force_duplicate: bool = False,
ignore_fields: Optional[Sequence[Union[str, int]]] = (),
) -> "ArrayObject":
"""Clone object into pdf_dest."""
try:
if self.indirect_reference.pdf == pdf_dest and not force_duplicate: # type: ignore
return self
except Exception:
pass
arr = cast(
"ArrayObject",
self._reference_clone(ArrayObject(), pdf_dest, force_duplicate),
)
for data in self:
if isinstance(data, StreamObject):
dup = data._reference_clone(
data.clone(pdf_dest, force_duplicate, ignore_fields),
pdf_dest,
force_duplicate,
)
arr.append(dup.indirect_reference)
elif hasattr(data, "clone"):
arr.append(data.clone(pdf_dest, force_duplicate, ignore_fields))
else:
arr.append(data)
return arr
def hash_bin(self) -> int:
"""
Used to detect modified object.
Returns:
Hash considering type and value.
"""
return hash((self.__class__, tuple(x.hash_bin() for x in self)))
def items(self) -> Iterable[Any]:
"""Emulate DictionaryObject.items for a list (index, object)."""
return enumerate(self)
def _to_lst(self, lst: Any) -> list[Any]:
# Convert to list, internal
if isinstance(lst, (list, tuple, set)):
pass
elif isinstance(lst, PdfObject):
lst = [lst]
elif isinstance(lst, str):
if lst[0] == "/":
lst = [NameObject(lst)]
else:
lst = [TextStringObject(lst)]
elif isinstance(lst, bytes):
lst = [ByteStringObject(lst)]
else: # for numbers,...
lst = [lst]
return lst
def __add__(self, lst: Any) -> "ArrayObject":
"""
Allow extension by adding list or add one element only
Args:
lst: any list, tuples are extended the list.
other types(numbers,...) will be appended.
if str is passed it will be converted into TextStringObject
or NameObject (if starting with "/")
if bytes is passed it will be converted into ByteStringObject
Returns:
ArrayObject with all elements
"""
temp = ArrayObject(self)
temp.extend(self._to_lst(lst))
return temp
def __iadd__(self, lst: Any) -> Self:
"""
Allow extension by adding list or add one element only
Args:
lst: any list, tuples are extended the list.
other types(numbers,...) will be appended.
if str is passed it will be converted into TextStringObject
or NameObject (if starting with "/")
if bytes is passed it will be converted into ByteStringObject
"""
self.extend(self._to_lst(lst))
return self
def __isub__(self, lst: Any) -> Self:
"""Allow to remove items"""
for x in self._to_lst(lst):
try:
index = self.index(x)
del self[index]
except ValueError:
pass
return self
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes] = None
) -> None:
if encryption_key is not None: # deprecated
deprecation_no_replacement(
"the encryption_key parameter of write_to_stream", "5.0.0"
)
stream.write(b"[")
for data in self:
stream.write(b" ")
data.write_to_stream(stream)
stream.write(b" ]")
@staticmethod
def read_from_stream(
stream: StreamType,
pdf: Optional[PdfReaderProtocol],
forced_encoding: Union[None, str, list[str], dict[int, str]] = None,
) -> "ArrayObject":
arr = ArrayObject()
tmp = stream.read(1)
if tmp != b"[":
raise PdfReadError("Could not read array")
while True:
# skip leading whitespace
tok = stream.read(1)
while tok.isspace():
tok = stream.read(1)
if tok == b"":
break
if tok == b"%":
stream.seek(-1, 1)
skip_over_comment(stream)
continue
stream.seek(-1, 1)
# check for array ending
peek_ahead = stream.read(1)
if peek_ahead == b"]":
break
stream.seek(-1, 1)
# read and append object
arr.append(read_object(stream, pdf, forced_encoding))
return arr
| ArrayObject |
python | plotly__plotly.py | plotly/graph_objs/indicator/_number.py | {
"start": 233,
"end": 4745
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "indicator"
_path_str = "indicator.number"
_valid_props = {"font", "prefix", "suffix", "valueformat"}
@property
def font(self):
"""
Set the font used to display main number
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.number.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.indicator.number.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def prefix(self):
"""
Sets a prefix appearing before the number.
The 'prefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["prefix"]
@prefix.setter
def prefix(self, val):
self["prefix"] = val
@property
def suffix(self):
"""
Sets a suffix appearing next to the number.
The 'suffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["suffix"]
@suffix.setter
def suffix(self, val):
self["suffix"] = val
@property
def valueformat(self):
"""
Sets the value formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
The 'valueformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["valueformat"]
@valueformat.setter
def valueformat(self, val):
self["valueformat"] = val
@property
def _prop_descriptions(self):
return """\
font
Set the font used to display main number
prefix
Sets a prefix appearing before the number.
suffix
Sets a suffix appearing next to the number.
valueformat
Sets the value formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
"""
def __init__(
self, arg=None, font=None, prefix=None, suffix=None, valueformat=None, **kwargs
):
"""
Construct a new Number object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.indicator.Number`
font
Set the font used to display main number
prefix
Sets a prefix appearing before the number.
suffix
Sets a suffix appearing next to the number.
valueformat
Sets the value formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
Returns
-------
Number
"""
super().__init__("number")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.indicator.Number
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.Number`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("prefix", arg, prefix)
self._set_property("suffix", arg, suffix)
self._set_property("valueformat", arg, valueformat)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Number |
python | streamlit__streamlit | lib/streamlit/elements/lib/js_number.py | {
"start": 670,
"end": 737
} | class ____(Exception): # noqa: N818
pass
| JSNumberBoundsException |
python | astropy__astropy | astropy/coordinates/builtin_frames/ecliptic.py | {
"start": 6978,
"end": 7773
} | class ____(BaseEclipticFrame):
"""
Heliocentric mean ecliptic coordinates. These origin of the coordinates are the
center of the sun, with the x axis pointing in the direction of
the *mean* (not true) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
{params}
"""
equinox = TimeAttribute(default=EQUINOX_J2000, doc="The equinox time")
obstime = TimeAttribute(
default=DEFAULT_OBSTIME, doc="The reference time (e.g., time of observation)"
)
@format_doc(
base_doc,
components=doc_components_ecl.format("sun's center"),
footer=doc_footer_helio,
)
| HeliocentricMeanEcliptic |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.