language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | spack__spack | lib/spack/spack/installer.py | {
"start": 50535,
"end": 50660
} | class ____:
def complete(self) -> bool:
return True
def terminate(self) -> None:
pass
| MockBuildProcess |
python | getsentry__sentry | tests/sentry/testutils/thread_leaks/test_pytest.py | {
"start": 387,
"end": 7745
} | class ____:
"""Test the Sentry event capture functionality for thread leaks."""
@thread_leak_allowlist(reason="Testing thread leak detection itself", issue=99999)
def test_capture_event_strict_no_allowlist(self) -> None:
"""Test capturing events in strict mode without allowlist."""
stop = Event()
thread = Thread(target=stop.wait, daemon=True)
# Set _where to simulate thread leak tracking
from traceback import FrameSummary
thread._where = [FrameSummary(__file__, 28, "test_capture_event_strict_no_allowlist")] # type: ignore[attr-defined]
# Create mock pytest item
mock_item = Mock(spec=pytest.Item)
mock_item.nodeid = "tests/sentry/testutils/thread_leaks/test_pytest.py::TestSentryCapture::test_capture_event_strict_no_allowlist"
try:
thread.start()
thread_leaks = {thread}
# Capture the event
events = sentry.capture_event(
thread_leaks=thread_leaks, strict=True, allowlisted=None, item=mock_item
)
finally:
stop.set()
thread.join()
# Verify event was captured
assert len(events) == 1
# Print event ID for manual verification via Sentry MCP
event_id, event = next(iter(events.items()))
log_test_info(f"Thread leak strict event ID: {event_id}")
# Verify event payload
assert event["level"] == "error" # strict=True, no allowlist
assert event["exception"]["values"][0]["mechanism"]["handled"] is False
assert event["exception"]["values"][0]["mechanism"]["data"]["strict"] is True
assert event["exception"]["values"][0]["mechanism"]["data"]["allowlisted"] is False
@thread_leak_allowlist(reason="Testing thread leak detection itself", issue=99999)
def test_capture_event_non_strict(self) -> None:
"""Test capturing events in non-strict mode."""
stop = Event()
thread = Thread(target=stop.wait, daemon=True)
# Set _where to simulate thread leak tracking
from traceback import FrameSummary
thread._where = [FrameSummary(__file__, 65, "test_capture_event_non_strict")] # type: ignore[attr-defined]
# Create mock pytest item
mock_item = Mock(spec=pytest.Item)
mock_item.nodeid = "tests/sentry/testutils/thread_leaks/test_pytest.py::TestSentryCapture::test_capture_event_non_strict"
try:
thread.start()
thread_leaks = {thread}
# Capture the event
events = sentry.capture_event(
thread_leaks=thread_leaks, strict=False, allowlisted=None, item=mock_item
)
finally:
stop.set()
thread.join()
# Verify event was captured
assert len(events) == 1
# Print event ID for manual verification via Sentry MCP
event_id, event = next(iter(events.items()))
log_test_info(f"Thread leak non-strict event ID: {event_id}")
# Verify event payload
assert event["level"] == "warning" # strict=False
assert event["exception"]["values"][0]["mechanism"]["handled"] is True
# Verify tags for filtering/grouping
assert "tags" in event
assert "thread.target" in event["tags"]
assert event["tags"]["thread.target"] == "threading.Event.wait"
assert event["tags"]["pytest.file"] == "tests/sentry/testutils/thread_leaks/test_pytest.py"
# Verify contexts (replacing extras)
assert "contexts" in event
assert "pytest" in event["contexts"]
assert event["contexts"]["pytest"]["nodeid"] == mock_item.nodeid
assert (
event["contexts"]["pytest"]["file"]
== "tests/sentry/testutils/thread_leaks/test_pytest.py"
)
@thread_leak_allowlist(reason="Testing thread leak detection itself", issue=99999)
def test_capture_event_allowlisted(self) -> None:
"""Test capturing events with allowlist."""
stop = Event()
thread = Thread(target=stop.wait, daemon=True)
# Set _where to simulate thread leak tracking
from traceback import FrameSummary
thread._where = [FrameSummary(__file__, 113, "test_capture_event_allowlisted")] # type: ignore[attr-defined]
# Create mock pytest item
mock_item = Mock(spec=pytest.Item)
mock_item.nodeid = "tests/sentry/testutils/thread_leaks/test_pytest.py::TestSentryCapture::test_capture_event_allowlisted"
# Create mock allowlist marker
mock_marker = Mock(spec=pytest.Mark)
mock_marker.kwargs = {"issue": 12345, "reason": "Known thread leak"}
try:
thread.start()
thread_leaks = {thread}
# Capture the event with allowlist
events = sentry.capture_event(
thread_leaks=thread_leaks,
strict=True, # Even with strict, allowlisted shouldn't be error
allowlisted=mock_marker,
item=mock_item,
)
finally:
stop.set()
thread.join()
# Verify event was captured
assert len(events) == 1
# Print event ID for manual verification via Sentry MCP
event_id, event = next(iter(events.items()))
log_test_info(f"Thread leak allowlisted event ID: {event_id}")
# Verify event payload reflects allowlisted status
assert event["level"] == "info" # allowlisted
# Note: mechanism.handled is still False when strict=True even with allowlist
# This seems like a potential bug but matching current implementation
assert event["exception"]["values"][0]["mechanism"]["handled"] is False
# Verify allowlist information in tags
assert event["tags"]["thread_leak_allowlist.issue"] == "12345"
# Verify allowlist context
assert "thread_leak_allowlist" in event["contexts"]
assert event["contexts"]["thread_leak_allowlist"]["issue"] == 12345
assert event["contexts"]["thread_leak_allowlist"]["reason"] == "Known thread leak"
def test_filters_out_django_dev_server_threads(self) -> None:
"""Test that filter_django_dev_server_threads removes Django dev server threads."""
stop = Event()
from sentry.testutils.thread_leaks import pytest as thread_leaks_pytest
# a mock function that has the qualname of our django dev server thread
def fake_django_process_request_thread() -> None:
pass
fake_django_process_request_thread.__module__ = "django.core.servers.basehttp"
fake_django_process_request_thread.__qualname__ = (
"ThreadedWSGIServer.process_request_thread"
)
# Thread that should be filtered out (Django dev server)
django_thread = Thread(target=fake_django_process_request_thread, daemon=True)
# Thread that should NOT be filtered out
normal_thread = Thread(target=stop.wait, daemon=True)
threads = {django_thread, normal_thread}
filtered = thread_leaks_pytest.filter_django_dev_server_threads(threads)
# Only the normal thread should remain
assert normal_thread in filtered
assert django_thread not in filtered
assert len(filtered) == 1
| TestSentryCapture |
python | pytorch__pytorch | torch/_dynamo/variables/misc.py | {
"start": 24957,
"end": 27368
} | class ____(VariableTracker):
"""
This variable is special, it lets you execute arbitrary code at
Dynamo compile time
"""
def reconstruct(self, codegen: "PyCodegen"):
raise NotImplementedError("comptime is special form")
def var_getattr(self, tx: "InstructionTranslator", name: str) -> "VariableTracker":
from ..comptime import comptime
# To support the comptime.print_graph convenience accessors
return VariableTracker.build(
tx, getattr(comptime, name), source=AttrSource(self.source, name)
)
def call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
from ..comptime import ComptimeContext
# TODO: support an expression form as well
# Second argument is runtime lambda, ignored
if kwargs or len(args) > 2:
raise_args_mismatch(
tx,
"comptime()",
"at most 2 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
fn = args[0]
if isinstance(fn, UserFunctionVariable):
fn.get_function()(ComptimeContext(tx))
elif isinstance(fn, NestedUserFunctionVariable):
# We have to manually bind the freevars ourselves
code = fn.get_code()
if fn.closure:
raise_type_error_exc(
tx,
f"comptime function must not have free variables, but these variables were free: {code.co_freevars}",
)
func = types.FunctionType(
code,
fn.f_globals,
fn.fn_name.as_python_constant(),
tuple(fn.defaults.items) if fn.defaults else None,
# We could automatically promote free variables into
# ComptimeVar but this is confusing if you access
# a free variable that we actually DO have the runtime
# value for
# tuple(make_cell(ComptimeVar(i)) for i in fn.closure.items)
(),
)
func(ComptimeContext(tx))
else:
raise RuntimeError(f"unsupported argument to comptime: {type(fn)}")
return variables.ConstantVariable.create(None)
| ComptimeVariable |
python | scrapy__scrapy | tests/test_spidermiddleware_process_start.py | {
"start": 1757,
"end": 1955
} | class ____:
async def process_start(self, start):
yield ITEM_A
async for item_or_request in start:
yield item_or_request
yield ITEM_C
| ModernWrapSpiderMiddleware |
python | doocs__leetcode | solution/1300-1399/1381.Design a Stack With Increment Operation/Solution.py | {
"start": 0,
"end": 828
} | class ____:
def __init__(self, maxSize: int):
self.stk = [0] * maxSize
self.add = [0] * maxSize
self.i = 0
def push(self, x: int) -> None:
if self.i < len(self.stk):
self.stk[self.i] = x
self.i += 1
def pop(self) -> int:
if self.i <= 0:
return -1
self.i -= 1
ans = self.stk[self.i] + self.add[self.i]
if self.i > 0:
self.add[self.i - 1] += self.add[self.i]
self.add[self.i] = 0
return ans
def increment(self, k: int, val: int) -> None:
i = min(k, self.i) - 1
if i >= 0:
self.add[i] += val
# Your CustomStack object will be instantiated and called as such:
# obj = CustomStack(maxSize)
# obj.push(x)
# param_2 = obj.pop()
# obj.increment(k,val)
| CustomStack |
python | langchain-ai__langchain | libs/langchain/tests/unit_tests/retrievers/self_query/test_base.py | {
"start": 1832,
"end": 4021
} | class ____(InMemoryVectorStore):
@override
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> list[Document]:
res = self.store.get(query)
if res is None:
return []
return [res]
@pytest.fixture
def fake_llm() -> FakeLLM:
return FakeLLM(
queries={
"1": """```json
{
"query": "test",
"filter": null
}
```""",
"bar": "baz",
},
sequential_responses=True,
)
@pytest.fixture
def fake_vectorstore() -> InMemoryVectorstoreWithSearch:
vectorstore = InMemoryVectorstoreWithSearch()
vectorstore.add_documents(
[
Document(
page_content="test",
metadata={
"foo": "bar",
},
),
],
ids=["test"],
)
return vectorstore
@pytest.fixture
def fake_self_query_retriever(
fake_llm: FakeLLM,
fake_vectorstore: InMemoryVectorstoreWithSearch,
) -> SelfQueryRetriever:
return SelfQueryRetriever.from_llm(
llm=fake_llm,
vectorstore=fake_vectorstore,
document_contents="test",
metadata_field_info=[
AttributeInfo(
name="foo",
type="string",
description="test",
),
],
structured_query_translator=FakeTranslator(),
)
def test__get_relevant_documents(fake_self_query_retriever: SelfQueryRetriever) -> None:
relevant_documents = fake_self_query_retriever._get_relevant_documents(
"foo",
run_manager=CallbackManagerForRetrieverRun.get_noop_manager(),
)
assert len(relevant_documents) == 1
assert relevant_documents[0].metadata["foo"] == "bar"
async def test__aget_relevant_documents(
fake_self_query_retriever: SelfQueryRetriever,
) -> None:
relevant_documents = await fake_self_query_retriever._aget_relevant_documents(
"foo",
run_manager=AsyncCallbackManagerForRetrieverRun.get_noop_manager(),
)
assert len(relevant_documents) == 1
assert relevant_documents[0].metadata["foo"] == "bar"
| InMemoryVectorstoreWithSearch |
python | huggingface__transformers | tests/models/nllb_moe/test_modeling_nllb_moe.py | {
"start": 1465,
"end": 9478
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="relu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
num_experts=4,
encoder_sparse_step=2,
decoder_sparse_step=1,
expert_capacity=100,
router_jitter_noise=0.0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.encoder_sparse_step = encoder_sparse_step
self.decoder_sparse_step = decoder_sparse_step
self.expert_capacity = expert_capacity
self.router_jitter_noise = router_jitter_noise
self.num_experts = num_experts
def prepare_nllb_moe_inputs_dict(
self,
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
input_ids = input_ids.clamp(self.pad_token_id + 1)
decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
config = self.get_config()
inputs_dict = self.prepare_nllb_moe_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return NllbMoeConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
encoder_layerdrop=self.encoder_layerdrop,
decoder_layerdrop=self.decoder_layerdrop,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
expert_capacity=self.expert_capacity,
router_jitter_noise=self.router_jitter_noise,
decoder_sparse_step=self.decoder_sparse_step,
encoder_sparse_step=self.encoder_sparse_step,
num_experts=self.num_experts,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
@require_torch
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = NllbMoeModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = NllbMoeModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = NllbMoeEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = NllbMoeDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
| NllbMoeModelTester |
python | psf__black | tests/data/cases/class_methods_new_line.py | {
"start": 656,
"end": 786
} | class ____:
"""Test class"""
cls_var = 100
@deco
def __init__(self):
pass
| ClassWithDecoInitAndVarsAndDocstring |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 251382,
"end": 253431
} | class ____(torch.nn.Module):
def forward(self, L_y_: "f32[5, 3]", L_x_: "f32[2, 3]"):
l_y_ = L_y_
l_x_ = L_x_
lazy_load_decompositions = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions = None
_vmap_increment_nesting = torch._functorch.predispatch._vmap_increment_nesting(5, 'error'); _vmap_increment_nesting = None
child: "f32[3]" = torch._functorch.predispatch._add_batch_dim(l_y_, 0, 1); l_y_ = None
lazy_load_decompositions_1 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_1 = None
_vmap_increment_nesting_1 = torch._functorch.predispatch._vmap_increment_nesting(3, 'error'); _vmap_increment_nesting_1 = None
_add_batch_dim_1: "f32[]" = torch._functorch.predispatch._add_batch_dim(child, 0, 2); child = None
batched_outputs: "f32[2, 3]" = l_x_ * _add_batch_dim_1; l_x_ = _add_batch_dim_1 = None
batched_outputs_1: "f32[3, 2, 3]" = torch._functorch.predispatch._remove_batch_dim(batched_outputs, 2, 3, 0); batched_outputs = None
_vmap_decrement_nesting = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting = None
_remove_batch_dim_1: "f32[5, 3, 2, 3]" = torch._functorch.predispatch._remove_batch_dim(batched_outputs_1, 1, 5, 0); batched_outputs_1 = None
_vmap_decrement_nesting_1 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_1 = None
return (_remove_batch_dim_1,)
""",
)
def test_vmap_multiple_outputs(self):
x = torch.ones(2, 4, 3)
def fn(x):
return torch.vmap(lambda x: (x.sum(0), x.sum(1)))(x)
wrapped_gm = self._compile_check(fn, (x,))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 228969,
"end": 230114
} | class ____(Response):
"""
Response of tasks.delete_configuration endpoint.
:param deleted: Indicates if the task was updated successfully
:type deleted: int
"""
_service = "tasks"
_action = "delete_configuration"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"deleted": {
"description": "Indicates if the task was updated successfully",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, deleted=None, **kwargs):
super(DeleteConfigurationResponse, self).__init__(**kwargs)
self.deleted = deleted
@schema_property("deleted")
def deleted(self):
return self._property_deleted
@deleted.setter
def deleted(self, value):
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
| DeleteConfigurationResponse |
python | ansible__ansible | test/units/module_utils/basic/test_run_command.py | {
"start": 7650,
"end": 9966
} | class ____:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_text_stdin(self, rc_am):
(rc, stdout, stderr) = rc_am.run_command('/bin/foo', data='hello world')
assert rc_am._subprocess.Popen.return_value.stdin.getvalue() == b'hello world\n'
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_ascii_stdout(self, mocker, rc_am):
rc_am._subprocess._output = {mocker.sentinel.stdout:
SpecialBytesIO(b'hello', fh=mocker.sentinel.stdout),
mocker.sentinel.stderr:
SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
(rc, stdout, stderr) = rc_am.run_command('/bin/cat hello.txt')
assert rc == 0
# module_utils function. On py3 it returns text and py2 it returns
# bytes because it's returning native strings
assert stdout == 'hello'
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_utf8_output(self, mocker, rc_am):
rc_am._subprocess._output = {mocker.sentinel.stdout:
SpecialBytesIO(u'Žarn§'.encode('utf-8'),
fh=mocker.sentinel.stdout),
mocker.sentinel.stderr:
SpecialBytesIO(u'لرئيسية'.encode('utf-8'),
fh=mocker.sentinel.stderr)}
(rc, stdout, stderr) = rc_am.run_command('/bin/something_ugly')
assert rc == 0
# module_utils function. On py3 it returns text and py2 it returns
# bytes because it's returning native strings
assert stdout == to_native(u'Žarn§')
assert stderr == to_native(u'لرئيسية')
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_run_command_fds(mocker, rc_am):
subprocess_mock = mocker.patch('ansible.module_utils.basic.subprocess')
subprocess_mock.Popen.side_effect = AssertionError
try:
rc_am.run_command('synchronize', pass_fds=(101, 42))
except SystemExit:
pass
assert subprocess_mock.Popen.call_args[1]['pass_fds'] == (101, 42)
assert subprocess_mock.Popen.call_args[1]['close_fds'] is True
| TestRunCommandOutput |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/resolution/resolvelib/candidates.py | {
"start": 3840,
"end": 8727
} | class ____(Candidate):
"""A candidate backed by an ``InstallRequirement``.
This represents a package request with the target not being already
in the environment, and needs to be fetched and installed. The backing
``InstallRequirement`` is responsible for most of the leg work; this
class exposes appropriate information to the resolver.
:param link: The link passed to the ``InstallRequirement``. The backing
``InstallRequirement`` will use this link to fetch the distribution.
:param source_link: The link this candidate "originates" from. This is
different from ``link`` when the link is found in the wheel cache.
``link`` would point to the wheel cache, while this points to the
found remote link (e.g. from pypi.org).
"""
dist: BaseDistribution
is_installed = False
def __init__(
self,
link: Link,
source_link: Link,
ireq: InstallRequirement,
factory: "Factory",
name: Optional[NormalizedName] = None,
version: Optional[Version] = None,
) -> None:
self._link = link
self._source_link = source_link
self._factory = factory
self._ireq = ireq
self._name = name
self._version = version
self.dist = self._prepare()
self._hash: Optional[int] = None
def __str__(self) -> str:
return f"{self.name} {self.version}"
def __repr__(self) -> str:
return f"{self.__class__.__name__}({str(self._link)!r})"
def __hash__(self) -> int:
if self._hash is not None:
return self._hash
self._hash = hash((self.__class__, self._link))
return self._hash
def __eq__(self, other: Any) -> bool:
if isinstance(other, self.__class__):
return links_equivalent(self._link, other._link)
return False
@property
def source_link(self) -> Optional[Link]:
return self._source_link
@property
def project_name(self) -> NormalizedName:
"""The normalised name of the project the candidate refers to"""
if self._name is None:
self._name = self.dist.canonical_name
return self._name
@property
def name(self) -> str:
return self.project_name
@property
def version(self) -> Version:
if self._version is None:
self._version = self.dist.version
return self._version
def format_for_error(self) -> str:
return (
f"{self.name} {self.version} "
f"(from {self._link.file_path if self._link.is_file else self._link})"
)
def _prepare_distribution(self) -> BaseDistribution:
raise NotImplementedError("Override in subclass")
def _check_metadata_consistency(self, dist: BaseDistribution) -> None:
"""Check for consistency of project name and version of dist."""
if self._name is not None and self._name != dist.canonical_name:
raise MetadataInconsistent(
self._ireq,
"name",
self._name,
dist.canonical_name,
)
if self._version is not None and self._version != dist.version:
raise MetadataInconsistent(
self._ireq,
"version",
str(self._version),
str(dist.version),
)
# check dependencies are valid
# TODO performance: this means we iterate the dependencies at least twice,
# we may want to cache parsed Requires-Dist
try:
list(dist.iter_dependencies(list(dist.iter_provided_extras())))
except InvalidRequirement as e:
raise MetadataInvalid(self._ireq, str(e))
def _prepare(self) -> BaseDistribution:
try:
dist = self._prepare_distribution()
except HashError as e:
# Provide HashError the underlying ireq that caused it. This
# provides context for the resulting error message to show the
# offending line to the user.
e.req = self._ireq
raise
except InstallationSubprocessError as exc:
# The output has been presented already, so don't duplicate it.
exc.context = "See above for output."
raise
self._check_metadata_consistency(dist)
return dist
def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
requires = self.dist.iter_dependencies() if with_requires else ()
for r in requires:
yield from self._factory.make_requirements_from_spec(str(r), self._ireq)
yield self._factory.make_requires_python_requirement(self.dist.requires_python)
def get_install_requirement(self) -> Optional[InstallRequirement]:
return self._ireq
| _InstallRequirementBackedCandidate |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 842191,
"end": 844012
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"digest_method",
"external_identities",
"idp_certificate",
"issuer",
"organization",
"signature_method",
"sso_url",
)
digest_method = sgqlc.types.Field(URI, graphql_name="digestMethod")
external_identities = sgqlc.types.Field(
sgqlc.types.non_null(ExternalIdentityConnection),
graphql_name="externalIdentities",
args=sgqlc.types.ArgDict(
(
(
"members_only",
sgqlc.types.Arg(Boolean, graphql_name="membersOnly", default=None),
),
("login", sgqlc.types.Arg(String, graphql_name="login", default=None)),
(
"user_name",
sgqlc.types.Arg(String, graphql_name="userName", default=None),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
idp_certificate = sgqlc.types.Field(X509Certificate, graphql_name="idpCertificate")
issuer = sgqlc.types.Field(String, graphql_name="issuer")
organization = sgqlc.types.Field(Organization, graphql_name="organization")
signature_method = sgqlc.types.Field(URI, graphql_name="signatureMethod")
sso_url = sgqlc.types.Field(URI, graphql_name="ssoUrl")
| OrganizationIdentityProvider |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 192762,
"end": 199388
} | class ____:
table = [
[1],
[0, 1],
[0, 1, 1],
[0, 1, 3, 1],
[0, 1, 7, 6, 1],
[0, 1, 15, 25, 10, 1],
[0, 1, 31, 90, 65, 15, 1],
[0, 1, 63, 301, 350, 140, 21, 1],
[0, 1, 127, 966, 1701, 1050, 266, 28, 1],
[0, 1, 255, 3025, 7770, 6951, 2646, 462, 36, 1],
[0, 1, 511, 9330, 34105, 42525, 22827, 5880, 750, 45, 1],
]
@pytest.mark.parametrize("is_exact, comp, kwargs", [
(True, assert_equal, {}),
(False, assert_allclose, {'rtol': 1e-12})
])
def test_table_cases(self, is_exact, comp, kwargs):
for n in range(1, len(self.table)):
k_values = list(range(n+1))
row = self.table[n]
comp(row, stirling2([n], k_values, exact=is_exact), **kwargs)
@pytest.mark.parametrize("is_exact, comp, kwargs", [
(True, assert_equal, {}),
(False, assert_allclose, {'rtol': 1e-12})
])
def test_valid_single_integer(self, is_exact, comp, kwargs):
comp(stirling2(0, 0, exact=is_exact), self.table[0][0], **kwargs)
comp(stirling2(4, 2, exact=is_exact), self.table[4][2], **kwargs)
# a single 2-tuple of integers as arguments must return an int and not
# an array whereas arrays of single values should return array
comp(stirling2(5, 3, exact=is_exact), 25, **kwargs)
comp(stirling2([5], [3], exact=is_exact), [25], **kwargs)
@pytest.mark.parametrize("is_exact, comp, kwargs", [
(True, assert_equal, {}),
(False, assert_allclose, {'rtol': 1e-12})
])
def test_negative_integer(self, is_exact, comp, kwargs):
# negative integers for n or k arguments return 0
comp(stirling2(-1, -1, exact=is_exact), 0, **kwargs)
comp(stirling2(-1, 2, exact=is_exact), 0, **kwargs)
comp(stirling2(2, -1, exact=is_exact), 0, **kwargs)
@pytest.mark.parametrize("is_exact, comp, kwargs", [
(True, assert_equal, {}),
(False, assert_allclose, {'rtol': 1e-12})
])
def test_array_inputs(self, is_exact, comp, kwargs):
ans = [self.table[10][3], self.table[10][4]]
comp(stirling2(asarray([10, 10]),
asarray([3, 4]),
exact=is_exact),
ans)
comp(stirling2([10, 10],
asarray([3, 4]),
exact=is_exact),
ans)
comp(stirling2(asarray([10, 10]),
[3, 4],
exact=is_exact),
ans)
@pytest.mark.parametrize("is_exact, comp, kwargs", [
(True, assert_equal, {}),
(False, assert_allclose, {'rtol': 1e-13})
])
def test_mixed_values(self, is_exact, comp, kwargs):
# negative values-of either n or k-should return 0 for the entry
ans = [0, 1, 3, 25, 1050, 5880, 9330]
n = [-1, 0, 3, 5, 8, 10, 10]
k = [-2, 0, 2, 3, 5, 7, 3]
comp(stirling2(n, k, exact=is_exact), ans, **kwargs)
def test_correct_parity(self):
"""Test parity follows well known identity.
en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind#Parity
"""
n, K = 100, np.arange(101)
assert_equal(
stirling2(n, K, exact=True) % 2,
[math.comb(n - (k // 2) - 1, n - k) % 2 for k in K],
)
def test_big_numbers(self):
# via mpmath (bigger than 32bit)
ans = asarray([48063331393110, 48004081105038305])
n = [25, 30]
k = [17, 4]
assert array_equal(stirling2(n, k, exact=True), ans)
# bigger than 64 bit
ans = asarray([2801934359500572414253157841233849412,
14245032222277144547280648984426251])
n = [42, 43]
k = [17, 23]
assert array_equal(stirling2(n, k, exact=True), ans)
@pytest.mark.parametrize("N", [4.5, 3., 4+1j, "12", np.nan])
@pytest.mark.parametrize("K", [3.5, 3, "2", None])
@pytest.mark.parametrize("is_exact", [True, False])
def test_unsupported_input_types(self, N, K, is_exact):
# object, float, string, complex are not supported and raise TypeError
with pytest.raises(TypeError):
stirling2(N, K, exact=is_exact)
@pytest.mark.parametrize("is_exact", [True, False])
def test_numpy_array_int_object_dtype(self, is_exact):
# python integers with arbitrary precision are *not* allowed as
# object type in numpy arrays are inconsistent from api perspective
ans = asarray(self.table[4][1:])
n = asarray([4, 4, 4, 4], dtype=object)
k = asarray([1, 2, 3, 4], dtype=object)
with pytest.raises(TypeError):
array_equal(stirling2(n, k, exact=is_exact), ans)
@pytest.mark.parametrize("is_exact, comp, kwargs", [
(True, assert_equal, {}),
(False, assert_allclose, {'rtol': 1e-13})
])
def test_numpy_array_unsigned_int_dtype(self, is_exact, comp, kwargs):
# numpy unsigned integers are allowed as dtype in numpy arrays
ans = asarray(self.table[4][1:])
n = asarray([4, 4, 4, 4], dtype=np_ulong)
k = asarray([1, 2, 3, 4], dtype=np_ulong)
comp(stirling2(n, k, exact=False), ans, **kwargs)
@pytest.mark.parametrize("is_exact, comp, kwargs", [
(True, assert_equal, {}),
(False, assert_allclose, {'rtol': 1e-13})
])
def test_broadcasting_arrays_correctly(self, is_exact, comp, kwargs):
# broadcasting is handled by stirling2
# test leading 1s are replicated
ans = asarray([[1, 15, 25, 10], [1, 7, 6, 1]]) # shape (2,4)
n = asarray([[5, 5, 5, 5], [4, 4, 4, 4]]) # shape (2,4)
k = asarray([1, 2, 3, 4]) # shape (4,)
comp(stirling2(n, k, exact=is_exact), ans, **kwargs)
# test that dims both mismatch broadcast correctly (5,1) & (6,)
n = asarray([[4], [4], [4], [4], [4]])
k = asarray([0, 1, 2, 3, 4, 5])
ans = asarray([[0, 1, 7, 6, 1, 0] for _ in range(5)])
comp(stirling2(n, k, exact=False), ans, **kwargs)
def test_temme_rel_max_error(self):
# python integers with arbitrary precision are *not* allowed as
# object type in numpy arrays are inconsistent from api perspective
x = list(range(51, 101, 5))
for n in x:
k_entries = list(range(1, n+1))
denom = stirling2([n], k_entries, exact=True)
num = denom - stirling2([n], k_entries, exact=False)
assert np.max(np.abs(num / denom)) < 2e-5
| TestStirling2 |
python | fastai__fastai | fastai/collab.py | {
"start": 586,
"end": 1758
} | class ____(DataLoaders):
"Base `DataLoaders` for collaborative filtering."
@delegates(DataLoaders.from_dblock)
@classmethod
def from_df(cls, ratings, valid_pct=0.2, user_name=None, item_name=None, rating_name=None, seed=None, path='.', **kwargs):
"Create a `DataLoaders` suitable for collaborative filtering from `ratings`."
user_name = ifnone(user_name, ratings.columns[0])
item_name = ifnone(item_name, ratings.columns[1])
rating_name = ifnone(rating_name, ratings.columns[2])
cat_names = [user_name,item_name]
splits = RandomSplitter(valid_pct=valid_pct, seed=seed)(range_of(ratings))
to = TabularCollab(ratings, [Categorify], cat_names, y_names=[rating_name], y_block=TransformBlock(), splits=splits)
return to.dataloaders(path=path, **kwargs)
@classmethod
def from_csv(cls, csv, **kwargs):
"Create a `DataLoaders` suitable for collaborative filtering from `csv`."
return cls.from_df(pd.read_csv(csv), **kwargs)
CollabDataLoaders.from_csv = delegates(to=CollabDataLoaders.from_df)(CollabDataLoaders.from_csv)
# %% ../nbs/45_collab.ipynb 19
| CollabDataLoaders |
python | doocs__leetcode | solution/0200-0299/0221.Maximal Square/Solution.py | {
"start": 0,
"end": 443
} | class ____:
def maximalSquare(self, matrix: List[List[str]]) -> int:
m, n = len(matrix), len(matrix[0])
dp = [[0] * (n + 1) for _ in range(m + 1)]
mx = 0
for i in range(m):
for j in range(n):
if matrix[i][j] == '1':
dp[i + 1][j + 1] = min(dp[i][j + 1], dp[i + 1][j], dp[i][j]) + 1
mx = max(mx, dp[i + 1][j + 1])
return mx * mx
| Solution |
python | marshmallow-code__marshmallow | examples/inflection_example.py | {
"start": 610,
"end": 921
} | class ____(CamelCaseSchema):
first_name = fields.Str(required=True)
last_name = fields.Str(required=True)
schema = UserSchema()
loaded = schema.load({"firstName": "David", "lastName": "Bowie"})
print("Loaded data:")
print(loaded)
dumped = schema.dump(loaded)
print("Dumped data:")
print(dumped)
| UserSchema |
python | crytic__slither | slither/vyper_parsing/declarations/struct.py | {
"start": 308,
"end": 1186
} | class ____: # pylint: disable=too-few-public-methods
def __init__(
self,
st: Structure,
struct: StructDef,
) -> None:
self._structure = st
st.name = struct.name
st.canonical_name = struct.name + self._structure.contract.name
self._elemsNotParsed: List[AnnAssign] = struct.body
def analyze(self, contract) -> None:
for elem_to_parse in self._elemsNotParsed:
elem = StructureVariable()
elem.set_structure(self._structure)
elem.set_offset(elem_to_parse.src, self._structure.contract.compilation_unit)
elem_parser = StructureVariableVyper(elem, elem_to_parse)
elem_parser.analyze(contract)
self._structure.elems[elem.name] = elem
self._structure.add_elem_in_order(elem.name)
self._elemsNotParsed = []
| StructVyper |
python | PyCQA__pylint | pylint/pyreverse/mermaidjs_printer.py | {
"start": 459,
"end": 3767
} | class ____(Printer):
"""Printer for MermaidJS diagrams."""
DEFAULT_COLOR = "black"
NODES: dict[NodeType, str] = {
NodeType.CLASS: "class",
NodeType.PACKAGE: "class",
}
ARROWS: dict[EdgeType, str] = {
EdgeType.INHERITS: "--|>",
EdgeType.COMPOSITION: "--*",
EdgeType.ASSOCIATION: "-->",
EdgeType.AGGREGATION: "--o",
EdgeType.USES: "-->",
EdgeType.TYPE_DEPENDENCY: "..>",
}
def _open_graph(self) -> None:
"""Emit the header lines."""
self.emit("classDiagram")
self._inc_indent()
def _escape_mermaid_text(self, text: str) -> str:
"""Escape characters that conflict with Markdown formatting."""
text = text.replace("__", r"\_\_") # Double underscore → escaped
return text
def emit_node(
self,
name: str,
type_: NodeType,
properties: NodeProperties | None = None,
) -> None:
"""Create a new node.
Nodes can be classes, packages, participants etc.
"""
# pylint: disable=duplicate-code
if properties is None:
properties = NodeProperties(label=name)
nodetype = self.NODES[type_]
body = []
if properties.attrs:
# Escape attribute names to prevent Markdown formatting issues
escaped_attrs = [
self._escape_mermaid_text(attr) for attr in properties.attrs
]
body.extend(escaped_attrs)
if properties.methods:
for func in properties.methods:
args = self._get_method_arguments(func)
# Escape method name and arguments
escaped_method_name = self._escape_mermaid_text(func.name)
escaped_args = [self._escape_mermaid_text(arg) for arg in args]
line = f"{escaped_method_name}({', '.join(escaped_args)})"
line += "*" if func.is_abstract() else ""
if func.returns:
# Escape return type annotation
return_type = get_annotation_label(func.returns)
escaped_return_type = self._escape_mermaid_text(return_type)
line += f" {escaped_return_type}"
body.append(line)
name = name.split(".")[-1]
self.emit(f"{nodetype} {name} {{")
self._inc_indent()
for line in body:
self.emit(line)
self._dec_indent()
self.emit("}")
# apply style for colored output
if properties.color and properties.color != self.DEFAULT_COLOR:
self.emit(f"style {name} fill:{properties.color}")
def emit_edge(
self,
from_node: str,
to_node: str,
type_: EdgeType,
label: str | None = None,
) -> None:
"""Create an edge from one node to another to display relationships."""
from_node = from_node.split(".")[-1]
to_node = to_node.split(".")[-1]
edge = f"{from_node} {self.ARROWS[type_]} {to_node}"
if label:
edge += f" : {self._escape_mermaid_text(label)}"
self.emit(edge)
def _close_graph(self) -> None:
"""Emit the lines needed to properly close the graph."""
self._dec_indent()
| MermaidJSPrinter |
python | ipython__ipython | IPython/core/guarded_eval.py | {
"start": 903,
"end": 1017
} | class ____(Protocol):
def __call__(self, *args, **kwargs) -> HasGetItem:
...
@undoc
| InstancesHaveGetItem |
python | facebookresearch__faiss | tests/test_io.py | {
"start": 434,
"end": 1399
} | class ____(unittest.TestCase):
def test_io_error(self):
d, n = 32, 1000
x = np.random.uniform(size=(n, d)).astype('float32')
index = faiss.IndexFlatL2(d)
index.add(x)
fd, fname = tempfile.mkstemp()
os.close(fd)
try:
faiss.write_index(index, fname)
# should be fine
faiss.read_index(fname)
with open(fname, 'rb') as f:
data = f.read()
# now damage file
with open(fname, 'wb') as f:
f.write(data[:int(len(data) / 2)])
# should make a nice readable exception that mentions the filename
try:
faiss.read_index(fname)
except RuntimeError as e:
if fname not in str(e):
raise
else:
raise
finally:
if os.path.exists(fname):
os.unlink(fname)
| TestIOVariants |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_organization_integration_request.py | {
"start": 586,
"end": 1384
} | class ____(View):
def get(self, request: HttpRequest) -> HttpResponse:
org = Organization(id=1, slug="default", name="Default")
requester = User(name="Rick Swan")
recipient = User(name="James Bond")
recipient_member = OrganizationMember(user_id=recipient.id, organization=org)
notification = IntegrationRequestNotification(
org,
requester,
provider_type="first_party",
provider_slug=IntegrationProviderSlug.SLACK.value,
provider_name="Slack",
)
# hack to avoid a query
notification.role_based_recipient_strategy.set_member_in_cache(recipient_member)
return render_preview_email_for_notification(notification, recipient)
| DebugOrganizationIntegrationRequestEmailView |
python | getsentry__sentry | src/sentry/auth/authenticators/base.py | {
"start": 583,
"end": 623
} | class ____:
type: str
| ActivationResult |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 106781,
"end": 109752
} | class ____(TestCase):
@parametrize(
"context",
[ctx for ctx in supported_multiprocessing_contexts if ctx is not None],
)
@unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_nested_tensor_multiprocessing(self, device, context):
# The 'fork' multiprocessing context doesn't work for CUDA so skip it
if "cuda" in device and context == "fork":
self.skipTest(
f"{context} multiprocessing context not supported for {device}"
)
dataset = [
torch.nested.nested_tensor([torch.randn(5)], device=device)
for _ in range(10)
]
pin_memory_settings = [False]
if device == "cpu" and torch.cuda.is_available():
pin_memory_settings.append(True)
for pin_memory in pin_memory_settings:
loader = torch.utils.data.DataLoader(
dataset,
batch_size=1,
num_workers=4,
collate_fn=_clone_collate,
pin_memory=pin_memory,
multiprocessing_context=context,
)
for i, batch in enumerate(loader):
self.assertEqual(batch[0], dataset[i])
# Error case: default collate_fn doesn't currently support batches of nested tensors.
# Following the current semantics, we'd need to stack them, which isn't possible atm.
with self.assertRaisesRegex(
RuntimeError, "not currently supported by the default collate_fn"
):
loader = torch.utils.data.DataLoader(
dataset,
batch_size=1,
num_workers=4,
multiprocessing_context=context,
)
next(iter(loader))
@parametrize(
"context",
[ctx for ctx in supported_multiprocessing_contexts if ctx is not None],
)
@unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_sparse_tensor_multiprocessing(self, device, context):
# The 'fork' multiprocessing context doesn't work for CUDA so skip it
if "cuda" in device and context == "fork":
self.skipTest(
f"{context} multiprocessing context not supported for {device}"
)
dataset = [torch.randn(5, 5).to_sparse().to(device) for _ in range(10)]
pin_memory_settings = [False]
if device == "cpu" and torch.cuda.is_available():
pin_memory_settings.append(True)
for pin_memory in pin_memory_settings:
loader = torch.utils.data.DataLoader(
dataset,
batch_size=1,
num_workers=4,
collate_fn=_sparse_coo_collate,
pin_memory=pin_memory,
multiprocessing_context=context,
)
for i, batch in enumerate(loader):
self.assertEqual(batch[0], dataset[i])
| TestDataLoaderDeviceType |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 20493,
"end": 21473
} | class ____(dict):
"""
Optimizes empty tag mapping by using a shared singleton read-only dict.
Since mappingproxy is not pickle-able and causes other problems, we had to roll our own.
"""
def __new__(cls):
try:
# noinspection PyUnresolvedReferences
return cls._instance
except AttributeError:
cls._instance = dict.__new__(cls)
# noinspection PyUnresolvedReferences
return cls._instance
def __setitem__(self, key, value):
raise NotImplementedError()
def setdefault(self, __key, __default=None):
raise NotImplementedError()
def update(self, __m, **kwargs):
raise NotImplementedError()
_EMPTY_INTERNAL_TAGS_MAPPING = t.cast(_AnsibleTagsMapping, _EmptyROInternalTagsMapping())
"""
An empty read-only mapping of tags.
Also used as a sentinel to cheaply determine that a type is not tagged by using a reference equality check.
"""
| _EmptyROInternalTagsMapping |
python | django__django | tests/admin_utils/tests.py | {
"start": 3653,
"end": 18019
} | class ____(SimpleTestCase):
empty_value = "-empty-"
def test_values_from_lookup_field(self):
"""
Regression test for #12654: lookup_field
"""
SITE_NAME = "example.com"
TITLE_TEXT = "Some title"
CREATED_DATE = datetime.min
ADMIN_METHOD = "admin method"
SIMPLE_FUNCTION = "function"
INSTANCE_ATTRIBUTE = "attr"
class MockModelAdmin:
def get_admin_value(self, obj):
return ADMIN_METHOD
def simple_function(obj):
return SIMPLE_FUNCTION
site_obj = Site(domain=SITE_NAME)
article = Article(
site=site_obj,
title=TITLE_TEXT,
created=CREATED_DATE,
)
article.non_field = INSTANCE_ATTRIBUTE
verifications = (
("site", SITE_NAME),
("created", localize(CREATED_DATE)),
("title", TITLE_TEXT),
("get_admin_value", ADMIN_METHOD),
(simple_function, SIMPLE_FUNCTION),
("test_from_model", article.test_from_model()),
("non_field", INSTANCE_ATTRIBUTE),
("site__domain", SITE_NAME),
)
mock_admin = MockModelAdmin()
for name, value in verifications:
field, attr, resolved_value = lookup_field(name, article, mock_admin)
if field is not None:
resolved_value = display_for_field(
resolved_value, field, self.empty_value
)
self.assertEqual(value, resolved_value)
def test_empty_value_display_for_field(self):
tests = [
models.CharField(),
models.DateField(),
models.DecimalField(),
models.FloatField(),
models.URLField(),
models.JSONField(),
models.TimeField(),
]
for model_field in tests:
for value in model_field.empty_values:
with self.subTest(model_field=model_field, empty_value=value):
display_value = display_for_field(
value, model_field, self.empty_value
)
self.assertEqual(display_value, self.empty_value)
def test_empty_value_display_choices(self):
model_field = models.CharField(choices=((None, "test_none"),))
display_value = display_for_field(None, model_field, self.empty_value)
self.assertEqual(display_value, "test_none")
def test_empty_value_display_booleanfield(self):
model_field = models.BooleanField(null=True)
display_value = display_for_field(None, model_field, self.empty_value)
expected = (
f'<img src="{settings.STATIC_URL}admin/img/icon-unknown.svg" alt="None" />'
)
self.assertHTMLEqual(display_value, expected)
def test_json_display_for_field(self):
tests = [
({"a": {"b": "c"}}, '{"a": {"b": "c"}}'),
(["a", "b"], '["a", "b"]'),
("a", '"a"'),
({"a": "你好 世界"}, '{"a": "你好 世界"}'),
({("a", "b"): "c"}, "{('a', 'b'): 'c'}"), # Invalid JSON.
]
for value, display_value in tests:
with self.subTest(value=value):
self.assertEqual(
display_for_field(value, models.JSONField(), self.empty_value),
display_value,
)
def test_url_display_for_field(self):
model_field = models.URLField()
display_value = display_for_field(
"http://example.com", model_field, self.empty_value
)
expected = '<a href="http://example.com">http://example.com</a>'
self.assertHTMLEqual(display_value, expected)
def test_number_formats_display_for_field(self):
display_value = display_for_field(
12345.6789, models.FloatField(), self.empty_value
)
self.assertEqual(display_value, "12345.6789")
display_value = display_for_field(
Decimal("12345.6789"), models.DecimalField(), self.empty_value
)
self.assertEqual(display_value, "12345.6789")
display_value = display_for_field(
12345, models.IntegerField(), self.empty_value
)
self.assertEqual(display_value, "12345")
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_number_formats_with_thousand_separator_display_for_field(self):
display_value = display_for_field(
12345.6789, models.FloatField(), self.empty_value
)
self.assertEqual(display_value, "12,345.6789")
display_value = display_for_field(
Decimal("12345.6789"), models.DecimalField(), self.empty_value
)
self.assertEqual(display_value, "12,345.6789")
display_value = display_for_field(
12345, models.IntegerField(), self.empty_value
)
self.assertEqual(display_value, "12,345")
@isolate_apps("admin_utils")
def test_display_for_field_password_name_not_user_model(self):
class PasswordModel(models.Model):
password = models.CharField(max_length=200)
password_field = PasswordModel._meta.get_field("password")
display_value = display_for_field("test", password_field, self.empty_value)
self.assertEqual(display_value, "test")
def test_password_display_for_field_user_model(self):
password_field = User._meta.get_field("password")
for password in [
"invalid",
"md5$zjIiKM8EiyfXEGiexlQRw4$a59a82cf344546e7bc09cb5f2246370a",
"!b7pk7RNudAXGTNLK6fW5YnBCLVE6UUmeoJJYQHaO",
]:
with self.subTest(password=password):
display_value = display_for_field(
password, password_field, self.empty_value
)
self.assertEqual(display_value, render_password_as_hash(password))
def test_list_display_for_value(self):
display_value = display_for_value([1, 2, 3], self.empty_value)
self.assertEqual(display_value, "1, 2, 3")
display_value = display_for_value(
[1, 2, "buckle", "my", "shoe"], self.empty_value
)
self.assertEqual(display_value, "1, 2, buckle, my, shoe")
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_list_display_for_value_boolean(self):
self.assertEqual(
display_for_value(True, "", boolean=True),
'<img src="/static/admin/img/icon-yes.svg" alt="True">',
)
self.assertEqual(
display_for_value(False, "", boolean=True),
'<img src="/static/admin/img/icon-no.svg" alt="False">',
)
self.assertEqual(display_for_value(True, ""), "True")
self.assertEqual(display_for_value(False, ""), "False")
def test_list_display_for_value_empty(self):
for value in EMPTY_VALUES:
with self.subTest(empty_value=value):
display_value = display_for_value(value, self.empty_value)
self.assertEqual(display_value, self.empty_value)
def test_label_for_field(self):
"""
Tests for label_for_field
"""
self.assertEqual(label_for_field("title", Article), "title")
self.assertEqual(label_for_field("hist", Article), "History")
self.assertEqual(
label_for_field("hist", Article, return_attr=True), ("History", None)
)
self.assertEqual(label_for_field("__str__", Article), "article")
with self.assertRaisesMessage(
AttributeError, "Unable to lookup 'unknown' on Article"
):
label_for_field("unknown", Article)
def test_callable(obj):
return "nothing"
self.assertEqual(label_for_field(test_callable, Article), "Test callable")
self.assertEqual(
label_for_field(test_callable, Article, return_attr=True),
("Test callable", test_callable),
)
self.assertEqual(label_for_field("test_from_model", Article), "Test from model")
self.assertEqual(
label_for_field("test_from_model", Article, return_attr=True),
("Test from model", Article.test_from_model),
)
self.assertEqual(
label_for_field("test_from_model_with_override", Article),
"not What you Expect",
)
self.assertEqual(label_for_field(lambda x: "nothing", Article), "--")
self.assertEqual(label_for_field("site_id", Article), "Site id")
# The correct name and attr are returned when `__` is in the field
# name.
self.assertEqual(label_for_field("site__domain", Article), "Site domain")
self.assertEqual(
label_for_field("site__domain", Article, return_attr=True),
("Site domain", Site._meta.get_field("domain")),
)
def test_label_for_field_failed_lookup(self):
msg = "Unable to lookup 'site__unknown' on Article"
with self.assertRaisesMessage(AttributeError, msg):
label_for_field("site__unknown", Article)
class MockModelAdmin:
@admin.display(description="not Really the Model")
def test_from_model(self, obj):
return "nothing"
self.assertEqual(
label_for_field("test_from_model", Article, model_admin=MockModelAdmin),
"not Really the Model",
)
self.assertEqual(
label_for_field(
"test_from_model", Article, model_admin=MockModelAdmin, return_attr=True
),
("not Really the Model", MockModelAdmin.test_from_model),
)
def test_label_for_field_form_argument(self):
class ArticleForm(forms.ModelForm):
extra_form_field = forms.BooleanField()
class Meta:
fields = "__all__"
model = Article
self.assertEqual(
label_for_field("extra_form_field", Article, form=ArticleForm()),
"Extra form field",
)
msg = "Unable to lookup 'nonexistent' on Article or ArticleForm"
with self.assertRaisesMessage(AttributeError, msg):
label_for_field("nonexistent", Article, form=ArticleForm())
def test_label_for_property(self):
class MockModelAdmin:
@property
@admin.display(description="property short description")
def test_from_property(self):
return "this if from property"
self.assertEqual(
label_for_field("test_from_property", Article, model_admin=MockModelAdmin),
"property short description",
)
def test_help_text_for_field(self):
tests = [
("article", ""),
("unknown", ""),
("hist", "History help text"),
]
for name, help_text in tests:
with self.subTest(name=name):
self.assertEqual(help_text_for_field(name, Article), help_text)
def test_related_name(self):
"""
Regression test for #13963
"""
self.assertEqual(
label_for_field("location", Event, return_attr=True),
("location", None),
)
self.assertEqual(
label_for_field("event", Location, return_attr=True),
("awesome event", None),
)
self.assertEqual(
label_for_field("guest", Event, return_attr=True),
("awesome guest", None),
)
def test_safestring_in_field_label(self):
# safestring should not be escaped
class MyForm(forms.Form):
text = forms.CharField(label=mark_safe("<i>text</i>"))
cb = forms.BooleanField(label=mark_safe("<i>cb</i>"))
form = MyForm()
self.assertHTMLEqual(
helpers.AdminField(form, "text", is_first=False).label_tag(),
'<label for="id_text" class="required inline"><i>text</i>:</label>',
)
self.assertHTMLEqual(
helpers.AdminField(form, "cb", is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline">'
"<i>cb</i></label>",
)
# normal strings needs to be escaped
class MyForm(forms.Form):
text = forms.CharField(label="&text")
cb = forms.BooleanField(label="&cb")
form = MyForm()
self.assertHTMLEqual(
helpers.AdminField(form, "text", is_first=False).label_tag(),
'<label for="id_text" class="required inline">&text:</label>',
)
self.assertHTMLEqual(
helpers.AdminField(form, "cb", is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline">&cb</label>',
)
def test_flatten(self):
flat_all = ["url", "title", "content", "sites"]
inputs = (
((), []),
(("url", "title", ("content", "sites")), flat_all),
(("url", "title", "content", "sites"), flat_all),
((("url", "title"), ("content", "sites")), flat_all),
)
for orig, expected in inputs:
self.assertEqual(flatten(orig), expected)
def test_flatten_fieldsets(self):
"""
Regression test for #18051
"""
fieldsets = ((None, {"fields": ("url", "title", ("content", "sites"))}),)
self.assertEqual(
flatten_fieldsets(fieldsets), ["url", "title", "content", "sites"]
)
fieldsets = ((None, {"fields": ("url", "title", ["content", "sites"])}),)
self.assertEqual(
flatten_fieldsets(fieldsets), ["url", "title", "content", "sites"]
)
def test_quote(self):
self.assertEqual(quote("something\nor\nother"), "something_0Aor_0Aother")
def test_build_q_object_from_lookup_parameters(self):
parameters = {
"title__in": [["Article 1", "Article 2"]],
"hist__iexact": ["history"],
"site__pk": [1, 2],
}
q_obj = build_q_object_from_lookup_parameters(parameters)
self.assertEqual(
q_obj,
models.Q(title__in=["Article 1", "Article 2"])
& models.Q(hist__iexact="history")
& (models.Q(site__pk=1) | models.Q(site__pk=2)),
)
| UtilsTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingTypeIs1.py | {
"start": 782,
"end": 970
} | class ____(A):
pass
def func4(a: str | A):
if type(a) is B:
reveal_type(a, expected_text="B")
else:
reveal_type(a, expected_text="str | A")
T = TypeVar("T")
| B |
python | wandb__wandb | wandb/sdk/artifacts/_generated/registry_versions.py | {
"start": 531,
"end": 747
} | class ____(GQLResult):
name: str
artifact_memberships: Optional[
RegistryVersionsOrganizationOrgEntityArtifactMemberships
] = Field(alias="artifactMemberships")
| RegistryVersionsOrganizationOrgEntity |
python | eventlet__eventlet | tests/pools_test.py | {
"start": 135,
"end": 293
} | class ____(pools.Pool):
def create(self):
self.current_integer = getattr(self, 'current_integer', 0) + 1
return self.current_integer
| IntPool |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-cart/source_cart/source.py | {
"start": 772,
"end": 1286
} | class ____(AbstractHeaderAuthenticator):
def __init__(self, access_token, store_name):
self.auth_method = AuthMethod.SINGLE_STORE_ACCESS_TOKEN
self._store_name = store_name
self._access_token = access_token
def get_auth_header(self) -> Mapping[str, Any]:
return {"X-AC-Auth-Token": self._access_token}
def url_base(self) -> str:
return f"https://{self._store_name}/api/v1/"
def extra_params(self, stream, params):
return {}
| CustomHeaderAuthenticator |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/partition_sets.py | {
"start": 10932,
"end": 16627
} | class ____(graphene.ObjectType):
id = graphene.NonNull(graphene.ID)
name = graphene.NonNull(graphene.String)
pipeline_name = graphene.NonNull(graphene.String)
solid_selection = graphene.List(graphene.NonNull(graphene.String))
mode = graphene.NonNull(graphene.String)
partitionsOrError = graphene.Field(
graphene.NonNull(GraphenePartitionsOrError),
cursor=graphene.String(),
limit=graphene.Int(),
reverse=graphene.Boolean(),
)
partition = graphene.Field(GraphenePartition, partition_name=graphene.NonNull(graphene.String))
partitionStatusesOrError = graphene.NonNull(GraphenePartitionStatusesOrError)
partitionRuns = non_null_list(GraphenePartitionRun)
repositoryOrigin = graphene.NonNull(GrapheneRepositoryOrigin)
hasLaunchBackfillPermission = graphene.NonNull(graphene.Boolean)
hasCancelBackfillPermission = graphene.NonNull(graphene.Boolean)
backfills = graphene.Field(
non_null_list(GraphenePartitionBackfill),
cursor=graphene.String(),
limit=graphene.Int(),
)
class Meta:
name = "PartitionSet"
def __init__(
self,
remote_partition_set: RemotePartitionSet,
):
self._remote_partition_set = check.inst_param(
remote_partition_set, "remote_partition_set", RemotePartitionSet
)
self._partition_names = None
super().__init__(
name=remote_partition_set.name,
pipeline_name=remote_partition_set.job_name,
solid_selection=remote_partition_set.op_selection,
mode=remote_partition_set.mode,
)
def _get_partition_names(self, graphene_info: ResolveInfo) -> Sequence[str]:
if self._partition_names is None:
result = graphene_info.context.get_partition_names(
repository_selector=self._remote_partition_set.repository_handle.to_selector(),
job_name=self._remote_partition_set.job_name,
instance=graphene_info.context.instance,
selected_asset_keys=None,
)
if isinstance(result, PartitionExecutionErrorSnap):
raise DagsterUserCodeProcessError.from_error_info(result.error)
self._partition_names = result.partition_names
return self._partition_names
def resolve_id(self, _graphene_info: ResolveInfo):
return self._remote_partition_set.get_remote_origin_id()
@capture_error
def resolve_partitionsOrError(
self,
graphene_info: ResolveInfo,
cursor: Optional[str] = None,
limit: Optional[int] = None,
reverse: Optional[bool] = None,
):
return get_partitions(
self._remote_partition_set.repository_handle,
self._remote_partition_set,
self._get_partition_names(graphene_info),
cursor=cursor,
limit=limit,
reverse=reverse or False,
)
def resolve_partition(self, graphene_info: ResolveInfo, partition_name: str):
return get_partition_by_name(
graphene_info,
self._remote_partition_set.repository_handle,
self._remote_partition_set,
partition_name,
)
def resolve_partitionRuns(self, graphene_info: ResolveInfo):
return get_partition_set_partition_runs(
graphene_info,
self._remote_partition_set,
self._get_partition_names(graphene_info),
)
@capture_error
def resolve_partitionStatusesOrError(self, graphene_info: ResolveInfo):
return get_partition_set_partition_statuses(
graphene_info,
self._remote_partition_set,
self._get_partition_names(graphene_info),
)
def resolve_repositoryOrigin(self, _):
origin = self._remote_partition_set.get_remote_origin().repository_origin
return GrapheneRepositoryOrigin(origin)
def resolve_hasLaunchBackfillPermission(self, graphene_info: ResolveInfo) -> bool:
return has_permission_for_job(
graphene_info,
Permissions.LAUNCH_PARTITION_BACKFILL,
JobSelector(
location_name=self._remote_partition_set.repository_handle.location_name,
repository_name=self._remote_partition_set.repository_handle.repository_name,
job_name=self._remote_partition_set.job_name,
),
)
def resolve_hasCancelBackfillPermission(self, graphene_info: ResolveInfo) -> bool:
return has_permission_for_job(
graphene_info,
Permissions.CANCEL_PARTITION_BACKFILL,
JobSelector(
location_name=self._remote_partition_set.repository_handle.location_name,
repository_name=self._remote_partition_set.repository_handle.repository_name,
job_name=self._remote_partition_set.job_name,
),
)
def resolve_backfills(
self,
graphene_info: ResolveInfo,
cursor: Optional[str] = None,
limit: Optional[int] = None,
):
matching = [
backfill
for backfill in graphene_info.context.instance.get_backfills(
cursor=cursor,
)
if backfill.partition_set_origin
and backfill.partition_set_origin.partition_set_name == self._remote_partition_set.name
and backfill.partition_set_origin.repository_origin.repository_name
== self._remote_partition_set.repository_handle.repository_name
]
return [GraphenePartitionBackfill(backfill) for backfill in matching[:limit]]
| GraphenePartitionSet |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-monday/unit_tests/integrations/monday_requests/base_requests_builder.py | {
"start": 839,
"end": 1621
} | class ____(MondayRequestBuilder):
def __init__(self) -> None:
self._authenticator: str = None
@property
def url(self) -> str:
return f"https://api.monday.com/v2"
@property
def headers(self) -> Dict[str, Any]:
return (super().headers or {}) | {
"Authorization": self._authenticator.client_access_token,
}
@property
def query_params(self):
return super().query_params
def with_authenticator(self, authenticator: Authenticator) -> "MondayBaseRequestBuilder":
self._authenticator: Authenticator = authenticator
return self
def with_board_ids(self, board_ids: List[int]) -> "MondayBaseRequestBuilder":
self._board_ids = board_ids
return self
| MondayBaseRequestBuilder |
python | tensorflow__tensorflow | tensorflow/python/distribute/v1/cross_device_ops_test.py | {
"start": 4238,
"end": 10987
} | class ____(test.TestCase, parameterized.TestCase):
def _assert_indexed_slices_equal(self, left, right):
self.assertIsInstance(left, indexed_slices_lib.IndexedSlices)
self.assertIsInstance(right, indexed_slices_lib.IndexedSlices)
self.assertEqual(
device_util.resolve(left.device), device_util.resolve(right.device))
self.assertAllEqual(
self.evaluate(ops.convert_to_tensor(left)),
self.evaluate(ops.convert_to_tensor(right)))
def _assert_mirrored_equal(self,
left_list,
right_list,
sess=None,
run_options=None):
if not isinstance(left_list, list):
left_list, right_list = [left_list], [right_list]
for left, right in zip(left_list, right_list):
self.assertEqual(type(left), type(right))
# Convert Mirrored to a list since sess.run(Mirrored) only returns one
# value.
if isinstance(left, value_lib.Mirrored):
left, right = left.values, right.values
else:
# When there's only one replica Mirrored is automatically unwrapped.
left, right = [left], [right]
for left_value, right_value in zip(left, right):
self.assertEqual(
device_util.resolve(left_value.device),
device_util.resolve(right_value.device))
# Densify IndexedSlices.
left = [ops.convert_to_tensor(v) for v in left]
right = [ops.convert_to_tensor(v) for v in right]
if not context.executing_eagerly():
left, right = sess.run((left, right), options=run_options)
for left_value, right_value in zip(left, right):
self.assertAllEqual(left_value, right_value)
def _testReductionAndBroadcast(self, cross_device_ops, devices):
if context.num_gpus() < sum(1 for d in devices if "GPU" in d.upper()):
self.skipTest("Not enough GPUs")
with self.cached_session() as sess:
values = [constant_op.constant(float(d)) for d in range(len(devices))]
per_replica = _make_per_replica(values, devices)
mean = (len(devices) - 1.) / 2.
values_2 = [constant_op.constant(d + 1.0) for d in range(len(devices))]
per_replica_2 = _make_per_replica(values_2, devices)
mean_2 = mean + 1.
destination_mirrored = _fake_mirrored(1., devices)
destination_different = _fake_mirrored(1.,
device_util.resolve(_cpu_device))
destination_str = device_util.resolve(_cpu_device)
all_destinations = [
destination_mirrored,
destination_different,
destination_str,
]
# test reduce()
for destinations in all_destinations:
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.MEAN,
per_replica,
destinations=destinations), _fake_mirrored(mean, destinations),
sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.MEAN,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2, destinations), sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.SUM,
per_replica,
destinations=destinations),
_fake_mirrored(mean * len(devices), destinations), sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.SUM,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2 * len(devices), destinations), sess)
# test batch_reduce()
for d1, d2 in itertools.product(all_destinations, all_destinations):
self._assert_mirrored_equal(
cross_device_ops.batch_reduce(reduce_util.ReduceOp.MEAN,
[(per_replica, d1),
(per_replica_2, d2)]),
[_fake_mirrored(mean, d1),
_fake_mirrored(mean_2, d2)], sess)
self._assert_mirrored_equal(
cross_device_ops.batch_reduce(reduce_util.ReduceOp.SUM,
[(per_replica, d1),
(per_replica_2, d2)]),
[
_fake_mirrored(mean * len(devices), d1),
_fake_mirrored(mean_2 * len(devices), d2)
], sess)
# test broadcast()
for destinations in all_destinations:
self._assert_mirrored_equal(
cross_device_ops.broadcast(constant_op.constant(1.), destinations),
_fake_mirrored(1., destinations), sess)
def _testIndexedSlicesAllReduce(self, devices, cross_device_ops_instance,
reduce_op, batch_reduce):
with self.cached_session() as sess:
dense_shape = [5, 2]
t0 = _make_indexed_slices([[1., 2.]], [1], dense_shape, devices[0])
t1 = _make_indexed_slices([[3., 4.], [5., 6.]], [1, 3], dense_shape,
devices[1])
per_replica = value_lib.PerReplica((t0, t1))
if batch_reduce:
result = cross_device_ops_instance.batch_reduce(
reduce_op, [(per_replica, per_replica)])
else:
result = cross_device_ops_instance.reduce(reduce_op, per_replica,
per_replica)
total_indices_with_dups = [1, 1, 3]
total_indices_without_dups = [1, 3]
if reduce_op == reduce_util.ReduceOp.SUM:
total_values_with_dups = [[1., 2.], [3., 4.], [5., 6.]]
total_values_without_dups = [[4., 6.], [5., 6.]]
else:
assert reduce_op == reduce_util.ReduceOp.MEAN
total_values_with_dups = [[0.5, 1.], [1.5, 2.], [2.5, 3.]]
total_values_without_dups = [[2., 3.], [2.5, 3.]]
total_mirrored_with_dups = _make_mirrored_indexed_slices(
devices, total_values_with_dups, total_indices_with_dups, dense_shape)
total_mirrored_without_dups = _make_mirrored_indexed_slices(
devices, total_values_without_dups, total_indices_without_dups,
dense_shape)
# Test that the result is semantically equal to both the concatenated
# IndexedSlices, as well as when the duplicate indices are summed up.
if batch_reduce:
total_mirrored_with_dups = [total_mirrored_with_dups]
total_mirrored_without_dups = [total_mirrored_without_dups]
self._assert_mirrored_equal(total_mirrored_with_dups, result, sess)
self._assert_mirrored_equal(total_mirrored_without_dups, result, sess)
| CrossDeviceOpsTestBase |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 23078,
"end": 23400
} | class ____(oracle._OracleDate):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return value.date()
else:
return value
return process
| _CXOracleDate |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/formats.py | {
"start": 0,
"end": 418
} | class ____:
"""XYXY contains axis indices for the XYXY format.
All values in the XYXY format should be absolute pixel values.
The XYXY format consists of the following required indices:
- LEFT: left of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
"""
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
| XYXY |
python | kamyu104__LeetCode-Solutions | Python/split-array-into-consecutive-subsequences.py | {
"start": 29,
"end": 913
} | class ____(object):
def isPossible(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
pre, cur = float("-inf"), 0
cnt1, cnt2, cnt3 = 0, 0, 0
i = 0
while i < len(nums):
cnt = 0
cur = nums[i]
while i < len(nums) and cur == nums[i]:
cnt += 1
i += 1
if cur != pre + 1:
if cnt1 != 0 or cnt2 != 0:
return False
cnt1, cnt2, cnt3 = cnt, 0, 0
else:
if cnt < cnt1 + cnt2:
return False
cnt1, cnt2, cnt3 = max(0, cnt - (cnt1 + cnt2 + cnt3)), \
cnt1, \
cnt2 + min(cnt3, cnt - (cnt1 + cnt2))
pre = cur
return cnt1 == 0 and cnt2 == 0
| Solution |
python | mwaskom__seaborn | tests/_core/test_scales.py | {
"start": 18912,
"end": 22153
} | class ____:
@pytest.fixture
def t(self):
dates = pd.to_datetime(["1972-09-27", "1975-06-24", "1980-12-14"])
return pd.Series(dates, name="x")
@pytest.fixture
def x(self, t):
return pd.Series(mpl.dates.date2num(t), name=t.name)
def test_coordinate_defaults(self, t, x):
s = Temporal()._setup(t, Coordinate())
assert_array_equal(s(t), x)
def test_interval_defaults(self, t, x):
s = Temporal()._setup(t, IntervalProperty())
normed = (x - x.min()) / (x.max() - x.min())
assert_array_equal(s(t), normed)
def test_interval_with_range(self, t, x):
values = (1, 3)
s = Temporal((1, 3))._setup(t, IntervalProperty())
normed = (x - x.min()) / (x.max() - x.min())
expected = normed * (values[1] - values[0]) + values[0]
assert_array_equal(s(t), expected)
def test_interval_with_norm(self, t, x):
norm = t[1], t[2]
s = Temporal(norm=norm)._setup(t, IntervalProperty())
n = mpl.dates.date2num(norm)
normed = (x - n[0]) / (n[1] - n[0])
assert_array_equal(s(t), normed)
def test_color_defaults(self, t, x):
cmap = color_palette("ch:", as_cmap=True)
s = Temporal()._setup(t, Color())
normed = (x - x.min()) / (x.max() - x.min())
assert_array_equal(s(t), cmap(normed)[:, :3]) # FIXME RGBA
def test_color_named_values(self, t, x):
name = "viridis"
cmap = color_palette(name, as_cmap=True)
s = Temporal(name)._setup(t, Color())
normed = (x - x.min()) / (x.max() - x.min())
assert_array_equal(s(t), cmap(normed)[:, :3]) # FIXME RGBA
def test_coordinate_axis(self, t, x):
ax = mpl.figure.Figure().subplots()
s = Temporal()._setup(t, Coordinate(), ax.xaxis)
assert_array_equal(s(t), x)
locator = ax.xaxis.get_major_locator()
formatter = ax.xaxis.get_major_formatter()
assert isinstance(locator, mpl.dates.AutoDateLocator)
assert isinstance(formatter, mpl.dates.AutoDateFormatter)
def test_tick_locator(self, t):
locator = mpl.dates.YearLocator(month=3, day=15)
s = Temporal().tick(locator)
a = PseudoAxis(s._setup(t, Coordinate())._matplotlib_scale)
a.set_view_interval(0, 365)
assert 73 in a.major.locator()
def test_tick_upto(self, t, x):
n = 8
ax = mpl.figure.Figure().subplots()
Temporal().tick(upto=n)._setup(t, Coordinate(), ax.xaxis)
locator = ax.xaxis.get_major_locator()
assert set(locator.maxticks.values()) == {n}
def test_label_formatter(self, t):
formatter = mpl.dates.DateFormatter("%Y")
s = Temporal().label(formatter)
a = PseudoAxis(s._setup(t, Coordinate())._matplotlib_scale)
a.set_view_interval(10, 1000)
label, = a.major.formatter.format_ticks([100])
assert label == "1970"
def test_label_concise(self, t, x):
ax = mpl.figure.Figure().subplots()
Temporal().label(concise=True)._setup(t, Coordinate(), ax.xaxis)
formatter = ax.xaxis.get_major_formatter()
assert isinstance(formatter, mpl.dates.ConciseDateFormatter)
| TestTemporal |
python | apache__airflow | providers/apache/kafka/tests/integration/apache/kafka/triggers/test_await_message.py | {
"start": 1263,
"end": 2980
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="trigger.await_message.test.integration.test_1",
conn_type="kafka",
extra=json.dumps(
{
"socket.timeout.ms": 10,
"bootstrap.servers": "broker:29092",
"group.id": "trigger.await_message.test.integration.test_1",
"enable.auto.commit": False,
"auto.offset.reset": "beginning",
}
),
)
)
@pytest.mark.asyncio
async def test_trigger_await_message_test_1(self):
"""
Await message waits for a message that returns truthy
"""
TOPIC = "trigger.await_message.test.integration.test_1"
p = Producer(**{"bootstrap.servers": "broker:29092"})
for _ in range(20):
p.produce(TOPIC, "not_this")
p.produce(TOPIC, TOPIC)
assert len(p) == 21
x = p.flush()
assert x == 0
trigger = AwaitMessageTrigger(
topics=[TOPIC],
apply_function="integration.apache.kafka.triggers.test_await_message._apply_function",
apply_function_args=None,
apply_function_kwargs=None,
kafka_config_id="trigger.await_message.test.integration.test_1",
poll_timeout=0,
poll_interval=1,
)
generator = trigger.run()
actual = await generator.__anext__()
assert actual.payload.value() == bytes(TOPIC, "utf-8")
| TestTrigger |
python | doocs__leetcode | solution/1700-1799/1725.Number Of Rectangles That Can Form The Largest Square/Solution.py | {
"start": 0,
"end": 306
} | class ____:
def countGoodRectangles(self, rectangles: List[List[int]]) -> int:
ans = mx = 0
for l, w in rectangles:
x = min(l, w)
if mx < x:
ans = 1
mx = x
elif mx == x:
ans += 1
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/count-pairs-with-xor-in-a-range.py | {
"start": 64,
"end": 783
} | class ____(object):
def countPairs(self, nums, low, high):
"""
:type nums: List[int]
:type low: int
:type high: int
:rtype: int
"""
def count(nums, x):
result = 0
dp = collections.Counter(nums)
while x:
if x&1:
result += sum(dp[(x^1)^k]*dp[k] for k in dp.iterkeys())//2 # current limit is xxxxx1*****, count xor pair with xxxxx0***** pattern
dp = collections.Counter({k>>1: dp[k]+dp[k^1] for k in dp.iterkeys()})
x >>= 1
return result
return count(nums, high+1)-count(nums, low)
# Time: O(n)
# Space: O(n)
# trie solution
| Solution |
python | scrapy__scrapy | tests/test_squeues.py | {
"start": 1665,
"end": 1753
} | class ____(MarshalFifoDiskQueueTest):
chunksize = 1
| ChunkSize1MarshalFifoDiskQueueTest |
python | ansible__ansible | test/units/regex/test_invalid_var_names.py | {
"start": 232,
"end": 789
} | class ____(unittest.TestCase):
def test_positive_matches(self):
for name, invalid, sanitized in test_cases:
self.assertEqual(C.INVALID_VARIABLE_NAMES.findall(name), invalid)
def test_negative_matches(self):
for name in ('this_is_valid', 'Also_1_valid', 'noproblem'):
self.assertEqual(C.INVALID_VARIABLE_NAMES.findall(name), [])
def test_get_setting(self):
for name, invalid, sanitized in test_cases:
self.assertEqual(C.INVALID_VARIABLE_NAMES.sub('_', name), sanitized)
| TestInvalidVars |
python | allegroai__clearml | clearml/backend_api/session/jsonmodels/builders.py | {
"start": 1625,
"end": 3979
} | class ____(Builder):
def __init__(self, model_type: type, *args: Any, **kwargs: Any) -> None:
super(ObjectBuilder, self).__init__(*args, **kwargs)
self.properties = {}
self.required = []
self.type = model_type
self.register_type(self.type, self)
def add_field(self, name: str, field: Any, schema: Any) -> None:
_apply_validators_modifications(schema, field)
self.properties[name] = schema
if field.required:
self.required.append(name)
def build(self) -> str:
builder = self.get_builder(self.type)
if self.is_definition and not self.is_root:
self.add_definition(builder)
[self.maybe_build(value) for _, value in self.properties.items()]
return "#/definitions/{name}".format(name=self.type_name)
else:
return builder.build_definition(nullable=self.nullable)
@property
def type_name(self) -> str:
module_name = "{module}.{name}".format(
module=self.type.__module__,
name=self.type.__name__,
)
return module_name.replace(".", "_").lower()
def build_definition(self, add_defintitions: bool = True, nullable: bool = False) -> dict:
properties = dict((name, self.maybe_build(value)) for name, value in self.properties.items())
schema = {
"type": "object",
"additionalProperties": False,
"properties": properties,
}
if self.required:
schema["required"] = self.required
if self.definitions and add_defintitions:
schema["definitions"] = dict(
(builder.type_name, builder.build_definition(False, False)) for builder in self.definitions
)
return schema
@property
def is_definition(self) -> bool:
if self.count_type(self.type) > 1:
return True
elif self.parent:
return self.parent.is_definition
else:
return False
@property
def is_root(self) -> bool:
return not bool(self.parent)
def _apply_validators_modifications(field_schema: dict, field: Any) -> None:
for validator in field.validators:
try:
validator.modify_schema(field_schema)
except AttributeError:
pass
| ObjectBuilder |
python | weaviate__weaviate-python-client | weaviate/collections/classes/types.py | {
"start": 678,
"end": 736
} | class ____(_WeaviateInput):
number: str
| _PhoneNumberBase |
python | pypa__warehouse | warehouse/admin/forms.py | {
"start": 181,
"end": 1789
} | class ____(wtforms.Form):
"""
Form for validating upload limit input in admin interface.
Used by both project and organization admin views to ensure
consistent validation of upload limits.
"""
upload_limit = wtforms.StringField(
validators=[
wtforms.validators.Optional(),
],
filters=[lambda x: None if (x == "" or not x) else x],
)
def validate_upload_limit(self, field):
"""
Validate upload limit value.
- Empty string means remove the limit (use system default)
- Must be a valid integer if provided
- Must be between MIN and MAX allowed values
"""
if field.data is None:
# Already None from filter
return
try:
limit_value = int(field.data)
except (ValueError, TypeError):
raise wtforms.ValidationError(
"Upload limit must be a valid integer or empty"
)
# Check minimum (must be at least the system default)
min_limit = MAX_FILESIZE // ONE_MIB
if limit_value < min_limit:
raise wtforms.ValidationError(
f"Upload limit can not be less than {min_limit:0.1f}MiB"
)
# Check maximum (capped at 1GB)
max_limit = UPLOAD_LIMIT_CAP // ONE_MIB
if limit_value > max_limit:
raise wtforms.ValidationError(
f"Upload limit can not be greater than {max_limit:0.1f}MiB"
)
# Convert to bytes for storage
field.data = limit_value * ONE_MIB
| SetUploadLimitForm |
python | huggingface__transformers | src/transformers/models/moshi/modeling_moshi.py | {
"start": 37564,
"end": 41214
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: MoshiConfig, layer_idx: int, use_flexible_linear: bool, use_rope=True):
super().__init__()
self.hidden_size = config.hidden_size
self.use_flexible_linear = use_flexible_linear
self.self_attn = MOSHI_ATTENTION_CLASSES[config._attn_implementation](
config=config, layer_idx=layer_idx, use_flexible_linear=use_flexible_linear, use_rope=use_rope
)
self.mlp = MoshiGatingMLP(config, use_flexible_linear)
self.input_layernorm = MoshiRMSNorm(self.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = MoshiRMSNorm(self.hidden_size, eps=config.rms_norm_eps)
self.sliding_window = config.sliding_window
self._attn_implementation = config._attn_implementation
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Cache`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
into the model
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = (
self.mlp(hidden_states) if not self.use_flexible_linear else self.mlp(hidden_states, cache_position)
)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
@auto_docstring
| MoshiDecoderLayer |
python | getsentry__sentry | src/sentry_plugins/jira/client.py | {
"start": 342,
"end": 3235
} | class ____(ApiClient):
"""
The JIRA API Client, so you don't have to.
"""
PROJECT_URL = "/rest/api/2/project"
META_URL = "/rest/api/2/issue/createmeta"
CREATE_URL = "/rest/api/2/issue"
PRIORITIES_URL = "/rest/api/2/priority"
VERSIONS_URL = "/rest/api/2/project/{}/versions"
USERS_URL = "/rest/api/2/user/assignable/search"
ISSUE_URL = "/rest/api/2/issue/{}"
SEARCH_URL = "/rest/api/2/search/"
COMMENT_URL = "/rest/api/2/issue/{}/comment"
plugin_name = "jira"
cache_time = 60
def __init__(self, instance_uri, username, password):
self.base_url = instance_uri.rstrip("/")
self.username = username
self.password = password
super().__init__(verify_ssl=False)
def request(self, method, path, data=None, params=None):
if self.username and self.password:
auth = self.username, self.password
else:
auth = None
return self._request(method, path, data=data, params=params, auth=auth)
def get_projects_list(self):
return self.get_cached(self.PROJECT_URL)
def get_create_meta(self, project):
return self.get(
self.META_URL, params={"projectKeys": project, "expand": "projects.issuetypes.fields"}
)
def get_create_meta_for_project(self, project):
metas = self.get_create_meta(project)
# We saw an empty JSON response come back from the API :(
if not metas:
return None
# XXX(dcramer): document how this is possible, if it even is
if len(metas["projects"]) > 1:
raise ApiError("More than one project found.")
try:
return metas["projects"][0]
except IndexError:
return None
def get_versions(self, project):
return self.get_cached(self.VERSIONS_URL.format(project))
def get_priorities(self):
return self.get_cached(self.PRIORITIES_URL)
def search_users_for_project(self, project, username):
return self.get(self.USERS_URL, params={"project": project, "username": username})
def create_issue(self, raw_form_data):
data = {"fields": raw_form_data}
return self.post(self.CREATE_URL, data=data)
def get_issue(self, key):
return self.get(self.ISSUE_URL.format(key))
def create_comment(self, issue_key, comment):
return self.post(self.COMMENT_URL.format(issue_key), data={"body": comment})
def search_issues(self, project, query):
# check if it looks like an issue id
if re.search(r"^[A-Za-z]+-\d+$", query) and project.lower() in query.lower():
jql = 'id="{}"'.format(query.replace('"', '\\"'))
else:
jql = 'text ~ "{}"'.format(query.replace('"', '\\"'))
jql = f'project="{project}" AND {jql}'
return self.get(self.SEARCH_URL, params={"jql": jql})
| JiraClient |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_overridden_method.py | {
"start": 644,
"end": 999
} | class ____(SuperClass):
def prop(self): # [invalid-overridden-method]
return None
def async_method(self): # [invalid-overridden-method]
return None
@property
def method_a(self): # [invalid-overridden-method]
return None
async def method_b(self): # [invalid-overridden-method]
return None
| InvalidDerived |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 91488,
"end": 91638
} | class ____(_PrintableStructure):
_fields_ = [
('avgFactor', c_uint),
('frequency', c_uint),
]
| c_nvmlVgpuSchedSetDataWithARR_t |
python | encode__django-rest-framework | rest_framework/permissions.py | {
"start": 3118,
"end": 3586
} | class ____(metaclass=BasePermissionMetaclass):
"""
A base class from which all permission classes should inherit.
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
def has_object_permission(self, request, view, obj):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
| BasePermission |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 23333,
"end": 23477
} | class ____(PivotTableAbstract):
chunk = staticmethod(methods.pivot_count)
aggregate_func = staticmethod(methods.pivot_agg)
| PivotTableCount |
python | apache__airflow | providers/google/tests/unit/google/cloud/openlineage/test_utils.py | {
"start": 5522,
"end": 49797
} | class ____(MagicMock):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.inputs = [
read_file_json("providers/google/tests/unit/google/cloud/utils/table_details.json"),
read_file_json("providers/google/tests/unit/google/cloud/utils/out_table_details.json"),
]
@property
def _properties(self):
return self.inputs.pop()
def test_merge_column_lineage_facets():
result = merge_column_lineage_facets(
[
ColumnLineageDatasetFacet(
fields={
"c": Fields(
inputFields=[
InputField(
"bigquery",
"a.b.1",
"c",
[
column_lineage_dataset.Transformation(
"type", "some_subtype", "desc", False
)
],
)
],
transformationType="IDENTITY",
transformationDescription="IDENTICAL",
),
"d": Fields(
inputFields=[
InputField(
"bigquery",
"a.b.2",
"d",
[column_lineage_dataset.Transformation("t", "s", "d", False)],
)
],
transformationType="",
transformationDescription="",
),
}
),
ColumnLineageDatasetFacet(
fields={
"c": Fields(
inputFields=[
InputField(
"bigquery",
"a.b.3",
"x",
[
column_lineage_dataset.Transformation(
"another_type", "different_subtype", "example", True
)
],
),
InputField(
"bigquery",
"a.b.1",
"c",
[
column_lineage_dataset.Transformation(
"diff_type", "diff_subtype", "diff", True
)
],
),
],
transformationType="",
transformationDescription="",
),
"e": Fields(
inputFields=[InputField("bigquery", "a.b.1", "e")],
transformationType="IDENTITY",
transformationDescription="IDENTICAL",
),
}
),
ColumnLineageDatasetFacet(
fields={
"c": Fields(
inputFields=[InputField("bigquery", "a.b.3", "x")],
transformationType="",
transformationDescription="",
)
}
),
]
)
assert result == ColumnLineageDatasetFacet(
fields={
"c": Fields(
inputFields=[
InputField(
"bigquery",
"a.b.1",
"c",
[
column_lineage_dataset.Transformation("type", "some_subtype", "desc", False),
column_lineage_dataset.Transformation("diff_type", "diff_subtype", "diff", True),
],
),
InputField(
"bigquery",
"a.b.3",
"x",
[
column_lineage_dataset.Transformation(
"another_type", "different_subtype", "example", True
)
],
),
],
transformationType="",
transformationDescription="",
),
"d": Fields(
inputFields=[
InputField(
"bigquery",
"a.b.2",
"d",
[column_lineage_dataset.Transformation("t", "s", "d", False)],
)
],
transformationType="",
transformationDescription="",
),
"e": Fields(
inputFields=[InputField("bigquery", "a.b.1", "e")],
transformationType="",
transformationDescription="",
),
}
)
def test_get_facets_from_bq_table():
expected_facets = {
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field1", type="STRING", description="field1 description"),
SchemaDatasetFacetFields(name="field2", type="INTEGER"),
]
),
"documentation": DocumentationDatasetFacet(description="Table description."),
"symlink": SymlinksDatasetFacet(
identifiers=[
Identifier(namespace="gs://bucket", name="path/to", type="file"),
Identifier(namespace="gs://second_bucket", name="path/to/other", type="file"),
]
),
}
result = get_facets_from_bq_table(TEST_TABLE)
assert result == expected_facets
def test_get_facets_from_empty_bq_table():
result = get_facets_from_bq_table(TEST_EMPTY_TABLE)
assert result == {}
def test_get_identity_column_lineage_facet_source_datasets_schemas_are_subsets():
field_names = ["field1", "field2", "field3"]
input_datasets = [
Dataset(
namespace="gs://first_bucket",
name="dir1",
facets={
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field1", type="STRING"),
]
)
},
),
Dataset(
namespace="gs://second_bucket",
name="dir2",
facets={
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field2", type="STRING"),
]
)
},
),
]
expected_facet = ColumnLineageDatasetFacet(
fields={
"field1": Fields(
inputFields=[
InputField(
namespace="gs://first_bucket",
name="dir1",
field="field1",
)
],
transformationType="IDENTITY",
transformationDescription="identical",
),
"field2": Fields(
inputFields=[
InputField(
namespace="gs://second_bucket",
name="dir2",
field="field2",
),
],
transformationType="IDENTITY",
transformationDescription="identical",
),
# field3 is missing here as it's not present in any source dataset
}
)
result = get_identity_column_lineage_facet(dest_field_names=field_names, input_datasets=input_datasets)
assert result == {"columnLineage": expected_facet}
def test_get_identity_column_lineage_facet_multiple_input_datasets():
field_names = ["field1", "field2"]
schema_facet = SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field1", type="STRING"),
SchemaDatasetFacetFields(name="field2", type="STRING"),
]
)
input_datasets = [
Dataset(namespace="gs://first_bucket", name="dir1", facets={"schema": schema_facet}),
Dataset(namespace="gs://second_bucket", name="dir2", facets={"schema": schema_facet}),
]
expected_facet = ColumnLineageDatasetFacet(
fields={
"field1": Fields(
inputFields=[
InputField(
namespace="gs://first_bucket",
name="dir1",
field="field1",
),
InputField(
namespace="gs://second_bucket",
name="dir2",
field="field1",
),
],
transformationType="IDENTITY",
transformationDescription="identical",
),
"field2": Fields(
inputFields=[
InputField(
namespace="gs://first_bucket",
name="dir1",
field="field2",
),
InputField(
namespace="gs://second_bucket",
name="dir2",
field="field2",
),
],
transformationType="IDENTITY",
transformationDescription="identical",
),
}
)
result = get_identity_column_lineage_facet(dest_field_names=field_names, input_datasets=input_datasets)
assert result == {"columnLineage": expected_facet}
def test_get_identity_column_lineage_facet_dest_cols_not_in_input_datasets():
field_names = ["x", "y"]
input_datasets = [
Dataset(
namespace="gs://first_bucket",
name="dir1",
facets={
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field1", type="STRING"),
]
)
},
),
Dataset(
namespace="gs://second_bucket",
name="dir2",
facets={
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field2", type="STRING"),
]
)
},
),
]
result = get_identity_column_lineage_facet(dest_field_names=field_names, input_datasets=input_datasets)
assert result == {}
def test_get_identity_column_lineage_facet_no_schema_in_input_dataset():
field_names = ["field1", "field2"]
input_datasets = [
Dataset(namespace="gs://first_bucket", name="dir1"),
]
result = get_identity_column_lineage_facet(dest_field_names=field_names, input_datasets=input_datasets)
assert result == {}
def test_get_identity_column_lineage_facet_no_field_names():
field_names = []
schema_facet = SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field1", type="STRING"),
SchemaDatasetFacetFields(name="field2", type="STRING"),
]
)
input_datasets = [
Dataset(namespace="gs://first_bucket", name="dir1", facets={"schema": schema_facet}),
Dataset(namespace="gs://second_bucket", name="dir2", facets={"schema": schema_facet}),
]
result = get_identity_column_lineage_facet(dest_field_names=field_names, input_datasets=input_datasets)
assert result == {}
def test_get_identity_column_lineage_facet_no_input_datasets():
field_names = ["field1", "field2"]
input_datasets = []
result = get_identity_column_lineage_facet(dest_field_names=field_names, input_datasets=input_datasets)
assert result == {}
@pytest.mark.parametrize(
("input_path", "expected_output"),
[
("/path/to/file.txt", "path/to/file.txt"), # Full file path
("file.txt", "file.txt"), # File path in root directory
("/path/to/dir/", "path/to/dir"), # Directory path
("/path/to/dir/*", "path/to/dir"), # Path with wildcard at the end
("/path/to/dir/*.csv", "path/to/dir"), # Path with wildcard in file name
("/path/to/dir/file.*", "path/to/dir"), # Path with wildcard in file extension
("/path/to/*/dir/file.csv", "path/to"), # Path with wildcard in the middle
("/path/to/dir/pre_", "path/to/dir"), # Path with prefix
("/pre", "/"), # Prefix only
("/*", "/"), # Wildcard after root slash
("/", "/"), # Root path
("", "/"), # Empty path
(".", "/"), # Current directory
("*", "/"), # Wildcard only
],
)
def test_extract_ds_name_from_gcs_path(input_path, expected_output):
assert extract_ds_name_from_gcs_path(input_path) == expected_output
@patch("airflow.providers.openlineage.plugins.listener.get_openlineage_listener")
@patch("airflow.providers.openlineage.conf.is_disabled")
def test_is_openlineage_provider_accessible(mock_is_disabled, mock_get_listener):
mock_is_disabled.return_value = False
mock_get_listener.return_value = True
assert _is_openlineage_provider_accessible() is True
@patch("airflow.providers.openlineage.plugins.listener.get_openlineage_listener")
@patch("airflow.providers.openlineage.conf.is_disabled")
def test_is_openlineage_provider_disabled(mock_is_disabled, mock_get_listener):
mock_is_disabled.return_value = True
assert _is_openlineage_provider_accessible() is False
@patch("airflow.providers.openlineage.plugins.listener.get_openlineage_listener")
@patch("airflow.providers.openlineage.conf.is_disabled")
def test_is_openlineage_listener_not_found(mock_is_disabled, mock_get_listener):
mock_is_disabled.return_value = False
mock_get_listener.return_value = None
assert _is_openlineage_provider_accessible() is False
@pytest.mark.parametrize(
("job", "expected"),
[
({"sparkJob": {}}, "sparkJob"),
({"pysparkJob": {}}, "pysparkJob"),
({"spark_job": {}}, "spark_job"),
({"pyspark_job": {}}, "pyspark_job"),
({"unsupportedJob": {}}, None),
({}, None),
],
)
def test_extract_supported_job_type_from_dataproc_job(job, expected):
assert _extract_supported_job_type_from_dataproc_job(job) == expected
def test_replace_dataproc_job_properties_injection():
job_type = "sparkJob"
original_job = {job_type: {"properties": {"existingProperty": "value"}}}
new_properties = {"newProperty": "newValue"}
updated_job = _replace_dataproc_job_properties(original_job, job_type, new_properties)
assert updated_job[job_type]["properties"] == {"newProperty": "newValue"}
assert original_job[job_type]["properties"] == {"existingProperty": "value"}
def test_replace_dataproc_job_properties_key_error():
original_job = {"sparkJob": {"properties": {"existingProperty": "value"}}}
job_type = "nonExistentJobType"
new_properties = {"newProperty": "newValue"}
with pytest.raises(KeyError, match=f"Job type '{job_type}' is missing in the job definition."):
_replace_dataproc_job_properties(original_job, job_type, new_properties)
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_job_provider_not_accessible(mock_is_accessible):
mock_is_accessible.return_value = False
job = {"sparkJob": {"properties": {"existingProperty": "value"}}}
result = inject_openlineage_properties_into_dataproc_job(job, EXAMPLE_CONTEXT, True, True)
assert result == job
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
@patch("airflow.providers.google.cloud.openlineage.utils._extract_supported_job_type_from_dataproc_job")
def test_inject_openlineage_properties_into_dataproc_job_unsupported_job_type(
mock_extract_job_type, mock_is_accessible
):
mock_is_accessible.return_value = True
mock_extract_job_type.return_value = None
job = {"unsupportedJob": {"properties": {"existingProperty": "value"}}}
result = inject_openlineage_properties_into_dataproc_job(job, EXAMPLE_CONTEXT, True, True)
assert result == job
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
@patch("airflow.providers.google.cloud.openlineage.utils._extract_supported_job_type_from_dataproc_job")
def test_inject_openlineage_properties_into_dataproc_job_no_injection(
mock_extract_job_type, mock_is_accessible
):
mock_is_accessible.return_value = True
mock_extract_job_type.return_value = "sparkJob"
inject_parent_job_info = False
job = {"sparkJob": {"properties": {"existingProperty": "value"}}}
result = inject_openlineage_properties_into_dataproc_job(
job, EXAMPLE_CONTEXT, inject_parent_job_info, False
)
assert result == job
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_job_parent_info_only(mock_is_ol_accessible):
mock_is_ol_accessible.return_value = True
expected_properties = {
"existingProperty": "value",
**OPENLINEAGE_PARENT_JOB_EXAMPLE_SPARK_PROPERTIES,
}
job = {"sparkJob": {"properties": {"existingProperty": "value"}}}
result = inject_openlineage_properties_into_dataproc_job(job, EXAMPLE_CONTEXT, True, False)
assert result == {"sparkJob": {"properties": expected_properties}}
@patch("airflow.providers.openlineage.plugins.listener._openlineage_listener")
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_job_transport_info_only(
mock_is_ol_accessible, mock_ol_listener
):
mock_is_ol_accessible.return_value = True
mock_ol_listener.adapter.get_or_create_openlineage_client.return_value.transport = HttpTransport(
HttpConfig.from_dict(OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_CONFIG)
)
expected_properties = {
"existingProperty": "value",
**OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_SPARK_PROPERTIES,
}
job = {"sparkJob": {"properties": {"existingProperty": "value"}}}
result = inject_openlineage_properties_into_dataproc_job(job, EXAMPLE_CONTEXT, False, True)
assert result == {"sparkJob": {"properties": expected_properties}}
@patch("airflow.providers.openlineage.plugins.listener._openlineage_listener")
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_job_all_injections(
mock_is_ol_accessible, mock_ol_listener
):
mock_is_ol_accessible.return_value = True
mock_ol_listener.adapter.get_or_create_openlineage_client.return_value.transport = HttpTransport(
HttpConfig.from_dict(OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_CONFIG)
)
expected_properties = {
"existingProperty": "value",
**OPENLINEAGE_PARENT_JOB_EXAMPLE_SPARK_PROPERTIES,
**OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_SPARK_PROPERTIES,
}
job = {"sparkJob": {"properties": {"existingProperty": "value"}}}
result = inject_openlineage_properties_into_dataproc_job(job, EXAMPLE_CONTEXT, True, True)
assert result == {"sparkJob": {"properties": expected_properties}}
@pytest.mark.parametrize(
("batch", "expected"),
[
({"spark_batch": {}}, True),
({"pyspark_batch": {}}, True),
({"unsupported_batch": {}}, False),
({}, False),
(Batch(spark_batch={"jar_file_uris": ["uri"]}), True),
(Batch(pyspark_batch={"main_python_file_uri": "uri"}), True),
(Batch(pyspark_batch={}), False),
(Batch(spark_sql_batch={}), False),
(Batch(), False),
],
)
def test_is_dataproc_batch_of_supported_type(batch, expected):
assert _is_dataproc_batch_of_supported_type(batch) == expected
def test_extract_dataproc_batch_properties_batch_object_with_runtime_object():
properties = {"key1": "value1", "key2": "value2"}
mock_runtime_config = RuntimeConfig(properties=properties)
mock_batch = Batch(runtime_config=mock_runtime_config)
result = _extract_dataproc_batch_properties(mock_batch)
assert result == properties
def test_extract_dataproc_batch_properties_batch_object_with_runtime_dict():
properties = {"key1": "value1", "key2": "value2"}
mock_batch = Batch(runtime_config={"properties": properties})
result = _extract_dataproc_batch_properties(mock_batch)
assert result == {"key1": "value1", "key2": "value2"}
def test_extract_dataproc_batch_properties_batch_object_with_runtime_object_empty():
mock_batch = Batch(runtime_config=RuntimeConfig())
result = _extract_dataproc_batch_properties(mock_batch)
assert result == {}
def test_extract_dataproc_batch_properties_dict_with_runtime_config_object():
properties = {"key1": "value1", "key2": "value2"}
mock_runtime_config = RuntimeConfig(properties=properties)
mock_batch_dict = {"runtime_config": mock_runtime_config}
result = _extract_dataproc_batch_properties(mock_batch_dict)
assert result == properties
def test_extract_dataproc_batch_properties_dict_with_properties_dict():
properties = {"key1": "value1", "key2": "value2"}
mock_batch_dict = {"runtime_config": {"properties": properties}}
result = _extract_dataproc_batch_properties(mock_batch_dict)
assert result == properties
def test_extract_dataproc_batch_properties_empty_runtime_config():
mock_batch_dict = {"runtime_config": {}}
result = _extract_dataproc_batch_properties(mock_batch_dict)
assert result == {}
def test_extract_dataproc_batch_properties_empty_dict():
assert _extract_dataproc_batch_properties({}) == {}
def test_extract_dataproc_batch_properties_empty_batch():
assert _extract_dataproc_batch_properties(Batch()) == {}
def test_replace_dataproc_batch_properties_with_batch_object():
original_batch = Batch(
spark_batch={
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
runtime_config=RuntimeConfig(properties={"existingProperty": "value"}),
)
new_properties = {"newProperty": "newValue"}
expected_batch = Batch(
spark_batch={
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
runtime_config=RuntimeConfig(properties={"newProperty": "newValue"}),
)
updated_batch = _replace_dataproc_batch_properties(original_batch, new_properties)
assert updated_batch == expected_batch
assert original_batch.runtime_config.properties == {"existingProperty": "value"}
assert original_batch.spark_batch.main_class == "org.apache.spark.examples.SparkPi"
def test_replace_dataproc_batch_properties_with_batch_object_and_run_time_config_dict():
original_batch = Batch(
spark_batch={
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
runtime_config={"properties": {"existingProperty": "value"}},
)
new_properties = {"newProperty": "newValue"}
expected_batch = Batch(
spark_batch={
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
runtime_config={"properties": {"newProperty": "newValue"}},
)
updated_batch = _replace_dataproc_batch_properties(original_batch, new_properties)
assert updated_batch == expected_batch
assert original_batch.runtime_config.properties == {"existingProperty": "value"}
assert original_batch.spark_batch.main_class == "org.apache.spark.examples.SparkPi"
def test_replace_dataproc_batch_properties_with_empty_batch_object():
original_batch = Batch()
new_properties = {"newProperty": "newValue"}
expected_batch = Batch(runtime_config=RuntimeConfig(properties={"newProperty": "newValue"}))
updated_batch = _replace_dataproc_batch_properties(original_batch, new_properties)
assert updated_batch == expected_batch
assert original_batch == Batch()
def test_replace_dataproc_batch_properties_with_dict():
original_batch = {
"spark_batch": {
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
"runtime_config": {"properties": {"existingProperty": "value"}},
}
new_properties = {"newProperty": "newValue"}
expected_batch = {
"spark_batch": {
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
"runtime_config": {"properties": {"newProperty": "newValue"}},
}
updated_batch = _replace_dataproc_batch_properties(original_batch, new_properties)
assert updated_batch == expected_batch
assert original_batch["runtime_config"]["properties"] == {"existingProperty": "value"}
assert original_batch["spark_batch"]["main_class"] == "org.apache.spark.examples.SparkPi"
def test_replace_dataproc_batch_properties_with_dict_and_run_time_config_object():
original_batch = {
"spark_batch": {
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
"runtime_config": RuntimeConfig(properties={"existingProperty": "value"}),
}
new_properties = {"newProperty": "newValue"}
expected_batch = {
"spark_batch": {
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
"runtime_config": RuntimeConfig(properties={"newProperty": "newValue"}),
}
updated_batch = _replace_dataproc_batch_properties(original_batch, new_properties)
assert updated_batch == expected_batch
assert original_batch["runtime_config"].properties == {"existingProperty": "value"}
assert original_batch["spark_batch"]["main_class"] == "org.apache.spark.examples.SparkPi"
def test_replace_dataproc_batch_properties_with_empty_dict():
original_batch = {}
new_properties = {"newProperty": "newValue"}
expected_batch = {"runtime_config": {"properties": {"newProperty": "newValue"}}}
updated_batch = _replace_dataproc_batch_properties(original_batch, new_properties)
assert updated_batch == expected_batch
assert original_batch == {}
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_batch_provider_not_accessible(mock_is_accessible):
mock_is_accessible.return_value = False
result = inject_openlineage_properties_into_dataproc_batch(EXAMPLE_BATCH, None, True, True)
assert result == EXAMPLE_BATCH
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
@patch("airflow.providers.google.cloud.openlineage.utils._is_dataproc_batch_of_supported_type")
def test_inject_openlineage_properties_into_dataproc_batch_unsupported_batch_type(
mock_valid_job_type, mock_is_accessible
):
mock_is_accessible.return_value = True
mock_valid_job_type.return_value = False
batch = {
"unsupported_batch": {
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
"runtime_config": {"properties": {"existingProperty": "value"}},
}
result = inject_openlineage_properties_into_dataproc_batch(batch, None, True, True)
assert result == batch
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
@patch("airflow.providers.google.cloud.openlineage.utils._is_dataproc_batch_of_supported_type")
def test_inject_openlineage_properties_into_dataproc_batch_no_injection(
mock_valid_job_type, mock_is_accessible
):
mock_is_accessible.return_value = True
mock_valid_job_type.return_value = True
result = inject_openlineage_properties_into_dataproc_batch(EXAMPLE_BATCH, None, False, False)
assert result == EXAMPLE_BATCH
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_batch_parent_info_only(mock_is_ol_accessible):
mock_is_ol_accessible.return_value = True
expected_properties = {
"existingProperty": "value",
**OPENLINEAGE_PARENT_JOB_EXAMPLE_SPARK_PROPERTIES,
}
expected_batch = {
**EXAMPLE_BATCH,
"runtime_config": {"properties": expected_properties},
}
result = inject_openlineage_properties_into_dataproc_batch(EXAMPLE_BATCH, EXAMPLE_CONTEXT, True, False)
assert result == expected_batch
@patch("airflow.providers.openlineage.plugins.listener._openlineage_listener")
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_batch_transport_info_only(
mock_is_ol_accessible, mock_ol_listener
):
mock_is_ol_accessible.return_value = True
mock_ol_listener.adapter.get_or_create_openlineage_client.return_value.transport = HttpTransport(
HttpConfig.from_dict(OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_CONFIG)
)
expected_properties = {"existingProperty": "value", **OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_SPARK_PROPERTIES}
expected_batch = {
**EXAMPLE_BATCH,
"runtime_config": {"properties": expected_properties},
}
result = inject_openlineage_properties_into_dataproc_batch(EXAMPLE_BATCH, EXAMPLE_CONTEXT, False, True)
assert result == expected_batch
@patch("airflow.providers.openlineage.plugins.listener._openlineage_listener")
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_batch_all_injections(
mock_is_ol_accessible, mock_ol_listener
):
mock_is_ol_accessible.return_value = True
mock_ol_listener.adapter.get_or_create_openlineage_client.return_value.transport = HttpTransport(
HttpConfig.from_dict(OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_CONFIG)
)
expected_properties = {
"existingProperty": "value",
**OPENLINEAGE_PARENT_JOB_EXAMPLE_SPARK_PROPERTIES,
**OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_SPARK_PROPERTIES,
}
expected_batch = {
**EXAMPLE_BATCH,
"runtime_config": {"properties": expected_properties},
}
result = inject_openlineage_properties_into_dataproc_batch(EXAMPLE_BATCH, EXAMPLE_CONTEXT, True, True)
assert result == expected_batch
@pytest.mark.parametrize(
("input_uris", "expected_output"),
[
(["gs://bucket/blob"], {("gs://bucket", "/")}),
(["gs://bucket/blob/*"], {("gs://bucket", "blob")}),
(
[
"https://googleapis.com/bigtable/projects/project/instances/instance/appProfiles/profile/tables/table",
"https://googleapis.com/bigtable/projects/project/instances/instance/tables/table",
],
{("bigtable://project/instance", "table"), ("bigtable://project/instance", "table")},
),
(
[
"gs://bucket/blob",
"https://googleapis.com/bigtable/projects/project/instances/instance/tables/table",
"invalid_uri",
],
{("gs://bucket", "/"), ("bigtable://project/instance", "table")},
),
([], set()),
(["invalid_uri"], set()),
],
)
def test_get_namespace_name_from_source_uris(input_uris, expected_output):
assert get_namespace_name_from_source_uris(input_uris) == expected_output
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_workflow_template_provider_not_accessible(
mock_is_accessible,
):
mock_is_accessible.return_value = False
template = {"workflow": "template"} # It does not matter what the dict is, we should return it unmodified
result = inject_openlineage_properties_into_dataproc_workflow_template(template, None, True, True)
assert result == template
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
@patch("airflow.providers.google.cloud.openlineage.utils._extract_supported_job_type_from_dataproc_job")
def test_inject_openlineage_properties_into_dataproc_workflow_template_no_injection(
mock_extract_job_type, mock_is_accessible
):
mock_is_accessible.return_value = True
mock_extract_job_type.return_value = "sparkJob"
template = {"workflow": "template"} # It does not matter what the dict is, we should return it unmodified
result = inject_openlineage_properties_into_dataproc_workflow_template(template, None, False, False)
assert result == template
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_workflow_template_parent_info_only(
mock_is_ol_accessible,
):
mock_is_ol_accessible.return_value = True
template = {
**EXAMPLE_TEMPLATE,
"jobs": [
{
"step_id": "job_1",
"pyspark_job": {
"main_python_file_uri": "gs://bucket1/spark_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
},
},
},
{
"step_id": "job_2",
"pyspark_job": {
"main_python_file_uri": "gs://bucket2/spark_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
"spark.openlineage.parentJobNamespace": "test",
},
},
},
{
"step_id": "job_3",
"hive_job": {
"main_python_file_uri": "gs://bucket3/hive_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
},
},
},
],
}
expected_template = {
**EXAMPLE_TEMPLATE,
"jobs": [
{
"step_id": "job_1",
"pyspark_job": {
"main_python_file_uri": "gs://bucket1/spark_job.py",
"properties": { # Injected properties
"spark.sql.shuffle.partitions": "1",
"spark.openlineage.parentJobName": "dag_id.task_id",
"spark.openlineage.parentJobNamespace": "default",
"spark.openlineage.parentRunId": "01931885-2800-7be7-aa8d-aaa15c337267",
"spark.openlineage.rootParentJobName": "dag_id",
"spark.openlineage.rootParentJobNamespace": "default",
"spark.openlineage.rootParentRunId": "01931885-2800-799d-8041-88a263ffa0d8",
},
},
},
{
"step_id": "job_2",
"pyspark_job": { # Not modified because it's already present
"main_python_file_uri": "gs://bucket2/spark_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
"spark.openlineage.parentJobNamespace": "test",
},
},
},
{
"step_id": "job_3",
"hive_job": { # Not modified because it's unsupported job type
"main_python_file_uri": "gs://bucket3/hive_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
},
},
},
],
}
result = inject_openlineage_properties_into_dataproc_workflow_template(
template, EXAMPLE_CONTEXT, True, False
)
assert result == expected_template
@patch("airflow.providers.openlineage.plugins.listener._openlineage_listener")
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_workflow_template_transport_info_only(
mock_is_ol_accessible, mock_ol_listener
):
mock_is_ol_accessible.return_value = True
mock_ol_listener.adapter.get_or_create_openlineage_client.return_value.transport = HttpTransport(
HttpConfig.from_dict(OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_CONFIG)
)
template = {
**EXAMPLE_TEMPLATE,
"jobs": [
{
"step_id": "job_1",
"pyspark_job": {
"main_python_file_uri": "gs://bucket1/spark_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
},
},
},
{
"step_id": "job_2",
"pyspark_job": {
"main_python_file_uri": "gs://bucket2/spark_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
"spark.openlineage.transport.type": "console",
},
},
},
{
"step_id": "job_3",
"hive_job": {
"main_python_file_uri": "gs://bucket3/hive_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
},
},
},
],
}
expected_template = {
**EXAMPLE_TEMPLATE,
"jobs": [
{
"step_id": "job_1",
"pyspark_job": {
"main_python_file_uri": "gs://bucket1/spark_job.py",
"properties": { # Injected properties
"spark.sql.shuffle.partitions": "1",
**OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_SPARK_PROPERTIES,
},
},
},
{
"step_id": "job_2",
"pyspark_job": { # Not modified because it's already present
"main_python_file_uri": "gs://bucket2/spark_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
"spark.openlineage.transport.type": "console",
},
},
},
{
"step_id": "job_3",
"hive_job": { # Not modified because it's unsupported job type
"main_python_file_uri": "gs://bucket3/hive_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
},
},
},
],
}
result = inject_openlineage_properties_into_dataproc_workflow_template(
template, EXAMPLE_CONTEXT, False, True
)
assert result == expected_template
@patch("airflow.providers.openlineage.plugins.listener._openlineage_listener")
@patch("airflow.providers.google.cloud.openlineage.utils._is_openlineage_provider_accessible")
def test_inject_openlineage_properties_into_dataproc_workflow_template_all_injections(
mock_is_ol_accessible, mock_ol_listener
):
mock_is_ol_accessible.return_value = True
mock_ol_listener.adapter.get_or_create_openlineage_client.return_value.transport = HttpTransport(
HttpConfig.from_dict(OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_CONFIG)
)
template = {
**EXAMPLE_TEMPLATE,
"jobs": [
{
"step_id": "job_1",
"pyspark_job": {
"main_python_file_uri": "gs://bucket1/spark_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
},
},
},
{
"step_id": "job_2",
"pyspark_job": {
"main_python_file_uri": "gs://bucket2/spark_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
"spark.openlineage.transport.type": "console",
},
},
},
{
"step_id": "job_3",
"hive_job": {
"main_python_file_uri": "gs://bucket3/hive_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
},
},
},
{
"step_id": "job_4",
"pyspark_job": {
"main_python_file_uri": "gs://bucket2/spark_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
"spark.openlineage.parentJobNamespace": "test",
},
},
},
],
}
expected_template = {
"id": "test-workflow",
"placement": {
"cluster_selector": {
"zone": "europe-central2-c",
"cluster_labels": {"key": "value"},
}
},
"jobs": [
{
"step_id": "job_1",
"pyspark_job": {
"main_python_file_uri": "gs://bucket1/spark_job.py",
"properties": { # Injected properties
"spark.sql.shuffle.partitions": "1",
**OPENLINEAGE_PARENT_JOB_EXAMPLE_SPARK_PROPERTIES,
**OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_SPARK_PROPERTIES,
},
},
},
{
"step_id": "job_2",
"pyspark_job": { # Only parent info injected
"main_python_file_uri": "gs://bucket2/spark_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
"spark.openlineage.transport.type": "console",
**OPENLINEAGE_PARENT_JOB_EXAMPLE_SPARK_PROPERTIES,
},
},
},
{
"step_id": "job_3",
"hive_job": { # Not modified because it's unsupported job type
"main_python_file_uri": "gs://bucket3/hive_job.py",
"properties": {
"spark.sql.shuffle.partitions": "1",
},
},
},
{
"step_id": "job_4",
"pyspark_job": {
"main_python_file_uri": "gs://bucket2/spark_job.py",
"properties": { # Only transport info injected
"spark.sql.shuffle.partitions": "1",
"spark.openlineage.parentJobNamespace": "test",
**OPENLINEAGE_HTTP_TRANSPORT_EXAMPLE_SPARK_PROPERTIES,
},
},
},
],
}
result = inject_openlineage_properties_into_dataproc_workflow_template(
template, EXAMPLE_CONTEXT, True, True
)
assert result == expected_template
| TableMock |
python | nedbat__coveragepy | coverage/parser.py | {
"start": 19068,
"end": 19766
} | class ____(Protocol):
"""The type for AstArcAnalyzer.add_arc()."""
def __call__(
self,
start: TLineNo,
end: TLineNo,
missing_cause_msg: str | None = None,
action_msg: str | None = None,
) -> None:
"""
Record an arc from `start` to `end`.
`missing_cause_msg` is a description of the reason the arc wasn't
taken if it wasn't taken. For example, "the condition on line 10 was
never true."
`action_msg` is a description of what the arc does, like "jump to line
10" or "exit from function 'fooey'."
"""
TArcFragments = dict[TArc, list[tuple[Optional[str], Optional[str]]]]
| TAddArcFn |
python | langchain-ai__langchain | libs/core/tests/unit_tests/language_models/chat_models/test_base.py | {
"start": 12615,
"end": 16278
} | class ____(NoStreamingModel):
streaming: bool = False
@override
def _stream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
yield ChatGenerationChunk(message=AIMessageChunk(content="stream"))
@pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
def test_disable_streaming(
*,
disable_streaming: bool | Literal["tool_calling"],
) -> None:
model = StreamingModel(disable_streaming=disable_streaming)
assert model.invoke([]).content == "invoke"
expected = "invoke" if disable_streaming is True else "stream"
assert next(model.stream([])).content == expected
assert (
model.invoke([], config={"callbacks": [LogStreamCallbackHandler()]}).content
== expected
)
expected = "invoke" if disable_streaming in {"tool_calling", True} else "stream"
assert next(model.stream([], tools=[{"type": "function"}])).content == expected
assert (
model.invoke(
[], config={"callbacks": [LogStreamCallbackHandler()]}, tools=[{}]
).content
== expected
)
@pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
async def test_disable_streaming_async(
*,
disable_streaming: bool | Literal["tool_calling"],
) -> None:
model = StreamingModel(disable_streaming=disable_streaming)
assert (await model.ainvoke([])).content == "invoke"
expected = "invoke" if disable_streaming is True else "stream"
async for c in model.astream([]):
assert c.content == expected
break
assert (
await model.ainvoke([], config={"callbacks": [_AstreamEventsCallbackHandler()]})
).content == expected
expected = "invoke" if disable_streaming in {"tool_calling", True} else "stream"
async for c in model.astream([], tools=[{}]):
assert c.content == expected
break
assert (
await model.ainvoke(
[], config={"callbacks": [_AstreamEventsCallbackHandler()]}, tools=[{}]
)
).content == expected
async def test_streaming_attribute_overrides_streaming_callback() -> None:
model = StreamingModel(streaming=False)
assert (
await model.ainvoke([], config={"callbacks": [_AstreamEventsCallbackHandler()]})
).content == "invoke"
@pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
def test_disable_streaming_no_streaming_model(
*,
disable_streaming: bool | Literal["tool_calling"],
) -> None:
model = NoStreamingModel(disable_streaming=disable_streaming)
assert model.invoke([]).content == "invoke"
assert next(model.stream([])).content == "invoke"
assert (
model.invoke([], config={"callbacks": [LogStreamCallbackHandler()]}).content
== "invoke"
)
assert next(model.stream([], tools=[{}])).content == "invoke"
@pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
async def test_disable_streaming_no_streaming_model_async(
*,
disable_streaming: bool | Literal["tool_calling"],
) -> None:
model = NoStreamingModel(disable_streaming=disable_streaming)
assert (await model.ainvoke([])).content == "invoke"
async for c in model.astream([]):
assert c.content == "invoke"
break
assert (
await model.ainvoke([], config={"callbacks": [_AstreamEventsCallbackHandler()]})
).content == "invoke"
async for c in model.astream([], tools=[{}]):
assert c.content == "invoke"
break
| StreamingModel |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs_auto_attribs.py | {
"start": 231,
"end": 365
} | class ____:
a: str = 0
b = field()
c: int = foo()
d = list()
@define(auto_attribs=None) # auto_attribs = None => True
| C |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0088_domain_field_edits.py | {
"start": 149,
"end": 1049
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0087_use_booleanfield_null"),
]
operations = [
migrations.AlterField(
model_name="domain",
name="canonical",
field=models.BooleanField(
default=False,
help_text="This domain is the primary one where the documentation is served from",
),
),
migrations.AlterField(
model_name="domain",
name="cname",
field=models.BooleanField(
default=False, help_text="This domain is a CNAME for the project"
),
),
migrations.AlterField(
model_name="domain",
name="machine",
field=models.BooleanField(default=False, help_text="This domain was auto-created"),
),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/metrics_test.py | {
"start": 179597,
"end": 181622
} | class ____(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.true_positives_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0.15, 0.5, 0.85])
_assert_metric_variables(self, ('true_positives/true_positives:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.true_positives_at_thresholds(
predictions=predictions, labels=labels, thresholds=[0.15, 0.5, 0.85])
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp)
self.assertAllEqual((3, 1, 0), tp_update_op)
self.assertAllEqual((3, 1, 0), tp)
@test_util.run_deprecated_v1
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.true_positives_at_thresholds(
predictions=predictions, labels=labels, weights=37.0,
thresholds=[0.15, 0.5, 0.85])
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp)
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op)
self.assertAllEqual((111.0, 37.0, 0.0), tp)
if __name__ == '__main__':
test.main()
| TruePositivesAtThresholdsTest |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-weather/llama_index/tools/weather/base.py | {
"start": 175,
"end": 4208
} | class ____(BaseToolSpec):
"""Open Weather tool spec."""
spec_functions = ["weather_at_location", "forecast_tomorrow_at_location"]
def __init__(self, key: str, temp_units: str = "celsius") -> None:
"""Initialize with parameters."""
try:
from pyowm import OWM
except ImportError:
raise ImportError(
"The OpenWeatherMap tool requires the pyowm package to be installed. "
"Please install it using `pip install pyowm`."
)
self.key = key
self.temp_units = temp_units
self._owm = OWM(self.key)
self._mgr = self._owm.weather_manager()
def _format_temp(self, temperature: Any, temp_unit: str) -> str:
return (
f" - Current: {temperature['temp']}{temp_unit}\n"
f" - High: {temperature['temp_max']}{temp_unit}\n"
f" - Low: {temperature['temp_min']}{temp_unit}\n"
f" - Feels like: {temperature['feels_like']}{temp_unit}"
)
def _format_weather(
self, place: str, temp_str: str, w: Any, time_str: str = "now"
) -> str:
"""
Format weather response from OpenWeatherMap.
Function thanks to
langchain/utilities/openweathermap.py
"""
detailed_status = w.detailed_status
wind = w.wind()
humidity = w.humidity
rain = w.rain
heat_index = w.heat_index
clouds = w.clouds
return (
f"In {place}, the weather for {time_str} is as follows:\n"
f"Detailed status: {detailed_status}\n"
f"Wind speed: {wind['speed']} m/s, direction: {wind['deg']}°\n"
f"Humidity: {humidity}%\n"
"Temperature: \n"
f"{temp_str}\n"
f"Rain: {rain}\n"
f"Heat index: {heat_index!s}\n"
f"Cloud cover: {clouds}%"
)
def weather_at_location(self, location: str) -> List[Document]:
"""
Finds the current weather at a location.
Args:
place (str):
The place to find the weather at.
Should be a city name and country.
"""
from pyowm.commons.exceptions import NotFoundError
try:
observation = self._mgr.weather_at_place(location)
except NotFoundError:
return [Document(text=f"Unable to find weather at {location}.")]
w = observation.weather
temperature = w.temperature(self.temp_units)
temp_unit = "°C" if self.temp_units == "celsius" else "°F"
temp_str = self._format_temp(temperature, temp_unit)
weather_text = self._format_weather(location, temp_str, w)
return [Document(text=weather_text, metadata={"weather from": location})]
def forecast_tomorrow_at_location(self, location: str) -> List[Document]:
"""
Finds the weather forecast for tomorrow at a location.
Args:
location (str):
The location to find the weather tomorrow at.
Should be a city name and country.
"""
from pyowm.commons.exceptions import NotFoundError
from pyowm.utils import timestamps
try:
forecast = self._mgr.forecast_at_place(location, "3h")
except NotFoundError:
return [Document(text=f"Unable to find weather at {location}.")]
tomorrow = timestamps.tomorrow()
w = forecast.get_weather_at(tomorrow)
temperature = w.temperature(self.temp_units)
temp_unit = "°C" if self.temp_units == "celsius" else "°F"
temp_str = self._format_temp(temperature, temp_unit)
weather_text = self._format_weather(location, temp_str, w, "tomorrow")
return [
Document(
text=weather_text,
metadata={
"weather from": location,
"forecast for": tomorrow.strftime("%Y-%m-%d"),
},
)
]
| OpenWeatherMapToolSpec |
python | apache__airflow | airflow-ctl/src/airflowctl/ctl/commands/config_command.py | {
"start": 1224,
"end": 31335
} | class ____:
"""
Class representing the configuration changes in Airflow 3.0.
:param config: The configuration parameter being changed.
:param default_change: If the change is a default value change.
:param old_default: The old default value (valid only if default_change is True).
:param new_default: The new default value for the configuration parameter.
:param suggestion: A suggestion for replacing or handling the removed configuration.
:param renamed_to: The new section and option if the configuration is renamed.
:param was_deprecated: If the config is removed, whether the old config was deprecated.
:param was_removed: If the config is removed.
:param is_invalid_if: If the current config value is invalid in the future.
:param breaking: Mark if this change is known to be breaking and causing errors/ warnings / deprecations.
:param remove_if_equals: For removal rules, remove the option only if its current value equals this value.
"""
config: ConfigParameter
default_change: bool = False
old_default: str | bool | int | float | None = None
new_default: str | bool | int | float | None = None
suggestion: str = ""
renamed_to: ConfigParameter | None = None
was_deprecated: bool = True
was_removed: bool = True
is_invalid_if: Any = None
breaking: bool = False
remove_if_equals: str | bool | int | float | None = None
def message(self, api_client=NEW_API_CLIENT) -> str | None:
"""Generate a message for this configuration change."""
if self.default_change:
value = self._get_option_value(api_client.configs.list())
if value != self.new_default:
return (
f"Changed default value of `{self.config.option}` in `{self.config.section}` "
f"from `{self.old_default}` to `{self.new_default}`."
)
if self.renamed_to:
if self.config.section != self.renamed_to.section:
return (
f"`{self.config.option}` configuration parameter moved from `{self.config.section}` section to "
f"`{self.renamed_to.section}` section as `{self.renamed_to.option}`."
)
return (
f"`{self.config.option}` configuration parameter renamed to `{self.renamed_to.option}` "
f"in the `{self.config.section}` section."
)
if self.was_removed and not self.remove_if_equals:
return (
f"Removed{' deprecated' if self.was_deprecated else ''} `{self.config.option}` configuration parameter "
f"from `{self.config.section}` section."
f"{self.suggestion}"
)
if self.is_invalid_if is not None:
value = self._get_option_value(api_client.configs.list())
if value == self.is_invalid_if:
return (
f"Invalid value `{self.is_invalid_if}` set for `{self.config.option}` configuration parameter "
f"in `{self.config.section}` section. {self.suggestion}"
)
return None
def _get_option_value(self, config_resp: Config) -> str | None:
for section in config_resp.sections:
if section.name == self.config.section:
for option in section.options:
if option.key == self.config.option:
return option.value if isinstance(option.value, str) else str(option.value)
return None
CONFIGS_CHANGES = [
# admin
ConfigChange(
config=ConfigParameter("admin", "hide_sensitive_variable_fields"),
renamed_to=ConfigParameter("core", "hide_sensitive_var_conn_fields"),
),
ConfigChange(
config=ConfigParameter("admin", "sensitive_variable_fields"),
renamed_to=ConfigParameter("core", "sensitive_var_conn_names"),
),
# core
ConfigChange(
config=ConfigParameter("core", "executor"),
default_change=True,
old_default="SequentialExecutor",
new_default="LocalExecutor",
was_removed=False,
breaking=True,
),
ConfigChange(
config=ConfigParameter("core", "hostname"),
was_removed=True,
remove_if_equals=":",
),
ConfigChange(
config=ConfigParameter("core", "check_slas"),
suggestion="The SLA feature is removed in Airflow 3.0, to be replaced with Airflow Alerts in future",
),
ConfigChange(
config=ConfigParameter("core", "strict_dataset_uri_validation"),
suggestion="Dataset URI with a defined scheme will now always be validated strictly, "
"raising a hard error on validation failure.",
),
ConfigChange(
config=ConfigParameter("core", "dag_default_view"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("core", "dag_orientation"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("core", "dataset_manager_class"),
renamed_to=ConfigParameter("core", "asset_manager_class"),
),
ConfigChange(
config=ConfigParameter("core", "dataset_manager_kwargs"),
renamed_to=ConfigParameter("core", "asset_manager_kwargs"),
),
ConfigChange(
config=ConfigParameter("core", "worker_precheck"),
renamed_to=ConfigParameter("celery", "worker_precheck"),
),
ConfigChange(
config=ConfigParameter("core", "non_pooled_task_slot_count"),
renamed_to=ConfigParameter("core", "default_pool_task_slot_count"),
),
ConfigChange(
config=ConfigParameter("core", "dag_concurrency"),
renamed_to=ConfigParameter("core", "max_active_tasks_per_dag"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_conn"),
renamed_to=ConfigParameter("database", "sql_alchemy_conn"),
),
ConfigChange(
config=ConfigParameter("core", "sql_engine_encoding"),
renamed_to=ConfigParameter("database", "sql_engine_encoding"),
),
ConfigChange(
config=ConfigParameter("core", "sql_engine_collation_for_ids"),
renamed_to=ConfigParameter("database", "sql_engine_collation_for_ids"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_pool_enabled"),
renamed_to=ConfigParameter("database", "sql_alchemy_pool_enabled"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_pool_size"),
renamed_to=ConfigParameter("database", "sql_alchemy_pool_size"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_max_overflow"),
renamed_to=ConfigParameter("database", "sql_alchemy_max_overflow"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_pool_recycle"),
renamed_to=ConfigParameter("database", "sql_alchemy_pool_recycle"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_pool_pre_ping"),
renamed_to=ConfigParameter("database", "sql_alchemy_pool_pre_ping"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_schema"),
renamed_to=ConfigParameter("database", "sql_alchemy_schema"),
),
ConfigChange(
config=ConfigParameter("core", "sql_alchemy_connect_args"),
renamed_to=ConfigParameter("database", "sql_alchemy_connect_args"),
),
ConfigChange(
config=ConfigParameter("core", "load_default_connections"),
renamed_to=ConfigParameter("database", "load_default_connections"),
),
ConfigChange(
config=ConfigParameter("core", "max_db_retries"),
renamed_to=ConfigParameter("database", "max_db_retries"),
),
ConfigChange(config=ConfigParameter("core", "task_runner")),
ConfigChange(config=ConfigParameter("core", "enable_xcom_pickling")),
ConfigChange(
config=ConfigParameter("core", "dag_file_processor_timeout"),
renamed_to=ConfigParameter("dag_processor", "dag_file_processor_timeout"),
),
ConfigChange(
config=ConfigParameter("core", "dag_processor_manager_log_location"),
),
ConfigChange(
config=ConfigParameter("core", "log_processor_filename_template"),
),
ConfigChange(
config=ConfigParameter("core", "parallelism"),
was_removed=False,
is_invalid_if="0",
suggestion="Please set the `parallelism` configuration parameter to a value greater than 0.",
),
# api
ConfigChange(
config=ConfigParameter("api", "access_control_allow_origin"),
renamed_to=ConfigParameter("api", "access_control_allow_origins"),
),
ConfigChange(
config=ConfigParameter("api", "auth_backend"),
renamed_to=ConfigParameter("fab", "auth_backends"),
),
ConfigChange(
config=ConfigParameter("api", "auth_backends"),
renamed_to=ConfigParameter("fab", "auth_backends"),
),
# logging
ConfigChange(
config=ConfigParameter("logging", "enable_task_context_logger"),
suggestion="Remove TaskContextLogger: Replaced by the Log table for better handling of task log "
"messages outside the execution context.",
),
ConfigChange(
config=ConfigParameter("logging", "dag_processor_manager_log_location"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("logging", "dag_processor_manager_log_stdout"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("logging", "log_processor_filename_template"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("logging", "log_filename_template"),
was_removed=True,
remove_if_equals="{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log",
breaking=True,
),
ConfigChange(
config=ConfigParameter("logging", "log_filename_template"),
was_removed=True,
remove_if_equals="dag_id={{ ti.dag_id }}/run_id={{ ti.run_id }}/task_id={{ ti.task_id }}/{% if ti.map_index >= 0 %}map_index={{ ti.map_index }}/{% endif %}attempt={{ try_number }}.log",
breaking=True,
),
# metrics
ConfigChange(
config=ConfigParameter("metrics", "metrics_use_pattern_match"),
),
ConfigChange(
config=ConfigParameter("metrics", "timer_unit_consistency"),
suggestion="In Airflow 3.0, the `timer_unit_consistency` setting in the `metrics` section is "
"removed as it is now the default behaviour. This is done to standardize all timer and "
"timing metrics to milliseconds across all metric loggers",
),
ConfigChange(
config=ConfigParameter("metrics", "statsd_allow_list"),
renamed_to=ConfigParameter("metrics", "metrics_allow_list"),
),
ConfigChange(
config=ConfigParameter("metrics", "statsd_block_list"),
renamed_to=ConfigParameter("metrics", "metrics_block_list"),
),
# traces
ConfigChange(
config=ConfigParameter("traces", "otel_task_log_event"),
),
# operators
ConfigChange(
config=ConfigParameter("operators", "allow_illegal_arguments"),
),
# webserver
ConfigChange(
config=ConfigParameter("webserver", "allow_raw_html_descriptions"),
),
ConfigChange(
config=ConfigParameter("webserver", "cookie_samesite"),
),
ConfigChange(
config=ConfigParameter("webserver", "update_fab_perms"),
renamed_to=ConfigParameter("fab", "update_fab_perms"),
),
ConfigChange(
config=ConfigParameter("webserver", "auth_rate_limited"),
renamed_to=ConfigParameter("fab", "auth_rate_limited"),
),
ConfigChange(
config=ConfigParameter("webserver", option="auth_rate_limit"),
renamed_to=ConfigParameter("fab", "auth_rate_limit"),
),
ConfigChange(
config=ConfigParameter("webserver", "config_file"),
renamed_to=ConfigParameter("fab", "config_file"),
),
ConfigChange(
config=ConfigParameter("webserver", "session_backend"),
renamed_to=ConfigParameter("fab", "session_backend"),
),
ConfigChange(
config=ConfigParameter("webserver", "session_lifetime_days"),
renamed_to=ConfigParameter("fab", "session_lifetime_minutes"),
),
ConfigChange(
config=ConfigParameter("webserver", "force_log_out_after"),
renamed_to=ConfigParameter("fab", "session_lifetime_minutes"),
),
ConfigChange(
config=ConfigParameter("webserver", "session_lifetime_minutes"),
renamed_to=ConfigParameter("fab", "session_lifetime_minutes"),
),
ConfigChange(
config=ConfigParameter("webserver", "base_url"),
renamed_to=ConfigParameter("api", "base_url"),
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_host"),
renamed_to=ConfigParameter("api", "host"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_port"),
renamed_to=ConfigParameter("api", "port"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "workers"),
renamed_to=ConfigParameter("api", "workers"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_worker_timeout"),
renamed_to=ConfigParameter("api", "worker_timeout"),
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_ssl_cert"),
renamed_to=ConfigParameter("api", "ssl_cert"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_ssl_key"),
renamed_to=ConfigParameter("api", "ssl_key"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "access_logfile"),
renamed_to=ConfigParameter("api", "access_logfile"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "error_logfile"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "access_logformat"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "web_server_master_timeout"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "worker_refresh_batch_size"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "worker_refresh_interval"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "reload_on_plugin_change"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "worker_class"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "expose_stacktrace"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "log_fetch_delay_sec"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "log_auto_tailing_offset"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "log_animation_speed"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "default_dag_run_display_number"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "enable_proxy_fix"),
renamed_to=ConfigParameter("fab", "enable_proxy_fix"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "proxy_fix_x_for"),
renamed_to=ConfigParameter("fab", "proxy_fix_x_for"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "proxy_fix_x_proto"),
renamed_to=ConfigParameter("fab", "proxy_fix_x_proto"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "proxy_fix_x_host"),
renamed_to=ConfigParameter("fab", "proxy_fix_x_host"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "proxy_fix_x_port"),
renamed_to=ConfigParameter("fab", "proxy_fix_x_port"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "proxy_fix_x_prefix"),
renamed_to=ConfigParameter("fab", "proxy_fix_x_prefix"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("webserver", "expose_config"),
renamed_to=ConfigParameter("api", "expose_config"),
),
ConfigChange(
config=ConfigParameter("webserver", "cookie_secure"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "analytics_tool"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "analytics_id"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "analytics_url"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "show_recent_stats_for_completed_runs"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "run_internal_api"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "caching_hash_method"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "show_trigger_form_if_no_params"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "num_recent_configurations_for_trigger"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "allowed_payload_size"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "max_form_memory_size"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "max_form_parts"),
was_deprecated=False,
),
ConfigChange(
config=ConfigParameter("webserver", "default_ui_timezone"),
was_deprecated=False,
),
# policy
ConfigChange(
config=ConfigParameter("policy", "airflow_local_settings"),
renamed_to=ConfigParameter("policy", "task_policy"),
),
ConfigChange(
config=ConfigParameter("webserver", "navbar_logo_text_color"),
was_deprecated=False,
),
# scheduler
ConfigChange(
config=ConfigParameter("scheduler", "dependency_detector"),
),
ConfigChange(
config=ConfigParameter("scheduler", "allow_trigger_in_future"),
),
ConfigChange(
config=ConfigParameter("scheduler", "catchup_by_default"),
default_change=True,
old_default="True",
was_removed=False,
new_default="False",
suggestion="In Airflow 3.0 the default value for `catchup_by_default` is set to `False`. "
"This means that DAGs without explicit definition of the `catchup` parameter will not "
"catchup by default. "
"If your DAGs rely on catchup behavior, not explicitly defined in the DAG definition, "
"set this configuration parameter to `True` in the `scheduler` section of your `airflow.cfg` "
"to enable the behavior from Airflow 2.x.",
breaking=True,
),
ConfigChange(
config=ConfigParameter("scheduler", "create_cron_data_intervals"),
default_change=True,
old_default="True",
new_default="False",
was_removed=False,
breaking=True,
),
ConfigChange(
config=ConfigParameter("scheduler", "create_delta_data_intervals"),
default_change=True,
old_default="True",
new_default="False",
was_removed=False,
breaking=True,
),
ConfigChange(
config=ConfigParameter("scheduler", "processor_poll_interval"),
renamed_to=ConfigParameter("scheduler", "scheduler_idle_sleep_time"),
),
ConfigChange(
config=ConfigParameter("scheduler", "deactivate_stale_dags_interval"),
renamed_to=ConfigParameter("scheduler", "parsing_cleanup_interval"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_on"), renamed_to=ConfigParameter("metrics", "statsd_on")
),
ConfigChange(
config=ConfigParameter("scheduler", "max_threads"),
renamed_to=ConfigParameter("dag_processor", "parsing_processes"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_host"),
renamed_to=ConfigParameter("metrics", "statsd_host"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_port"),
renamed_to=ConfigParameter("metrics", "statsd_port"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_prefix"),
renamed_to=ConfigParameter("metrics", "statsd_prefix"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_allow_list"),
renamed_to=ConfigParameter("metrics", "statsd_allow_list"),
),
ConfigChange(
config=ConfigParameter("scheduler", "stat_name_handler"),
renamed_to=ConfigParameter("metrics", "stat_name_handler"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_datadog_enabled"),
renamed_to=ConfigParameter("metrics", "statsd_datadog_enabled"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_datadog_tags"),
renamed_to=ConfigParameter("metrics", "statsd_datadog_tags"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_datadog_metrics_tags"),
renamed_to=ConfigParameter("metrics", "statsd_datadog_metrics_tags"),
),
ConfigChange(
config=ConfigParameter("scheduler", "statsd_custom_client_path"),
renamed_to=ConfigParameter("metrics", "statsd_custom_client_path"),
),
ConfigChange(
config=ConfigParameter("scheduler", "parsing_processes"),
renamed_to=ConfigParameter("dag_processor", "parsing_processes"),
),
ConfigChange(
config=ConfigParameter("scheduler", "file_parsing_sort_mode"),
renamed_to=ConfigParameter("dag_processor", "file_parsing_sort_mode"),
),
ConfigChange(
config=ConfigParameter("scheduler", "max_callbacks_per_loop"),
renamed_to=ConfigParameter("dag_processor", "max_callbacks_per_loop"),
),
ConfigChange(
config=ConfigParameter("scheduler", "min_file_process_interval"),
renamed_to=ConfigParameter("dag_processor", "min_file_process_interval"),
),
ConfigChange(
config=ConfigParameter("scheduler", "stale_dag_threshold"),
renamed_to=ConfigParameter("dag_processor", "stale_dag_threshold"),
),
ConfigChange(
config=ConfigParameter("scheduler", "print_stats_interval"),
renamed_to=ConfigParameter("dag_processor", "print_stats_interval"),
),
ConfigChange(
config=ConfigParameter("scheduler", "dag_dir_list_interval"),
renamed_to=ConfigParameter("dag_processor", "refresh_interval"),
breaking=True,
),
ConfigChange(
config=ConfigParameter("scheduler", "local_task_job_heartbeat_sec"),
renamed_to=ConfigParameter("scheduler", "task_instance_heartbeat_sec"),
),
ConfigChange(
config=ConfigParameter("scheduler", "scheduler_zombie_task_threshold"),
renamed_to=ConfigParameter("scheduler", "task_instance_heartbeat_timeout"),
),
ConfigChange(
config=ConfigParameter("scheduler", "zombie_detection_interval"),
renamed_to=ConfigParameter("scheduler", "task_instance_heartbeat_timeout_detection_interval"),
),
ConfigChange(
config=ConfigParameter("scheduler", "child_process_log_directory"),
renamed_to=ConfigParameter("logging", "dag_processor_child_process_log_directory"),
),
# celery
ConfigChange(
config=ConfigParameter("celery", "stalled_task_timeout"),
renamed_to=ConfigParameter("scheduler", "task_queued_timeout"),
),
ConfigChange(
config=ConfigParameter("celery", "default_queue"),
renamed_to=ConfigParameter("operators", "default_queue"),
),
ConfigChange(
config=ConfigParameter("celery", "task_adoption_timeout"),
renamed_to=ConfigParameter("scheduler", "task_queued_timeout"),
),
# kubernetes_executor
ConfigChange(
config=ConfigParameter("kubernetes_executor", "worker_pods_pending_timeout"),
renamed_to=ConfigParameter("scheduler", "task_queued_timeout"),
),
ConfigChange(
config=ConfigParameter("kubernetes_executor", "worker_pods_pending_timeout_check_interval"),
renamed_to=ConfigParameter("scheduler", "task_queued_timeout_check_interval"),
),
# smtp
ConfigChange(
config=ConfigParameter("smtp", "smtp_user"),
suggestion="Please use the SMTP connection (`smtp_default`).",
),
ConfigChange(
config=ConfigParameter("smtp", "smtp_password"),
suggestion="Please use the SMTP connection (`smtp_default`).",
),
# database
ConfigChange(
config=ConfigParameter("database", "load_default_connections"),
),
# triggerer
ConfigChange(
config=ConfigParameter("triggerer", "default_capacity"),
renamed_to=ConfigParameter("triggerer", "capacity"),
breaking=True,
),
# email
ConfigChange(
config=ConfigParameter("email", "email_backend"),
was_removed=True,
remove_if_equals="airflow.contrib.utils.sendgrid.send_email",
),
# elasticsearch
ConfigChange(
config=ConfigParameter("elasticsearch", "log_id_template"),
was_removed=True,
remove_if_equals="{dag_id}-{task_id}-{logical_date}-{try_number}",
breaking=True,
),
]
@provide_api_client(kind=ClientKind.CLI)
def lint(args, api_client=NEW_API_CLIENT) -> None:
"""
Lint the airflow.cfg file for removed, or renamed configurations.
This function scans the Airflow configuration file for parameters that are removed or renamed in
Airflow 3.0. It provides suggestions for alternative parameters or settings where applicable.
CLI Arguments:
--section: str (optional)
The specific section of the configuration to lint.
Example: --section core
--option: str (optional)
The specific option within a section to lint.
Example: --option check_slas
--ignore-section: str (optional)
A section to ignore during linting.
Example: --ignore-section webserver
--ignore-option: str (optional)
An option to ignore during linting.
Example: --ignore-option smtp_user
--verbose: flag (optional)
Enables detailed output, including the list of ignored sections and options.
Example: --verbose
Examples:
1. Lint all sections and options:
airflowctl config lint
2. Lint a specific section:
airflowctl config lint --section core,webserver
3. Lint specific sections and options:
airflowctl config lint --section smtp --option smtp_user
4. Ignore a section:
airflowctl config lint --ignore-section webserver,api
5. Ignore an options:
airflowctl config lint --ignore-option smtp_user,session_lifetime_days
6. Enable verbose output:
airflowctl config lint --verbose
:param args: The CLI arguments for linting configurations.
"""
lint_issues = []
section_to_check_if_provided = args.section or []
option_to_check_if_provided = args.option or []
ignore_sections = args.ignore_section or []
ignore_options = args.ignore_option or []
try:
all_configs = api_client.configs.list()
for configuration in CONFIGS_CHANGES:
if (
section_to_check_if_provided
and configuration.config.section not in section_to_check_if_provided
):
continue
if option_to_check_if_provided and configuration.config.option not in option_to_check_if_provided:
continue
if (
configuration.config.section in ignore_sections
or configuration.config.option in ignore_options
):
continue
target_section = next(
(section for section in all_configs.sections if section.name == configuration.config.section),
None,
)
if target_section:
target_option = next(
(
option
for option in target_section.options
if option.key == configuration.config.option
),
None,
)
if target_option:
if configuration.message(api_client=api_client) is not None:
lint_issues.append(configuration.message(api_client=api_client))
if lint_issues:
rich.print("[red]Found issues in your airflow.cfg:[/red]")
for issue in lint_issues:
rich.print(f" - [yellow]{issue}[/yellow]")
if args.verbose:
rich.print("\n[blue]Detailed Information:[/blue]")
rich.print(f"Ignored sections: [green]{', '.join(ignore_sections)}[/green]")
rich.print(f"Ignored options: [green]{', '.join(ignore_options)}[/green]")
rich.print("\n[red]Please update your configuration file accordingly.[/red]")
else:
rich.print("[green]No issues found in your airflow.cfg. It is ready for Airflow 3![/green]")
except Exception as e:
rich.print(f"[red]Lint configs failed: {e}")
sys.exit(1)
| ConfigChange |
python | skorch-dev__skorch | skorch/tests/test_net.py | {
"start": 1012,
"end": 154192
} | class ____:
@pytest.fixture(scope='module')
def data(self, classifier_data):
return classifier_data
@pytest.fixture(scope='module')
def dummy_callback(self):
from skorch.callbacks import Callback
cb = Mock(spec=Callback)
# make dummy behave like an estimator
cb.get_params.return_value = {}
cb.set_params = lambda **kwargs: cb
return cb
@pytest.fixture(scope='module')
def module_cls(self, classifier_module):
return classifier_module
@pytest.fixture(scope='module')
def net_cls(self):
from skorch import NeuralNetClassifier
return NeuralNetClassifier
@pytest.fixture
def dataset_cls(self):
from skorch.dataset import Dataset
return Dataset
@pytest.fixture
def checkpoint_cls(self):
from skorch.callbacks import Checkpoint
return Checkpoint
@pytest.fixture(scope='module')
def net(self, net_cls, module_cls, dummy_callback):
return net_cls(
module_cls,
callbacks=[('dummy', dummy_callback)],
max_epochs=10,
lr=0.1,
)
@pytest.fixture(scope='module')
def pipe(self, net):
return Pipeline([
('scale', StandardScaler()),
('net', net),
])
@pytest.fixture(scope='module')
def net_fit(self, net_cls, module_cls, dummy_callback, data):
# Careful, don't call additional fits or set_params on this,
# since that would have side effects on other tests.
X, y = data
# We need a new instance of the net and cannot reuse the net
# fixture, because otherwise fixture net and net_fit refer to
# the same object; also, we cannot clone(net) because this
# will result in the dummy_callback not being the mock anymore
net = net_cls(
module_cls,
callbacks=[('dummy', dummy_callback)],
max_epochs=10,
lr=0.1,
)
return net.fit(X, y)
@pytest.fixture
def net_pickleable(self, net_fit):
"""NeuralNet instance that removes callbacks that are not
pickleable.
"""
# callback fixture not pickleable, remove it
callbacks = net_fit.callbacks
net_fit.callbacks = []
callbacks_ = net_fit.callbacks_
# remove mock callback
net_fit.callbacks_ = [(n, cb) for n, cb in net_fit.callbacks_
if not isinstance(cb, Mock)]
net_clone = copy.deepcopy(net_fit)
net_fit.callbacks = callbacks
net_fit.callbacks_ = callbacks_
return net_clone
@pytest.mark.parametrize("copy_method", ["pickle", "copy.deepcopy"])
def test_train_net_after_copy(self, net_cls, module_cls, data,
copy_method):
# This test comes from [issue #317], and makes sure that models
# can be trained after copying (which is really pickling).
#
# [issue #317]:https://github.com/skorch-dev/skorch/issues/317
X, y = data
n1 = net_cls(module_cls)
n1.partial_fit(X, y, epochs=1)
if copy_method == "copy.deepcopy":
n2 = copy.deepcopy(n1)
elif copy_method == "pickle":
n2 = pickle.loads(pickle.dumps(n1))
else:
raise ValueError
# Test to make sure the parameters got copied correctly
close = [torch.allclose(p1, p2)
for p1, p2 in zip(n1.module_.parameters(),
n2.module_.parameters())]
assert all(close)
# make sure the parameters change
# at least two epochs to make sure `train_loss` updates after copy
# (this is a check for the bug in #317, where `train_loss` didn't
# update at all after copy. This covers that case).
n2.partial_fit(X, y, epochs=2)
far = [not torch.allclose(p1, p2)
for p1, p2 in zip(n1.module_.parameters(),
n2.module_.parameters())]
assert all(far)
# Make sure the model is being trained, and the loss actually changes
# (and hopefully decreases, but no test for that)
# If copied incorrectly, the optimizer can't see the gradients
# calculated by loss.backward(), so the loss stays *exactly* the same
assert n2.history[-1]['train_loss'] != n2.history[-2]['train_loss']
# Make sure the optimizer params and module params point to the same
# memory
for opt_param, param in zip(
n2.module_.parameters(),
n2.optimizer_.param_groups[0]['params']):
assert param is opt_param
def test_net_init_one_unknown_argument(self, net_cls, module_cls):
with pytest.raises(ValueError) as e:
net_cls(module_cls, unknown_arg=123).initialize()
expected = ("__init__() got unexpected argument(s) unknown_arg. "
"Either you made a typo, or you added new arguments "
"in a subclass; if that is the case, the subclass "
"should deal with the new arguments explicitly.")
assert e.value.args[0] == expected
def test_net_init_two_unknown_arguments(self, net_cls, module_cls):
with pytest.raises(ValueError) as e:
net_cls(module_cls, lr=0.1, mxa_epochs=5,
warm_start=False, bathc_size=20).initialize()
expected = ("__init__() got unexpected argument(s) "
"bathc_size, mxa_epochs. "
"Either you made a typo, or you added new arguments "
"in a subclass; if that is the case, the subclass "
"should deal with the new arguments explicitly.")
assert e.value.args[0] == expected
@pytest.mark.parametrize('name, suggestion', [
('iterator_train_shuffle', 'iterator_train__shuffle'),
('optimizer_momentum', 'optimizer__momentum'),
('modulenum_units', 'module__num_units'),
('criterionreduce', 'criterion__reduce'),
('callbacks_mycb__foo', 'callbacks__mycb__foo'),
])
def test_net_init_missing_dunder_in_prefix_argument(
self, net_cls, module_cls, name, suggestion):
# forgot to use double-underscore notation
with pytest.raises(ValueError) as e:
net_cls(module_cls, **{name: 123}).initialize()
tmpl = "Got an unexpected argument {}, did you mean {}?"
expected = tmpl.format(name, suggestion)
assert e.value.args[0] == expected
def test_net_init_missing_dunder_in_2_prefix_arguments(
self, net_cls, module_cls):
# forgot to use double-underscore notation in 2 arguments
with pytest.raises(ValueError) as e:
net_cls(
module_cls,
max_epochs=7, # correct
iterator_train_shuffle=True, # uses _ instead of __
optimizerlr=0.5, # missing __
).initialize()
expected = ("Got an unexpected argument iterator_train_shuffle, "
"did you mean iterator_train__shuffle?\n"
"Got an unexpected argument optimizerlr, "
"did you mean optimizer__lr?")
assert e.value.args[0] == expected
def test_net_init_missing_dunder_and_unknown(
self, net_cls, module_cls):
# unknown argument and forgot to use double-underscore notation
with pytest.raises(ValueError) as e:
net_cls(
module_cls,
foobar=123,
iterator_train_shuffle=True,
).initialize()
expected = ("__init__() got unexpected argument(s) foobar. "
"Either you made a typo, or you added new arguments "
"in a subclass; if that is the case, the subclass "
"should deal with the new arguments explicitly.\n"
"Got an unexpected argument iterator_train_shuffle, "
"did you mean iterator_train__shuffle?")
assert e.value.args[0] == expected
    def test_net_with_new_attribute_with_name_clash(
            self, net_cls, module_cls):
        """Prefixed params dispatch to the longest matching prefix."""
        # This covers a bug that existed when a new "settable"
        # argument was added whose name starts the same as the name
        # for an existing argument
        class MyNet(net_cls):
            # add "optimizer_2" as a valid prefix so that it works
            # with set_params
            prefixes_ = net_cls.prefixes_[:] + ['optimizer_2']
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.optimizer_2 = torch.optim.SGD
        # the following line used to raise this error: "TypeError: Got
        # an unexpected argument optimizer_2__lr, did you mean
        # optimizer__2__lr?" because it was erroneously assumed that
        # "optimizer_2__lr" should be dispatched to "optimizer", not
        # "optimizer_2".
        MyNet(module_cls, optimizer_2__lr=0.123)  # should not raise
def test_net_init_with_iterator_valid_shuffle_false_no_warning(
self, net_cls, module_cls, recwarn):
# If a user sets iterator_valid__shuffle=False, everything is good and
# no warning should be issued, see
# https://github.com/skorch-dev/skorch/issues/907
net_cls(module_cls, iterator_valid__shuffle=False).initialize()
assert not recwarn.list
    def test_net_init_with_iterator_valid_shuffle_true_warns(
            self, net_cls, module_cls, recwarn):
        """iterator_valid__shuffle=True emits a warning about shuffled
        predictions."""
        # If a user sets iterator_valid__shuffle=True, they might be
        # in for a surprise, since predict et al. will result in
        # shuffled predictions. It is best to warn about this, since
        # most of the times, this is not what users actually want.
        expected = (
            "You set iterator_valid__shuffle=True; this is most likely not what you "
            "want because the values returned by predict and predict_proba will be "
            "shuffled.")
        # warning expected here
        with pytest.warns(UserWarning, match=expected):
            net_cls(module_cls, iterator_valid__shuffle=True).initialize()
    def test_fit(self, net_fit):
        """Smoke test: requesting the fitted-net fixture must not raise."""
        # fitting does not raise anything
        pass
    @pytest.mark.parametrize('method', INFERENCE_METHODS)
    def test_not_init_raises(self, net_cls, module_cls, data, method):
        """Every inference method on an uninitialized net raises
        NotInitializedError with a helpful message."""
        from skorch.exceptions import NotInitializedError
        net = net_cls(module_cls)
        X = data[0]
        with pytest.raises(NotInitializedError) as exc:
            # we call `list` because `forward_iter` is lazy
            list(getattr(net, method)(X))
        msg = ("This NeuralNetClassifier instance is not initialized yet. "
               "Call 'initialize' or 'fit' with appropriate arguments "
               "before using this method.")
        assert exc.value.args[0] == msg
def test_not_fitted_raises(self, net_cls, module_cls):
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import NotFittedError
net = net_cls(module_cls)
with pytest.raises(NotFittedError) as exc:
check_is_fitted(net)
msg = (
"This NeuralNetClassifier instance is not fitted yet. "
"Call 'fit' with appropriate arguments before "
"using this estimator."
)
assert exc.value.args[0] == msg
    def test_not_fitted_other_attributes(self, module_cls):
        """Explicitly passed attribute names are forwarded to
        sklearn's check_is_fitted."""
        # pass attributes to check for explicitly
        with patch('skorch.net.check_is_fitted') as check:
            from skorch import NeuralNetClassifier
            net = NeuralNetClassifier(module_cls)
            attributes = ['foo', 'bar_']
            net.check_is_fitted(attributes=attributes)
            # second positional argument of the patched call carries the names
            args = check.call_args_list[0][0][1]
            assert args == attributes
def test_net_learns(self, net_cls, module_cls, data):
X, y = data
net = net_cls(
module_cls,
max_epochs=10,
lr=0.1,
)
net.fit(X, y)
y_pred = net.predict(X)
assert accuracy_score(y, y_pred) > ACCURACY_EXPECTED
def test_forward(self, net_fit, data):
X = data[0]
n = len(X)
y_forward = net_fit.forward(X)
assert is_torch_data_type(y_forward)
# Expecting (number of samples, number of output units)
assert y_forward.shape == (n, 2)
y_proba = net_fit.predict_proba(X)
assert np.allclose(to_numpy(y_forward), y_proba)
    def test_forward_device_cpu(self, net_fit, data):
        """forward keeps the output on CPU by default and when requested."""
        X = data[0]
        # CPU by default
        y_forward = net_fit.forward(X)
        assert isinstance(X, np.ndarray)
        assert not y_forward.is_cuda

        # explicitly requesting CPU behaves the same
        y_forward = net_fit.forward(X, device='cpu')
        assert isinstance(X, np.ndarray)
        assert not y_forward.is_cuda
    @pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
    def test_forward_device_gpu(self, net_fit, data):
        """forward(..., device='cuda:0') places the output on the GPU while
        leaving the input untouched."""
        X = data[0]
        y_forward = net_fit.forward(X, device='cuda:0')
        assert isinstance(X, np.ndarray)
        assert y_forward.is_cuda
    def test_dropout(self, net_fit, data):
        """Dropout is inactive during inference and active with training=True.

        Inactive dropout yields deterministic outputs; active dropout makes
        two consecutive forward passes differ.
        """
        # Note: does not test that dropout is really active during
        # training.
        X = data[0]

        # check that dropout not active by default
        y_proba = to_numpy(net_fit.forward(X))
        y_proba2 = to_numpy(net_fit.forward(X))
        assert np.allclose(y_proba, y_proba2, rtol=1e-7)

        # check that dropout can be activated
        y_proba = to_numpy(net_fit.forward(X, training=True))
        y_proba2 = to_numpy(net_fit.forward(X, training=True))
        assert not np.allclose(y_proba, y_proba2, rtol=1e-7)
def test_pickle_save_load(self, net_pickleable, data, tmpdir):
X, y = data
score_before = accuracy_score(y, net_pickleable.predict(X))
p = tmpdir.mkdir('skorch').join('testmodel.pkl')
with open(str(p), 'wb') as f:
pickle.dump(net_pickleable, f)
del net_pickleable
with open(str(p), 'rb') as f:
net_new = pickle.load(f)
score_after = accuracy_score(y, net_new.predict(X))
assert np.isclose(score_after, score_before)
def test_pickle_save_load_device_is_none(self, net_pickleable):
# It is legal to set device=None, but in that case we cannot know what
# device was meant, so we should fall back to CPU.
from skorch.exceptions import DeviceWarning
net_pickleable.set_params(device=None)
msg = (
f"Setting self.device = cpu since the requested device "
f"was not specified"
)
with pytest.warns(DeviceWarning, match=msg):
net_loaded = pickle.loads(pickle.dumps(net_pickleable))
params = net_loaded.get_all_learnable_params()
assert all(param.device.type == 'cpu' for _, param in params)
    def train_picklable_cuda_net(self, net_pickleable, data):
        """Helper: fit the pickleable net on CUDA with CUDA-resident state.

        Configures a stateful optimizer and a CUDA-parametrized criterion so
        that the resulting pickle contains CUDA tensors in several places.
        """
        X, y = data
        w = torch.FloatTensor([1.] * int(y.max() + 1)).to('cuda')

        # Use stateful optimizer (CUDA variables in state) and
        # a CUDA parametrized criterion along with a CUDA net.
        net_pickleable.set_params(
            device='cuda',
            criterion__weight=w,
            optimizer=torch.optim.Adam,
        )
        net_pickleable.fit(X, y)

        return net_pickleable
    @pytest.fixture
    def pickled_cuda_net_path(self, net_pickleable, data):
        """Path to a pickle of a CUDA-trained net.

        On CUDA machines the pickle is (re)generated; on CPU-only machines a
        previously committed pickle must already exist at the path.
        """
        path = os.path.join('skorch', 'tests', 'net_cuda.pkl')

        # Assume that a previous run on a CUDA-capable device
        # created `net_cuda.pkl`.
        if not torch.cuda.is_available():
            assert os.path.exists(path)
            return path

        net_pickleable = self.train_picklable_cuda_net(net_pickleable, data)

        with open(path, 'wb') as f:
            pickle.dump(net_pickleable, f)
        return path
    @pytest.mark.parametrize('cuda_available', {False, torch.cuda.is_available()})
    def test_pickle_load(self, cuda_available, pickled_cuda_net_path):
        """Loading a CUDA-trained pickle works with and without CUDA."""
        # patch availability so the no-CUDA code path is exercised even on
        # CUDA machines
        with patch('torch.cuda.is_available', lambda *_: cuda_available):
            with open(pickled_cuda_net_path, 'rb') as f:
                pickle.load(f)  # does not raise
    @pytest.mark.parametrize('device', ['cpu', 'cuda'])
    def test_device_torch_device(self, net_cls, module_cls, device):
        """Passing a native torch.device (not just a string) is supported."""
        # Check if native torch.device works as well.
        if device.startswith('cuda') and not torch.cuda.is_available():
            pytest.skip()
        net = net_cls(module=module_cls, device=torch.device(device))
        net = net.initialize()
        # module weights must end up on the requested device
        assert net.module_.sequential[0].weight.device.type.startswith(device)
    @pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
    @pytest.mark.parametrize(
        'save_dev, cuda_available, load_dev, expect_warning',
        [
            ('cuda', False, 'cpu', True),
            ('cuda', True, 'cuda', False),
            ('cpu', True, 'cpu', False),
            ('cpu', False, 'cpu', False),
        ])
    def test_pickle_save_and_load_mixed_devices(
            self,
            net_cls,
            module_cls,
            tmpdir,
            save_dev,
            cuda_available,
            load_dev,
            expect_warning,
            recwarn,
    ):
        """Pickling on one device and unpickling on another falls back
        gracefully, warning exactly when the device has to change."""
        from skorch.exceptions import DeviceWarning

        net = net_cls(module=module_cls, device=save_dev).initialize()
        p = tmpdir.mkdir('skorch').join('testmodel.pkl')
        with open(str(p), 'wb') as f:
            pickle.dump(net, f)
        del net

        # simulate the CUDA availability of the loading machine
        with patch('torch.cuda.is_available', lambda *_: cuda_available):
            with open(str(p), 'rb') as f:
                if not expect_warning:
                    m = pickle.load(f)
                    assert not any(w.category == DeviceWarning for w in recwarn.list)
                else:
                    with pytest.warns(DeviceWarning) as w:
                        m = pickle.load(f)

        assert torch.device(m.device) == torch.device(load_dev)
        if expect_warning:
            # We should have captured two warnings:
            # 1. one for the failed load
            # 2. for switching devices on the net instance
            # remove possible future warning about weights_only=False
            assert len(w.list) == 2
            assert w.list[0].message.args[0] == (
                'Requested to load data to CUDA but no CUDA devices '
                'are available. Loading on device "cpu" instead.')
            assert w.list[1].message.args[0] == (
                'Setting self.device = {} since the requested device ({}) '
                'is not available.'.format(load_dev, save_dev))
def test_pickle_save_and_load_uninitialized(
self, net_cls, module_cls, tmpdir):
net = net_cls(module_cls)
p = tmpdir.mkdir('skorch').join('testmodel.pkl')
with open(str(p), 'wb') as f:
# does not raise
pickle.dump(net, f)
with open(str(p), 'rb') as f:
pickle.load(f)
def test_save_params_invalid_argument_name_raises(self, net_fit):
msg = ("save_params got an unexpected argument 'foobar', "
"did you mean 'f_foobar'?")
with pytest.raises(TypeError, match=msg):
net_fit.save_params(foobar='some-file.pt')
def test_load_params_invalid_argument_name_raises(self, net_fit):
msg = ("load_params got an unexpected argument 'foobar', "
"did you mean 'f_foobar'?")
with pytest.raises(TypeError, match=msg):
net_fit.load_params(foobar='some-file.pt')
    def test_save_params_with_f_params_and_f_module_raises(self, net_fit):
        """f_params and f_module are aliases; passing both raises TypeError."""
        msg = "save_params called with both f_params and f_module, please choose one"
        with pytest.raises(TypeError, match=msg):
            net_fit.save_params(f_module='weights.pt', f_params='params.pt')
    def test_load_params_with_f_params_and_f_module_raises(self, net_fit):
        """f_params and f_module are aliases; passing both raises TypeError."""
        msg = "load_params called with both f_params and f_module, please choose one"
        with pytest.raises(TypeError, match=msg):
            net_fit.load_params(f_module='weights.pt', f_params='params.pt')
    def test_save_params_no_state_dict_raises(self, net_fit):
        """Saving an attribute that is not a Module/Optimizer raises
        AttributeError with guidance."""
        msg = ("You are trying to save 'f_max_epochs' but for that to work, the net "
               "needs to have an attribute called 'net.max_epochs_' that is a PyTorch "
               "Module or Optimizer; make sure that it exists and check for typos.")
        with pytest.raises(AttributeError, match=msg):
            net_fit.save_params(f_max_epochs='some-file.pt')
    def test_load_params_no_state_dict_raises(self, net_fit):
        """Loading an attribute that is not a Module/Optimizer raises
        AttributeError with guidance."""
        msg = ("You are trying to load 'f_max_epochs' but for that to work, the net "
               "needs to have an attribute called 'net.max_epochs_' that is a PyTorch "
               "Module or Optimizer; make sure that it exists and check for typos.")
        with pytest.raises(AttributeError, match=msg):
            net_fit.load_params(f_max_epochs='some-file.pt')
    def test_save_params_unknown_attribute_raises(self, net_fit):
        """Saving a completely unknown attribute raises AttributeError."""
        msg = ("You are trying to save 'f_unknown' but for that to work, the net "
               "needs to have an attribute called 'net.unknown_' that is a PyTorch "
               "Module or Optimizer; make sure that it exists and check for typos.")
        with pytest.raises(AttributeError, match=msg):
            net_fit.save_params(f_unknown='some-file.pt')
    def test_load_params_unknown_attribute_raises(self, net_fit):
        """Loading a completely unknown attribute raises AttributeError."""
        msg = ("You are trying to load 'f_unknown' but for that to work, the net "
               "needs to have an attribute called 'net.unknown_' that is a PyTorch "
               "Module or Optimizer; make sure that it exists and check for typos.")
        with pytest.raises(AttributeError, match=msg):
            net_fit.load_params(f_unknown='some-file.pt')
    def test_load_params_no_warning(self, net_fit, tmp_path, recwarn):
        """Saving and loading params must not trigger torch.load warnings."""
        # See discussion in 1063
        # Ensure that there is no FutureWarning (and DeprecationWarning for good
        # measure) caused by torch.load.
        net_fit.save_params(f_params=tmp_path / 'weights.pt')
        net_fit.load_params(f_params=tmp_path / 'weights.pt')
        assert not any(
            isinstance(warning.message, (DeprecationWarning, FutureWarning))
            for warning in recwarn.list
        )
    @pytest.mark.parametrize('use_safetensors', [False, True])
    def test_save_load_state_dict_file(
            self, net_cls, module_cls, net_fit, data, tmpdir, use_safetensors):
        """save_params/load_params via an open file object restore the
        fitted score on a fresh net (with and without safetensors)."""
        net = net_cls(module_cls).initialize()
        X, y = data

        score_before = accuracy_score(y, net_fit.predict(X))
        score_untrained = accuracy_score(y, net.predict(X))
        # sanity check: the fresh net must not already score like the fitted one
        assert not np.isclose(score_before, score_untrained)

        p = tmpdir.mkdir('skorch').join('testmodel.pkl')
        with open(str(p), 'wb') as f:
            net_fit.save_params(f_params=f, use_safetensors=use_safetensors)
        del net_fit
        with open(str(p), 'rb') as f:
            net.load_params(f_params=f, use_safetensors=use_safetensors)
        score_after = accuracy_score(y, net.predict(X))
        assert np.isclose(score_after, score_before)
@pytest.mark.parametrize('use_safetensors', [False, True])
def test_save_load_state_dict_str(
self, net_cls, module_cls, net_fit, data, tmpdir, use_safetensors):
net = net_cls(module_cls).initialize()
X, y = data
score_before = accuracy_score(y, net_fit.predict(X))
score_untrained = accuracy_score(y, net.predict(X))
assert not np.isclose(score_before, score_untrained)
p = tmpdir.mkdir('skorch').join('testmodel.pkl')
net_fit.save_params(f_params=str(p), use_safetensors=use_safetensors)
del net_fit
net.load_params(f_params=str(p), use_safetensors=use_safetensors)
score_after = accuracy_score(y, net.predict(X))
assert np.isclose(score_after, score_before)
    def test_save_load_state_dict_no_duplicate_registration_after_initialize(
            self, net_cls, module_cls, net_fit, tmpdir):
        """Loading params into an initialized net must not re-register
        modules/criteria/optimizers (regression test for #781)."""
        # #781
        net = net_cls(module_cls).initialize()

        p = tmpdir.mkdir('skorch').join('testmodel.pkl')
        with open(str(p), 'wb') as f:
            net_fit.save_params(f_params=f)
        del net_fit

        with open(str(p), 'rb') as f:
            net.load_params(f_params=f)

        # check that there are no duplicates in _modules, _criteria, _optimizers
        # pylint: disable=protected-access
        assert net._modules == ['module']
        assert net._criteria == ['criterion']
        assert net._optimizers == ['optimizer']
    def test_save_load_state_dict_no_duplicate_registration_after_clone(
            self, net_fit, tmpdir):
        """Loading params into a cloned net must not re-register
        modules/criteria/optimizers (regression test for #781)."""
        # #781
        net = clone(net_fit).initialize()

        p = tmpdir.mkdir('skorch').join('testmodel.pkl')
        with open(str(p), 'wb') as f:
            net_fit.save_params(f_params=f)
        del net_fit

        with open(str(p), 'rb') as f:
            net.load_params(f_params=f)

        # check that there are no duplicates in _modules, _criteria, _optimizers
        # pylint: disable=protected-access
        assert net._modules == ['module']
        assert net._criteria == ['criterion']
        assert net._optimizers == ['optimizer']
    @pytest.mark.parametrize('file_str', [True, False])
    def test_save_load_safetensors_used(self, net_fit, file_str, tmpdir):
        """With use_safetensors=True, the written file really is safetensors.

        Opens the stored file directly with the safetensors library and
        compares its tensors against the module's state_dict.
        """
        # Safetensors' capacity to save and load net params is already covered
        # in other tests. This is a test to exclude the (trivial) bug that even
        # with use_safetensors=True, safetensors is not actually being used
        # (instead accidentally using pickle or something like that). To test
        # this, we directly open the stored file using safetensors and check its
        # contents. If it were, say, a pickle file, this test would fail.
        from safetensors import safe_open

        p = tmpdir.mkdir('skorch').join('testmodel.safetensors')
        if file_str:
            net_fit.save_params(f_params=str(p), use_safetensors=True)
        else:
            with open(str(p), 'wb') as f:
                net_fit.save_params(f_params=f, use_safetensors=True)

        state_dict_loaded = {}
        with safe_open(str(p), framework='pt', device=net_fit.device) as f:
            for key in f.keys():
                state_dict_loaded[key] = f.get_tensor(key)

        state_dict = net_fit.module_.state_dict()
        # same keys and identical tensor values as the in-memory state dict
        assert state_dict_loaded.keys() == state_dict.keys()
        for key in state_dict:
            torch.testing.assert_close(state_dict[key], state_dict_loaded[key])
    def test_save_optimizer_with_safetensors_raises(self, net_cls, module_cls, tmpdir):
        """Saving an optimizer with safetensors raises a helpful ValueError."""
        # safetensors cannot save anything except for tensors. The state_dict of
        # the optimizer contains other stuff. Therefore, an error with a helpful
        # message is raised.
        p = tmpdir.mkdir('skorch').join('optimizer.safetensors')
        net = net_cls(module_cls).initialize()
        with pytest.raises(ValueError) as exc:
            net.save_params(f_optimizer=str(p), use_safetensors=True)

        msg = exc.value.args[0]
        assert msg.startswith("You are trying to store")
        assert "optimizer.safetensors" in msg
        assert msg.endswith("don't use safetensors.")
    @pytest.fixture(scope='module')
    def net_fit_adam(self, net_cls, module_cls, data):
        """A net fitted with the (stateful) Adam optimizer."""
        net = net_cls(
            module_cls, max_epochs=2, lr=0.1,
            optimizer=torch.optim.Adam)
        net.fit(*data)
        return net
    @pytest.fixture(scope='module')
    def criterion_with_params_cls(self):
        """Criterion class that carries learnable parameters of its own."""
        class MyCriterion(nn.Module):
            """Criterion with learnable parameters"""
            def __init__(self):
                super().__init__()
                self.lin = nn.Linear(2, 1)

            def forward(self, y_pred, y_true):
                # squared error of a linear projection of the predictions
                return ((self.lin(y_pred) - y_true.float()) ** 2).sum()
        return MyCriterion
    @pytest.fixture
    def net_fit_criterion(self, net_cls, module_cls, criterion_with_params_cls, data):
        """Net fitted with a criterion that has learnable parameters."""
        # Replace criterion by a module so that it has learnt parameters
        net = net_cls(
            module_cls,
            criterion=criterion_with_params_cls,
            max_epochs=2,
            lr=0.1,
            optimizer=torch.optim.Adam,
        )
        net.fit(*data)
        return net
    def test_save_load_state_dict_file_with_history_optimizer_criterion(
            self, net_cls, module_cls, criterion_with_params_cls, net_fit_criterion, tmpdir):
        """Params, optimizer state, criterion params, and history all
        survive a save/load round-trip via open file objects."""
        skorch_tmpdir = tmpdir.mkdir('skorch')
        p = skorch_tmpdir.join('testmodel.pkl')
        o = skorch_tmpdir.join('optimizer.pkl')
        c = skorch_tmpdir.join('criterion.pkl')
        h = skorch_tmpdir.join('history.json')

        # save all four pieces of state through open file handles
        with ExitStack() as stack:
            p_fp = stack.enter_context(open(str(p), 'wb'))
            o_fp = stack.enter_context(open(str(o), 'wb'))
            c_fp = stack.enter_context(open(str(c), 'wb'))
            h_fp = stack.enter_context(open(str(h), 'w'))

            net_fit_criterion.save_params(
                f_params=p_fp, f_optimizer=o_fp, f_criterion=c_fp, f_history=h_fp)

            # 'step' is state from the Adam optimizer
            orig_steps = [v['step'] for v in
                          net_fit_criterion.optimizer_.state_dict()['state'].values()]
            orig_loss = np.array(net_fit_criterion.history[:, 'train_loss'])
            orig_criterion_weight = dict(
                net_fit_criterion.criterion_.named_parameters())['lin.weight']
            del net_fit_criterion

        # restore into a freshly initialized net and compare the state
        with ExitStack() as stack:
            p_fp = stack.enter_context(open(str(p), 'rb'))
            o_fp = stack.enter_context(open(str(o), 'rb'))
            c_fp = stack.enter_context(open(str(c), 'rb'))
            h_fp = stack.enter_context(open(str(h), 'r'))
            new_net = net_cls(
                module_cls,
                criterion=criterion_with_params_cls,
                optimizer=torch.optim.Adam,
            ).initialize()
            new_net.load_params(
                f_params=p_fp, f_optimizer=o_fp, f_criterion=c_fp, f_history=h_fp)

            new_steps = [v['step'] for v in
                         new_net.optimizer_.state_dict()['state'].values()]
            new_loss = np.array(new_net.history[:, 'train_loss'])

            assert np.allclose(orig_loss, new_loss)
            assert orig_steps == new_steps

            new_criterion_weight = dict(new_net.criterion_.named_parameters())[
                'lin.weight']
            assert (orig_criterion_weight == new_criterion_weight).all()
    def test_save_load_state_dict_str_with_history_optimizer(
            self, net_cls, module_cls, net_fit_adam, tmpdir):
        """Params, optimizer state, and history survive a save/load
        round-trip via path strings."""
        skorch_tmpdir = tmpdir.mkdir('skorch')
        p = str(skorch_tmpdir.join('testmodel.pkl'))
        o = str(skorch_tmpdir.join('optimizer.pkl'))
        h = str(skorch_tmpdir.join('history.json'))

        net_fit_adam.save_params(f_params=p, f_optimizer=o, f_history=h)

        # 'step' is state from the Adam optimizer
        orig_steps = [v['step'] for v in
                      net_fit_adam.optimizer_.state_dict()['state'].values()]
        orig_loss = np.array(net_fit_adam.history[:, 'train_loss'])
        del net_fit_adam

        new_net = net_cls(
            module_cls, optimizer=torch.optim.Adam).initialize()
        new_net.load_params(f_params=p, f_optimizer=o, f_history=h)

        new_steps = [v['step'] for v in
                     new_net.optimizer_.state_dict()['state'].values()]
        new_loss = np.array(new_net.history[:, 'train_loss'])

        assert np.allclose(orig_loss, new_loss)
        assert orig_steps == new_steps
    @pytest.mark.parametrize("explicit_init", [True, False])
    @pytest.mark.parametrize('use_safetensors', [False, True])
    def test_save_and_load_from_checkpoint(
            self, net_cls, module_cls, data, checkpoint_cls, tmpdir,
            explicit_init, use_safetensors):
        """A Checkpoint callback writes all state files and a new net can
        resume training from them via load_params(checkpoint=...)."""

        skorch_dir = tmpdir.mkdir('skorch')
        f_params = skorch_dir.join('params.pt')
        f_optimizer = skorch_dir.join('optimizer.pt')
        f_criterion = skorch_dir.join('criterion.pt')
        f_history = skorch_dir.join('history.json')

        kwargs = dict(
            monitor=None,
            f_params=str(f_params),
            f_optimizer=str(f_optimizer),
            f_criterion=str(f_criterion),
            f_history=str(f_history),
            use_safetensors=use_safetensors,
        )
        if use_safetensors:
            # safetensors cannot save optimizers
            kwargs['f_optimizer'] = None
        cp = checkpoint_cls(**kwargs)
        net = net_cls(
            module_cls, max_epochs=4, lr=0.1,
            optimizer=torch.optim.Adam, callbacks=[cp])
        net.fit(*data)
        del net

        assert f_params.exists()
        assert f_criterion.exists()
        assert f_history.exists()
        if not use_safetensors:
            # safetensors cannot save optimizers
            assert f_optimizer.exists()

        new_net = net_cls(
            module_cls, max_epochs=4, lr=0.1,
            optimizer=torch.optim.Adam, callbacks=[cp])
        if explicit_init:
            new_net.initialize()
        new_net.load_params(checkpoint=cp, use_safetensors=use_safetensors)

        assert len(new_net.history) == 4

        new_net.partial_fit(*data)

        # fit ran twice for a total of 8 epochs
        assert len(new_net.history) == 8
    def test_checkpoint_with_prefix_and_dirname(
            self, net_cls, module_cls, data, checkpoint_cls, tmpdir):
        """Checkpoint honors fn_prefix and dirname when writing its files."""
        exp_dir = tmpdir.mkdir('skorch')
        exp_basedir = exp_dir.join('exp1')

        cp = checkpoint_cls(
            monitor=None, fn_prefix='unet_', dirname=str(exp_basedir))
        net = net_cls(
            module_cls, max_epochs=4, lr=0.1,
            optimizer=torch.optim.Adam, callbacks=[cp])
        net.fit(*data)

        assert exp_basedir.join('unet_params.pt').exists()
        assert exp_basedir.join('unet_optimizer.pt').exists()
        assert exp_basedir.join('unet_history.json').exists()
    @pytest.mark.parametrize('use_safetensors', [False, True])
    def test_save_and_load_from_checkpoint_formatting(
            self, net_cls, module_cls, data, checkpoint_cls, tmpdir, use_safetensors):
        """Checkpoint filename templates are formatted from the history and
        resuming starts from the monitored best epoch."""

        def epoch_3_scorer(net, *_):
            # marks epoch 3 as the "best" epoch for the monitor
            return 1 if net.history[-1, 'epoch'] == 3 else 0

        from skorch.callbacks import EpochScoring
        scoring = EpochScoring(
            scoring=epoch_3_scorer, on_train=True)

        skorch_dir = tmpdir.mkdir('skorch')
        f_params = skorch_dir.join('model_epoch_{last_epoch[epoch]}.pt')
        f_optimizer = skorch_dir.join('optimizer_epoch_{last_epoch[epoch]}.pt')
        f_criterion = skorch_dir.join('criterion_epoch_{last_epoch[epoch]}.pt')
        f_history = skorch_dir.join('history.json')

        kwargs = dict(
            monitor='epoch_3_scorer',
            f_params=str(f_params),
            f_optimizer=str(f_optimizer),
            f_criterion=str(f_criterion),
            f_history=str(f_history),
            use_safetensors=use_safetensors,
        )
        if use_safetensors:
            # safetensors cannot save optimizers
            kwargs['f_optimizer'] = None
        cp = checkpoint_cls(**kwargs)

        net = net_cls(
            module_cls, max_epochs=5, lr=0.1,
            optimizer=torch.optim.Adam, callbacks=[
                ('my_score', scoring), cp
            ])
        net.fit(*data)
        del net

        assert skorch_dir.join('model_epoch_3.pt').exists()
        assert skorch_dir.join('criterion_epoch_3.pt').exists()
        assert skorch_dir.join('history.json').exists()
        if not use_safetensors:
            # safetensors cannot save optimizers
            assert skorch_dir.join('optimizer_epoch_3.pt').exists()

        new_net = net_cls(
            module_cls, max_epochs=5, lr=0.1,
            optimizer=torch.optim.Adam, callbacks=[
                ('my_score', scoring), cp
            ])
        new_net.load_params(checkpoint=cp, use_safetensors=use_safetensors)

        # original run saved checkpoint at epoch 3
        assert len(new_net.history) == 3

        new_net.partial_fit(*data)

        # training continued from the best epoch of the first run,
        # the best epoch in the first run happened at epoch 3,
        # the second ran for 5 epochs, so the final history of the new
        # net is 3+5 = 8
        assert len(new_net.history) == 8
        assert new_net.history[:, 'event_cp'] == [
            False, False, True, False, False, False, False, False]
    def test_save_params_not_init_optimizer(
            self, net_cls, module_cls, tmpdir):
        """Saving the optimizer of a net with only the module initialized
        raises NotInitializedError."""
        from skorch.exceptions import NotInitializedError
        # only the module is initialized, not the optimizer
        net = net_cls(module_cls)._initialize_module()
        skorch_tmpdir = tmpdir.mkdir('skorch')
        p = skorch_tmpdir.join('testmodel.pkl')
        o = skorch_tmpdir.join('optimizer.pkl')

        with pytest.raises(NotInitializedError) as exc:
            net.save_params(f_params=str(p), f_optimizer=o)
        expected = ("Cannot save state of an un-initialized model. "
                    "Please initialize first by calling .initialize() "
                    "or by fitting the model with .fit(...).")
        assert exc.value.args[0] == expected
    def test_load_params_not_init_optimizer(
            self, net_cls, module_cls, tmpdir):
        """Loading optimizer state into an uninitialized net raises
        NotInitializedError."""
        from skorch.exceptions import NotInitializedError
        net = net_cls(module_cls).initialize()
        skorch_tmpdir = tmpdir.mkdir('skorch')
        p = skorch_tmpdir.join('testmodel.pkl')
        net.save_params(f_params=str(p))

        net = net_cls(module_cls)  # not initialized
        o = skorch_tmpdir.join('optimizer.pkl')

        with pytest.raises(NotInitializedError) as exc:
            net.load_params(f_optimizer=str(o))
        expected = ("Cannot load state of an un-initialized model. "
                    "Please initialize first by calling .initialize() "
                    "or by fitting the model with .fit(...).")
        assert exc.value.args[0] == expected
def test_save_state_dict_not_init(
self, net_cls, module_cls, tmpdir):
from skorch.exceptions import NotInitializedError
net = net_cls(module_cls)
p = tmpdir.mkdir('skorch').join('testmodel.pkl')
with pytest.raises(NotInitializedError) as exc:
net.save_params(f_params=str(p))
expected = ("Cannot save state of an un-initialized model. "
"Please initialize first by calling .initialize() "
"or by fitting the model with .fit(...).")
assert exc.value.args[0] == expected
def test_load_state_dict_not_init(
self, net_cls, module_cls, tmpdir):
from skorch.exceptions import NotInitializedError
net = net_cls(module_cls)
p = tmpdir.mkdir('skorch').join('testmodel.pkl')
with pytest.raises(NotInitializedError) as exc:
net.load_params(f_params=str(p))
expected = ("Cannot load state of an un-initialized model. "
"Please initialize first by calling .initialize() "
"or by fitting the model with .fit(...).")
assert exc.value.args[0] == expected
    @pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
    def test_save_load_state_cuda_intercompatibility(
            self, net_cls, module_cls, tmpdir):
        """CUDA weights load on a CUDA-less machine with a DeviceWarning."""
        # This test checks that cuda weights can be loaded even without cuda,
        # falling back to 'cpu', but there should be a warning. This test does
        # not work with safetensors. The reason is probably that the patch does
        # not affect safetensors.
        from skorch.exceptions import DeviceWarning
        net = net_cls(module_cls, device='cuda').initialize()

        p = tmpdir.mkdir('skorch').join('testmodel.pkl')
        net.save_params(f_params=str(p))

        # pretend no CUDA is available during loading
        with patch('torch.cuda.is_available', lambda *_: False):
            with pytest.warns(DeviceWarning) as w:
                net.load_params(f_params=str(p))

        assert w.list[0].message.args[0] == (
            'Requested to load data to CUDA but no CUDA devices '
            'are available. Loading on device "cpu" instead.')
    @pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
    @pytest.mark.parametrize('use_safetensors', [False, True])
    def test_save_params_cuda_load_params_cpu_when_cuda_available(
            self, net_cls, module_cls, data, use_safetensors, tmpdir):
        """Params saved from a CUDA net load cleanly into a CPU net."""
        # Test that if we have a cuda device, we can save cuda
        # parameters and then load them to cpu
        X, y = data
        net = net_cls(module_cls, device='cuda', max_epochs=1).fit(X, y)
        p = tmpdir.mkdir('skorch').join('testmodel.pkl')
        net.save_params(f_params=str(p), use_safetensors=use_safetensors)

        net2 = net_cls(module_cls, device='cpu').initialize()
        net2.load_params(f_params=str(p), use_safetensors=use_safetensors)
        net2.predict(X)  # does not raise
    @pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
    @pytest.mark.parametrize('parameter,name', [
        ('f_params', 'net_cuda.pt'),
        ('f_optimizer', 'optimizer_cuda.pt'),
    ])
    def test_load_cuda_params_to_cuda(
            self, parameter, name, net_cls, module_cls, data):
        """CUDA-trained param/optimizer files load onto a CUDA net."""
        net = net_cls(module_cls, device='cuda').initialize()
        # object was trained with CUDA
        kwargs = {parameter: os.path.join('skorch', 'tests', name)}
        net.load_params(**kwargs)
        net.predict(data[0])  # does not raise
    @pytest.mark.parametrize('parameter,name', [
        ('f_params', 'net_cuda.pt'),
        ('f_optimizer', 'optimizer_cuda.pt'),
    ])
    def test_load_cuda_params_to_cpu(
            self, parameter, name, net_cls, module_cls, data):
        """CUDA-trained param/optimizer files load onto a CPU net."""
        # Note: This test will pass trivially when CUDA is available
        # but triggered a bug when CUDA is not available.
        net = net_cls(module_cls).initialize()
        # object was trained with CUDA
        kwargs = {parameter: os.path.join('skorch', 'tests', name)}
        net.load_params(**kwargs)
        net.predict(data[0])  # does not raise
    def test_save_params_with_history_file_obj(
            self, net_cls, module_cls, net_fit, tmpdir):
        """History round-trips through save_params/load_params via an open
        file object."""
        net = net_cls(module_cls).initialize()

        history_before = net_fit.history

        p = tmpdir.mkdir('skorch').join('history.json')
        with open(str(p), 'w') as f:
            net_fit.save_params(f_history=f)
        del net_fit
        with open(str(p), 'r') as f:
            net.load_params(f_history=f)

        assert net.history == history_before
    @pytest.mark.parametrize('converter', [str, Path])
    def test_save_params_with_history_file_path(
            self, net_cls, module_cls, net_fit, tmpdir, converter):
        """History round-trips via both str and pathlib.Path file paths."""
        # Test loading/saving with different kinds of path representations.

        net = net_cls(module_cls).initialize()
        history_before = net_fit.history

        p = tmpdir.mkdir('skorch').join('history.json')
        net_fit.save_params(f_history=converter(p))
        del net_fit
        net.load_params(f_history=converter(p))

        assert net.history == history_before
    @pytest.mark.parametrize('method, call_count', [
        ('on_train_begin', 1),
        ('on_train_end', 1),
        ('on_epoch_begin', 10),
        ('on_epoch_end', 10),
        # by default: 80/20 train/valid split
        ('on_batch_begin', (800 // 128 + 1) * 10 + (200 // 128 + 1) * 10),
        ('on_batch_end', (800 // 128 + 1) * 10 + (200 // 128 + 1) * 10),
    ])
    def test_callback_is_called(self, net_fit, method, call_count):
        """Each callback hook fires the expected number of times, with the
        net as its first argument."""
        # callback -2 is the mocked callback
        method = getattr(net_fit.callbacks_[-2][1], method)
        assert method.call_count == call_count
        assert method.call_args_list[0][0][0] is net_fit
def test_history_correct_shape(self, net_fit):
assert len(net_fit.history) == net_fit.max_epochs
def test_history_default_keys(self, net_fit):
expected_keys = {
'train_loss', 'valid_loss', 'epoch', 'dur', 'batches', 'valid_acc'}
for row in net_fit.history:
assert expected_keys.issubset(row)
def test_history_is_filled(self, net_fit):
assert len(net_fit.history) == net_fit.max_epochs
    def test_initializing_net_with_custom_history(self, net_cls, module_cls, data):
        """A user-supplied History subclass instance is used as-is by the net."""
        # It is possible to pass a custom history instance to the net and have
        # the net use said history
        from skorch.history import History

        class MyHistory(History):
            pass

        net = net_cls(module_cls, history=MyHistory(), max_epochs=3)
        X, y = data
        net.fit(X[:100], y[:100])
        assert isinstance(net.history, MyHistory)
    def test_set_params_works(self, net, data):
        """set_params with module__* and lr changes take effect on refit."""
        X, y = data
        net.fit(X, y)

        # initial architecture and learning rate
        assert net.module_.sequential[0].out_features == 10
        assert isinstance(net.module_.sequential[1], nn.ReLU)
        assert net.module_.sequential[3].in_features == 10
        assert np.isclose(net.lr, 0.1)

        net.set_params(
            module__hidden_units=20,
            module__nonlin=nn.Tanh(),
            lr=0.2,
        )
        net.fit(X, y)

        # updated architecture and learning rate after refit
        assert net.module_.sequential[0].out_features == 20
        assert isinstance(net.module_.sequential[1], nn.Tanh)
        assert net.module_.sequential[3].in_features == 20
        assert np.isclose(net.lr, 0.2)
    def test_unknown_set_params_gives_helpful_message(self, net_fit):
        """set_params with an unknown key names the valid parameters."""
        # test that the error message of set_params includes helpful
        # information instead of, e.g., generator expressions.

        # sklearn 0.2x does not output the parameter names so we can
        # skip detailed checks of the error message there.
        sklearn_0_2x_string = "Check the list of available parameters with `estimator.get_params().keys()`"

        with pytest.raises(ValueError) as e:
            net_fit.set_params(invalid_parameter_xyz=42)
        exception_str = str(e.value)

        if sklearn_0_2x_string in exception_str:
            return

        expected_keys = ["module", "criterion"]
        for key in expected_keys:
            assert key in exception_str[exception_str.find("Valid parameters are: ") :]
def test_set_params_then_initialize_remembers_param(
self, net_cls, module_cls):
net = net_cls(module_cls)
# net does not 'forget' that params were set
assert net.verbose != 123
net.set_params(verbose=123)
assert net.verbose == 123
net.initialize()
assert net.verbose == 123
    def test_set_params_on_callback_then_initialize_remembers_param(
            self, net_cls, module_cls):
        """A callback sub-parameter set via set_params survives re-initialization."""
        net = net_cls(module_cls).initialize()

        # net does not 'forget' that params were set
        assert dict(net.callbacks_)['print_log'].sink is print
        net.set_params(callbacks__print_log__sink=123)
        assert dict(net.callbacks_)['print_log'].sink == 123
        net.initialize()
        assert dict(net.callbacks_)['print_log'].sink == 123
    def test_changing_model_reinitializes_optimizer(self, net, data):
        """set_params changes to the module must re-initialize the optimizer."""
        # The idea is that we change the model using `set_params` to
        # add parameters. Since the optimizer depends on the model
        # parameters it needs to be reinitialized.
        X, y = data
        net.set_params(module__nonlin=nn.ReLU())
        net.fit(X, y)
        net.set_params(module__nonlin=nn.PReLU())
        assert isinstance(net.module_.nonlin, nn.PReLU)
        d1 = net.module_.nonlin.weight.data.clone().cpu().numpy()
        # make sure that we do not initialize again by making sure that
        # the network is initialized and by using partial_fit.
        assert net.initialized_
        net.partial_fit(X, y)
        d2 = net.module_.nonlin.weight.data.clone().cpu().numpy()
        # all newly introduced parameters should have been trained (changed)
        # by the optimizer after 10 epochs.
        assert (abs(d2 - d1) > 1e-05).all()
def test_setting_optimizer_needs_model(self, net_cls, module_cls):
net = net_cls(module_cls)
assert not hasattr(net, 'module_')
# should not break
net.set_params(optimizer=torch.optim.SGD)
def test_setting_lr_after_init_reflected_in_optimizer(
self, net_cls, module_cls):
# Fixes a bug that occurred when using set_params(lr=new_lr)
# after initialization: The new lr was not reflected in the
# optimizer.
net = net_cls(module_cls).initialize()
net.set_params(lr=10)
assert net.lr == 10
pg_lrs = [pg['lr'] for pg in net.optimizer_.param_groups]
for pg_lr in pg_lrs:
assert pg_lr == 10
    @pytest.mark.parametrize('kwargs,expected', [
        ({}, ""),
        (
            # virtual params should prevent re-initialization
            {'optimizer__lr': 0.12, 'optimizer__momentum': 0.34},
            ("")
        ),
        (
            {'module__input_units': 12, 'module__hidden_units': 34},
            ("Re-initializing module because the following "
             "parameters were re-set: module__hidden_units, module__input_units.\n"
             "Re-initializing criterion.\n"
             "Re-initializing optimizer.")
        ),
        (
            {'criterion__reduce': False, 'criterion__size_average': True},
            ("Re-initializing criterion because the following "
             "parameters were re-set: criterion__reduce, criterion__size_average.\n"
             "Re-initializing optimizer.")
        ),
        (
            {'module__input_units': 12, 'criterion__reduce': True,
             'optimizer__momentum': 0.56},
            ("Re-initializing module because the following "
             "parameters were re-set: module__input_units.\n"
             "Re-initializing criterion.\n"
             "Re-initializing optimizer.")
        ),
    ])
    def test_reinitializing_module_optimizer_message(
            self, net_cls, module_cls, kwargs, expected, capsys):
        """Re-setting module/criterion params prints which params caused re-init."""
        # When net is initialized, if module, criterion, or optimizer need to be
        # re-initialized, alert the user to the fact what parameters were
        # responsible for re-initialization. Note that when the module/criterion
        # parameters but not optimizer parameters were changed, the optimizer is
        # re-initialized but not because the optimizer parameters changed.
        net = net_cls(module_cls).initialize()
        net.set_params(**kwargs)
        msg = capsys.readouterr()[0].strip()
        assert msg == expected
@pytest.mark.parametrize('kwargs', [
{},
{'module__input_units': 12, 'module__hidden_units': 34},
{'lr': 0.12},
{'optimizer__lr': 0.12},
{'module__input_units': 12, 'lr': 0.56},
])
def test_reinitializing_module_optimizer_not_initialized_no_message(
self, net_cls, module_cls, kwargs, capsys):
# When net is *not* initialized, set_params on module or
# optimizer should not trigger a message.
net = net_cls(module_cls)
net.set_params(**kwargs)
msg = capsys.readouterr()[0].strip()
assert msg == ""
    @pytest.mark.parametrize('kwargs, expected', [
        ({}, ""),  # no param, no message
        ({'lr': 0.12}, ""),  # virtual param
        ({'optimizer__lr': 0.12}, ""),  # virtual param
        ({'module__input_units': 12}, "Re-initializing optimizer."),
        ({'module__input_units': 12, 'lr': 0.56}, "Re-initializing optimizer."),
    ])
    def test_reinitializing_module_optimizer_when_initialized_message(
            self, net_cls, module_cls, kwargs, expected, capsys):
        """On an initialized net, module param changes announce optimizer re-init."""
        # When the net *is* initialized, set_params on module should trigger a
        # message
        net = net_cls(module_cls).initialize()
        net.set_params(**kwargs)
        msg = capsys.readouterr()[0].strip()
        # don't check the whole message since it may contain other bits not
        # tested here
        assert expected in msg
def test_set_params_on_uninitialized_net_doesnt_initialize(self, net_cls, module_cls):
# It used to be the case that setting a parameter on, say, the module
# would always (re-)initialize the module, even if the whole net was not
# initialized yet. This is unnecessary at best and can break things at
# worst.
net = net_cls(module_cls)
net.set_params(module__input_units=12)
assert not net.initialized_
assert not hasattr(net, 'module_')
def test_optimizer_param_groups(self, net_cls, module_cls):
net = net_cls(
module_cls,
optimizer__param_groups=[
('sequential.0.*', {'lr': 0.1}),
('sequential.3.*', {'lr': 0.5}),
],
)
net.initialize()
# two custom (1st linear, 2nd linear), one default with the
# rest of the parameters (output).
assert len(net.optimizer_.param_groups) == 3
assert net.optimizer_.param_groups[0]['lr'] == 0.1
assert net.optimizer_.param_groups[1]['lr'] == 0.5
assert net.optimizer_.param_groups[2]['lr'] == net.lr
def test_module_params_in_init(self, net_cls, module_cls, data):
X, y = data
net = net_cls(
module=module_cls,
module__hidden_units=20,
module__nonlin=nn.Tanh(),
)
net.fit(X, y)
assert net.module_.sequential[0].out_features == 20
assert net.module_.sequential[3].in_features == 20
assert isinstance(net.module_.sequential[1], nn.Tanh)
def test_module_initialized_with_partial_module(self, net_cls, module_cls):
net = net_cls(partial(module_cls, hidden_units=123))
net.initialize()
assert net.module_.sequential[0].out_features == 123
def test_criterion_init_with_params(self, net_cls, module_cls):
call_count = 0
class MyCriterion(nn.Module):
def __init__(self, spam=None):
nonlocal call_count
super().__init__()
self.spam = spam
call_count += 1
net = net_cls(module_cls, criterion=MyCriterion, criterion__spam='eggs')
net.initialize()
assert call_count == 1
assert net.criterion_.spam == 'eggs'
def test_criterion_set_params(self, net_cls, module_cls):
call_count = 0
class MyCriterion(nn.Module):
def __init__(self, spam=None):
nonlocal call_count
super().__init__()
self.spam = spam
call_count += 1
net = net_cls(module_cls, criterion=MyCriterion)
net.initialize()
net.set_params(criterion__spam='eggs')
assert call_count == 2
assert net.criterion_.spam == 'eggs'
def test_criterion_non_module(self, net_cls, module_cls, data):
# test non-nn.Module classes passed as criterion
class SimpleCriterion:
def __call__(self, y_pred, y_true):
return y_pred.mean()
net = net_cls(module_cls, criterion=SimpleCriterion)
net.initialize()
net.fit(*data)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_criterion_params_on_device(self, net_cls, module_cls, device):
# attributes like criterion.weight should be automatically moved
# to the Net's device.
criterion = torch.nn.NLLLoss
weight = torch.ones(2)
net = net_cls(
module_cls,
criterion=criterion,
criterion__weight=weight,
device=device,
)
assert weight.device.type == 'cpu'
net.initialize()
assert net.criterion_.weight.device.type == device
def test_callback_with_name_init_with_params(self, net_cls, module_cls):
mock = Mock()
net = net_cls(
module_cls,
criterion=Mock(),
callbacks=[('cb0', mock)],
callbacks__cb0__spam='eggs',
)
net.initialize()
assert mock.initialize.call_count == 1
assert mock.set_params.call_args_list[0][1]['spam'] == 'eggs'
def test_callback_set_params(self, net_cls, module_cls):
mock = Mock()
net = net_cls(
module_cls,
criterion=Mock(),
callbacks=[('cb0', mock)],
)
net.initialize()
net.set_params(callbacks__cb0__spam='eggs')
assert mock.initialize.call_count == 2 # callbacks are re-initialized
assert mock.set_params.call_args_list[-1][1]['spam'] == 'eggs'
def test_callback_name_collides_with_default(self, net_cls, module_cls):
net = net_cls(module_cls, callbacks=[('train_loss', Mock())])
with pytest.raises(ValueError) as exc:
net.initialize()
expected = ("Found duplicate user-set callback name 'train_loss'. "
"Use unique names to correct this.")
assert str(exc.value) == expected
def test_callback_same_inferred_name_twice(self, net_cls, module_cls):
cb0 = Mock()
cb1 = Mock()
cb0.__class__.__name__ = 'some-name'
cb1.__class__.__name__ = 'some-name'
net = net_cls(module_cls, callbacks=[cb0, cb1])
net.initialize()
cbs = dict(net.callbacks_)
assert 'some-name_1' in cbs
assert 'some-name_2' in cbs
assert cbs['some-name_1'] is cb0
assert cbs['some-name_2'] is cb1
def test_callback_keeps_order(self, net_cls, module_cls):
cb0 = Mock()
cb1 = Mock()
cb0.__class__.__name__ = 'B-some-name'
cb1.__class__.__name__ = 'A-some-name'
net = net_cls(module_cls, callbacks=[cb0, cb1])
net.initialize()
cbs_names = [name for name, _ in net.callbacks_]
expected_names = ['epoch_timer', 'train_loss', 'valid_loss',
'valid_acc', 'B-some-name', 'A-some-name',
'print_log']
assert expected_names == cbs_names
def test_callback_custom_name_is_untouched(self, net_cls, module_cls):
callbacks = [('cb0', Mock()),
('cb0', Mock())]
net = net_cls(module_cls, callbacks=callbacks)
with pytest.raises(ValueError) as exc:
net.initialize()
expected = ("Found duplicate user-set callback name 'cb0'. "
"Use unique names to correct this.")
assert str(exc.value) == expected
def test_callback_unique_naming_avoids_conflicts(
self, net_cls, module_cls):
# pylint: disable=invalid-name
from skorch.callbacks import Callback
class cb0(Callback):
pass
class cb0_1(Callback):
pass
callbacks = [cb0(), cb0(), cb0_1()]
net = net_cls(module_cls, callbacks=callbacks)
with pytest.raises(ValueError) as exc:
net.initialize()
expected = ("Assigning new callback name failed "
"since new name 'cb0_1' exists already.")
assert str(exc.value) == expected
def test_in_sklearn_pipeline(self, pipe, data):
X, y = data
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.set_params(net__module__hidden_units=20)
def test_grid_search_works(self, net_cls, module_cls, data):
net = net_cls(module_cls)
X, y = data
params = {
'lr': [0.01, 0.02],
'max_epochs': [10, 20],
'module__hidden_units': [10, 20],
}
gs = GridSearchCV(net, params, refit=True, cv=3, scoring='accuracy')
gs.fit(X[:100], y[:100]) # for speed
print(gs.best_score_, gs.best_params_)
def test_change_get_loss(self, net_cls, module_cls, data):
from skorch.utils import to_tensor
class MyNet(net_cls):
# pylint: disable=unused-argument
def get_loss(self, y_pred, y_true, X=None, training=False):
y_true = to_tensor(y_true, device='cpu')
loss_a = torch.abs(y_true.float() - y_pred[:, 1]).mean()
loss_b = ((y_true.float() - y_pred[:, 1]) ** 2).mean()
if training:
self.history.record_batch('loss_a', to_numpy(loss_a))
self.history.record_batch('loss_b', to_numpy(loss_b))
return loss_a + loss_b
X, y = data
net = MyNet(module_cls, max_epochs=1)
net.fit(X, y)
diffs = []
all_losses = net.history[
-1, 'batches', :, ('train_loss', 'loss_a', 'loss_b')]
diffs = [total - a - b for total, a, b in all_losses]
assert np.allclose(diffs, 0, atol=1e-7)
def test_net_no_valid(self, net_cls, module_cls, data):
net = net_cls(
module_cls,
max_epochs=10,
lr=0.1,
train_split=None,
)
X, y = data
net.fit(X, y)
assert net.history[:, 'train_loss']
with pytest.raises(KeyError):
# pylint: disable=pointless-statement
net.history[:, 'valid_loss']
@pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
def test_use_cuda_on_model(self, net_cls, module_cls):
net_cuda = net_cls(module_cls, device='cuda')
net_cuda.initialize()
net_cpu = net_cls(module_cls, device='cpu')
net_cpu.initialize()
cpu_tensor = net_cpu.module_.sequential[0].weight.data
assert isinstance(cpu_tensor, torch.FloatTensor)
gpu_tensor = net_cuda.module_.sequential[0].weight.data
assert isinstance(gpu_tensor, torch.cuda.FloatTensor)
    def test_get_params_works(self, net_cls, module_cls):
        """get_params(deep=True) exposes callback params once initialized."""
        from skorch.callbacks import EpochScoring
        net = net_cls(
            module_cls, callbacks=[('myscore', EpochScoring('myscore'))])
        params = net.get_params(deep=True)
        # test a couple of expected parameters
        assert 'verbose' in params
        assert 'module' in params
        assert 'callbacks' in params
        assert 'callbacks__print_log__sink' in params
        # not yet initialized: user callback params are not resolved yet
        assert 'callbacks__myscore__scoring' not in params
        net.initialize()
        params = net.get_params(deep=True)
        # now initialized: the user callback's params appear
        assert 'callbacks__myscore__scoring' in params
def test_get_params_no_unwanted_params(self, net, net_fit):
# #781
# make sure certain keys are not returned
keys_unwanted = {'_modules', '_criteria', '_optimizers'}
for net_ in (net, net_fit):
keys_found = set(net_.get_params())
overlap = keys_found & keys_unwanted
assert not overlap
def test_get_params_with_uninit_callbacks(self, net_cls, module_cls):
from skorch.callbacks import EpochTimer
net = net_cls(
module_cls,
callbacks=[EpochTimer, ('other_timer', EpochTimer)],
)
# none of this raises an exception
net = clone(net)
net.get_params()
net.initialize()
net.get_params()
def test_get_params_no_learned_params(self, net_fit):
params = net_fit.get_params()
params_learned = set(filter(lambda x: x.endswith('_'), params))
assert not params_learned
def test_clone_results_in_uninitialized_net(
self, net_fit, data):
X, y = data
accuracy = accuracy_score(net_fit.predict(X), y)
assert accuracy > ACCURACY_EXPECTED # make sure net has learned
net_cloned = clone(net_fit).set_params(max_epochs=0)
net_cloned.callbacks_ = []
net_cloned.partial_fit(X, y)
accuracy_cloned = accuracy_score(net_cloned.predict(X), y)
assert accuracy_cloned < ACCURACY_EXPECTED
assert not net_cloned.history
def test_clone_copies_parameters(self, net_cls, module_cls):
kwargs = dict(
module__hidden_units=20,
lr=0.2,
iterator_train__batch_size=123,
)
net = net_cls(module_cls, **kwargs)
net_cloned = clone(net)
params = net_cloned.get_params()
for key, val in kwargs.items():
assert params[key] == val
def test_with_initialized_module(self, net_cls, module_cls, data):
X, y = data
net = net_cls(module_cls(), max_epochs=1)
net.fit(X, y)
def test_with_initialized_module_other_params(self, net_cls, module_cls, data):
X, y = data
net = net_cls(module_cls(), max_epochs=1, module__hidden_units=123)
net.fit(X, y)
weight = net.module_.sequential[0].weight.data
assert weight.shape[0] == 123
def test_with_initialized_module_non_default(
self, net_cls, module_cls, data, capsys):
X, y = data
net = net_cls(module_cls(hidden_units=123), max_epochs=1)
net.fit(X, y)
weight = net.module_.sequential[0].weight.data
assert weight.shape[0] == 123
stdout = capsys.readouterr()[0]
assert "Re-initializing module!" not in stdout
def test_no_callbacks(self, net_cls, module_cls):
net = net_cls(module_cls, callbacks="disable")
net.initialize()
assert net.callbacks_ == []
def test_message_fit_with_initialized_net(
self, net_cls, module_cls, data, capsys):
net = net_cls(module_cls).initialize()
net.fit(*data)
stdout = capsys.readouterr()[0]
msg_module = "Re-initializing module"
assert msg_module in stdout
msg_optimizer = "Re-initializing optimizer"
assert msg_optimizer in stdout
# bug: https://github.com/skorch-dev/skorch/issues/436
not_expected = 'because the following parameters were re-set'
assert not_expected not in stdout
def test_with_initialized_module_partial_fit(
self, net_cls, module_cls, data, capsys):
X, y = data
module = module_cls(hidden_units=123)
net = net_cls(module, max_epochs=0)
net.partial_fit(X, y)
for p0, p1 in zip(module.parameters(), net.module_.parameters()):
assert p0.data.shape == p1.data.shape
assert (p0 == p1).data.all()
stdout = capsys.readouterr()[0]
assert "Re-initializing module!" not in stdout
def test_with_initialized_module_warm_start(
self, net_cls, module_cls, data, capsys):
X, y = data
module = module_cls(hidden_units=123)
net = net_cls(module, max_epochs=0, warm_start=True)
net.partial_fit(X, y)
for p0, p1 in zip(module.parameters(), net.module_.parameters()):
assert p0.data.shape == p1.data.shape
assert (p0 == p1).data.all()
stdout = capsys.readouterr()[0]
assert "Re-initializing module!" not in stdout
def test_with_initialized_sequential(self, net_cls, data, capsys):
X, y = data
module = nn.Sequential(
nn.Linear(X.shape[1], 10),
nn.ReLU(),
nn.Linear(10, 2),
nn.Softmax(dim=-1),
)
net = net_cls(module, max_epochs=1)
net.fit(X, y)
stdout = capsys.readouterr()[0]
assert "Re-initializing module!" not in stdout
    def test_call_fit_twice_retrains(self, net_cls, module_cls, data):
        """Without warm_start, a second fit() re-initializes the module."""
        # test that after second fit call, even without entering the
        # fit loop, parameters have changed (because the module was
        # re-initialized)
        X, y = data[0][:100], data[1][:100]
        net = net_cls(module_cls, warm_start=False).fit(X, y)
        params_before = net.module_.parameters()
        net.max_epochs = 0
        net.fit(X, y)
        params_after = net.module_.parameters()
        # history is wiped by re-initialization
        assert not net.history
        # every parameter tensor differs from its pre-refit counterpart
        for p0, p1 in zip(params_before, params_after):
            assert (p0 != p1).data.any()
def test_call_fit_twice_warmstart(self, net_cls, module_cls, data):
X, y = data[0][:100], data[1][:100]
net = net_cls(module_cls, warm_start=True).fit(X, y)
params_before = net.module_.parameters()
net.max_epochs = 0
net.fit(X, y)
params_after = net.module_.parameters()
assert len(net.history) == 10
for p0, p1 in zip(params_before, params_after):
assert (p0 == p1).data.all()
def test_partial_fit_first_call(self, net_cls, module_cls, data):
# It should be possible to partial_fit without calling fit first.
X, y = data[0][:100], data[1][:100]
# does not raise
net_cls(module_cls, warm_start=True).partial_fit(X, y)
def test_call_partial_fit_after_fit(self, net_cls, module_cls, data):
X, y = data[0][:100], data[1][:100]
net = net_cls(module_cls, warm_start=False).fit(X, y)
params_before = net.module_.parameters()
net.max_epochs = 0
net.partial_fit(X, y)
params_after = net.module_.parameters()
assert len(net.history) == 10
for p0, p1 in zip(params_before, params_after):
assert (p0 == p1).data.all()
@pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
def test_binary_classification_with_cuda(self, net_cls, module_cls, data):
X, y = data
assert y.ndim == 1
assert set(y) == {0, 1}
net = net_cls(module_cls, max_epochs=1, device='cuda')
# does not raise
net.fit(X, y)
def test_net_initialized_with_custom_dataset_args(
self, net_cls, module_cls, data, dataset_cls):
side_effect = []
class MyDataset(dataset_cls):
def __init__(self, *args, foo, **kwargs):
super().__init__(*args, **kwargs)
side_effect.append(foo)
net = net_cls(
module_cls,
dataset=MyDataset,
dataset__foo=123,
max_epochs=1,
)
net.fit(*data)
assert side_effect == [123]
    @pytest.mark.xfail(raises=ValueError)
    def test_net_initialized_with_initalized_dataset(
            self, net_cls, module_cls, data, dataset_cls):
        """Passing an already-initialized Dataset instance (known to fail)."""
        net = net_cls(
            module_cls,
            dataset=dataset_cls(*data),
            max_epochs=1,
            # Disable caching to highlight the issue with this
            # test case (mismatching size between y values)
            callbacks__valid_acc__use_caching=False,
        )
        # FIXME: When dataset is initialized, X and y do not matter
        # anymore
        net.fit(*data)  # should not raise
def test_net_initialized_with_partialed_dataset(
self, net_cls, module_cls, data, dataset_cls):
X, y = data
net = net_cls(
module_cls,
dataset=partial(dataset_cls, length=len(y)),
train_split=None,
max_epochs=1,
)
net.fit(X, y) # does not raise
def test_net_initialized_with_initalized_dataset_and_kwargs_raises(
self, net_cls, module_cls, data, dataset_cls):
net = net_cls(
module_cls,
dataset=dataset_cls(*data),
dataset__foo=123,
max_epochs=1,
)
with pytest.raises(TypeError) as exc:
net.fit(*data)
expected = ("Trying to pass an initialized Dataset while passing "
"Dataset arguments ({'foo': 123}) is not allowed.")
assert exc.value.args[0] == expected
def test_repr_uninitialized_works(self, net_cls, module_cls):
net = net_cls(
module_cls,
module__hidden_units=55,
)
result = net.__repr__()
expected = """<class 'skorch.classifier.NeuralNetClassifier'>[uninitialized](
module={},
module__hidden_units=55,
)""".format(module_cls)
assert result == expected
def test_repr_initialized_works(self, net_cls, module_cls):
net = net_cls(
module_cls,
module__hidden_units=42,
)
net.initialize()
result = net.__repr__()
expected = """<class 'skorch.classifier.NeuralNetClassifier'>[initialized](
module_=MLPModule(
(nonlin): ReLU()
(output_nonlin): Softmax()
(sequential): Sequential(
(0): Linear(in_features=20, out_features=42, bias=True)
(1): ReLU()
(2): Dropout(p=0.5)
(3): Linear(in_features=42, out_features=42, bias=True)
(4): ReLU()
(5): Dropout(p=0.5)
(6): Linear(in_features=42, out_features=2, bias=True)
(7): Softmax()
)
),
)"""
expected = expected.replace("Softmax()", "Softmax(dim=-1)")
expected = expected.replace("Dropout(p=0.5)",
"Dropout(p=0.5, inplace=False)")
assert result == expected
def test_repr_fitted_works(self, net_cls, module_cls, data):
X, y = data
net = net_cls(
module_cls,
module__hidden_units=11,
module__nonlin=nn.PReLU(),
)
net.fit(X[:50], y[:50])
result = net.__repr__()
expected = """<class 'skorch.classifier.NeuralNetClassifier'>[initialized](
module_=MLPModule(
(nonlin): PReLU(num_parameters=1)
(output_nonlin): Softmax(dim=-1)
(sequential): Sequential(
(0): Linear(in_features=20, out_features=11, bias=True)
(1): PReLU(num_parameters=1)
(2): Dropout(p=0.5, inplace=False)
(3): Linear(in_features=11, out_features=11, bias=True)
(4): PReLU(num_parameters=1)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=11, out_features=2, bias=True)
(7): Softmax(dim=-1)
)
),
)"""
assert result == expected
def test_fit_params_passed_to_module(self, net_cls, data):
from skorch.toy import MLPModule
X, y = data
side_effect = []
class FPModule(MLPModule):
# pylint: disable=arguments-differ
def forward(self, X, **fit_params):
side_effect.append(fit_params)
return super().forward(X)
net = net_cls(FPModule, max_epochs=1, batch_size=50, train_split=None)
# remove callbacks to have better control over side_effect
net.initialize()
net.callbacks_ = []
net.fit(X[:100], y[:100], foo=1, bar=2)
net.fit(X[:100], y[:100], bar=3, baz=4)
assert len(side_effect) == 4 # 2 epochs à 2 batches
assert side_effect[0] == dict(foo=1, bar=2)
assert side_effect[1] == dict(foo=1, bar=2)
assert side_effect[2] == dict(bar=3, baz=4)
assert side_effect[3] == dict(bar=3, baz=4)
def test_fit_params_passed_to_module_in_pipeline(self, net_cls, data):
from skorch.toy import MLPModule
X, y = data
side_effect = []
class FPModule(MLPModule):
# pylint: disable=arguments-differ
def forward(self, X, **fit_params):
side_effect.append(fit_params)
return super().forward(X)
net = net_cls(FPModule, max_epochs=1, batch_size=50, train_split=None)
net.initialize()
net.callbacks_ = []
pipe = Pipeline([
('net', net),
])
pipe.fit(X[:100], y[:100], net__foo=1, net__bar=2)
pipe.fit(X[:100], y[:100], net__bar=3, net__baz=4)
assert len(side_effect) == 4 # 2 epochs à 2 batches
assert side_effect[0] == dict(foo=1, bar=2)
assert side_effect[1] == dict(foo=1, bar=2)
assert side_effect[2] == dict(bar=3, baz=4)
assert side_effect[3] == dict(bar=3, baz=4)
def test_fit_params_passed_to_train_split(self, net_cls, data):
from skorch.toy import MLPModule
X, y = data
side_effect = []
# pylint: disable=unused-argument
def fp_train_split(dataset, y=None, **fit_params):
side_effect.append(fit_params)
return dataset, dataset
class FPModule(MLPModule):
# pylint: disable=unused-argument,arguments-differ
def forward(self, X, **fit_params):
return super().forward(X)
net = net_cls(
FPModule,
max_epochs=1,
batch_size=50,
train_split=fp_train_split,
)
net.initialize()
net.callbacks_ = []
net.fit(X[:100], y[:100], foo=1, bar=2)
net.fit(X[:100], y[:100], bar=3, baz=4)
assert len(side_effect) == 2 # 2 epochs
assert side_effect[0] == dict(foo=1, bar=2)
assert side_effect[1] == dict(bar=3, baz=4)
def test_data_dict_and_fit_params(self, net_cls, data):
from skorch.toy import MLPModule
X, y = data
class FPModule(MLPModule):
# pylint: disable=unused-argument,arguments-differ
def forward(self, X0, X1, **fit_params):
assert fit_params.get('foo') == 3
return super().forward(X0)
net = net_cls(FPModule, max_epochs=1, batch_size=50, train_split=None)
# does not raise
net.fit({'X0': X, 'X1': X}, y, foo=3)
def test_data_dict_and_fit_params_conflicting_names_raises(
self, net_cls, data):
from skorch.toy import MLPModule
X, y = data
class FPModule(MLPModule):
# pylint: disable=unused-argument,arguments-differ
def forward(self, X0, X1, **fit_params):
return super().forward(X0)
net = net_cls(FPModule, max_epochs=1, batch_size=50, train_split=None)
with pytest.raises(ValueError) as exc:
net.fit({'X0': X, 'X1': X}, y, X1=3)
expected = "X and fit_params contain duplicate keys: X1"
assert exc.value.args[0] == expected
def test_fit_with_dataset(self, net_cls, module_cls, data, dataset_cls):
ds = dataset_cls(*data)
net = net_cls(module_cls, max_epochs=1)
net.fit(ds, data[1])
for key in ('train_loss', 'valid_loss', 'valid_acc'):
assert key in net.history[-1]
def test_predict_with_dataset(self, net_cls, module_cls, data, dataset_cls):
ds = dataset_cls(*data)
net = net_cls(module_cls).initialize()
y_pred = net.predict(ds)
y_proba = net.predict_proba(ds)
assert y_pred.shape[0] == len(ds)
assert y_proba.shape[0] == len(ds)
    def test_fit_with_dataset_X_y_inaccessible_does_not_raise(
            self, net_cls, module_cls, data):
        """fit works even when the Dataset hides X and y behind other names."""
        class MyDataset(torch.utils.data.Dataset):
            """Dataset with inaccessible X and y"""
            def __init__(self, X, y):
                self.xx = X  # incorrect attribute name
                self.yy = y  # incorrect attribute name
            def __len__(self):
                return len(self.xx)
            def __getitem__(self, i):
                return self.xx[i], self.yy[i]
        ds = MyDataset(*data)
        net = net_cls(module_cls, max_epochs=1)
        net.fit(ds, data[1])  # does not raise
def test_fit_with_dataset_without_explicit_y(
self, net_cls, module_cls, dataset_cls, data):
from skorch.dataset import ValidSplit
net = net_cls(
module_cls,
max_epochs=1,
train_split=ValidSplit(stratified=False),
)
ds = dataset_cls(*data)
net.fit(ds, None) # does not raise
for key in ('train_loss', 'valid_loss', 'valid_acc'):
assert key in net.history[-1]
def test_fit_with_dataset_stratified_without_explicit_y_raises(
self, net_cls, module_cls, dataset_cls, data):
from skorch.dataset import ValidSplit
net = net_cls(
module_cls,
train_split=ValidSplit(stratified=True),
)
ds = dataset_cls(*data)
with pytest.raises(ValueError) as exc:
net.fit(ds, None)
msg = "Stratified CV requires explicitly passing a suitable y."
assert exc.value.args[0] == msg
@pytest.fixture
def dataset_1_item(self):
class Dataset(torch.utils.data.Dataset):
def __len__(self):
return 100
def __getitem__(self, i):
return 0.0
return Dataset
def test_fit_with_dataset_one_item_error(
self, net_cls, module_cls, dataset_1_item):
net = net_cls(module_cls, train_split=None)
with pytest.raises(ValueError) as exc:
net.fit(dataset_1_item(), None)
msg = ("You are using a non-skorch dataset that returns 1 value. "
"Remember that for skorch, Dataset.__getitem__ must return "
"exactly 2 values, X and y (more info: "
"https://skorch.readthedocs.io/en/stable/user/dataset.html).")
assert exc.value.args[0] == msg
def test_predict_with_dataset_one_item_error(
self, net_cls, module_cls, dataset_1_item):
net = net_cls(module_cls, train_split=None).initialize()
with pytest.raises(ValueError) as exc:
net.predict(dataset_1_item())
msg = ("You are using a non-skorch dataset that returns 1 value. "
"Remember that for skorch, Dataset.__getitem__ must return "
"exactly 2 values, X and y (more info: "
"https://skorch.readthedocs.io/en/stable/user/dataset.html).")
assert exc.value.args[0] == msg
@pytest.fixture
def dataset_3_items(self):
class Dataset(torch.utils.data.Dataset):
def __len__(self):
return 100
def __getitem__(self, i):
return 0.0, 0.0, 0.0
return Dataset
def test_fit_with_dataset_three_items_error(
self, net_cls, module_cls, dataset_3_items):
net = net_cls(module_cls, train_split=None)
with pytest.raises(ValueError) as exc:
net.fit(dataset_3_items(), None)
msg = ("You are using a non-skorch dataset that returns 3 values. "
"Remember that for skorch, Dataset.__getitem__ must return "
"exactly 2 values, X and y (more info: "
"https://skorch.readthedocs.io/en/stable/user/dataset.html).")
assert exc.value.args[0] == msg
def test_predict_with_dataset_three_items_error(
self, net_cls, module_cls, dataset_3_items):
net = net_cls(module_cls, train_split=None).initialize()
with pytest.raises(ValueError) as exc:
net.predict(dataset_3_items())
msg = ("You are using a non-skorch dataset that returns 3 values. "
"Remember that for skorch, Dataset.__getitem__ must return "
"exactly 2 values, X and y (more info: "
"https://skorch.readthedocs.io/en/stable/user/dataset.html).")
assert exc.value.args[0] == msg
@pytest.fixture
def multiouput_net(self, net_cls, multiouput_module):
return net_cls(multiouput_module).initialize()
def test_multioutput_forward_iter(self, multiouput_net, data):
X = data[0]
y_infer = next(multiouput_net.forward_iter(X))
assert isinstance(y_infer, tuple)
assert len(y_infer) == 3
assert y_infer[0].shape[0] == min(len(X), multiouput_net.batch_size)
def test_multioutput_forward(self, multiouput_net, data):
X = data[0]
n = len(X)
y_infer = multiouput_net.forward(X)
assert isinstance(y_infer, tuple)
assert len(y_infer) == 3
for arr in y_infer:
assert is_torch_data_type(arr)
# Expecting full output: (number of samples, number of output units)
assert y_infer[0].shape == (n, 2)
# Expecting only column 0: (number of samples,)
assert y_infer[1].shape == (n,)
# Expecting only every other row: (number of samples/2, number
# of output units)
assert y_infer[2].shape == (n // 2, 2)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
def test_multioutput_forward_device_gpu(self, multiouput_net, data):
X = data[0]
y_infer = multiouput_net.forward(X, device='cuda:0')
assert isinstance(y_infer, tuple)
assert len(y_infer) == 3
for arr in y_infer:
assert arr.is_cuda
def test_multioutput_predict(self, multiouput_net, data):
X = data[0]
n = len(X)
# does not raise
y_pred = multiouput_net.predict(X)
# Expecting only 1 column containing predict class:
# (number of samples,)
assert y_pred.shape == (n,)
assert set(y_pred) == {0, 1}
def test_multiouput_predict_proba(self, multiouput_net, data):
X = data[0]
n = len(X)
# does not raise
y_proba = multiouput_net.predict_proba(X)
# Expecting full output: (number of samples, number of output units)
assert y_proba.shape == (n, 2)
# Probabilities, hence these limits
assert y_proba.min() >= 0
assert y_proba.max() <= 1
def test_setting_callback_possible(self, net_cls, module_cls):
from skorch.callbacks import EpochTimer, PrintLog
net = net_cls(module_cls, callbacks=[('mycb', PrintLog())])
net.initialize()
assert isinstance(dict(net.callbacks_)['mycb'], PrintLog)
net.set_params(callbacks__mycb=EpochTimer())
assert isinstance(dict(net.callbacks_)['mycb'], EpochTimer)
def test_setting_callback_default_possible(self, net_cls, module_cls):
from skorch.callbacks import EpochTimer, PrintLog
net = net_cls(module_cls)
net.initialize()
assert isinstance(dict(net.callbacks_)['print_log'], PrintLog)
net.set_params(callbacks__print_log=EpochTimer())
assert isinstance(dict(net.callbacks_)['print_log'], EpochTimer)
    def test_setting_callback_to_none_possible(self, net_cls, module_cls, data):
        """A callback can be deactivated by setting it to None via set_params."""
        from skorch.callbacks import Callback
        X, y = data[0][:30], data[1][:30]  # accelerate test
        side_effects = []
        class DummyCallback(Callback):
            # records its id on every epoch end so we can observe which
            # callbacks were active during fitting
            def __init__(self, i):
                self.i = i
            # pylint: disable=unused-argument, arguments-differ
            def on_epoch_end(self, *args, **kwargs):
                side_effects.append(self.i)
        net = net_cls(
            module_cls,
            max_epochs=2,
            callbacks=[
                ('cb0', DummyCallback(0)),
                ('cb1', DummyCallback(1)),
                ('cb2', DummyCallback(2)),
            ],
        )
        net.fit(X, y)
        # all 3 callbacks write to output twice (once per epoch)
        assert side_effects == [0, 1, 2, 0, 1, 2]
        # deactivate cb1
        side_effects.clear()
        net.set_params(callbacks__cb1=None)
        net.fit(X, y)
        # only cb0 and cb2 fire now
        assert side_effects == [0, 2, 0, 2]
def test_setting_callback_to_none_and_more_params_during_init_raises(
self, net_cls, module_cls):
# if a callback is set to None, setting more params for it
# should not work
net = net_cls(
module_cls, callbacks__print_log=None, callbacks__print_log__sink=1)
with pytest.raises(ValueError) as exc:
net.initialize()
msg = ("Trying to set a parameter for callback print_log "
"which does not exist.")
assert exc.value.args[0] == msg
    def test_setting_callback_to_none_and_more_params_later_raises(
            self, net_cls, module_cls):
        """Setting params for a callback after it was set to None raises."""
        # this should work: params first, then deactivation
        net = net_cls(module_cls)
        net.set_params(callbacks__print_log__sink=123)
        net.set_params(callbacks__print_log=None)
        # the other way around should raise
        net = net_cls(module_cls)
        net.set_params(callbacks__print_log=None)
        with pytest.raises(ValueError) as exc:
            net.set_params(callbacks__print_log__sink=123)
        msg = ("Trying to set a parameter for callback print_log "
               "which does not exist.")
        assert exc.value.args[0] == msg
    def test_set_params_on_init_net_normal_param_works(self, net_cls, module_cls):
        """Non-component params like max_epochs can be set after initialize."""
        # setting "normal" arguments like max_epoch works on an initialized net
        net = net_cls(module_cls).initialize()
        net.set_params(max_epochs=3, callbacks=[])  # does not raise
        net.initialize()
def test_set_params_with_unknown_key_raises(self, net):
with pytest.raises(ValueError) as exc:
net.set_params(foo=123)
msg = exc.value.args[0]
# message contains "'" around variable name starting from sklearn 1.1
assert (
msg.startswith("Invalid parameter foo for")
or msg.startswith("Invalid parameter 'foo' for")
)
    @pytest.fixture()
    def sequence_module_cls(self):
        """Simple sequence model with variable size dim 1."""
        class Mod(torch.nn.Module):
            """Module whose output sequence length varies randomly per call."""
            def __init__(self):
                super().__init__()
                self.l = torch.nn.Linear(1, 1)
            # pylint: disable=arguments-differ
            def forward(self, x):
                # sequence length is random in [1, 3]; the linear output is
                # multiplied by 0 and only serves to keep the graph connected
                n = np.random.randint(1, 4)
                y = self.l(x.float())
                return torch.randn(1, n, 2) + 0 * y
        return Mod
    def test_net_variable_prediction_lengths(
            self, net_cls, sequence_module_cls):
        """Fitting works with fixed y_true but varying y_pred sequences."""
        # neural net should work fine with fixed y_true but varying y_pred
        # sequences.
        X = np.array([1, 5, 3, 6, 2])
        y = np.array([[0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 0], [0, 1, 0]])
        X, y = X[:, np.newaxis], y[:, :, np.newaxis]
        X, y = X.astype('float32'), y.astype('float32')
        net = net_cls(
            sequence_module_cls,
            batch_size=1,
            max_epochs=2,
            train_split=None,
        )
        # Mock loss function that only looks at the first element, so the
        # varying length of y_pred doesn't matter
        # pylint: disable=unused-argument
        def loss_fn(y_pred, y_true, **kwargs):
            return y_pred[:, 0, 0]
        net.get_loss = loss_fn
        net.fit(X, y)
    def test_net_variable_label_lengths(self, net_cls, sequence_module_cls):
        """Fitting works with variable length y_true sequences."""
        # neural net should work fine with variable length y_true sequences.
        X = np.array([1, 5, 3, 6, 2])
        y = np.array([[1], [1, 0, 1], [1, 1], [1, 1, 0], [1, 0]], dtype=object)
        X = X[:, np.newaxis].astype('float32')
        y = np.array(
            [np.array(n, dtype='float32')[:, np.newaxis] for n in y], dtype=object
        )
        net = net_cls(
            sequence_module_cls,
            batch_size=1,
            max_epochs=2,
            train_split=None,
        )
        # Mock loss function that only looks at the first element
        # pylint: disable=unused-argument
        def loss_fn(y_pred, y_true, **kwargs):
            return y_pred[:, 0, 0]
        net.get_loss = loss_fn
        # check_data complains about y.shape = (n,) but
        # we know that it is actually (n, m) with m in [1;3].
        net.check_data = lambda *_, **kw: None
        net.fit(X, y)
    def test_no_grad_during_validation(self, net_cls, module_cls, data):
        """Test that gradient is only calculated during training step,
        not validation step."""
        # pylint: disable=unused-argument
        def check_grad(*args, loss, training, **kwargs):
            if training:
                assert loss.requires_grad
            else:
                assert not loss.requires_grad
        # the callback is invoked on every batch, train and valid alike
        mock_cb = Mock(on_batch_end=check_grad)
        net = net_cls(module_cls, max_epochs=1, callbacks=[mock_cb])
        net.fit(*data)
    def test_callback_on_grad_computed(self, net_cls, module_cls, data):
        """on_grad_computed receives the module's named parameters."""
        module = module_cls()
        expected_names = set(name for name, _ in module.named_parameters())
        def on_grad_computed(*args, named_parameters, **kwargs):
            names = set(name for name, _ in named_parameters)
            assert expected_names == names
        mock_cb = Mock(on_grad_computed=on_grad_computed)
        net = net_cls(module, max_epochs=1, callbacks=[mock_cb])
        net.fit(*data)
    @pytest.mark.parametrize('training', [True, False])
    def test_no_grad_during_evaluation_unless_training(
            self, net_cls, module_cls, data, training):
        """Test that gradient is only calculated in training mode
        during evaluation step."""
        from skorch.utils import to_tensor
        net = net_cls(module_cls).initialize()
        Xi = to_tensor(data[0][:3], device='cpu')
        batch = Xi, None
        y_eval = net.evaluation_step(batch, training=training)
        # requires_grad mirrors the training flag passed to evaluation_step
        assert y_eval.requires_grad is training
    @pytest.mark.parametrize(
        'net_kwargs,expected_train_batch_size,expected_valid_batch_size',
        [
            ({'batch_size': -1}, 800, 200),
            ({'iterator_train__batch_size': -1}, 800, 128),
            ({'iterator_valid__batch_size': -1}, 128, 200),
        ]
    )
    def test_batch_size_neg_1_uses_whole_dataset(
            self, net_cls, module_cls, data, net_kwargs,
            expected_train_batch_size, expected_valid_batch_size):
        """batch_size=-1 uses the whole train/valid split as a single batch."""
        # NOTE(review): the expected values 800/200/128 presumably assume 1000
        # samples, an 80/20 train/valid split, and a default batch size of 128
        # -- confirm against the 'data' fixture
        train_loader_mock = Mock(side_effect=torch.utils.data.DataLoader)
        valid_loader_mock = Mock(side_effect=torch.utils.data.DataLoader)
        net = net_cls(module_cls, max_epochs=1,
                      iterator_train=train_loader_mock,
                      iterator_valid=valid_loader_mock,
                      **net_kwargs)
        net.fit(*data)
        train_batch_size = net.history[:, 'batches', :, 'train_batch_size'][0][0]
        valid_batch_size = net.history[:, 'batches', :, 'valid_batch_size'][0][0]
        assert train_batch_size == expected_train_batch_size
        assert valid_batch_size == expected_valid_batch_size
        # also check the batch size actually passed to the data loaders
        # pylint: disable=unsubscriptable-object
        train_kwargs = train_loader_mock.call_args[1]
        valid_kwargs = valid_loader_mock.call_args[1]
        assert train_kwargs['batch_size'] == expected_train_batch_size
        assert valid_kwargs['batch_size'] == expected_valid_batch_size
    @pytest.mark.parametrize('batch_size', [40, 100])
    def test_batch_count(self, net_cls, module_cls, data, batch_size):
        """History records the number of train and valid batches per epoch."""
        net = net_cls(module_cls, max_epochs=1, batch_size=batch_size)
        X, y = data
        net.fit(X, y)
        # expected counts assume the default 80/20 train/valid split
        train_batch_count = int(0.8 * len(X)) / batch_size
        valid_batch_count = int(0.2 * len(X)) / batch_size
        assert net.history[:, "train_batch_count"] == [train_batch_count]
        assert net.history[:, "valid_batch_count"] == [valid_batch_count]
    @flaky(max_runs=5)
    def test_fit_lbfgs_optimizer(self, net_cls, module_cls, data):
        """Fitting with the closure-based LBFGS optimizer works."""
        # need to randomize the seed, otherwise flaky always runs with
        # the exact same seed
        torch.manual_seed(int(time.time()))
        X, y = data
        net = net_cls(
            module_cls,
            optimizer=torch.optim.LBFGS,
            lr=1.0,
            batch_size=-1,
        )
        net.fit(X, y)
        # loose thresholds: we only check that training made progress
        last_epoch = net.history[-1]
        assert last_epoch['train_loss'] < 1.0
        assert last_epoch['valid_loss'] < 1.0
        assert last_epoch['valid_acc'] > 0.75
    def test_accumulator_that_returns_last_value(
            self, net_cls, module_cls, data):
        """A custom train step accumulator decides which step result is kept."""
        # We define an optimizer that calls the step function 3 times
        # and an accumulator that returns the last of those calls. We
        # then test that the correct values were stored.
        from skorch.utils import FirstStepAccumulator
        side_effect = []
        class SGD3Calls(torch.optim.SGD):
            """SGD variant that performs 3 step calls per optimization."""
            def step(self, closure=None):
                for _ in range(3):
                    loss = super().step(closure)
                    side_effect.append(float(loss))
        class MyAccumulator(FirstStepAccumulator):
            """Accumulate all steps and return the last."""
            def store_step(self, step):
                if self.step is None:
                    self.step = [step]
                else:
                    self.step.append(step)
            def get_step(self):
                # Losses should only ever be retrieved after storing 3
                # times.
                assert len(self.step) == 3
                return self.step[-1]
        X, y = data
        max_epochs = 2
        batch_size = 100
        net = net_cls(
            module_cls,
            optimizer=SGD3Calls,
            max_epochs=max_epochs,
            batch_size=batch_size,
            train_split=None,
        )
        net.get_train_step_accumulator = MyAccumulator
        net.fit(X, y)
        # Number of loss calculations is total number of batches x 3.
        num_batches_per_epoch = int(np.ceil(len(y) / batch_size))
        expected_calls = 3 * num_batches_per_epoch * max_epochs
        assert len(side_effect) == expected_calls
        # Every 3rd loss calculation (i.e. the last per call) should
        # be stored in the history.
        expected_losses = list(
            flatten(net.history[:, 'batches', :, 'train_loss']))
        assert np.allclose(side_effect[2::3], expected_losses)
    @pytest.fixture
    def predefined_split(self):
        """Fixture returning skorch's predefined_split helper."""
        from skorch.helper import predefined_split
        return predefined_split
    def test_predefined_split(
            self, net_cls, module_cls, data, predefined_split, dataset_cls):
        """predefined_split makes the net validate on the given dataset."""
        train_loader_mock = Mock(side_effect=torch.utils.data.DataLoader)
        valid_loader_mock = Mock(side_effect=torch.utils.data.DataLoader)
        train_ds = dataset_cls(*data)
        valid_ds = dataset_cls(*data)
        net = net_cls(
            module_cls, max_epochs=1,
            iterator_train=train_loader_mock,
            iterator_valid=valid_loader_mock,
            train_split=predefined_split(valid_ds)
        )
        net.fit(train_ds, None)
        # the datasets passed to the data loaders must be exactly the ones
        # supplied above
        # pylint: disable=unsubscriptable-object
        train_loader_ds = train_loader_mock.call_args[0][0]
        valid_loader_ds = valid_loader_mock.call_args[0][0]
        assert train_loader_ds == train_ds
        assert valid_loader_ds == valid_ds
    def test_predefined_split_with_y(
            self, net_cls, module_cls, data, predefined_split, dataset_cls):
        """Regression test for #681: predefined_split works when y is passed."""
        # A change in the signature of utils._make_split in #646 led
        # to a bug reported in #681, namely `TypeError: _make_split()
        # got multiple values for argument 'valid_ds'`. This is a test
        # for the bug.
        X, y = data
        X_train, y_train, X_valid, y_valid = X[:800], y[:800], X[800:], y[800:]
        valid_ds = dataset_cls(X_valid, y_valid)
        net = net_cls(
            module_cls,
            max_epochs=1,
            train_split=predefined_split(valid_ds),
        )
        net.fit(X_train, y_train)
def test_set_lr_at_runtime_doesnt_reinitialize(self, net_fit):
with patch('skorch.NeuralNet.initialize_optimizer') as f:
net_fit.set_params(lr=0.9)
assert not f.called
def test_set_lr_at_runtime_sets_lr(self, net_fit):
new_lr = net_fit.lr + 1
net_fit.set_params(lr=new_lr)
assert net_fit.lr == new_lr
assert net_fit.optimizer_.param_groups[0]['lr'] == new_lr
def test_set_lr_at_runtime_sets_lr_via_pgroup_0(self, net_fit):
new_lr = net_fit.lr + 1
net_fit.set_params(optimizer__param_groups__0__lr=new_lr)
# note that setting group does not set global lr
assert net_fit.lr != new_lr
assert net_fit.optimizer_.param_groups[0]['lr'] == new_lr
    def test_set_lr_at_runtime_sets_lr_pgroups(self, net_cls, module_cls, data):
        """Each optimizer param group's lr can be updated independently."""
        lr_pgroup_0 = 0.1
        lr_pgroup_1 = 0.2
        lr_pgroup_0_new = 0.3
        lr_pgroup_1_new = 0.4
        net = net_cls(
            module_cls,
            lr=lr_pgroup_1,
            max_epochs=1,
            optimizer__param_groups=[
                ('sequential.0.*', {'lr': lr_pgroup_0}),
            ])
        net.fit(*data)
        # optimizer__param_groups=[g1] will create
        # - param group 0 matching the definition of g1
        # - param group 1 matching all other parameters
        assert net.optimizer_.param_groups[0]['lr'] == lr_pgroup_0
        assert net.optimizer_.param_groups[1]['lr'] == lr_pgroup_1
        net.set_params(optimizer__param_groups__0__lr=lr_pgroup_0_new)
        net.set_params(optimizer__param_groups__1__lr=lr_pgroup_1_new)
        assert net.optimizer_.param_groups[0]['lr'] == lr_pgroup_0_new
        assert net.optimizer_.param_groups[1]['lr'] == lr_pgroup_1_new
    def test_criterion_training_set_correctly(self, net_cls, module_cls, data):
        """The criterion's train/eval mode follows train and validation steps."""
        # check that criterion's training attribute is set correctly
        X, y = data[0][:50], data[1][:50]  # don't need all the data
        side_effect = []
        class MyCriterion(nn.NLLLoss):
            """Criterion that records its training attribute"""
            def forward(self, *args, **kwargs):
                side_effect.append(self.training)
                return super().forward(*args, **kwargs)
        net = net_cls(module_cls, criterion=MyCriterion, max_epochs=1)
        net.fit(X, y)
        # called once with training=True for train step, once with
        # training=False for validation step
        assert side_effect == [True, False]
        net.partial_fit(X, y)
        # same logic as before
        assert side_effect == [True, False, True, False]
def test_criterion_is_not_a_torch_module(self, net_cls, module_cls, data):
X, y = data[0][:50], data[1][:50] # don't need all the data
def my_criterion():
return torch.nn.functional.nll_loss
net = net_cls(module_cls, criterion=my_criterion, max_epochs=1)
net.fit(X, y) # does not raise
    @pytest.mark.parametrize('acc_steps', [1, 2, 3, 5, 10])
    def test_gradient_accumulation(self, net_cls, module_cls, data, acc_steps):
        """Weight updates can be performed only every acc_steps batches."""
        # Test if gradient accumulation technique is possible,
        # i.e. performing a weight update only every couple of
        # batches.
        mock_optimizer = Mock()
        class GradAccNet(net_cls):
            """Net that accumulates gradients"""
            def __init__(self, *args, acc_steps=acc_steps, **kwargs):
                super().__init__(*args, **kwargs)
                self.acc_steps = acc_steps
            def initialize_optimizer(self):
                # This is not necessary for gradient accumulation but
                # only for testing purposes: wrap the real optimizer in a
                # mock so step/zero_grad calls can be counted
                super().initialize_optimizer()
                # pylint: disable=access-member-before-definition
                self.true_optimizer_ = self.optimizer_
                mock_optimizer.step.side_effect = self.true_optimizer_.step
                mock_optimizer.zero_grad.side_effect = self.true_optimizer_.zero_grad
                self.optimizer_ = mock_optimizer
                return self
            def get_loss(self, *args, **kwargs):
                loss = super().get_loss(*args, **kwargs)
                # because only every nth step is optimized
                return loss / self.acc_steps
            def train_step(self, batch, **fit_params):
                """Perform gradient accumulation
                Only optimize every acc_steps-th batch.
                """
                # note that n_train_batches starts at 1 for each epoch
                n_train_batches = len(self.history[-1, 'batches'])
                step = self.train_step_single(batch, **fit_params)
                if n_train_batches % self.acc_steps == 0:
                    self.optimizer_.step()
                    self.optimizer_.zero_grad()
                return step
        max_epochs = 5
        net = GradAccNet(module_cls, max_epochs=max_epochs)
        X, y = data
        net.fit(X, y)
        n = len(X) * 0.8  # number of training samples
        b = np.ceil(n / net.batch_size)  # batches per epoch
        s = b // acc_steps  # number of acc steps per epoch
        calls_total = s * max_epochs
        calls_step = mock_optimizer.step.call_count
        calls_zero_grad = mock_optimizer.zero_grad.call_count
        assert calls_total == calls_step == calls_zero_grad
    def test_setattr_custom_module(self, net_cls, module_cls):
        """Setting/deleting a custom module (de)registers it on the net."""
        # creating a custom module should result in its registration
        net = net_cls(module_cls).initialize()
        assert 'mymodule' not in net.prefixes_
        assert 'mymodule_' not in net.cuda_dependent_attributes_
        assert 'mymodule' not in net._modules
        class MyNet(net_cls):
            def initialize_module(self):
                super().initialize_module()
                self.mymodule_ = module_cls()
                return self
        net = MyNet(module_cls).initialize()
        assert 'mymodule' in net.prefixes_
        assert 'mymodule_' in net.cuda_dependent_attributes_
        assert 'mymodule' in net._modules
        # deleting the attribute should undo the registration
        del net.mymodule_
        assert 'mymodule' not in net.prefixes_
        assert 'mymodule_' not in net.cuda_dependent_attributes_
        assert 'mymodule' not in net._modules
    def test_setattr_custom_criterion(self, net_cls, module_cls):
        """Setting/deleting a custom criterion (de)registers it on the net."""
        # creating a custom criterion should result in its registration
        net = net_cls(module_cls).initialize()
        assert 'mycriterion' not in net.prefixes_
        assert 'mycriterion_' not in net.cuda_dependent_attributes_
        assert 'mycriterion' not in net._criteria
        class MyNet(net_cls):
            def initialize_criterion(self):
                super().initialize_criterion()
                self.mycriterion_ = module_cls()
                return self
        net = MyNet(module_cls).initialize()
        assert 'mycriterion' in net.prefixes_
        assert 'mycriterion_' in net.cuda_dependent_attributes_
        assert 'mycriterion' in net._criteria
        # deleting the attribute should undo the registration
        del net.mycriterion_
        assert 'mycriterion' not in net.prefixes_
        assert 'mycriterion_' not in net.cuda_dependent_attributes_
        assert 'mycriterion' not in net._criteria
def test_setattr_custom_optimizer(self, net_cls, module_cls):
# creating a custom optimizer should result in its regiestration
net = net_cls(module_cls).initialize()
assert 'myoptimizer' not in net.prefixes_
assert 'myoptimizer_' not in net.cuda_dependent_attributes_
assert 'myoptimizer' not in net.prefixes_
class MyNet(net_cls):
def initialize_optimizer(self):
super().initialize_optimizer()
self.myoptimizer_ = torch.optim.SGD(self.module_.parameters(), lr=1)
return self
net = MyNet(module_cls).initialize()
assert 'myoptimizer' in net.prefixes_
assert 'myoptimizer_' in net.cuda_dependent_attributes_
assert 'myoptimizer' in net.prefixes_
del net.myoptimizer_
assert 'myoptimizer' not in net.prefixes_
assert 'myoptimizer_' not in net.cuda_dependent_attributes_
assert 'myoptimizer' not in net.prefixes_
    def test_custom_optimizer_virtual_params(self, net_cls, module_cls):
        """lr params of custom optimizers are virtual (no module re-init)."""
        # creating a custom optimizer should lead to its parameters being
        # virtual
        side_effects = []
        class MyNet(net_cls):
            def initialize_module(self):
                side_effects.append(True)
                return super().initialize_module()
            def initialize_optimizer(self):
                super().initialize_optimizer()
                self.myoptimizer_ = torch.optim.SGD(self.module_.parameters(), lr=1)
                return self
        net = MyNet(module_cls).initialize()
        # module initialized once
        assert len(side_effects) == 1
        net.set_params(optimizer__lr=123)
        # module is not re-initialized, since virtual parameter
        assert len(side_effects) == 1
        net.set_params(myoptimizer__lr=123)
        # module is not re-initialized, since virtual parameter
        assert len(side_effects) == 1
    def test_module_referencing_another_module_no_duplicate_params(
            self, net_cls, module_cls
    ):
        """get_all_learnable_params contains no duplicate parameters."""
        # When a module references another module, it will yield that modules'
        # parameters. Therefore, if we collect all parameters, we have to make
        # sure that there are no duplicate parameters.
        class MyCriterion(torch.nn.NLLLoss):
            """Criterion that references net.module_"""
            def __init__(self, *args, themodule, **kwargs):
                super().__init__(*args, **kwargs)
                self.themodule = themodule
        class MyNet(net_cls):
            def initialize_criterion(self):
                kwargs = self.get_params_for('criterion')
                kwargs['themodule'] = self.module_
                self.criterion_ = self.criterion(**kwargs)
                return self
        net = MyNet(module_cls, criterion=MyCriterion).initialize()
        params = [p for _, p in net.get_all_learnable_params()]
        assert len(params) == len(set(params))
    def test_custom_optimizer_lr_is_associated_with_optimizer(
            self, net_cls, module_cls,
    ):
        """The 'lr' argument only affects the default optimizer."""
        # the 'lr' parameter belongs to the default optimizer, not any custom
        # optimizer
        class MyNet(net_cls):
            def initialize_optimizer(self):
                super().initialize_optimizer()
                self.myoptimizer_ = torch.optim.SGD(self.module_.parameters(), lr=1)
                return self
        net = MyNet(module_cls, lr=123).initialize()
        assert net.optimizer_.state_dict()['param_groups'][0]['lr'] == 123
        assert net.myoptimizer_.state_dict()['param_groups'][0]['lr'] == 1
        # updating lr leaves the custom optimizer's lr alone too
        net.set_params(lr=456)
        assert net.optimizer_.state_dict()['param_groups'][0]['lr'] == 456
        assert net.myoptimizer_.state_dict()['param_groups'][0]['lr'] == 1
    def test_custom_non_default_module_with_check_is_fitted(
            self, net_cls, module_cls
    ):
        """check_is_fitted works without a 'module_' attribute (see #927)."""
        # This is a regression test for a bug fixed in #927. In check_is_fitted
        # we made the assumption that there is a 'module_' attribute, but we
        # should not assume that. Here we test that even if such an attribute
        # doesn't exist, a properly initialized net will not raise an error when
        # check_is_fitted is called.
        class MyNet(net_cls):
            """Net without a 'module_' attribute"""
            def initialize_module(self):
                kwargs = self.get_params_for('module')
                module = self.initialized_instance(self.module, kwargs)
                # pylint: disable=attribute-defined-outside-init
                self.mymodule_ = module
                return self
        net = MyNet(module_cls).initialize()
        # does not raise
        net.check_is_fitted()
    def test_setattr_custom_module_no_duplicates(self, net_cls, module_cls):
        """Re-setting module_ does not create duplicate registry entries."""
        # the 'module' attribute is set twice but that shouldn't lead
        # to duplicates in prefixes_ or cuda_dependent_attributes_
        class MyNet(net_cls):
            def initialize_module(self):
                super().initialize_module()
                self.module_ = module_cls()  # same attribute name
                return self
        net = MyNet(module_cls).initialize()
        assert net.prefixes_.count('module') == 1
        assert net.cuda_dependent_attributes_.count('module_') == 1
    def test_setattr_in_initialize_non_torch_attribute(self, net_cls, module_cls):
        """Plain (non-torch) attributes set in initialize are not registered."""
        # attributes that are not torch modules or optimizers should
        # not be registered
        class MyNet(net_cls):
            def initialize_module(self):
                super().initialize_module()
                self.num = 123
                self.num_ = 123
                return self
        net = MyNet(module_cls)
        assert 'num' not in net.prefixes_
        assert 'num_' not in net.cuda_dependent_attributes_
    def test_setattr_does_not_modify_class_attribute(self, net_cls, module_cls):
        """Registration is per instance and leaves class attributes untouched."""
        net = net_cls(module_cls)
        assert 'mymodule' not in net.prefixes_
        assert 'mymodule' not in net.cuda_dependent_attributes_
        class MyNet(net_cls):
            def initialize_module(self):
                super().initialize_module()
                self.mymodule_ = module_cls()
                return self
        net = MyNet(module_cls).initialize()
        assert 'mymodule' in net.prefixes_
        assert 'mymodule_' in net.cuda_dependent_attributes_
        # the class-level registries must stay clean
        assert 'mymodule' not in net_cls.prefixes_
        assert 'mymodule_' not in net_cls.cuda_dependent_attributes_
    @pytest.fixture
    def net_custom_module_cls(self, net_cls, module_cls):
        """Net class with an extra user-set module under the 'custom' prefix."""
        class MyNet(net_cls):
            """Net with custom attribute mymodule"""
            def __init__(self, *args, custom=module_cls, **kwargs):
                self.custom = custom
                super().__init__(*args, **kwargs)
            def initialize_module(self, *args, **kwargs):
                super().initialize_module(*args, **kwargs)
                # instantiate the custom module with its custom__* params
                params = self.get_params_for('custom')
                # pylint: disable=attribute-defined-outside-init
                self.custom_ = self.custom(**params)
                return self
        return MyNet
    def test_set_params_on_custom_module(self, net_custom_module_cls, module_cls):
        """set_params with the custom__ prefix reaches the custom module."""
        # set_params requires the prefixes_ attribute to be correctly
        # set, which is what is tested here
        net = net_custom_module_cls(module_cls, custom__hidden_units=77).initialize()
        hidden_units = net.custom_.state_dict()['sequential.3.weight'].shape[1]
        assert hidden_units == 77
        net.set_params(custom__hidden_units=99)
        hidden_units = net.custom_.state_dict()['sequential.3.weight'].shape[1]
        assert hidden_units == 99
    @pytest.mark.parametrize('use_safetensors', [False, True])
    def test_save_load_state_dict_custom_module(
            self, net_custom_module_cls, module_cls, use_safetensors, tmpdir):
        """save_params/load_params round-trip works for a custom module."""
        # test that we can store and load an arbitrary attribute like 'custom'
        net = net_custom_module_cls(module_cls).initialize()
        weights_before = net.custom_.state_dict()['sequential.3.weight']
        tmpdir_custom = str(tmpdir.mkdir('skorch').join('custom.pkl'))
        net.save_params(f_custom=tmpdir_custom, use_safetensors=use_safetensors)
        del net
        # initialize a new net, weights should differ
        net_new = net_custom_module_cls(module_cls).initialize()
        weights_new = net_new.custom_.state_dict()['sequential.3.weight']
        assert not (weights_before == weights_new).all()
        # after loading, weights should be the same again
        net_new.load_params(f_custom=tmpdir_custom, use_safetensors=use_safetensors)
        weights_loaded = net_new.custom_.state_dict()['sequential.3.weight']
        assert (weights_before == weights_loaded).all()
    def test_torch_load_kwargs_auto_weights_only_true_when_load_params(
            self, net_cls, module_cls, monkeypatch, tmp_path
    ):
        """load_params calls torch.load with weights_only=True by default."""
        # Here we assume that the torch version is high enough that weights_only
        # defaults to True. Check that when no argument is set in skorch, the
        # right default is used.
        # See discussion in 1063
        net = net_cls(module_cls).initialize()
        net.save_params(f_params=tmp_path / 'params.pkl')
        state_dict = net.module_.state_dict()
        expected_kwargs = {"weights_only": True}
        mock_torch_load = Mock(return_value=state_dict)
        monkeypatch.setattr(torch, "load", mock_torch_load)
        net.load_params(f_params=tmp_path / 'params.pkl')
        call_kwargs = mock_torch_load.call_args_list[0].kwargs
        del call_kwargs['map_location']  # we're not interested in that
        assert call_kwargs == expected_kwargs
    def test_torch_load_kwargs_forwarded_to_torch_load(
            self, net_cls, module_cls, monkeypatch, tmp_path
    ):
        """Custom torch_load_kwargs are passed through to torch.load."""
        # Here we check that custom set torch load args are forwarded to
        # torch.load.
        # See discussion in 1063
        expected_kwargs = {'weights_only': 123, 'foo': 'bar'}
        net = net_cls(module_cls, torch_load_kwargs=expected_kwargs).initialize()
        net.save_params(f_params=tmp_path / 'params.pkl')
        state_dict = net.module_.state_dict()
        mock_torch_load = Mock(return_value=state_dict)
        monkeypatch.setattr(torch, "load", mock_torch_load)
        net.load_params(f_params=tmp_path / 'params.pkl')
        call_kwargs = mock_torch_load.call_args_list[0].kwargs
        del call_kwargs['map_location']  # we're not interested in that
        assert call_kwargs == expected_kwargs
    def test_torch_load_kwargs_auto_weights_true(
            self, net_cls, module_cls, monkeypatch, tmp_path
    ):
        """Default torch.load kwargs resolve to weights_only=True."""
        # See discussion in 1063.
        net = net_cls(module_cls).initialize()
        net.save_params(f_params=tmp_path / 'params.pkl')
        state_dict = net.module_.state_dict()
        expected_kwargs = {"weights_only": True}
        mock_torch_load = Mock(return_value=state_dict)
        monkeypatch.setattr(torch, "load", mock_torch_load)
        net.load_params(f_params=tmp_path / 'params.pkl')
        call_kwargs = mock_torch_load.call_args_list[0].kwargs
        del call_kwargs['map_location']  # we're not interested in that
        assert call_kwargs == expected_kwargs
    def test_torch_load_kwargs_forwarded_to_torch_load_unpickle(
            self, net_cls, module_cls, monkeypatch, tmp_path
    ):
        """torch_load_kwargs are also honored when unpickling the net."""
        # See discussion in 1090
        # Here we check that custom set torch load args are forwarded to
        # torch.load even when using pickle. This is the same test otherwise as
        # test_torch_load_kwargs_forwarded_to_torch_load
        expected_kwargs = {'weights_only': 123, 'foo': 'bar'}
        net = net_cls(module_cls, torch_load_kwargs=expected_kwargs).initialize()
        original_torch_load = torch.load
        # call original torch.load without extra params to prevent error:
        mock_torch_load = Mock(
            side_effect=lambda *args, **kwargs: original_torch_load(*args)
        )
        monkeypatch.setattr(torch, "load", mock_torch_load)
        dumped = pickle.dumps(net)
        pickle.loads(dumped)
        call_kwargs = mock_torch_load.call_args_list[0].kwargs
        del call_kwargs['map_location']  # we're not interested in that
        assert call_kwargs == expected_kwargs
def test_unpickle_no_pytorch_warning(self, net_cls, module_cls, recwarn):
# See discussion 1090
# When using pickle, i.e. when going through __setstate__, we don't want to get
# any warnings about the usage of weights_only.
net = net_cls(module_cls).initialize()
dumped = pickle.dumps(net)
pickle.loads(dumped)
msg_content = "weights_only"
assert not any(msg_content in str(w.message) for w in recwarn.list)
def test_custom_module_params_passed_to_optimizer(
self, net_custom_module_cls, module_cls):
# custom module parameters should automatically be passed to the optimizer
net = net_custom_module_cls(module_cls).initialize()
optimizer = net.optimizer_
module0 = net.module_
module1 = net.custom_
num_params_optimizer = len(optimizer.param_groups[0]['params'])
num_params_expected = len(module0.state_dict()) + len(module1.state_dict())
assert num_params_optimizer == num_params_expected
def test_criterion_params_passed_to_optimizer_if_any(self, net_fit_criterion):
# the parameters of the criterion should be passed to the optimizer if
# there are any
optimizer = net_fit_criterion.optimizer_
num_params_module = len(net_fit_criterion.module_.state_dict())
num_params_criterion = len(net_fit_criterion.criterion_.state_dict())
num_params_optimizer = len(optimizer.param_groups[0]['params'])
assert num_params_criterion > 0
assert num_params_optimizer == num_params_module + num_params_criterion
    def test_set_params_on_custom_module_triggers_reinit_of_criterion_and_optimizer(
            self, net_custom_module_cls, module_cls,
    ):
        """Re-initializing a custom module re-initializes criterion/optimizer."""
        # When a custom module is re-initialized because of set_params, the
        # criterion and optimizer should also be re-initialized, as with a
        # normal module.
        init_side_effects = []  # record initialize calls
        class MyNet(net_custom_module_cls):
            """Records initialize_* calls"""
            def initialize_module(self):
                super().initialize_module()
                init_side_effects.append('module')
                return self
            def initialize_criterion(self):
                super().initialize_criterion()
                init_side_effects.append('criterion')
                return self
            def initialize_optimizer(self):
                super().initialize_optimizer()
                init_side_effects.append('optimizer')
                return self
        net = MyNet(module_cls).initialize()
        # just normal initialization behavior
        assert init_side_effects == ['module', 'criterion', 'optimizer']
        # still just normal behavior
        net.set_params(module__hidden_units=123)
        assert init_side_effects == ['module', 'criterion', 'optimizer'] * 2
        # setting custom module should also re-initialize
        net.set_params(custom__num_hidden=3)
        assert init_side_effects == ['module', 'criterion', 'optimizer'] * 3
        # setting normal and custom module should re-initialize, but only once
        net.set_params(module__num_hidden=1, custom__dropout=0.7)
        assert init_side_effects == ['module', 'criterion', 'optimizer'] * 4
    def test_set_params_on_custom_criterion_triggers_reinit_of_optimizer(
            self, net_cls, module_cls,
    ):
        """Re-initializing a custom criterion re-initializes the optimizer."""
        # When a custom criterion is re-initialized because of set_params, the
        # optimizer should also be re-initialized, as with a normal criterion.
        init_side_effects = []  # record initialize calls
        class MyNet(net_cls):
            """Records initialize_* calls"""
            def __init__(self, *args, mycriterion, **kwargs):
                self.mycriterion = mycriterion
                super().__init__(*args, **kwargs)
            def initialize_module(self):
                super().initialize_module()
                init_side_effects.append('module')
                return self
            def initialize_criterion(self):
                super().initialize_criterion()
                params = self.get_params_for('mycriterion')
                self.mycriterion_ = self.mycriterion(**params)
                init_side_effects.append('criterion')
                return self
            def initialize_optimizer(self):
                super().initialize_optimizer()
                init_side_effects.append('optimizer')
                return self
        net = MyNet(module_cls, mycriterion=nn.NLLLoss).initialize()
        # just normal initialization behavior
        assert init_side_effects == ['module'] + ['criterion', 'optimizer']
        # still just normal behavior
        net.set_params(criterion__ignore_index=123)
        assert init_side_effects == ['module'] + ['criterion', 'optimizer'] * 2
        # setting custom criterion should also re-initialize
        net.set_params(mycriterion__ignore_index=456)
        assert init_side_effects == ['module'] + ['criterion', 'optimizer'] * 3
        # setting normal and custom criterion should re-initialize, but only once
        net.set_params(criterion__size_average=True, mycriterion__reduce=False)
        assert init_side_effects == ['module'] + ['criterion', 'optimizer'] * 4
    def test_set_params_on_custom_module_with_default_module_params_msg(
            self, net_cls, module_cls, capsys,
    ):
        """The re-init message only mentions the params that actually changed."""
        # say we have module and module2, with module having some non-default
        # params, e.g. module__num_hidden=3; when setting params on module2,
        # that non-default value should not be given as a reason for
        # re-initialization.
        class MyNet(net_cls):
            def initialize_module(self):
                super().initialize_module()
                self.module2_ = module_cls()
                return self
        net = MyNet(module_cls, module__hidden_units=7).initialize()
        net.set_params(module2__num_hidden=3)
        msg = capsys.readouterr()[0]
        # msg should not be about hidden_units, since that wasn't changed, but
        # about num_hidden
        expected = ("Re-initializing module because the following parameters "
                    "were re-set: module2__num_hidden.")
        assert msg.startswith(expected)
    def test_set_params_on_custom_criterion_with_default_criterion_params_msg(
            self, net_cls, module_cls, capsys,
    ):
        """The criterion re-init message only mentions params that changed."""
        # say we have criterion and criterion2, with criterion having some
        # non-default params, e.g. criterion__reduce=False; when setting params
        # on criterion2, that non-default value should not be given as a reason
        # for re-initialization.
        class MyNet(net_cls):
            def initialize_criterion(self):
                super().initialize_criterion()
                self.criterion2_ = module_cls()
                return self
        net = MyNet(module_cls, criterion__reduce=False).initialize()
        net.set_params(criterion2__num_hidden=3)
        msg = capsys.readouterr()[0]
        # msg should not be about reduce, since that wasn't changed, but
        # about num_hidden
        expected = ("Re-initializing criterion because the following parameters "
                    "were re-set: criterion2__num_hidden.")
        assert msg.startswith(expected)
def test_modules_reinit_when_both_initialized_but_custom_module_changed(
self, net_cls, module_cls,
):
# When the default module and the custom module are already initialized,
# initialize() should just leave them. However, when we change a
# parameter on the custom module, both should be re-initialized.
class MyNet(net_cls):
def __init__(self, *args, module2, **kwargs):
self.module2 = module2
super().__init__(*args, **kwargs)
def initialize_module(self):
super().initialize_module()
params = self.get_params_for('module2')
is_init = isinstance(self.module2, nn.Module)
if is_init and not params:
# no need to initialize
self.module2_ = self.module2
return
if is_init:
module2 = type(self.module2)
else:
module2 = self.module2
self.module2_ = module2(**params)
return self
module = module_cls()
module2 = module_cls()
# all default params, hence no re-initilization
net = MyNet(module=module, module2=module2).initialize()
assert net.module_ is module
assert net.module2_ is module2
# module2 non default param, hence re-initilization
net = MyNet(module=module, module2=module2, module2__num_hidden=3).initialize()
assert net.module_ is module
assert net.module2_ is not module2
    def test_criteria_reinit_when_both_initialized_but_custom_criterion_changed(
        self, net_cls, module_cls,
    ):
        """Changing a param of the custom criterion forces re-initialization
        even when both criteria were passed initialized."""
        # When the default criterion and the custom criterion are already initialized,
        # initialize() should just leave them. However, when we change a
        # parameter on the custom criterion, both should be re-initialized.
        class MyNet(net_cls):
            def __init__(self, *args, criterion2, **kwargs):
                self.criterion2 = criterion2
                super().__init__(*args, **kwargs)

            def initialize_criterion(self):
                super().initialize_criterion()
                params = self.get_params_for('criterion2')
                is_init = isinstance(self.criterion2, nn.Module)
                if is_init and not params:
                    # no need to initialize
                    self.criterion2_ = self.criterion2
                    return

                # re-instantiate from the class when already initialized
                if is_init:
                    criterion2 = type(self.criterion2)
                else:
                    criterion2 = self.criterion2
                self.criterion2_ = criterion2(**params)
                return self

        criterion = module_cls()
        criterion2 = module_cls()
        # all default params, hence no re-initialization
        net = MyNet(module_cls, criterion=criterion, criterion2=criterion2).initialize()
        assert net.criterion_ is criterion
        assert net.criterion2_ is criterion2

        # criterion2 non default param, hence re-initialization
        net = MyNet(
            module_cls,
            criterion=criterion,
            criterion2=criterion2,
            criterion2__num_hidden=3,
        ).initialize()
        assert net.criterion_ is criterion
        assert net.criterion2_ is not criterion2
    def test_custom_criterion_attribute_name_predict_works(
        self, net_cls, module_cls, data
    ):
        """Regression test for #927: predict must not assume an attribute
        named 'criterion_' exists when inferring the predict nonlinearity."""
        # This is a regression test for bugfix in #927. We should not assume
        # that there is always an attribute called 'criterion_' when trying to
        # infer the predict nonlinearity.
        from skorch.utils import to_tensor

        class MyNet(net_cls):
            def initialize_criterion(self):
                kwargs = self.get_params_for('criterion')
                criterion = self.initialized_instance(self.criterion, kwargs)
                # pylint: disable=attribute-defined-outside-init
                self.mycriterion_ = criterion  # non-default name

            def get_loss(self, y_pred, y_true, *args, **kwargs):
                y_true = to_tensor(y_true, device=self.device)
                return self.mycriterion_(y_pred, y_true)

        net = MyNet(module_cls).initialize()
        X, y = data[0][:10], data[1][:10]
        # fit + predict must both work despite the non-default criterion name
        net.fit(X, y)
        net.predict(X)
    def test_custom_module_is_init_when_default_module_already_is(
        self, net_cls, module_cls,
    ):
        """A custom module is initialized even if the default module was
        already passed in initialized (and is therefore left untouched)."""
        # Assume that the module is already initialized, which is something we
        # allow, but the custom module isn't. After calling initialize(), the
        # custom module should be initialized and not skipped just because the
        # default module already was initialized.
        class MyNet(net_cls):
            def initialize_module(self):
                super().initialize_module()
                self.module2_ = module_cls()
                return self

        module = module_cls()
        net = MyNet(module=module).initialize()  # module already initialized
        assert net.module_ is module  # normal module_ not changed
        assert hasattr(net, 'module2_')  # there is a module2_
    def test_custom_criterion_is_init_when_default_criterion_already_is(
        self, net_cls, module_cls,
    ):
        """A custom criterion is initialized even if the default criterion was
        already passed in initialized (and is therefore left untouched)."""
        # Assume that the criterion is already initialized, which is something we
        # allow, but the custom criterion isn't. After calling initialize(), the
        # custom criterion should be initialized and not skipped just because the
        # default criterion already was initialized.
        class MyNet(net_cls):
            def initialize_criterion(self):
                super().initialize_criterion()
                self.criterion2_ = module_cls()
                return self

        criterion = module_cls()
        # criterion already initialized
        net = MyNet(module_cls, criterion=criterion).initialize()
        assert net.criterion_ is criterion  # normal criterion_ not changed
        assert hasattr(net, 'criterion2_')  # there is a criterion2_
    def test_setting_custom_module_outside_initialize_raises(self, net_cls, module_cls):
        """Assigning an initialized module outside of an initialize method
        raises a SkorchAttributeError."""
        from skorch.exceptions import SkorchAttributeError

        # all modules should be set within an initialize method
        class MyNet(net_cls):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.foo_ = module_cls()

        # NOTE(review): 'compoment' mirrors the exact (misspelled) message
        # raised by skorch -- do not "fix" it here or the match will fail.
        msg = ("Trying to set torch compoment 'foo_' outside of an initialize method. "
               "Consider defining it inside 'initialize_module'")
        with pytest.raises(SkorchAttributeError, match=msg):
            MyNet(module_cls)
    def test_setting_custom_optimizer_outside_initialize_raises(
        self, net_cls, module_cls
    ):
        """Assigning an optimizer outside of an initialize method raises a
        SkorchAttributeError."""
        from skorch.exceptions import SkorchAttributeError

        # all optimizers should be set within an initialize method
        class MyNet(net_cls):
            def initialize(self):
                super().initialize()
                self.opti = torch.optim.Adam(self.module_.parameters())
                return self

        # NOTE(review): 'compoment' mirrors the exact (misspelled) message
        # raised by skorch -- do not "fix" it here or the match will fail.
        msg = ("Trying to set torch compoment 'opti' outside of an initialize method. "
               "Consider defining it inside 'initialize_optimizer'")
        with pytest.raises(SkorchAttributeError, match=msg):
            MyNet(module_cls).initialize()
    def test_setting_custom_module_without_trailing_underscore_raises(
        self, net_cls, module_cls,
    ):
        """Names of initialized modules must end with a trailing underscore;
        otherwise a SkorchAttributeError is raised."""
        from skorch.exceptions import SkorchAttributeError

        # all initialized modules should end on an underscore
        class MyNet(net_cls):
            def initialize_module(self):
                super().initialize_module()
                self.mymodule = module_cls()
                return self

        msg = ("Names of initialized modules or optimizers should end "
               "with an underscore (e.g. 'mymodule_')")
        # re.escape because the message contains regex metacharacters
        with pytest.raises(SkorchAttributeError, match=re.escape(msg)):
            MyNet(module_cls).initialize()
    def test_moving_custom_modules_to_device(self, net_cls):
        """All modules and criteria, including custom ones, are moved to the
        net's device upon initialization, in module-then-criterion order."""
        # testing that custom modules and criteria are moved to the indicated
        # device, not just the normal module/criterion; we override .to(device)
        # here to be able to test this even without GPU
        device_side_effects = []  # record module name and device

        class MyModule(nn.Module):
            """Custom module that records .to calls"""
            def __init__(self, name):
                super().__init__()
                self.name = name
                self.lin = nn.Linear(5, 5)  # module needs parameters

            def to(self, device):
                device_side_effects.append((self.name, device))
                return self

        class MyNet(net_cls):
            """Net with custom mymodule and mycriterion"""
            def __init__(self, *args, mymodule, mycriterion, **kwargs):
                self.mymodule = mymodule
                self.mycriterion = mycriterion
                super().__init__(*args, **kwargs)

            def initialize_module(self):
                super().initialize_module()
                params = self.get_params_for('mymodule')
                self.mymodule_ = MyModule(**params)
                return self

            def initialize_criterion(self):
                super().initialize_criterion()
                params = self.get_params_for('mycriterion')
                self.mycriterion_ = MyModule(**params)
                return self

        MyNet(
            module=MyModule,
            module__name='module-normal',
            mymodule=MyModule,
            mymodule__name='module-custom',
            criterion=MyModule,
            criterion__name='criterion-normal',
            mycriterion=MyModule,
            mycriterion__name='criterion-custom',
            device='foo',
        ).initialize()

        # modules are moved before criteria, default before custom
        expected = [('module-normal', 'foo'), ('module-custom', 'foo'),
                    ('criterion-normal', 'foo'), ('criterion-custom', 'foo')]
        assert device_side_effects == expected
    def test_set_params_on_custom_module_preserves_its_device(self, net_cls):
        """Custom modules/criteria re-created through set_params are moved
        back to the configured device."""
        # when a custom module or criterion is re-created because of set_params,
        # it should be moved to the indicated device
        class MyNet(net_cls):
            """Net with custom module and criterion"""
            def __init__(self, *args, mymodule, mycriterion, **kwargs):
                self.mymodule = mymodule
                self.mycriterion = mycriterion
                super().__init__(*args, **kwargs)

            def initialize_module(self):
                super().initialize_module()
                params = self.get_params_for('mymodule')
                self.mymodule_ = self.mymodule(**params)
                return self

            def initialize_criterion(self):
                super().initialize_criterion()
                params = self.get_params_for('mycriterion')
                self.mycriterion_ = self.mycriterion(**params)
                return self

        class MyModule(nn.Module):
            """Custom module to test device even without GPU"""
            def __init__(self, x=1):
                super().__init__()
                self.lin = nn.Linear(x, 1)  # modules need parameters
                self.device = 'cpu'

            def to(self, device):
                # only record the device instead of actually moving
                self.device = device
                return self

        # first normal CPU
        net = MyNet(
            module=MyModule,
            mymodule=MyModule,
            criterion=MyModule,
            mycriterion=MyModule,
        ).initialize()
        assert net.mymodule_.device == 'cpu'
        assert net.mycriterion_.device == 'cpu'

        # now try other device
        net = MyNet(
            module=MyModule,
            mymodule=MyModule,
            device='foo',
            criterion=MyModule,
            mycriterion=MyModule,
        ).initialize()
        assert net.mymodule_.device == 'foo'
        assert net.mycriterion_.device == 'foo'

        # re-creating mymodule_ via set_params must keep the device
        net.set_params(mymodule__x=3)
        assert net.mymodule_.device == 'foo'
        assert net.mycriterion_.device == 'foo'
    def test_custom_modules_and_criteria_training_mode_set_correctly(
        self, net_cls, module_cls, data,
    ):
        """Custom modules and criteria follow train/eval mode switches during
        training, validation, and inference."""
        # custom modules and criteria should be set to training/eval mode
        # correctly depending on the stage of training/validation/inference
        # NOTE(review): the Callback import appears unused in this test --
        # candidate for removal upstream.
        from skorch.callbacks import Callback

        class MyNet(net_cls):
            """Net with custom mymodule and mycriterion"""
            def initialize_module(self):
                super().initialize_module()
                self.mymodule_ = module_cls()
                return self

            def initialize_criterion(self):
                super().initialize_criterion()
                self.mycriterion_ = module_cls()
                return self

            def evaluation_step(self, batch, training=False):
                y_pred = super().evaluation_step(batch, training=training)
                assert_net_training_mode(self, training=training)
                return y_pred

            def on_batch_end(self, net, batch, training, **kwargs):
                assert_net_training_mode(net, training=training)

        def assert_net_training_mode(net, training=True):
            # every component must agree with the requested mode
            if training:
                check = lambda module: module.training is True
            else:
                check = lambda module: module.training is False

            assert check(net.module_)
            assert check(net.mymodule_)
            assert check(net.criterion_)
            assert check(net.mycriterion_)

        X, y = data
        net = MyNet(module_cls, max_epochs=1)
        net.fit(X, y)
        net.predict(X)
    def test_custom_optimizer_performs_updates(self, net_cls, module_cls, data):
        """A second, custom optimizer must actually update the parameters of
        its associated module."""
        # make sure that updates are actually performed by a custom optimizer
        from skorch.utils import to_tensor

        # custom optimizers should actually perform updates
        # pylint: disable=attribute-defined-outside-init
        class MyNet(net_cls):
            """A net with 2 modules with their respective optimizers"""
            def initialize_module(self):
                super().initialize_module()
                self.module2_ = module_cls()
                return self

            def initialize_optimizer(self):
                self.optimizer_ = self.optimizer(self.module_.parameters(), self.lr)
                self.optimizer2_ = self.optimizer(self.module2_.parameters(), self.lr)
                return self

            def infer(self, x, **fit_params):
                # prediction is just mean of the two modules
                x = to_tensor(x, device=self.device)
                return 0.5 * (self.module_(x) + self.module2_(x))

        net = MyNet(module_cls, max_epochs=1, lr=0.5).initialize()
        params1_before = copy.deepcopy(list(net.module_.parameters()))
        params2_before = copy.deepcopy(list(net.module2_.parameters()))

        net.partial_fit(*data)

        params1_after = list(net.module_.parameters())
        params2_after = list(net.module2_.parameters())

        # every parameter tensor of both modules must have changed
        assert not any(
            (p_b == p_a).all() for p_b, p_a in zip(params1_before, params1_after))
        assert not any(
            (p_b == p_a).all() for p_b, p_a in zip(params2_before, params2_after))
    def test_optimizer_initialized_after_module_moved_to_device(self, net_cls):
        """The optimizer is initialized only after module and criterion have
        been moved to their device, including after set_params."""
        # it is recommended to initialize the optimizer with the module params
        # _after_ the module has been moved to its device, see:
        # https://discuss.pytorch.org/t/effect-of-calling-model-cuda-after-constructing-an-optimizer/15165/6
        side_effects = []  # record module name and device

        class MyModule(nn.Module):
            """Custom module that records .to calls"""
            def __init__(self, x=1):
                super().__init__()
                self.lin = nn.Linear(x, 1)  # module needs parameters

            def to(self, device):
                side_effects.append('moved-to-device')
                return self

        class MyOptimizer(torch.optim.SGD):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                side_effects.append('optimizer-init')

        net = net_cls(
            module=MyModule,
            criterion=MyModule,
            optimizer=MyOptimizer,
        ).initialize()

        # first move module and criterion to device, then initialize optimizer
        expected = ['moved-to-device', 'moved-to-device', 'optimizer-init']
        assert side_effects == expected

        net.set_params(module__x=2)
        # after set_params on module, re-initialization and moving of device
        # should happen again, with the same order as before
        expected = ['moved-to-device', 'moved-to-device', 'optimizer-init'] * 2
        assert side_effects == expected
    @pytest.mark.parametrize("needs_y, train_split, raises", [
        (False, None, ExitStack()),  # ExitStack = does not raise
        (True, None, ExitStack()),
        (False, "default", ExitStack()),  # Default parameters for NeuralNet
        (True, "default", ExitStack()),  # Default parameters for NeuralNet
        (False, lambda x: (x, x), ExitStack()),  # Earlier this was not allowed
        (True, lambda x, y: (x, x), ExitStack()),  # Works for custom split
        (True, lambda x: (x, x), pytest.raises(TypeError)),  # Raises an error
    ])
    def test_passes_y_to_train_split_when_not_none(
        self, needs_y, train_split, raises):
        """NeuralNet forwards y (including y=None) to train_split; a split
        function that cannot accept y fails only when y is actually passed."""
        from skorch.net import NeuralNet
        from skorch.toy import MLPModule

        # By default, `train_split=ValidSplit(5)` in the `NeuralNet` definition
        kwargs = {} if train_split == 'default' else {
            'train_split': train_split}

        # Dummy loss that ignores y_true
        class UnsupervisedLoss(torch.nn.NLLLoss):
            def forward(self, y_pred, _):
                return y_pred.mean()

        # Generate the dummy dataset
        n_samples, n_features = 128, 10
        X = np.random.rand(n_samples, n_features).astype(np.float32)
        y = np.random.binomial(n=1, p=0.5, size=n_samples) if needs_y else None

        # The `NeuralNetClassifier` or `NeuralNetRegressor` always require `y`
        # Only `NeuralNet` can transfer `y=None` to `train_split` method.
        net = NeuralNet(
            MLPModule,  # Any model, it's not important here
            module__input_units=n_features,
            max_epochs=2,  # Run train loop twice to detect possible errors
            criterion=UnsupervisedLoss,
            **kwargs,
        )

        # Check if the code should fail or not
        with raises:
            net.fit(X, y)
    def test_predict_nonlinearity_called_with_predict(
        self, net_cls, module_cls, data):
        """A custom predict_nonlinearity is applied batch-wise by predict,
        but never during training."""
        side_effect = []
        def nonlin(X):
            side_effect.append(X)
            return np.zeros_like(X)

        X, y = data[0][:200], data[1][:200]
        net = net_cls(
            module_cls, max_epochs=1, predict_nonlinearity=nonlin).initialize()

        # don't want callbacks to trigger side effects
        net.callbacks_ = []
        net.partial_fit(X, y)
        # the nonlinearity must not have been applied during training
        assert not side_effect

        # 2 calls, since batch size == 128 and n == 200
        y_proba = net.predict(X)
        assert len(side_effect) == 2
        assert side_effect[0].shape == (128, 2)
        assert side_effect[1].shape == (72, 2)
        assert (y_proba == 0).all()

        # each subsequent predict adds another 2 calls
        net.predict(X)
        assert len(side_effect) == 4
    def test_predict_nonlinearity_called_with_predict_proba(
        self, net_cls, module_cls, data):
        """A custom predict_nonlinearity is applied batch-wise by
        predict_proba, but never during training."""
        side_effect = []
        def nonlin(X):
            side_effect.append(X)
            return np.zeros_like(X)

        X, y = data[0][:200], data[1][:200]
        net = net_cls(
            module_cls, max_epochs=1, predict_nonlinearity=nonlin).initialize()

        # don't want callbacks to trigger side effects
        net.callbacks_ = []
        net.partial_fit(X, y)
        # the nonlinearity must not have been applied during training
        assert not side_effect

        # 2 calls, since batch size == 128 and n == 200
        y_proba = net.predict_proba(X)
        assert len(side_effect) == 2
        assert side_effect[0].shape == (128, 2)
        assert side_effect[1].shape == (72, 2)
        assert np.allclose(y_proba, 0)

        # each subsequent predict_proba adds another 2 calls
        net.predict_proba(X)
        assert len(side_effect) == 4
    def test_predict_nonlinearity_none(
        self, net_cls, module_cls, data):
        """predict_nonlinearity=None leaves the module output unmodified even
        when the criterion (CrossEntropyLoss) would suggest a softmax."""
        # even though we have CrossEntropyLoss, we don't want the
        # output from predict_proba to be modified, thus we set
        # predict_nonlinearity to None
        X = data[0][:200]
        net = net_cls(
            module_cls,
            max_epochs=1,
            criterion=nn.CrossEntropyLoss,
            predict_nonlinearity=None,
        ).initialize()

        # stub out the forward pass with a fixed random tensor
        rv = np.random.random((20, 5))
        net.forward_iter = (
            lambda *args, **kwargs: (torch.as_tensor(rv) for _ in range(2)))

        # 2 batches, mock return value has shape 20,5 thus y_proba has
        # shape 40,5
        y_proba = net.predict_proba(X)
        assert y_proba.shape == (40, 5)
        assert np.allclose(y_proba[:20], rv)
        assert np.allclose(y_proba[20:], rv)
def test_predict_nonlinearity_type_error(self, net_cls, module_cls):
# if predict_nonlinearity is not callable, raise a TypeError
net = net_cls(module_cls, predict_nonlinearity=123).initialize()
msg = "predict_nonlinearity has to be a callable, 'auto' or None"
with pytest.raises(TypeError, match=msg):
net.predict(np.zeros((3, 3)))
with pytest.raises(TypeError, match=msg):
net.predict_proba(np.zeros((3, 3)))
    def test_predict_nonlinearity_is_identity_with_multiple_criteria(
        self, net_cls, module_cls, data
    ):
        """With more than one criterion, no predict nonlinearity is inferred
        (regression test for #927)."""
        # Regression test for bugfix so we don't assume that there is always
        # just a single criterion when trying to infer the predict nonlinearity
        # (#927). Instead, if there are multiple criteria, don't apply any
        # predict nonlinearity. In this test, criterion_ is CrossEntropyLoss, so
        # normally we would apply softmax, but since there is a second criterion
        # here, we shouldn't. To test that the identity function is used, we
        # check that predict_proba and forward return the same values.
        from skorch.utils import to_numpy, to_tensor

        class MyNet(net_cls):
            def initialize_criterion(self):
                # pylint: disable=attribute-defined-outside-init
                kwargs = self.get_params_for('criterion')
                criterion = self.initialized_instance(nn.CrossEntropyLoss, kwargs)
                self.criterion_ = criterion  # non-default name
                kwargs = self.get_params_for('criterion2')
                criterion2 = self.initialized_instance(nn.NLLLoss, kwargs)
                self.criterion2_ = criterion2

            def get_loss(self, y_pred, y_true, *args, **kwargs):
                y_true = to_tensor(y_true, device=self.device)
                loss = self.criterion_(y_pred, y_true)
                loss2 = self.criterion2_(y_pred, y_true)
                return loss + loss2

        net = MyNet(module_cls).initialize()
        X, y = data[0][:10], data[1][:10]
        net.fit(X, y)

        # test that predict_proba and forward return the same values, hence no
        # nonlinearity was applied
        y_proba = net.predict_proba(X)
        y_forward = to_numpy(net.forward(X))
        assert np.allclose(y_proba, y_forward)
    def test_customize_net_with_custom_dataset_that_returns_3_values(self, data):
        """NeuralNet can be customized to consume Datasets that yield three
        items per sample instead of the usual (X, y) pair."""
        # Test if it's possible to easily customize NeuralNet to work
        # with Datasets that don't return 2 values. This way, a user
        # can more easily customize the net and use his or her own
        # datasets.
        from skorch import NeuralNet
        from skorch.utils import to_tensor

        class MyDataset(torch.utils.data.Dataset):
            """Returns 3 elements instead of 2"""
            def __init__(self, X, y):
                self.X = X
                self.y = y

            def __getitem__(self, i):
                # split each sample into two input halves (+ target if given)
                x = self.X[i]
                if self.y is None:
                    return x[:5], x[5:]
                y = self.y[i]
                return x[:5], x[5:], y

            def __len__(self):
                return len(self.X)

        class MyModule(nn.Module):
            """Module that takes 2 inputs"""
            def __init__(self):
                super().__init__()
                self.lin = nn.Linear(20, 2)

            def forward(self, x0, x1):
                x = torch.cat((x0, x1), axis=1)
                return self.lin(x)

        class MyNet(NeuralNet):
            """Override train_step_single and validation_step"""
            def train_step_single(self, batch, **fit_params):
                self.module_.train()
                x0, x1, yi = batch
                x0, x1, yi = to_tensor((x0, x1, yi), device=self.device)
                y_pred = self.module_(x0, x1)
                loss = self.criterion_(y_pred, yi)
                loss.backward()
                return {'loss': loss, 'y_pred': y_pred}

            def validation_step(self, batch, **fit_params):
                self.module_.eval()
                x0, x1, yi = batch
                x0, x1, yi = to_tensor((x0, x1, yi), device=self.device)
                y_pred = self.module_(x0, x1)
                loss = self.criterion_(y_pred, yi)
                return {'loss': loss, 'y_pred': y_pred}

            def evaluation_step(self, batch, training=False):
                # inference batches carry no target
                self.check_is_fitted()
                x0, x1 = batch
                x0, x1 = to_tensor((x0, x1), device=self.device)
                with torch.set_grad_enabled(training):
                    self.module_.train(training)
                    return self.module_(x0, x1)

        net = MyNet(
            MyModule,
            lr=0.1,
            dataset=MyDataset,
            criterion=nn.CrossEntropyLoss,
        )
        X, y = data[0][:100], data[1][:100]
        net.fit(X, y)

        # net learns
        assert net.history[-1, 'train_loss'] < 0.75 * net.history[0, 'train_loss']

        y_pred = net.predict(X)
        assert y_pred.shape == (100, 2)
| TestNeuralNet |
python | scipy__scipy | scipy/special/tests/test_hypergeometric.py | {
"start": 3855,
"end": 9996
} | class ____:
    @pytest.mark.parametrize('a, b, x', [
        (np.nan, 1, 1),
        (1, np.nan, 1),
        (1, 1, np.nan)
    ])
    def test_nan_inputs(self, a, b, x):
        """A NaN in any argument must propagate to a NaN result."""
        assert np.isnan(sc.hyp1f1(a, b, x))
def test_poles(self):
assert_equal(sc.hyp1f1(1, [0, -1, -2, -3, -4], 0.5), np.inf)
    @pytest.mark.parametrize('a, b, x, result', [
        (-1, 1, 0.5, 0.5),
        (1, 1, 0.5, 1.6487212707001281468),
        (2, 1, 0.5, 2.4730819060501922203),
        (1, 2, 0.5, 1.2974425414002562937),
        (-10, 1, 0.5, -0.38937441413785204475)
    ])
    def test_special_cases(self, a, b, x, result):
        """Exercise the special-case branches at the start of hyp1f1."""
        # Hit all the special case branches at the beginning of the
        # function. Desired answers computed using Mpmath.
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)
    @pytest.mark.parametrize('a, b, x, result', [
        (1, 1, 0.44, 1.5527072185113360455),
        (-1, 1, 0.44, 0.55999999999999999778),
        (100, 100, 0.89, 2.4351296512898745592),
        (-100, 100, 0.89, 0.40739062490768104667),
        (1.5, 100, 59.99, 3.8073513625965598107),
        (-1.5, 100, 59.99, 0.25099240047125826943)
    ])
    def test_geometric_convergence(self, a, b, x, result):
        """Check the regime where the series converges geometrically."""
        # Test the region where we are relying on the ratio of
        #
        # (|a| + 1) * |x| / |b|
        #
        # being small. Desired answers computed using Mpmath
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)
    @pytest.mark.parametrize('a, b, x, result', [
        (-1, 1, 1.5, -0.5),
        (-10, 1, 1.5, 0.41801777430943080357),
        (-25, 1, 1.5, 0.25114491646037839809),
        (-50, 1, 1.5, -0.25683643975194756115),
        (-80, 1, 1.5, -0.24554329325751503601),
        (-150, 1, 1.5, -0.173364795515420454496),
    ])
    def test_a_negative_integer(self, a, b, x, result):
        """When a is a negative integer, the series terminates (polynomial case)."""
        # Desired answers computed using Mpmath.
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=2e-14)
    @pytest.mark.parametrize('a, b, x, expected', [
        (0.01, 150, -4, 0.99973683897677527773),        # gh-3492
        (1, 5, 0.01, 1.0020033381011970966),            # gh-3593
        (50, 100, 0.01, 1.0050126452421463411),         # gh-3593
        (1, 0.3, -1e3, -7.011932249442947651455e-04),   # gh-14149
        (1, 0.3, -1e4, -7.001190321418937164734e-05),   # gh-14149
        (9, 8.5, -350, -5.224090831922378361082e-20),   # gh-17120
        (9, 8.5, -355, -4.595407159813368193322e-20),   # gh-17120
        (75, -123.5, 15, 3.425753920814889017493e+06),
    ])
    def test_assorted_cases(self, a, b, x, expected):
        """Regression values for assorted historical bug reports."""
        # Expected values were computed with mpmath.hyp1f1(a, b, x).
        assert_allclose(sc.hyp1f1(a, b, x), expected, atol=0, rtol=1e-14)
def test_a_neg_int_and_b_equal_x(self):
# This is a case where the Boost wrapper will call hypergeometric_pFq
# instead of hypergeometric_1F1. When we use a version of Boost in
# which https://github.com/boostorg/math/issues/833 is fixed, this
# test case can probably be moved into test_assorted_cases.
# The expected value was computed with mpmath.hyp1f1(a, b, x).
a = -10.0
b = 2.5
x = 2.5
expected = 0.0365323664364104338721
computed = sc.hyp1f1(a, b, x)
assert_allclose(computed, expected, atol=0, rtol=1e-13)
    @pytest.mark.parametrize('a, b, x, desired', [
        (-1, -2, 2, 2),
        (-1, -4, 10, 3.5),
        (-2, -2, 1, 2.5)
    ])
    def test_gh_11099(self, a, b, x, desired):
        """Negative-integer a and b combinations from gh-11099."""
        # All desired results computed using Mpmath
        assert sc.hyp1f1(a, b, x) == desired
    @pytest.mark.parametrize('a', [-3, -2])
    def test_x_zero_a_and_b_neg_ints_and_a_ge_b(self, a):
        """hyp1f1(a, b, 0) == 1 for negative integers a, b with a >= b."""
        assert sc.hyp1f1(a, -3, 0) == 1
    # In the following tests with complex z, the reference values
    # were computed with mpmath.hyp1f1(a, b, z), and verified with
    # Wolfram Alpha Hypergeometric1F1(a, b, z), except for the
    # case a=0.1, b=1, z=7-24j, where Wolfram Alpha reported
    # "Standard computation time exceeded".  That reference value
    # was confirmed in an online Matlab session, with the commands
    #
    #  > format long
    #  > hypergeom(0.1, 1, 7-24i)
    #  ans =
    #   -3.712349651834209 + 4.554636556672912i
    #
    @pytest.mark.parametrize(
        'a, b, z, ref',
        [(-0.25, 0.5, 1+2j, 1.1814553180903435-1.2792130661292984j),
         (0.25, 0.5, 1+2j, 0.24636797405707597+1.293434354945675j),
         (25, 1.5, -2j, -516.1771262822523+407.04142751922024j),
         (12, -1.5, -10+20j, -5098507.422706547-1341962.8043508842j),
         # xfail: known-inaccurate regions of the complex implementation
         pytest.param(
             10, 250, 10-15j, 1.1985998416598884-0.8613474402403436j,
             marks=pytest.mark.xfail,
         ),
         pytest.param(
             0.1, 1, 7-24j, -3.712349651834209+4.554636556672913j,
             marks=pytest.mark.xfail,
         )
         ],
    )
    def test_complex_z(self, a, b, z, ref):
        """Spot-check hyp1f1 with complex z against mpmath reference values."""
        h = sc.hyp1f1(a, b, z)
        assert_allclose(h, ref, rtol=4e-15)
    # The "legacy edge cases" mentioned in the comments in the following
    # tests refers to the behavior of hyp1f1(a, b, x) when b is a nonpositive
    # integer.  In some subcases, the behavior of SciPy does not match that
    # of Boost (1.81+), mpmath and Mathematica (via Wolfram Alpha online).
    # If the handling of these edges cases is changed to agree with those
    # libraries, these test will have to be updated.

    @pytest.mark.parametrize('b', [0, -1, -5])
    def test_legacy_case1(self, b):
        """Legacy edge case: hyp1f1(0, n, x) returns inf for n <= 0 in SciPy."""
        # Test results of hyp1f1(0, n, x) for n <= 0.
        # This is a legacy edge case.
        # Boost (versions greater than 1.80), Mathematica (via Wolfram Alpha
        # online) and mpmath all return 1 in this case, but SciPy's hyp1f1
        # returns inf.
        assert_equal(sc.hyp1f1(0, b, [-1.5, 0, 1.5]), [np.inf, np.inf, np.inf])
def test_legacy_case2(self):
# This is a legacy edge case.
# In software such as boost (1.81+), mpmath and Mathematica,
# the value is 1.
assert sc.hyp1f1(-4, -3, 0) == np.inf
| TestHyp1f1 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 416379,
"end": 418354
} | class ____(sgqlc.types.Interface):
"""Represents an owner of a project (beta)."""
__schema__ = github_schema
__field_names__ = ("id", "project_v2", "projects_v2")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
project_v2 = sgqlc.types.Field(
"ProjectV2",
graphql_name="projectV2",
args=sgqlc.types.ArgDict((("number", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="number", default=None)),)),
)
"""Find a project by number.
Arguments:
* `number` (`Int!`): The project number.
"""
projects_v2 = sgqlc.types.Field(
sgqlc.types.non_null("ProjectV2Connection"),
graphql_name="projectsV2",
args=sgqlc.types.ArgDict(
(
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
("order_by", sgqlc.types.Arg(ProjectV2Order, graphql_name="orderBy", default={"field": "NUMBER", "direction": "DESC"})),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of projects under the owner.
Arguments:
* `query` (`String`): A project to search for under the the owner.
* `order_by` (`ProjectV2Order`): How to order the returned
projects. (default: `{field: NUMBER, direction: DESC}`)
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
| ProjectV2Owner |
python | dagster-io__dagster | python_modules/libraries/dagster-sling/dagster_sling/components/sling_replication_collection/component.py | {
"start": 2558,
"end": 4241
} | class ____(Resolvable, BaseModel):
"""Properties of a Sling connection resource."""
# each connection type supports a variety of different properties
model_config = ConfigDict(extra="allow")
type: str = Field(
description="Type of the source connection, must match the Sling connection types. Use 'file' for local storage."
)
connection_string: Optional[str] = Field(
description="The optional connection string for the source database, if not using keyword arguments.",
default=None,
)
def resolve_connections(
    context: ResolutionContext,
    connections: Mapping[str, SlingConnectionResourcePropertiesModel],
) -> list[SlingConnectionResource]:
    """Resolve a mapping of connection name -> connection properties into
    ``SlingConnectionResource`` objects.

    The mapping key becomes the resource's ``name``; all other properties are
    resolved through the context and forwarded as keyword arguments. Plain
    dicts are accepted as well as pydantic models.
    """
    return [
        SlingConnectionResource(
            name=name,
            **context.resolve_value(
                connection if isinstance(connection, dict) else connection.model_dump()
            ),
        )
        for name, connection in connections.items()
    ]
# Annotated alias: in YAML/config this field is written as a mapping of
# connection name -> properties, and is resolved into a list of
# SlingConnectionResource via resolve_connections.
ResolvedSlingConnections: TypeAlias = Annotated[
    list[SlingConnectionResource],
    Resolver(
        resolve_connections, model_field_type=Mapping[str, SlingConnectionResourcePropertiesModel]
    ),
]
def resolve_resource(
    context: ResolutionContext,
    sling,
) -> Optional[SlingResource]:
    """Resolve the deprecated ``sling`` field into a ``SlingResource``.

    Emits a deprecation warning when the field is set; returns None when it
    is absent/falsy.
    """
    if sling:
        deprecation_warning(
            "The `sling` field is deprecated, use `connections` instead. This field will be removed in a future release.",
            "1.11.1",
        )
    return SlingResource(**context.resolve_value(sling.model_dump())) if sling else None
@public
@scaffold_with(SlingReplicationComponentScaffolder)
@dataclass
| SlingConnectionResourcePropertiesModel |
python | networkx__networkx | networkx/generators/tests/test_interval_graph.py | {
"start": 229,
"end": 4277
} | class ____:
"""Unit tests for :func:`networkx.generators.interval_graph.interval_graph`"""
def test_empty(self):
"""Tests for trivial case of empty input"""
assert len(interval_graph([])) == 0
    def test_interval_graph_check_invalid(self):
        """Tests for conditions that raise Exceptions"""
        # None entries or non-sequence elements -> TypeError
        invalids_having_none = [None, (1, 2)]
        with pytest.raises(TypeError):
            interval_graph(invalids_having_none)

        # sets are unordered, hence not valid intervals -> TypeError
        invalids_having_set = [{1, 2}]
        with pytest.raises(TypeError):
            interval_graph(invalids_having_set)

        # sequences must have exactly two elements -> TypeError
        invalids_having_seq_but_not_length2 = [(1, 2, 3)]
        with pytest.raises(TypeError):
            interval_graph(invalids_having_seq_but_not_length2)

        # start > end is an invalid interval -> ValueError
        invalids_interval = [[3, 2]]
        with pytest.raises(ValueError):
            interval_graph(invalids_interval)
    def test_interval_graph_0(self):
        """Two overlapping intervals produce a single edge."""
        intervals = [(1, 2), (1, 3)]

        expected_graph = nx.Graph()
        expected_graph.add_edge(*intervals)

        actual_g = interval_graph(intervals)

        assert set(actual_g.nodes) == set(expected_graph.nodes)
        assert edges_equal(expected_graph, actual_g)
    def test_interval_graph_1(self):
        """A covering interval connects to every other; adjacent closed
        intervals that share an endpoint also connect."""
        intervals = [(1, 2), (2, 3), (3, 4), (1, 4)]

        expected_graph = nx.Graph()
        expected_graph.add_nodes_from(intervals)
        e1 = ((1, 4), (1, 2))
        e2 = ((1, 4), (2, 3))
        e3 = ((1, 4), (3, 4))
        e4 = ((3, 4), (2, 3))
        e5 = ((1, 2), (2, 3))
        expected_graph.add_edges_from([e1, e2, e3, e4, e5])

        actual_g = interval_graph(intervals)

        assert set(actual_g.nodes) == set(expected_graph.nodes)
        assert edges_equal(expected_graph, actual_g)
    def test_interval_graph_2(self):
        """Pairwise disjoint intervals yield a graph with no edges; both
        tuples and lists are accepted as intervals."""
        intervals = [(1, 2), [3, 5], [6, 8], (9, 10)]

        expected_graph = nx.Graph()
        expected_graph.add_nodes_from([(1, 2), (3, 5), (6, 8), (9, 10)])

        actual_g = interval_graph(intervals)

        assert set(actual_g.nodes) == set(expected_graph.nodes)
        assert edges_equal(expected_graph, actual_g)
    def test_interval_graph_3(self):
        """Three mutually overlapping intervals form a triangle; float
        endpoints are supported."""
        intervals = [(1, 4), [3, 5], [2.5, 4]]

        expected_graph = nx.Graph()
        expected_graph.add_nodes_from([(1, 4), (3, 5), (2.5, 4)])
        e1 = ((1, 4), (3, 5))
        e2 = ((1, 4), (2.5, 4))
        e3 = ((3, 5), (2.5, 4))
        expected_graph.add_edges_from([e1, e2, e3])

        actual_g = interval_graph(intervals)

        assert set(actual_g.nodes) == set(expected_graph.nodes)
        assert edges_equal(expected_graph, actual_g)
def test_interval_graph_4(self):
"""test all possible overlaps"""
intervals = [
(0, 2),
(-2, -1),
(-2, 0),
(-2, 1),
(-2, 2),
(-2, 3),
(0, 1),
(0, 2),
(0, 3),
(1, 2),
(1, 3),
(2, 3),
(3, 4),
]
expected_graph = nx.Graph()
expected_graph.add_nodes_from(intervals)
expected_nbrs = {
(-2, 0),
(-2, 1),
(-2, 2),
(-2, 3),
(0, 1),
(0, 2),
(0, 3),
(1, 2),
(1, 3),
(2, 3),
}
actual_g = nx.interval_graph(intervals)
actual_nbrs = nx.neighbors(actual_g, (0, 2))
assert set(actual_nbrs) == expected_nbrs
    def test_interval_graph_5(self):
        """this test is to see that an interval supports infinite number"""
        intervals = {(-math.inf, 0), (-1, -1), (0.5, 0.5), (1, 1), (1, math.inf)}

        expected_graph = nx.Graph()
        expected_graph.add_nodes_from(intervals)
        # only the pairs sharing a finite endpoint overlap
        e1 = ((-math.inf, 0), (-1, -1))
        e2 = ((1, 1), (1, math.inf))
        expected_graph.add_edges_from([e1, e2])

        actual_g = interval_graph(intervals)

        assert set(actual_g.nodes) == set(expected_graph.nodes)
        assert edges_equal(expected_graph, actual_g)
| TestIntervalGraph |
python | huggingface__transformers | src/transformers/models/cvt/modeling_cvt.py | {
"start": 20375,
"end": 23971
} | class ____(CvtPreTrainedModel):
    def __init__(self, config):
        """Build the CvT backbone plus a linear image-classification head."""
        super().__init__(config)

        self.num_labels = config.num_labels
        # backbone without pooling; aggregation happens in forward
        self.cvt = CvtModel(config, add_pooling_layer=False)
        # normalizes the final-stage features (last entry of embed_dim)
        self.layernorm = nn.LayerNorm(config.embed_dim[-1])
        # Classifier head; identity when num_labels == 0 (feature extraction)
        self.classifier = (
            nn.Linear(config.embed_dim[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.cvt(
pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
cls_token = outputs[1]
if self.config.cls_token[-1]:
sequence_output = self.layernorm(cls_token)
else:
batch_size, num_channels, height, width = sequence_output.shape
# rearrange "b c h w -> b (h w) c"
sequence_output = sequence_output.view(batch_size, num_channels, height * width).permute(0, 2, 1)
sequence_output = self.layernorm(sequence_output)
sequence_output_mean = sequence_output.mean(dim=1)
logits = self.classifier(sequence_output_mean)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.config.num_labels == 1:
self.config.problem_type = "regression"
elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.config.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
__all__ = ["CvtForImageClassification", "CvtModel", "CvtPreTrainedModel"]
| CvtForImageClassification |
python | pexpect__pexpect | tests/test_env.py | {
"start": 1613,
"end": 3763
} | class ____(PexpectTestCase.PexpectTestCase):
" tests for the env argument to pexpect.spawn and pexpect.run "
def test_run_uses_env(self):
" pexpect.run uses env argument when running child process "
script_name = 'run_uses_env.sh'
environ = {'PEXPECT_TEST_KEY': 'pexpect test value'}
with example_script(script_name, '$PEXPECT_TEST_KEY') as script_dir:
script = os.path.join(script_dir, script_name)
out = pexpect.run(script, env=environ)
self.assertEqual(out.rstrip(), b'pexpect test value')
def test_spawn_uses_env(self):
" pexpect.spawn uses env argument when running child process "
script_name = 'spawn_uses_env.sh'
environ = {'PEXPECT_TEST_KEY': 'pexpect test value'}
with example_script(script_name, '$PEXPECT_TEST_KEY') as script_dir:
script = os.path.join(script_dir, script_name)
child = pexpect.spawn(script, env=environ)
out = child.readline()
child.expect(pexpect.EOF)
self.assertEqual(child.exitstatus, 0)
self.assertEqual(out.rstrip(), b'pexpect test value')
def test_run_uses_env_path(self):
" pexpect.run uses binary from PATH when given in env argument "
script_name = 'run_uses_env_path.sh'
with example_script(script_name) as script_dir:
out = pexpect.run(script_name, env={'PATH': script_dir})
self.assertEqual(out.rstrip(), b'success')
def test_run_uses_env_path_over_path(self):
" pexpect.run uses PATH from env over os.environ "
script_name = 'run_uses_env_path_over_path.sh'
with example_script(script_name, output='failure') as wrong_dir:
with example_script(script_name) as right_dir:
orig_path = os.environ['PATH']
os.environ['PATH'] = wrong_dir
try:
out = pexpect.run(script_name, env={'PATH': right_dir})
finally:
os.environ['PATH'] = orig_path
self.assertEqual(out.rstrip(), b'success')
if __name__ == '__main__':
unittest.main()
| TestCaseEnv |
python | jina-ai__jina | jina/proto/docarray_v1/pb/jina_pb2_grpc.py | {
"start": 20217,
"end": 21252
} | class ____(object):
"""*
jina gRPC service to trigger a snapshot at the Executor Runtime.
"""
def snapshot_status(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_JinaExecutorSnapshotProgressServicer_to_server(servicer, server):
rpc_method_handlers = {
'snapshot_status': grpc.unary_unary_rpc_method_handler(
servicer.snapshot_status,
request_deserializer=jina__pb2.SnapshotId.FromString,
response_serializer=jina__pb2.SnapshotStatusProto.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'jina.JinaExecutorSnapshotProgress', rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
| JinaExecutorSnapshotProgressServicer |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 16494,
"end": 17278
} | class ____(VarArray):
"""
Handles an array of variable-length arrays, i.e. where *arraysize*
ends in '*'.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == "":
return ma.array([]), False
parts = self._splitter(value, config, pos)
items = self._base._items
parse_parts = self._base.parse_parts
if len(parts) % items != 0:
vo_raise(E02, (items, len(parts)), config, pos)
result = []
result_mask = []
for i in range(0, len(parts), items):
value, mask = parse_parts(parts[i : i + items], config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
| ArrayVarArray |
python | ray-project__ray | python/ray/train/v2/_internal/callbacks/datasets.py | {
"start": 1562,
"end": 6309
} | class ____(WorkerGroupCallback, ControllerCallback):
"""A callback for managing Ray Datasets for the worker group."""
def __init__(self, train_run_context: TrainRunContext):
self._datasets = train_run_context.datasets
self._data_config = copy.deepcopy(train_run_context.dataset_config)
self._scaling_config = train_run_context.scaling_config
self._coordinator_actors: List[ActorHandle] = []
self._shutdown_refs: List[ObjectRef] = []
# Capture the current DataContext to propagate it to
# the Train workers later.
# The propagation works in the following way:
# 1. This callback is created when user create the Trainer.
# 2. Then this callback will be passed to the Controller actor.
# 3. Lastly, when the worker group is initialized, the Controller
# will call the `after_worker_group_start` callback to propagate
# the DataContext to Train workers.
self._data_context = copy.deepcopy(DataContext.get_current())
def get_train_total_resources(
self, scaling_config: ray.train.ScalingConfig
) -> Dict[str, float]:
"""Return the resources reserved for training, so that Data can exclude
these resources logically from its available pool."""
return scaling_config.total_resources
def _get_coordinator_actors(
self, ds_iterators_per_rank: List[Dict[str, DataIterator]]
) -> List[ActorHandle]:
"""
Returns a list of each unique SplitCoordinator actor handle given the iterators per rank.
These handles will later be used to call shutdown on the actors.
"""
# Note: Currently, we only need to check rank 0 for split iterators.
# In the future, if datasets can be split across only a subset of ranks,
# we may need to process all ranks.
rank_0_iterators = ds_iterators_per_rank[0]
coord_actors = [
iterator._coord_actor
for iterator in rank_0_iterators.values()
if isinstance(iterator, StreamSplitDataIterator)
]
return coord_actors
def _shutdown_data_executors(self):
"""Eagerly shutdown the data executors of the split coordinator actors."""
self._shutdown_refs = [
coord.shutdown_executor.remote() for coord in self._coordinator_actors
]
# --------------------------
# WorkerGroupCallback
# --------------------------
def before_init_train_context(
self, workers: List[Worker]
) -> Dict[str, List[DatasetShardProvider]]:
world_size = len(workers)
worker_node_ids = [worker.metadata.node_id for worker in workers]
# Notify the DataConfig about the total resources reserved for training.
total_train_resources = self.get_train_total_resources(self._scaling_config)
self._data_config.set_train_total_resources(
total_train_resources.get("CPU", 0), total_train_resources.get("GPU", 0)
)
datasets = {k: v() if callable(v) else v for k, v in self._datasets.items()}
ds_iterators_per_rank = self._data_config.configure(
datasets=datasets,
world_size=world_size,
worker_handles=None,
worker_node_ids=worker_node_ids,
)
assert len(ds_iterators_per_rank) == world_size
self._coordinator_actors = self._get_coordinator_actors(ds_iterators_per_rank)
shard_providers_per_rank = [
RayDatasetShardProvider(ds_iterators=ds_iterators_per_rank[rank])
for rank in range(world_size)
]
return {"dataset_shard_provider": shard_providers_per_rank}
def after_worker_group_start(self, worker_group: WorkerGroup):
# Propagate DataContext
def _propagate_data_context(ctx: DataContext):
DataContext._set_current(ctx)
worker_group.execute(
_propagate_data_context,
self._data_context,
)
def after_worker_group_shutdown(
self, worker_group_context: WorkerGroupContext
) -> None:
self._shutdown_data_executors()
def after_worker_group_abort(
self, worker_group_context: WorkerGroupContext
) -> None:
self._shutdown_data_executors()
# --------------------------
# ControllerCallback
# --------------------------
def before_controller_shutdown(self):
try:
ray.get(self._shutdown_refs, timeout=5)
except GetTimeoutError:
logger.error("Ray Data executor shutdown task timed out after 5 seconds.")
except Exception:
logger.exception("Failed to gracefully terminate Ray Data executors.")
| DatasetsSetupCallback |
python | walkccc__LeetCode | solutions/538. Convert BST to Greater Tree/538.py | {
"start": 0,
"end": 370
} | class ____:
def convertBST(self, root: TreeNode | None) -> TreeNode | None:
prefix = 0
def reversedInorder(root: TreeNode | None) -> None:
nonlocal prefix
if not root:
return
reversedInorder(root.right)
prefix += root.val
root.val = prefix
reversedInorder(root.left)
reversedInorder(root)
return root
| Solution |
python | pallets__jinja | src/jinja2/idtracking.py | {
"start": 699,
"end": 5079
} | class ____:
def __init__(
self, parent: t.Optional["Symbols"] = None, level: int | None = None
) -> None:
if level is None:
if parent is None:
level = 0
else:
level = parent.level + 1
self.level: int = level
self.parent = parent
self.refs: dict[str, str] = {}
self.loads: dict[str, t.Any] = {}
self.stores: set[str] = set()
def analyze_node(self, node: nodes.Node, **kwargs: t.Any) -> None:
visitor = RootVisitor(self)
visitor.visit(node, **kwargs)
def _define_ref(self, name: str, load: tuple[str, str | None] | None = None) -> str:
ident = f"l_{self.level}_{name}"
self.refs[name] = ident
if load is not None:
self.loads[ident] = load
return ident
def find_load(self, target: str) -> t.Any | None:
if target in self.loads:
return self.loads[target]
if self.parent is not None:
return self.parent.find_load(target)
return None
def find_ref(self, name: str) -> str | None:
if name in self.refs:
return self.refs[name]
if self.parent is not None:
return self.parent.find_ref(name)
return None
def ref(self, name: str) -> str:
rv = self.find_ref(name)
if rv is None:
raise AssertionError(
"Tried to resolve a name to a reference that was"
f" unknown to the frame ({name!r})"
)
return rv
def copy(self) -> "te.Self":
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.refs = self.refs.copy()
rv.loads = self.loads.copy()
rv.stores = self.stores.copy()
return rv
def store(self, name: str) -> None:
self.stores.add(name)
# If we have not see the name referenced yet, we need to figure
# out what to set it to.
if name not in self.refs:
# If there is a parent scope we check if the name has a
# reference there. If it does it means we might have to alias
# to a variable there.
if self.parent is not None:
outer_ref = self.parent.find_ref(name)
if outer_ref is not None:
self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
return
# Otherwise we can just set it to undefined.
self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
def declare_parameter(self, name: str) -> str:
self.stores.add(name)
return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
def load(self, name: str) -> None:
if self.find_ref(name) is None:
self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
def branch_update(self, branch_symbols: t.Sequence["Symbols"]) -> None:
stores: set[str] = set()
for branch in branch_symbols:
stores.update(branch.stores)
stores.difference_update(self.stores)
for sym in branch_symbols:
self.refs.update(sym.refs)
self.loads.update(sym.loads)
self.stores.update(sym.stores)
for name in stores:
target = self.find_ref(name)
assert target is not None, "should not happen"
if self.parent is not None:
outer_target = self.parent.find_ref(name)
if outer_target is not None:
self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
continue
self.loads[target] = (VAR_LOAD_RESOLVE, name)
def dump_stores(self) -> dict[str, str]:
rv: dict[str, str] = {}
node: Symbols | None = self
while node is not None:
for name in sorted(node.stores):
if name not in rv:
rv[name] = self.find_ref(name) # type: ignore
node = node.parent
return rv
def dump_param_targets(self) -> set[str]:
rv = set()
node: Symbols | None = self
while node is not None:
for target, (instr, _) in self.loads.items():
if instr == VAR_LOAD_PARAMETER:
rv.add(target)
node = node.parent
return rv
| Symbols |
python | realpython__materials | python-built-in-functions/circle.py | {
"start": 46,
"end": 408
} | class ____:
def __init__(self, radius):
self.radius = radius
self._diameter = SENTINEL
@property
def diameter(self):
if self._diameter is SENTINEL:
sleep(0.5) # Simulate a costly computation
self._diameter = self.radius * 2
return self._diameter
circle = Circle(5)
print(circle.diameter)
| Circle |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/test_given_forms.py | {
"start": 5247,
"end": 7172
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
# Set up example Company records to use as choices for
# Store.company. These must exist before creating a strategy
# for the ModelChoiceField.
cls.company_names = ("Bill's Flowers", "Jane's Sporting Goods")
for name in cls.company_names:
Company.objects.create(name=name)
@given(
choice=from_field(
forms.ModelChoiceField(queryset=Company.objects.order_by("name"))
)
)
def test_from_model_choices_field(self, choice):
assume(choice != "") # Skip the empty choice.
self.assertIsInstance(choice, int)
Company.objects.get(id=choice)
@given(
choice=from_field(
forms.ModelChoiceField(
queryset=Company.objects.order_by("name"), empty_label=None
)
)
)
def test_from_model_choices_field_no_empty_choice(self, choice):
Company.objects.get(id=choice)
@given(choice=from_field(forms.ModelChoiceField(queryset=Company.objects.none())))
def test_from_model_choices_field_empty(self, choice):
self.assertEqual(choice, "")
@given(form=from_form(StoreForm))
def test_store_form_valid(self, form):
assume(form.data["company"])
self.assertTrue(form.is_valid())
@given(
choice=from_field(
forms.ModelMultipleChoiceField(queryset=Company.objects.order_by("name"))
)
)
def test_from_model_multiple_choices_field(self, choice):
n_choices = len(choice)
self.assertEqual(n_choices, len(set(choice)))
self.assertEqual(n_choices, Company.objects.filter(pk__in=choice).count())
@given(form=from_form(MultipleCompaniesForm))
def test_multiple_companies_form_valid(self, form):
self.assertTrue(form.is_valid())
| TestFormsWithModelChoices |
python | networkx__networkx | networkx/utils/mapped_queue.py | {
"start": 2965,
"end": 10184
} | class ____:
"""The MappedQueue class implements a min-heap with removal and update-priority.
The min heap uses heapq as well as custom written _siftup and _siftdown
methods to allow the heap positions to be tracked by an additional dict
keyed by element to position. The smallest element can be popped in O(1) time,
new elements can be pushed in O(log n) time, and any element can be removed
or updated in O(log n) time. The queue cannot contain duplicate elements
and an attempt to push an element already in the queue will have no effect.
MappedQueue complements the heapq package from the python standard
library. While MappedQueue is designed for maximum compatibility with
heapq, it adds element removal, lookup, and priority update.
Parameters
----------
data : dict or iterable
Examples
--------
A `MappedQueue` can be created empty, or optionally, given a dictionary
of initial elements and priorities. The methods `push`, `pop`,
`remove`, and `update` operate on the queue.
>>> colors_nm = {"red": 665, "blue": 470, "green": 550}
>>> q = MappedQueue(colors_nm)
>>> q.remove("red")
>>> q.update("green", "violet", 400)
>>> q.push("indigo", 425)
True
>>> [q.pop().element for i in range(len(q.heap))]
['violet', 'indigo', 'blue']
A `MappedQueue` can also be initialized with a list or other iterable. The priority is assumed
to be the sort order of the items in the list.
>>> q = MappedQueue([916, 50, 4609, 493, 237])
>>> q.remove(493)
>>> q.update(237, 1117)
>>> [q.pop() for i in range(len(q.heap))]
[50, 916, 1117, 4609]
An exception is raised if the elements are not comparable.
>>> q = MappedQueue([100, "a"])
Traceback (most recent call last):
...
TypeError: '<' not supported between instances of 'int' and 'str'
To avoid the exception, use a dictionary to assign priorities to the elements.
>>> q = MappedQueue({100: 0, "a": 1})
References
----------
.. [1] Cormen, T. H., Leiserson, C. E., Rivest, R. L., & Stein, C. (2001).
Introduction to algorithms second edition.
.. [2] Knuth, D. E. (1997). The art of computer programming (Vol. 3).
Pearson Education.
"""
def __init__(self, data=None):
"""Priority queue class with updatable priorities."""
if data is None:
self.heap = []
elif isinstance(data, dict):
self.heap = [_HeapElement(v, k) for k, v in data.items()]
else:
self.heap = list(data)
self.position = {}
self._heapify()
def _heapify(self):
"""Restore heap invariant and recalculate map."""
heapq.heapify(self.heap)
self.position = {elt: pos for pos, elt in enumerate(self.heap)}
if len(self.heap) != len(self.position):
raise AssertionError("Heap contains duplicate elements")
def __len__(self):
return len(self.heap)
def push(self, elt, priority=None):
"""Add an element to the queue."""
if priority is not None:
elt = _HeapElement(priority, elt)
# If element is already in queue, do nothing
if elt in self.position:
return False
# Add element to heap and dict
pos = len(self.heap)
self.heap.append(elt)
self.position[elt] = pos
# Restore invariant by sifting down
self._siftdown(0, pos)
return True
def pop(self):
"""Remove and return the smallest element in the queue."""
# Remove smallest element
elt = self.heap[0]
del self.position[elt]
# If elt is last item, remove and return
if len(self.heap) == 1:
self.heap.pop()
return elt
# Replace root with last element
last = self.heap.pop()
self.heap[0] = last
self.position[last] = 0
# Restore invariant by sifting up
self._siftup(0)
# Return smallest element
return elt
def update(self, elt, new, priority=None):
"""Replace an element in the queue with a new one."""
if priority is not None:
new = _HeapElement(priority, new)
# Replace
pos = self.position[elt]
self.heap[pos] = new
del self.position[elt]
self.position[new] = pos
# Restore invariant by sifting up
self._siftup(pos)
def remove(self, elt):
"""Remove an element from the queue."""
# Find and remove element
try:
pos = self.position[elt]
del self.position[elt]
except KeyError:
# Not in queue
raise
# If elt is last item, remove and return
if pos == len(self.heap) - 1:
self.heap.pop()
return
# Replace elt with last element
last = self.heap.pop()
self.heap[pos] = last
self.position[last] = pos
# Restore invariant by sifting up
self._siftup(pos)
def _siftup(self, pos):
"""Move smaller child up until hitting a leaf.
Built to mimic code for heapq._siftup
only updating position dict too.
"""
heap, position = self.heap, self.position
end_pos = len(heap)
startpos = pos
newitem = heap[pos]
# Shift up the smaller child until hitting a leaf
child_pos = (pos << 1) + 1 # start with leftmost child position
while child_pos < end_pos:
# Set child_pos to index of smaller child.
child = heap[child_pos]
right_pos = child_pos + 1
if right_pos < end_pos:
right = heap[right_pos]
if not child < right:
child = right
child_pos = right_pos
# Move the smaller child up.
heap[pos] = child
position[child] = pos
pos = child_pos
child_pos = (pos << 1) + 1
# pos is a leaf position. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
while pos > 0:
parent_pos = (pos - 1) >> 1
parent = heap[parent_pos]
if not newitem < parent:
break
heap[pos] = parent
position[parent] = pos
pos = parent_pos
heap[pos] = newitem
position[newitem] = pos
def _siftdown(self, start_pos, pos):
"""Restore invariant. keep swapping with parent until smaller.
Built to mimic code for heapq._siftdown
only updating position dict too.
"""
heap, position = self.heap, self.position
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > start_pos:
parent_pos = (pos - 1) >> 1
parent = heap[parent_pos]
if not newitem < parent:
break
heap[pos] = parent
position[parent] = pos
pos = parent_pos
heap[pos] = newitem
position[newitem] = pos
| MappedQueue |
python | scipy__scipy | scipy/special/tests/test_exponential_integrals.py | {
"start": 105,
"end": 802
} | class ____:
def test_branch_cut(self):
assert np.isnan(sc.exp1(-1))
assert sc.exp1(complex(-1, 0)).imag == (
-sc.exp1(complex(-1, -0.0)).imag
)
assert_allclose(
sc.exp1(complex(-1, 0)),
sc.exp1(-1 + 1e-20j),
atol=0,
rtol=1e-15
)
assert_allclose(
sc.exp1(complex(-1, -0.0)),
sc.exp1(-1 - 1e-20j),
atol=0,
rtol=1e-15
)
def test_834(self):
# Regression test for #834
a = sc.exp1(-complex(19.9999990))
b = sc.exp1(-complex(19.9999991))
assert_allclose(a.imag, b.imag, atol=0, rtol=1e-15)
| TestExp1 |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-oci-genai/llama_index/llms/oci_genai/utils.py | {
"start": 466,
"end": 7301
} | class ____(Enum):
"""OCI authentication types as enumerator."""
API_KEY = 1
SECURITY_TOKEN = 2
INSTANCE_PRINCIPAL = 3
RESOURCE_PRINCIPAL = 4
CUSTOM_ENDPOINT_PREFIX = "ocid1.generativeaiendpoint"
COMPLETION_MODELS = {} # completion endpoint has been deprecated
CHAT_MODELS = {
"cohere.command-a-03-2025": 256000,
"cohere.command-r-16k": 16000,
"cohere.command-r-plus": 128000,
"cohere.command-r-08-2024": 128000,
"cohere.command-r-plus-08-2024": 128000,
"meta.llama-3-70b-instruct": 8192,
"meta.llama-3.1-70b-instruct": 128000,
"meta.llama-3.1-405b-instruct": 128000,
"meta.llama-3.2-90b-vision-instruct": 128000,
"meta.llama-3.3-70b-instruct": 128000,
"meta.llama-4-scout-17b-16e-instruct": 192000,
"meta.llama-4-maverick-17b-128e-instruct-fp8": 512000,
"xai.grok-code-fast-1": 256000,
"xai.grok-4-fast-reasoning": 2000000,
"xai.grok-4-fast-non-reasoning": 2000000,
"xai.grok-4": 128000,
"xai.grok-3": 131072,
"xai.grok-3-fast": 131072,
"xai.grok-3-mini": 131072,
"xai.grok-3-mini-fast": 131072,
}
OCIGENAI_LLMS = {**COMPLETION_MODELS, **CHAT_MODELS}
JSON_TO_PYTHON_TYPES = {
"string": "str",
"number": "float",
"boolean": "bool",
"integer": "int",
"array": "List",
"object": "Dict",
}
def _format_oci_tool_calls(
tool_calls: Optional[List[Any]] = None,
) -> List[Dict]:
"""
Formats an OCI GenAI API response into the tool call format used in LlamaIndex.
Handles both dictionary and object formats.
"""
if not tool_calls:
return []
formatted_tool_calls = []
for tool_call in tool_calls:
# Handle both object and dict formats
if isinstance(tool_call, dict):
name = tool_call.get("name", tool_call.get("functionName"))
parameters = tool_call.get(
"parameters", tool_call.get("functionParameters")
)
else:
name = getattr(tool_call, "name", getattr(tool_call, "functionName", None))
parameters = getattr(
tool_call, "parameters", getattr(tool_call, "functionParameters", None)
)
if name and parameters:
formatted_tool_calls.append(
{
"toolUseId": uuid.uuid4().hex[:],
"name": name,
"input": json.dumps(parameters)
if isinstance(parameters, dict)
else parameters,
}
)
return formatted_tool_calls
def create_client(auth_type, auth_profile, auth_file_location, service_endpoint):
"""
OCI Gen AI client.
Args:
auth_type (Optional[str]): Authentication type, can be: API_KEY (default), SECURITY_TOKEN, INSTANCE_PRINCIPAL, RESOURCE_PRINCIPAL. If not specified, API_KEY will be used
auth_profile (Optional[str]): The name of the profile in ~/.oci/config. If not specified , DEFAULT will be used
auth_file_location (Optional[str]): Path to the config file. If not specified, ~/.oci/config will be used
service_endpoint (str): service endpoint url, e.g., "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
"""
try:
import oci
client_kwargs = {
"config": {},
"signer": None,
"service_endpoint": service_endpoint,
"retry_strategy": oci.retry.DEFAULT_RETRY_STRATEGY,
"timeout": (10, 240), # default timeout config for OCI Gen AI service
}
if auth_type == OCIAuthType(1).name:
client_kwargs["config"] = oci.config.from_file(
file_location=auth_file_location, profile_name=auth_profile
)
client_kwargs.pop("signer", None)
elif auth_type == OCIAuthType(2).name:
def make_security_token_signer(oci_config): # type: ignore[no-untyped-def]
pk = oci.signer.load_private_key_from_file(
oci_config.get("key_file"), None
)
with open(oci_config.get("security_token_file"), encoding="utf-8") as f:
st_string = f.read()
return oci.auth.signers.SecurityTokenSigner(st_string, pk)
client_kwargs["config"] = oci.config.from_file(
file_location=auth_file_location, profile_name=auth_profile
)
client_kwargs["signer"] = make_security_token_signer(
oci_config=client_kwargs["config"]
)
elif auth_type == OCIAuthType(3).name:
client_kwargs["signer"] = (
oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
)
elif auth_type == OCIAuthType(4).name:
client_kwargs["signer"] = oci.auth.signers.get_resource_principals_signer()
else:
raise ValueError(
f"Please provide valid value to auth_type, {auth_type} is not valid."
)
return oci.generative_ai_inference.GenerativeAiInferenceClient(**client_kwargs)
except ImportError as ex:
raise ModuleNotFoundError(
"Could not import oci python package. "
"Please make sure you have the oci package installed."
) from ex
except Exception as e:
raise ValueError(
"""Could not authenticate with OCI client.
If INSTANCE_PRINCIPAL or RESOURCE_PRINCIPAL is used, please check the specified
auth_profile, auth_file_location and auth_type are valid.""",
e,
) from e
def get_serving_mode(model_id: str) -> Any:
try:
from oci.generative_ai_inference import models
except ImportError as ex:
raise ModuleNotFoundError(
"Could not import oci python package. "
"Please make sure you have the oci package installed."
) from ex
if model_id.startswith(CUSTOM_ENDPOINT_PREFIX):
serving_mode = models.DedicatedServingMode(endpoint_id=model_id)
else:
serving_mode = models.OnDemandServingMode(model_id=model_id)
return serving_mode
def get_completion_generator() -> Any:
try:
from oci.generative_ai_inference import models
except ImportError as ex:
raise ModuleNotFoundError(
"Could not import oci python package. "
"Please make sure you have the oci package installed."
) from ex
return models.GenerateTextDetails
def get_chat_generator() -> Any:
try:
from oci.generative_ai_inference import models
except ImportError as ex:
raise ModuleNotFoundError(
"Could not import oci python package. "
"Please make sure you have the oci package installed."
) from ex
return models.ChatDetails
| OCIAuthType |
python | realpython__materials | python-http-server/https/self_signed.py | {
"start": 228,
"end": 1471
} | class ____:
host: str = "0.0.0.0"
bits: int = 2048
country: str = "CA"
state: str = "British Columbia"
locality: str = "Vancouver"
organization: str = "Real Python"
organizational_unit: str = "Development"
serial_number: int = 1
expires_on: int = 365 * 24 * 60 * 60
@property
def path(self) -> Path:
key_pair = PKey()
key_pair.generate_key(TYPE_RSA, self.bits)
certificate = X509()
subject = certificate.get_subject()
subject.CN = self.host
subject.C = self.country
subject.ST = self.state
subject.L = self.locality
subject.O = self.organization
subject.OU = self.organizational_unit
certificate.set_serial_number(self.serial_number)
certificate.gmtime_adj_notBefore(0)
certificate.gmtime_adj_notAfter(self.expires_on)
certificate.set_issuer(subject)
certificate.set_pubkey(key_pair)
certificate.sign(key_pair, "sha256")
with tempfile.NamedTemporaryFile(delete=False) as file:
file.write(dump_privatekey(FILETYPE_PEM, key_pair))
file.write(dump_certificate(FILETYPE_PEM, certificate))
return Path(file.name)
| SelfSignedCertificate |
python | getsentry__sentry | src/sentry/search/utils.py | {
"start": 11759,
"end": 29741
} | class ____(Enum):
DATE = 0
SEMVER = 1
def get_latest_release(
projects: Sequence[Project | int],
environments: Sequence[Environment] | None,
organization_id: int | None = None,
adopted=False,
) -> list[str]:
if organization_id is None:
project = projects[0]
if isinstance(project, Project):
organization_id = project.organization_id
else:
return []
# Convert projects to ids so that we can work with them more easily
project_ids = [project.id if isinstance(project, Project) else project for project in projects]
semver_project_ids = []
date_project_ids = []
for project_id in project_ids:
if follows_semver_versioning_scheme(organization_id, project_id):
semver_project_ids.append(project_id)
else:
date_project_ids.append(project_id)
versions: set[str] = set()
versions.update(
_run_latest_release_query(
LatestReleaseOrders.SEMVER,
semver_project_ids,
environments,
organization_id,
adopted=adopted,
)
)
versions.update(
_run_latest_release_query(
LatestReleaseOrders.DATE,
date_project_ids,
environments,
organization_id,
adopted=adopted,
)
)
if not versions:
raise Release.DoesNotExist()
return sorted(versions)
def _get_release_query_type_sql(query_type: LatestReleaseOrders, last: bool) -> tuple[str, str]:
direction = "DESC" if last else "ASC"
extra_conditions = ""
if query_type == LatestReleaseOrders.SEMVER:
rank_order_by = f"major {direction}, minor {direction}, patch {direction}, revision {direction}, CASE WHEN (prerelease = '') THEN 1 ELSE 0 END {direction}, prerelease {direction}, sr.id {direction}"
extra_conditions += " AND sr.major IS NOT NULL"
else:
rank_order_by = f"COALESCE(date_released, date_added) {direction}"
return rank_order_by, extra_conditions
def _run_latest_release_query(
query_type: LatestReleaseOrders,
project_ids: Sequence[int],
environments: Sequence[Environment] | None,
organization_id: int,
# Only include adopted releases in the results
adopted: bool = False,
) -> Sequence[str]:
if not project_ids:
return []
extra_join_conditions = ""
extra_conditions = ""
if environments:
extra_join_conditions = "AND jt.environment_id IN %s"
join_table = "sentry_releaseprojectenvironment"
else:
join_table = "sentry_release_project"
if adopted:
extra_conditions += " AND jt.adopted IS NOT NULL AND jt.unadopted IS NULL "
rank_order_by, query_type_conditions = _get_release_query_type_sql(query_type, True)
extra_conditions += query_type_conditions
# XXX: This query can be very inefficient for projects with a large (100k+)
# number of releases. To work around this, we only check 1000 releases
# ordered by highest release id, which is generally correlated with
# most recent releases for a project. This isn't guaranteed to be correct,
# since `date_released` could end up out of order, or we might be using semver.
# However, this should be close enough the majority of the time. If a project has
# > 400 newer releases that were more recently associated with the "true" most recent
# release then likely something is off.
# We might be able to remove this kind of hackery once we add retention to the release
# and related tables.
query = f"""
SELECT DISTINCT version
FROM (
SELECT sr.version, rank() OVER (
PARTITION BY jt.project_id
ORDER BY {rank_order_by}
) AS rank
FROM "sentry_release" sr
INNER JOIN (
SELECT release_id, project_id, adopted, unadopted
FROM {join_table} jt
WHERE jt.project_id IN %s
{extra_join_conditions}
ORDER BY release_id desc
LIMIT 1000
) jt on sr.id = jt.release_id
WHERE sr.organization_id = %s
AND sr.status = {ReleaseStatus.OPEN}
{extra_conditions}
) sr
WHERE rank = 1
"""
cursor = connections[router.db_for_read(Release, replica=True)].cursor()
query_args: list[int | tuple[int, ...]] = [tuple(project_ids)]
if environments:
query_args.append(tuple(e.id for e in environments))
query_args.append(organization_id)
cursor.execute(query, query_args)
return [row[0] for row in cursor.fetchall()]
def get_first_last_release_for_group(
group: Group,
query_type: LatestReleaseOrders,
last: bool,
) -> Release:
"""
Fetches the first or last release associated with a group. `query_type` determines whether we use semver or date
ordering to order the releases.
"""
direction = "DESC" if last else "ASC"
rank_order_by, extra_conditions = _get_release_query_type_sql(query_type, last)
query = f"""
SELECT sr.*
FROM sentry_release sr
INNER JOIN (
SELECT sgr.release_id
FROM sentry_grouprelease sgr
WHERE sgr.group_id = %s
ORDER BY sgr.first_seen {direction}
-- We limit the number of groupreleases we check here to handle edge cases of groups with 100k+ releases
LIMIT 1000
) sgr ON sr.id = sgr.release_id
{extra_conditions}
ORDER BY {rank_order_by}
LIMIT 1
"""
result = list(Release.objects.raw(query, [group.id]))
if not result:
raise Release.DoesNotExist
return result[0]
def parse_release(
value: str,
projects: Sequence[Project | int],
environments: Sequence[Environment] | None,
organization_id: int | None = None,
) -> list[str]:
if value == "latest":
try:
return get_latest_release(projects, environments, organization_id)
except Release.DoesNotExist:
# Should just get no results here, so return an empty release name.
return [""]
else:
return [value]
numeric_modifiers: Sequence[
tuple[str, Callable[[str, int | float], dict[str, int | float | bool]]]
] = [
(
">=",
lambda field, value: {
f"{field}_lower": value,
f"{field}_lower_inclusive": True,
},
),
(
"<=",
lambda field, value: {
f"{field}_upper": value,
f"{field}_upper_inclusive": True,
},
),
(
">",
lambda field, value: {
f"{field}_lower": value,
f"{field}_lower_inclusive": False,
},
),
(
"<",
lambda field, value: {
f"{field}_upper": value,
f"{field}_upper_inclusive": False,
},
),
]
def get_numeric_field_value(
field: str, raw_value: str, type: Callable[[str], int | float] = int
) -> dict[str, int | float | bool]:
try:
for modifier, function in numeric_modifiers:
if raw_value.startswith(modifier):
return function(field, type(str(raw_value[len(modifier) :])))
else:
return {field: type(raw_value)}
except ValueError:
msg = f'"{raw_value}" could not be converted to a number.'
raise InvalidQuery(msg)
def tokenize_query(query: str) -> dict[str, list[str]]:
"""
Tokenizes a standard Sentry search query.
Example:
>>> query = 'is:resolved foo bar tag:value'
>>> tokenize_query(query)
{
'is': ['resolved'],
'query': ['foo', 'bar'],
'tag': ['value'],
}
Has a companion implementation in static/app/utils/tokenizeSearch.tsx
"""
result = defaultdict(list)
query_params = defaultdict(list)
tokens = split_query_into_tokens(query)
for token in tokens:
if token.upper() in ["OR", "AND"] or token.strip("()") == "":
continue
state = "query"
for idx, char in enumerate(token):
next_char = token[idx + 1] if idx < len(token) - 1 else None
if idx == 0 and char in ('"', "'", ":"):
break
if char == ":":
if next_char in (":", " "):
state = "query"
else:
state = "tags"
break
query_params[state].append(token)
if "query" in query_params:
result["query"] = [format_query(query) for query in query_params["query"]]
for tag in query_params["tags"]:
key, value = format_tag(tag)
result[key].append(value)
return dict(result)
def format_tag(tag: str) -> tuple[str, str]:
"""
Splits tags on ':' and removes enclosing quotes and grouping parens if present and
returns both sides of the split as strings
Example:
>>> format_tag('user:foo')
'user', 'foo'
>>>format_tag('user:"foo bar"')
'user', 'foo bar'
"""
idx = tag.index(":")
key = remove_surrounding_quotes(tag[:idx].lstrip("("))
value = remove_surrounding_quotes(tag[idx + 1 :].rstrip(")"))
return key, value
def remove_surrounding_quotes(text: str) -> str:
length = len(text)
if length <= 1:
return text
left = 0
while left <= length / 2:
if text[left] != '"':
break
left += 1
right = length - 1
while right >= length / 2:
if text[right] != '"' or text[right - 1] == "\\":
break
right -= 1
return text[left : right + 1]
def format_query(query: str) -> str:
"""
Strips enclosing quotes and grouping parens from queries if present.
Example:
>>> format_query('"user:foo bar"')
'user:foo bar'
"""
return query.strip('"()')
def split_query_into_tokens(query: str) -> Sequence[str]:
"""
Splits query string into tokens for parsing by 'tokenize_query'.
Returns list of strigs
Rules:
Split on whitespace
Unless
- inside enclosing quotes -> 'user:"foo bar"'
- end of last word is a ':' -> 'user: foo'
Example:
>>> split_query_into_tokens('user:foo user: bar user"foo bar' foo bar) =>
['user:foo', 'user: bar', 'user"foo bar"', 'foo', 'bar']
Has a companion implementation in static/app/utils/tokenizeSearch.tsx
"""
tokens = []
token = ""
quote_enclosed = False
quote_type = None
end_of_prev_word = None
idx = 0
while idx < len(query):
char = query[idx]
next_char = query[idx + 1] if idx < len(query) - 1 else None
token += char
if next_char and not char.isspace() and next_char.isspace():
end_of_prev_word = char
if char.isspace() and not quote_enclosed and end_of_prev_word != ":":
if not token.isspace():
tokens.append(token.strip(" "))
token = ""
if char in ("'", '"'):
if not quote_enclosed or quote_type == char:
quote_enclosed = not quote_enclosed
if quote_enclosed:
quote_type = char
if quote_enclosed and char == "\\" and next_char == quote_type:
if next_char is not None:
token += next_char
idx += 1
idx += 1
if not token.isspace():
tokens.append(token.strip(" "))
return tokens
def parse_query(
projects: Sequence[Project],
query: str,
user: User | RpcUser | AnonymousUser,
environments: Sequence[Environment],
) -> dict[str, Any]:
"""| Parses the query string and returns a dict of structured query term values:
| Required:
| - tags: dict[str, Union[str, list[str], Any]]: dictionary of tag key-values 'user.id:123'
| - query: str: the general query portion of the query string
| Optional:
| - unassigned: bool: 'is:unassigned'
| - for_review: bool: 'is:for_review'
| - linked: bool: 'is:linked'
| - status: int: 'is:<resolved,unresolved,ignored,muted,reprocessing>'
| - assigned_to: Optional[Union[User, Team]]: 'assigned:<user or team>'
| - assigned_or_suggested: Optional[Union[User, Team]]: 'assigned_or_suggested:<user or team>'
| - bookmarked_by: User: 'bookmarks:<user>'
| - subscribed_by: User: 'subscribed:<user>'
| - first_release: Sequence[str]: '<first-release/firstRelease>:1.2.3'
| - age_from: Union[datetime, bool]: '<age/firstSeen>:-1h'
| - age_to: Union[datetime, bool]: '<age/firstSeen>:+1h'
| - last_seen_from: Union[datetime, bool]: 'last_seen/lastSeen:-1h'
| - last_seen_to: Union[datetime, bool]: 'last_seen/lastSeen:+1h'
| - date_from: Union[datetime, bool]: 'event.timestamp:-24h'
| - date_to: Union[datetime, bool]: 'event.timestamp:+0m'
| - times_seen: Union[int, float]: 'timesSeen:>100'
:returns: A dict of parsed values from the query.
"""
# TODO(dcramer): handle query being wrapped in quotes
tokens = tokenize_query(query)
results: dict[str, Any] = {"tags": {}, "query": []}
for key, token_list in tokens.items():
for value in token_list:
if key == "query":
results["query"].append(value)
elif key == "is":
if value == "unassigned":
results["unassigned"] = True
elif value == "assigned":
results["unassigned"] = False
elif value == "for_review":
results["for_review"] = True
elif value == "linked":
results["linked"] = True
elif value == "unlinked":
results["linked"] = False
else:
try:
results["status"] = STATUS_QUERY_CHOICES[value]
except KeyError:
raise InvalidQuery(f"'is:' had unknown status code '{value}'.")
elif key == "assigned":
results["assigned_to"] = parse_actor_or_none_value(projects, value, user)
elif key == "assigned_or_suggested":
results["assigned_or_suggested"] = parse_actor_or_none_value(projects, value, user)
elif key == "bookmarks":
results["bookmarked_by"] = parse_user_value(value, user)
elif key == "subscribed":
results["subscribed_by"] = parse_user_value(value, user)
elif key in ("first-release", "firstRelease"):
results["first_release"] = parse_release(value, projects, environments)
elif key == "release":
results["tags"]["sentry:release"] = parse_release(value, projects, environments)
elif key == "dist":
results["tags"]["sentry:dist"] = value
elif key == "user":
if ":" in value:
comp, value = value.split(":", 1)
else:
comp = "id"
results["tags"]["sentry:user"] = get_user_tag(projects, comp, value)
elif key == "has":
if value == "user":
value = "sentry:user"
elif value == "release":
value = "sentry:release"
# `has:x` query should not take precedence over `x:value` queries
if value not in results["tags"]:
results["tags"][value] = ANY
elif key in ("age", "firstSeen"):
results.update(get_date_params(value, "age_from", "age_to"))
elif key in ("last_seen", "lastSeen"):
results.update(get_date_params(value, "last_seen_from", "last_seen_to"))
elif key == "activeSince":
results.update(get_date_params(value, "active_at_from", "active_at_to"))
elif key.startswith("user."):
results["tags"]["sentry:user"] = get_user_tag(projects, key.split(".", 1)[1], value)
elif key == "event.timestamp":
results.update(get_date_params(value, "date_from", "date_to"))
elif key == "timesSeen":
results.update(get_numeric_field_value("times_seen", value))
else:
results["tags"][key] = value
results["query"] = " ".join(results["query"])
return results
def convert_user_tag_to_query(key: str, value: str) -> str | None:
"""
Converts a user tag to a query string that can be used to search for that
user. Returns None if not a user tag.
"""
if key == "user" and value is not None and ":" in value:
sub_key, value = value.split(":", 1)
if KEYWORD_MAP.get_key(sub_key, None):
return 'user.{}:"{}"'.format(sub_key, value.replace('"', '\\"'))
return None
# Mapping of device class to the store corresponding tag value
DEVICE_CLASS: dict[str, set[str]] = {
"low": {"1"},
"medium": {"2"},
"high": {"3"},
}
def map_device_class_level(device_class: str) -> str | None:
for key, value in DEVICE_CLASS.items():
if device_class in value:
return key
return None
def validate_snuba_array_parameter(parameter: Sequence[str]) -> bool:
"""Returns whether parameter is within a reasonable length to be used as a snuba parameter"""
# 4 here is for the 2 quotes around the string + a comma + a space
# this should be roughly equivalent to len(str(parameter)), but runs 2x as fast
# python -m timeit -n 10000 -s "array=['abcdef123456']*1000" "sum(len(x) for x in array) + 4 * len(array)"
# 10000 loops, best of 5: 23.6 usec per loop
# python -m timeit -n 10000 -s "array=['abcdef123456']*1000" "len(str(array))"
# 10000 loops, best of 5: 42.6 usec per loop
converted_length = sum(len(item) for item in parameter) + (4 * len(parameter))
return converted_length <= MAX_PARAMETERS_IN_ARRAY
| LatestReleaseOrders |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_reflection.py | {
"start": 99042,
"end": 101038
} | class ____(fixtures.TablesTest):
__only_on__ = "postgresql"
__sparse_driver_backend__ = True
def define_tables(metadata):
Table(
"sample_table",
metadata,
Column("c1", Integer, primary_key=True),
Column("c2", Integer, unique=True),
Column("c3", Integer),
Index("sample_table_index", "c2", "c3"),
)
def check_int_list(self, row, key):
value = row[key]
is_true(isinstance(value, list))
is_true(len(value) > 0)
is_true(all(isinstance(v, int) for v in value))
def test_pg_index(self, connection):
insp = inspect(connection)
pgc_oid = insp.get_table_oid("sample_table")
cols = [
col
for col in pg_catalog.pg_index.c
if testing.db.dialect.server_version_info
>= col.info.get("server_version", (0,))
]
stmt = sa.select(*cols).filter_by(indrelid=pgc_oid)
rows = connection.execute(stmt).mappings().all()
is_true(len(rows) > 0)
cols = [
col
for col in ["indkey", "indoption", "indclass", "indcollation"]
if testing.db.dialect.server_version_info
>= pg_catalog.pg_index.c[col].info.get("server_version", (0,))
]
for row in rows:
for col in cols:
self.check_int_list(row, col)
def test_pg_constraint(self, connection):
insp = inspect(connection)
pgc_oid = insp.get_table_oid("sample_table")
cols = [
col
for col in pg_catalog.pg_constraint.c
if testing.db.dialect.server_version_info
>= col.info.get("server_version", (0,))
]
stmt = sa.select(*cols).filter_by(conrelid=pgc_oid)
rows = connection.execute(stmt).mappings().all()
is_true(len(rows) > 0)
for row in rows:
self.check_int_list(row, "conkey")
| TestReflectDifficultColTypes |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/busybox.py | {
"start": 91,
"end": 142
} | class ____(BaseModel):
image: ExternalImage
| Busybox |
python | has2k1__plotnine | tools/term.py | {
"start": 118,
"end": 546
} | class ____(Enum):
"""
Foreground color codes
"""
black = "\033[30m"
red = "\033[31m"
green = "\033[32m"
orange = "\033[33m"
blue = "\033[34m"
purple = "\033[35m"
cyan = "\033[36m"
lightgrey = "\033[37m"
darkgrey = "\033[90m"
lightred = "\033[91m"
lightgreen = "\033[92m"
yellow = "\033[93m"
lightblue = "\033[94m"
pink = "\033[95m"
lightcyan = "\033[96m"
| Fg |
python | PrefectHQ__prefect | src/prefect/events/filters.py | {
"start": 386,
"end": 626
} | class ____(PrefectBaseModel):
"""Filter by `Automation.created`."""
before_: Optional[DateTime] = Field(
default=None,
description="Only include automations created before this datetime",
)
| AutomationFilterCreated |
python | mlflow__mlflow | mlflow/entities/assessment.py | {
"start": 1202,
"end": 7596
} | class ____(_MlflowObject):
"""
Base class for assessments that can be attached to a trace.
An Assessment should be one of the following types:
- Expectations: A label that represents the expected value for a particular operation.
For example, an expected answer for a user question from a chatbot.
- Feedback: A label that represents the feedback on the quality of the operation.
Feedback can come from different sources, such as human judges, heuristic scorers,
or LLM-as-a-Judge.
"""
name: str
source: AssessmentSource
# NB: The trace ID is optional because the assessment object itself may be created
# standalone. For example, a custom metric function returns an assessment object
# without a trace ID. That said, the trace ID is required when logging the
# assessment to a trace in the backend eventually.
# https://docs.databricks.com/aws/en/generative-ai/agent-evaluation/custom-metrics#-metric-decorator
trace_id: str | None = None
run_id: str | None = None
rationale: str | None = None
metadata: dict[str, str] | None = None
span_id: str | None = None
create_time_ms: int | None = None
last_update_time_ms: int | None = None
# NB: The assessment ID should always be generated in the backend. The CreateAssessment
# backend API asks for an incomplete Assessment object without an ID and returns a
# complete one with assessment_id, so the ID is Optional in the constructor here.
assessment_id: str | None = None
# Deprecated, use `error` in Feedback instead. Just kept for backward compatibility
# and will be removed in the 3.0.0 release.
error: AssessmentError | None = None
# Should only be used internally. To create an assessment with an expectation or feedback,
# use the`Expectation` or `Feedback` classes instead.
expectation: ExpectationValue | None = None
feedback: FeedbackValue | None = None
# The ID of the assessment which this assessment overrides.
overrides: str | None = None
# Whether this assessment is valid (i.e. has not been overridden).
# This should not be set by the user, it is automatically set by the backend.
valid: bool | None = None
def __post_init__(self):
from mlflow.tracing.constant import AssessmentMetadataKey
if (self.expectation is not None) + (self.feedback is not None) != 1:
raise MlflowException.invalid_parameter_value(
"Exactly one of `expectation` or `feedback` should be specified.",
)
# Populate the error field to the feedback object
if self.error is not None:
if self.expectation is not None:
raise MlflowException.invalid_parameter_value(
"Cannot set `error` when `expectation` is specified.",
)
if self.feedback is None:
raise MlflowException.invalid_parameter_value(
"Cannot set `error` when `feedback` is not specified.",
)
self.feedback.error = self.error
# Set timestamp if not provided
current_time = int(time.time() * 1000) # milliseconds
if self.create_time_ms is None:
self.create_time_ms = current_time
if self.last_update_time_ms is None:
self.last_update_time_ms = current_time
if not isinstance(self.source, AssessmentSource):
raise MlflowException.invalid_parameter_value(
"`source` must be an instance of `AssessmentSource`. "
f"Got {type(self.source)} instead."
)
# Extract and set run_id from metadata but don't modify the proto representation
if (
self.run_id is None
and self.metadata
and AssessmentMetadataKey.SOURCE_RUN_ID in self.metadata
):
self.run_id = self.metadata[AssessmentMetadataKey.SOURCE_RUN_ID]
def to_proto(self):
assessment = ProtoAssessment()
assessment.assessment_name = self.name
assessment.trace_id = self.trace_id or ""
assessment.source.CopyFrom(self.source.to_proto())
# Convert time in milliseconds to protobuf Timestamp
assessment.create_time.FromMilliseconds(self.create_time_ms)
assessment.last_update_time.FromMilliseconds(self.last_update_time_ms)
if self.span_id is not None:
assessment.span_id = self.span_id
if self.rationale is not None:
assessment.rationale = self.rationale
if self.assessment_id is not None:
assessment.assessment_id = self.assessment_id
if self.expectation is not None:
assessment.expectation.CopyFrom(self.expectation.to_proto())
elif self.feedback is not None:
assessment.feedback.CopyFrom(self.feedback.to_proto())
if self.metadata:
for key, value in self.metadata.items():
assessment.metadata[key] = str(value)
if self.overrides:
assessment.overrides = self.overrides
if self.valid is not None:
assessment.valid = self.valid
return assessment
@classmethod
def from_proto(cls, proto):
if proto.WhichOneof("value") == "expectation":
return Expectation.from_proto(proto)
elif proto.WhichOneof("value") == "feedback":
return Feedback.from_proto(proto)
else:
raise MlflowException.invalid_parameter_value(
f"Unknown assessment type: {proto.WhichOneof('value')}"
)
def to_dictionary(self):
# Note that MessageToDict excludes None fields. For example, if assessment_id is None,
# it won't be included in the resulting dictionary.
return MessageToDict(self.to_proto(), preserving_proto_field_name=True)
@classmethod
def from_dictionary(cls, d: dict[str, Any]) -> "Assessment":
if d.get("expectation"):
return Expectation.from_dictionary(d)
elif d.get("feedback"):
return Feedback.from_dictionary(d)
else:
raise MlflowException.invalid_parameter_value(
f"Unknown assessment type: {d.get('assessment_name')}"
)
DEFAULT_FEEDBACK_NAME = "feedback"
@experimental(version="3.0.0")
@dataclass
| Assessment |
python | celery__celery | celery/worker/consumer/mingle.py | {
"start": 274,
"end": 2531
} | class ____(bootsteps.StartStopStep):
"""Bootstep syncing state with neighbor workers.
At startup, or upon consumer restart, this will:
- Sync logical clocks.
- Sync revoked tasks.
"""
label = 'Mingle'
requires = (Events,)
compatible_transports = {'amqp', 'redis', 'gcpubsub'}
def __init__(self, c, without_mingle=False, **kwargs):
self.enabled = not without_mingle and self.compatible_transport(c.app)
super().__init__(
c, without_mingle=without_mingle, **kwargs)
def compatible_transport(self, app):
with app.connection_for_read() as conn:
return conn.transport.driver_type in self.compatible_transports
def start(self, c):
self.sync(c)
def sync(self, c):
info('mingle: searching for neighbors')
replies = self.send_hello(c)
if replies:
info('mingle: sync with %s nodes',
len([reply for reply, value in replies.items() if value]))
[self.on_node_reply(c, nodename, reply)
for nodename, reply in replies.items() if reply]
info('mingle: sync complete')
else:
info('mingle: all alone')
def send_hello(self, c):
inspect = c.app.control.inspect(timeout=1.0, connection=c.connection)
our_revoked = c.controller.state.revoked
replies = inspect.hello(c.hostname, our_revoked._data) or {}
replies.pop(c.hostname, None) # delete my own response
return replies
def on_node_reply(self, c, nodename, reply):
debug('mingle: processing reply from %s', nodename)
try:
self.sync_with_node(c, **reply)
except MemoryError:
raise
except Exception as exc: # pylint: disable=broad-except
exception('mingle: sync with %s failed: %r', nodename, exc)
def sync_with_node(self, c, clock=None, revoked=None, **kwargs):
self.on_clock_event(c, clock)
self.on_revoked_received(c, revoked)
def on_clock_event(self, c, clock):
c.app.clock.adjust(clock) if clock else c.app.clock.forward()
def on_revoked_received(self, c, revoked):
if revoked:
c.controller.state.revoked.update(revoked)
| Mingle |
python | pdm-project__pdm | tests/test_utils.py | {
"start": 3229,
"end": 6003
} | class ____:
@mock.patch("pdm.utils.shutil.which", return_value=None)
def test_no_git(self, no_git_patch):
with no_git_patch:
assert utils.get_user_email_from_git() == ("", "")
@mock.patch(
"pdm.utils.subprocess.check_output",
side_effect=[
utils.subprocess.CalledProcessError(-1, ["git", "config", "user.name"], "No username"),
utils.subprocess.CalledProcessError(-1, ["git", "config", "user.email"], "No email"),
],
)
@mock.patch("pdm.utils.shutil.which", return_value="git")
def test_no_git_username_and_email(self, git_patch, no_git_username_and_email_patch):
with git_patch:
with no_git_username_and_email_patch:
assert utils.get_user_email_from_git() == ("", "")
@mock.patch(
"pdm.utils.subprocess.check_output",
side_effect=[
"username",
utils.subprocess.CalledProcessError(-1, ["git", "config", "user.email"], "No email"),
],
)
@mock.patch("pdm.utils.shutil.which", return_value="git")
def test_no_git_email(self, git_patch, no_git_email_patch):
with git_patch:
with no_git_email_patch:
assert utils.get_user_email_from_git() == ("username", "")
@mock.patch(
"pdm.utils.subprocess.check_output",
side_effect=[utils.subprocess.CalledProcessError(-1, ["git", "config", "user.name"], "No username"), "email"],
)
@mock.patch("pdm.utils.shutil.which", return_value="git")
def test_no_git_username(self, git_patch, no_git_username_patch):
with git_patch:
with no_git_username_patch:
assert utils.get_user_email_from_git() == ("", "email")
@mock.patch("pdm.utils.subprocess.check_output", side_effect=["username", "email"])
@mock.patch("pdm.utils.shutil.which", return_value="git")
def test_git_username_and_email(self, git_patch, git_username_and_email_patch):
with git_patch:
with git_username_and_email_patch:
assert utils.get_user_email_from_git() == ("username", "email")
@pytest.mark.parametrize(
"given,expected",
[
("git@github.com/pdm-project/pdm", "ssh://git@github.com/pdm-project/pdm"),
("ssh://git@github.com/pdm-project/pdm", "ssh://git@github.com/pdm-project/pdm"),
("git+ssh://git@github.com/pdm-project/pdm", "git+ssh://git@github.com/pdm-project/pdm"),
("https://git@github.com/pdm-project/pdm", "https://git@github.com/pdm-project/pdm"),
("file:///my/local/pdm-project/pdm", "file:///my/local/pdm-project/pdm"),
],
)
def test_add_ssh_scheme_to_git_uri(given, expected):
assert utils.add_ssh_scheme_to_git_uri(given) == expected
| TestGetUserEmailFromGit |
python | pypa__pipenv | pipenv/patched/pip/_internal/index/sources.py | {
"start": 4295,
"end": 5249
} | class ____(LinkSource):
"""``--find-links=<path-or-url>`` or ``--[extra-]index-url=<path-or-url>``.
If a URL is supplied, it must be a ``file:`` URL. If a path is supplied to
the option, it is converted to a URL first. This returns:
* ``page_candidates``: Links listed on an HTML file.
* ``file_candidates``: The non-HTML file.
"""
def __init__(
self,
candidates_from_page: CandidatesFromPage,
link: Link,
) -> None:
self._candidates_from_page = candidates_from_page
self._link = link
@property
def link(self) -> Optional[Link]:
return self._link
def page_candidates(self) -> FoundCandidates:
if not _is_html_file(self._link.url):
return
yield from self._candidates_from_page(self._link)
def file_links(self) -> FoundLinks:
if _is_html_file(self._link.url):
return
yield self._link
| _LocalFileSource |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py | {
"start": 44859,
"end": 48042
} | class ____(ShopifyBulkQuery):
"""
{
inventoryItems(query: "updated_at:>='2022-04-13T00:00:00+00:00' AND updated_at:<='2023-02-07T00:00:00+00:00'") {
edges {
node {
__typename
unitCost {
cost: amount
currency_code: currencyCode
}
countryCodeOfOrigin
countryHarmonizedSystemCodes {
edges {
node {
harmonizedSystemCode
countryCode
}
}
}
harmonizedSystemCode
provinceCodeOfOrigin
updatedAt
createdAt
sku
tracked
requiresShipping
duplicateSkuCount
}
}
}
}
"""
query_name = "inventoryItems"
country_harmonizedS_system_codes: List[Field] = [
Field(name="edges", fields=[Field(name="node", fields=["__typename", "harmonizedSystemCode", "countryCode"])])
]
query_nodes: List[Field] = [
"__typename",
"id",
"harmonizedSystemCode",
"provinceCodeOfOrigin",
"updatedAt",
"createdAt",
"sku",
"tracked",
"requiresShipping",
"duplicateSkuCount",
Field(name="unitCost", fields=[Field(name="amount", alias="cost"), Field(name="currencyCode", alias="currency_code")]),
Field(name="countryCodeOfOrigin"),
Field(name="countryHarmonizedSystemCodes", fields=country_harmonizedS_system_codes),
]
record_composition = {
"new_record": "InventoryItem",
}
def _process_unit_cost(self, record: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
# resolve `cost` to root lvl as `number`
unit_cost = record.get("unitCost", {})
if unit_cost:
record["cost"] = float(unit_cost.get("cost"))
record["currency_code"] = unit_cost.get("currency_code")
else:
record["cost"] = None
record["currency_code"] = None
# clean up
record.pop("unitCost", None)
return record
def record_process_components(self, record: MutableMapping[str, Any]) -> Iterable[MutableMapping[str, Any]]:
"""
Defines how to process collected components.
"""
# unnest unit_cost to the root lvl
record = self._process_unit_cost(record)
# add empty `country_harmonized_system_codes` array, if missing for record
if "countryHarmonizedSystemCodes" not in record.keys():
record["country_harmonized_system_codes"] = []
# convert dates from ISO-8601 to RFC-3339
record["createdAt"] = self.tools.from_iso8601_to_rfc3339(record, "createdAt")
record["updatedAt"] = self.tools.from_iso8601_to_rfc3339(record, "updatedAt")
record = self.tools.fields_names_to_snake_case(record)
yield record
| InventoryItem |
python | wandb__wandb | wandb/sdk/artifacts/_generated/delete_artifact.py | {
"start": 265,
"end": 349
} | class ____(GQLResult):
artifact: DeleteArtifactResultArtifact
| DeleteArtifactResult |
python | walkccc__LeetCode | solutions/164. Maximum Gap/164.py | {
"start": 0,
"end": 90
} | class ____:
def __init__(self, mn: int, mx: int):
self.mn = mn
self.mx = mx
| Bucket |
python | sympy__sympy | sympy/codegen/fnodes.py | {
"start": 7664,
"end": 16567
} | class ____(Basic):
""" Represents a dimension extent.
Examples
========
>>> from sympy.codegen.fnodes import Extent
>>> e = Extent(-3, 3) # -3, -2, -1, 0, 1, 2, 3
>>> from sympy import fcode
>>> fcode(e, source_format='free')
'-3:3'
>>> from sympy.codegen.ast import Variable, real
>>> from sympy.codegen.fnodes import dimension, intent_out
>>> dim = dimension(e, e)
>>> arr = Variable('x', real, attrs=[dim, intent_out])
>>> fcode(arr.as_Declaration(), source_format='free', standard=2003)
'real*8, dimension(-3:3, -3:3), intent(out) :: x'
"""
def __new__(cls, *args):
if len(args) == 2:
low, high = args
return Basic.__new__(cls, sympify(low), sympify(high))
elif len(args) == 0 or (len(args) == 1 and args[0] in (':', None)):
return Basic.__new__(cls) # assumed shape
else:
raise ValueError("Expected 0 or 2 args (or one argument == None or ':')")
def _sympystr(self, printer):
if len(self.args) == 0:
return ':'
return ":".join(str(arg) for arg in self.args)
assumed_extent = Extent() # or Extent(':'), Extent(None)
def dimension(*args):
""" Creates a 'dimension' Attribute with (up to 7) extents.
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import dimension, intent_in
>>> dim = dimension('2', ':') # 2 rows, runtime determined number of columns
>>> from sympy.codegen.ast import Variable, integer
>>> arr = Variable('a', integer, attrs=[dim, intent_in])
>>> fcode(arr.as_Declaration(), source_format='free', standard=2003)
'integer*4, dimension(2, :), intent(in) :: a'
"""
if len(args) > 7:
raise ValueError("Fortran only supports up to 7 dimensional arrays")
parameters = []
for arg in args:
if isinstance(arg, Extent):
parameters.append(arg)
elif isinstance(arg, str):
if arg == ':':
parameters.append(Extent())
else:
parameters.append(String(arg))
elif iterable(arg):
parameters.append(Extent(*arg))
else:
parameters.append(sympify(arg))
if len(args) == 0:
raise ValueError("Need at least one dimension")
return Attribute('dimension', parameters)
assumed_size = dimension('*')
def array(symbol, dim, intent=None, *, attrs=(), value=None, type=None):
""" Convenience function for creating a Variable instance for a Fortran array.
Parameters
==========
symbol : symbol
dim : Attribute or iterable
If dim is an ``Attribute`` it need to have the name 'dimension'. If it is
not an ``Attribute``, then it is passed to :func:`dimension` as ``*dim``
intent : str
One of: 'in', 'out', 'inout' or None
\\*\\*kwargs:
Keyword arguments for ``Variable`` ('type' & 'value')
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.ast import integer, real
>>> from sympy.codegen.fnodes import array
>>> arr = array('a', '*', 'in', type=integer)
>>> print(fcode(arr.as_Declaration(), source_format='free', standard=2003))
integer*4, dimension(*), intent(in) :: a
>>> x = array('x', [3, ':', ':'], intent='out', type=real)
>>> print(fcode(x.as_Declaration(value=1), source_format='free', standard=2003))
real*8, dimension(3, :, :), intent(out) :: x = 1
"""
if isinstance(dim, Attribute):
if str(dim.name) != 'dimension':
raise ValueError("Got an unexpected Attribute argument as dim: %s" % str(dim))
else:
dim = dimension(*dim)
attrs = list(attrs) + [dim]
if intent is not None:
if intent not in (intent_in, intent_out, intent_inout):
intent = {'in': intent_in, 'out': intent_out, 'inout': intent_inout}[intent]
attrs.append(intent)
if type is None:
return Variable.deduced(symbol, value=value, attrs=attrs)
else:
return Variable(symbol, type, value=value, attrs=attrs)
def _printable(arg):
return String(arg) if isinstance(arg, str) else sympify(arg)
def allocated(array):
""" Creates an AST node for a function call to Fortran's "allocated(...)"
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import allocated
>>> alloc = allocated('x')
>>> fcode(alloc, source_format='free')
'allocated(x)'
"""
return FunctionCall('allocated', [_printable(array)])
def lbound(array, dim=None, kind=None):
""" Creates an AST node for a function call to Fortran's "lbound(...)"
Parameters
==========
array : Symbol or String
dim : expr
kind : expr
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import lbound
>>> lb = lbound('arr', dim=2)
>>> fcode(lb, source_format='free')
'lbound(arr, 2)'
"""
return FunctionCall(
'lbound',
[_printable(array)] +
([_printable(dim)] if dim else []) +
([_printable(kind)] if kind else [])
)
def ubound(array, dim=None, kind=None):
return FunctionCall(
'ubound',
[_printable(array)] +
([_printable(dim)] if dim else []) +
([_printable(kind)] if kind else [])
)
def shape(source, kind=None):
""" Creates an AST node for a function call to Fortran's "shape(...)"
Parameters
==========
source : Symbol or String
kind : expr
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import shape
>>> shp = shape('x')
>>> fcode(shp, source_format='free')
'shape(x)'
"""
return FunctionCall(
'shape',
[_printable(source)] +
([_printable(kind)] if kind else [])
)
def size(array, dim=None, kind=None):
""" Creates an AST node for a function call to Fortran's "size(...)"
Examples
========
>>> from sympy import fcode, Symbol
>>> from sympy.codegen.ast import FunctionDefinition, real, Return
>>> from sympy.codegen.fnodes import array, sum_, size
>>> a = Symbol('a', real=True)
>>> body = [Return((sum_(a**2)/size(a))**.5)]
>>> arr = array(a, dim=[':'], intent='in')
>>> fd = FunctionDefinition(real, 'rms', [arr], body)
>>> print(fcode(fd, source_format='free', standard=2003))
real*8 function rms(a)
real*8, dimension(:), intent(in) :: a
rms = sqrt(sum(a**2)*1d0/size(a))
end function
"""
return FunctionCall(
'size',
[_printable(array)] +
([_printable(dim)] if dim else []) +
([_printable(kind)] if kind else [])
)
def reshape(source, shape, pad=None, order=None):
""" Creates an AST node for a function call to Fortran's "reshape(...)"
Parameters
==========
source : Symbol or String
shape : ArrayExpr
pad : Symbol or String, optional
The padding array
order : Symbol or String, optional
The order of the elements in the array
Examples
========
>>> from sympy import fcode, symbols
>>> from sympy.codegen.fnodes import reshape
>>> array, shape, pad, order = symbols('array shape pad order')
>>> fcode(reshape(array, shape), source_format='free')
'reshape(array, shape)'
>>> fcode(reshape(array, shape, pad), source_format='free')
'reshape(array, shape, pad=pad)'
>>> fcode(reshape(array, shape, None, order), source_format='free')
'reshape(array, shape, order=order)'
>>> fcode(reshape(array, shape, pad, order), source_format='free')
'reshape(array, shape, pad=pad, order=order)'
"""
from sympy.codegen.ast import KeywordFunctionCall
kwargs = {}
if pad is not None:
kwargs['pad'] = _printable(pad)
if order is not None:
kwargs['order'] = _printable(order)
return KeywordFunctionCall(
'reshape',
[_printable(source), _printable(shape)],
kwargs
)
def bind_C(name=None):
""" Creates an Attribute ``bind_C`` with a name.
Parameters
==========
name : str
Examples
========
>>> from sympy import fcode, Symbol
>>> from sympy.codegen.ast import FunctionDefinition, real, Return
>>> from sympy.codegen.fnodes import array, sum_, bind_C
>>> a = Symbol('a', real=True)
>>> s = Symbol('s', integer=True)
>>> arr = array(a, dim=[s], intent='in')
>>> body = [Return((sum_(a**2)/s)**.5)]
>>> fd = FunctionDefinition(real, 'rms', [arr, s], body, attrs=[bind_C('rms')])
>>> print(fcode(fd, source_format='free', standard=2003))
real*8 function rms(a, s) bind(C, name="rms")
real*8, dimension(s), intent(in) :: a
integer*4 :: s
rms = sqrt(sum(a**2)/s)
end function
"""
return Attribute('bind_C', [String(name)] if name else [])
| Extent |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 11355,
"end": 12016
} | class ____(_BaseVyperException):
"""
Base Vyper internal exception class.
This exception is not raised directly, it is subclassed by other internal
exceptions.
Internal exceptions are raised as a means of telling the user that the
compiler has panicked, and that filing a bug report would be appropriate.
"""
def __str__(self):
return (
f"{super().__str__()}\n\n"
"This is an unhandled internal compiler error. "
"Please create an issue on Github to notify the developers!\n"
"https://github.com/vyperlang/vyper/issues/new?template=bug.md"
)
| VyperInternalException |
python | pytorch__pytorch | torch/distributed/_shard/sharding_spec/api.py | {
"start": 4808,
"end": 9848
} | class ____(ShardingSpec):
"""
This is a type of PlacementSpec that allows users to specify a generic
sharding scheme by enumerating exactly how each shard is laid out.
Args:
shards(List[ShardMetadata]): List of :class:`ShardMetadata` objects representing
each shard. Note that none of the shards should overlap.
"""
shards: list[ShardMetadata]
def __post_init__(self):
if len(self.shards) == 0:
raise ValueError(f"Empty shard list provided: {self.shards}")
# Validate each shard has same rank.
rank = -1
for shard in self.shards:
if rank != -1 and rank != len(shard.shard_offsets):
raise ValueError(
f"Found inconsistent ranks for shards: {rank} and {len(shard.shard_offsets)}"
)
rank = len(shard.shard_offsets)
validate_non_overlapping_shards_metadata(self.shards)
def build_metadata(
self,
tensor_sizes: torch.Size,
tensor_properties: sharded_tensor_meta.TensorProperties,
) -> sharded_tensor_meta.ShardedTensorMetadata:
# check if shards form a valid tensor
check_tensor(self.shards, tensor_sizes)
return sharded_tensor_meta.ShardedTensorMetadata(
self.shards, tensor_sizes, tensor_properties
)
def shard(
self, tensor: torch.Tensor, src_rank: int = 0, process_group=None
) -> "ShardedTensor":
# TODO: figure out a generic and efficient way to scatter the shards for EnumerableShardingSpec
raise NotImplementedError("EnumerableShardingSpec.shard not implemented yet!")
def _infer_sharding_spec_from_shards_metadata(shards_metadata):
"""
Infer the sharding spec from the metadata of each shard of a ShardedTensor.
If the tensor is sharded only on one dimension, we can then verify whether it's
a ChunkShardingSpec or not. The way to verify it is to first get the total length
and perform a chunk sharding with the given placements to see if we can have the
same chunk size as the given shards_metadata. If not, we assume it's enum sharded.
Args:
shards_metadata (List[ShardMetadata]): List of Metadata of local shards.
Returns:
A :class:`torch.distributed._shard.sharding_spec.ShardingSpec` object of sharding
spec for one sharded tensor.
"""
placements = []
chunk_sharding_dim = None
chunk_offset_list = []
shard_size_list = []
shard_offset_list = []
# collect local shard metadatas from the global sharded_tensor_metadata
for shard_metadata in shards_metadata: # type: ignore[attr-defined]
placements.append(shard_metadata.placement)
local_offsets = shard_metadata.shard_offsets
chunk_offset_list.append(sum(local_offsets))
shard_size_list.append(shard_metadata.shard_sizes)
shard_offset_list.append(shard_metadata.shard_offsets)
shard_dims = [idx for idx, e in enumerate(local_offsets) if e != 0]
# If the offset is [0, 0, ..., 0] (all zeros),
# we cannot decide whether how the tensor is sharded.
if len(shard_dims) == 0:
continue
# If the offset is [0, N, .,0, M, 0, .., 0],
# we are sure it's sharded by more than one dimension.
if len(shard_dims) != 1:
chunk_sharding_dim = None
break
# If the offset is [0, 0, .,0, M, 0, .., 0], aka, it's sharded by just
# one dimension, we need to make sure all ranks share the same dimension.
if not chunk_sharding_dim:
chunk_sharding_dim = shard_dims[0]
elif chunk_sharding_dim != shard_dims[0]:
chunk_sharding_dim = None
break
if chunk_sharding_dim is not None:
# Ensure we infer the correct placement order from offsets
placements = [
x
for _, x in sorted(
zip(chunk_offset_list, placements), key=operator.itemgetter(0)
)
]
from .chunk_sharding_spec import ChunkShardingSpec
chunk_spec = ChunkShardingSpec(
dim=chunk_sharding_dim,
placements=placements,
)
shard_sizes = sorted([x[chunk_sharding_dim] for x in shard_size_list])
shard_total_length = sum(shard_sizes)
shard_offsets = sorted([x[chunk_sharding_dim] for x in shard_offset_list])
chunks = len(placements)
split_size = get_split_size(shard_total_length, chunks)
chunk_shard_sizes = sorted(
[
get_chunked_dim_size(shard_total_length, split_size, idx)
for idx in range(chunks)
]
)
# Should match ChunkShardingSpec offsets calculation
chunk_shard_offsets = [split_size * idx for idx in range(chunks)]
if shard_sizes == chunk_shard_sizes and shard_offsets == chunk_shard_offsets:
return chunk_spec
return EnumerableShardingSpec(shards_metadata)
| EnumerableShardingSpec |
python | pandas-dev__pandas | pandas/tests/extension/test_datetime.py | {
"start": 2362,
"end": 4622
} | class ____(base.ExtensionTests):
def _get_expected_exception(self, op_name, obj, other):
if op_name in ["__sub__", "__rsub__"]:
return None
return super()._get_expected_exception(op_name, obj, other)
def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):
if op_name == "std":
return "timedelta64[ns]"
return arr.dtype
def _supports_accumulation(self, ser, op_name: str) -> bool:
return op_name in ["cummin", "cummax"]
def _supports_reduction(self, obj, op_name: str) -> bool:
return op_name in ["min", "max", "median", "mean", "std", "any", "all"]
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
meth = all_boolean_reductions
msg = f"datetime64 type does not support operation '{meth}'"
with pytest.raises(TypeError, match=msg):
super().test_reduce_series_boolean(data, all_boolean_reductions, skipna)
def test_series_constructor(self, data):
# Series construction drops any .freq attr
data = data._with_freq(None)
super().test_series_constructor(data)
@pytest.mark.parametrize("na_action", [None, "ignore"])
def test_map(self, data, na_action):
result = data.map(lambda x: x, na_action=na_action)
tm.assert_extension_array_equal(result, data)
def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
if op_name in ["median", "mean", "std"]:
alt = ser.astype("int64")
res_op = getattr(ser, op_name)
exp_op = getattr(alt, op_name)
result = res_op(skipna=skipna)
expected = exp_op(skipna=skipna)
if op_name in ["mean", "median"]:
# error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype"
# has no attribute "tz"
tz = ser.dtype.tz # type: ignore[union-attr]
expected = pd.Timestamp(expected, tz=tz)
else:
expected = pd.Timedelta(expected)
tm.assert_almost_equal(result, expected)
else:
return super().check_reduce(ser, op_name, skipna)
| TestDatetimeArray |
python | mwaskom__seaborn | tests/_core/test_data.py | {
"start": 288,
"end": 14373
} | class ____:
@pytest.fixture
def long_variables(self):
variables = dict(x="x", y="y", color="a", size="z", style="s_cat")
return variables
def test_named_vectors(self, long_df, long_variables):
p = PlotData(long_df, long_variables)
assert p.source_data is long_df
assert p.source_vars is long_variables
for key, val in long_variables.items():
assert p.names[key] == val
assert_vector_equal(p.frame[key], long_df[val])
def test_named_and_given_vectors(self, long_df, long_variables):
long_variables["y"] = long_df["b"]
long_variables["size"] = long_df["z"].to_numpy()
p = PlotData(long_df, long_variables)
assert_vector_equal(p.frame["color"], long_df[long_variables["color"]])
assert_vector_equal(p.frame["y"], long_df["b"])
assert_vector_equal(p.frame["size"], long_df["z"])
assert p.names["color"] == long_variables["color"]
assert p.names["y"] == "b"
assert p.names["size"] is None
assert p.ids["color"] == long_variables["color"]
assert p.ids["y"] == "b"
assert p.ids["size"] == id(long_variables["size"])
def test_index_as_variable(self, long_df, long_variables):
index = pd.Index(np.arange(len(long_df)) * 2 + 10, name="i", dtype=int)
long_variables["x"] = "i"
p = PlotData(long_df.set_index(index), long_variables)
assert p.names["x"] == p.ids["x"] == "i"
assert_vector_equal(p.frame["x"], pd.Series(index, index))
def test_multiindex_as_variables(self, long_df, long_variables):
index_i = pd.Index(np.arange(len(long_df)) * 2 + 10, name="i", dtype=int)
index_j = pd.Index(np.arange(len(long_df)) * 3 + 5, name="j", dtype=int)
index = pd.MultiIndex.from_arrays([index_i, index_j])
long_variables.update({"x": "i", "y": "j"})
p = PlotData(long_df.set_index(index), long_variables)
assert_vector_equal(p.frame["x"], pd.Series(index_i, index))
assert_vector_equal(p.frame["y"], pd.Series(index_j, index))
def test_int_as_variable_key(self, rng):
df = pd.DataFrame(rng.uniform(size=(10, 3)))
var = "x"
key = 2
p = PlotData(df, {var: key})
assert_vector_equal(p.frame[var], df[key])
assert p.names[var] == p.ids[var] == str(key)
def test_int_as_variable_value(self, long_df):
p = PlotData(long_df, {"x": 0, "y": "y"})
assert (p.frame["x"] == 0).all()
assert p.names["x"] is None
assert p.ids["x"] == id(0)
def test_tuple_as_variable_key(self, rng):
cols = pd.MultiIndex.from_product([("a", "b", "c"), ("x", "y")])
df = pd.DataFrame(rng.uniform(size=(10, 6)), columns=cols)
var = "color"
key = ("b", "y")
p = PlotData(df, {var: key})
assert_vector_equal(p.frame[var], df[key])
assert p.names[var] == p.ids[var] == str(key)
def test_dict_as_data(self, long_dict, long_variables):
p = PlotData(long_dict, long_variables)
assert p.source_data is long_dict
for key, val in long_variables.items():
assert_vector_equal(p.frame[key], pd.Series(long_dict[val]))
@pytest.mark.parametrize(
"vector_type",
["series", "numpy", "list"],
)
def test_vectors_various_types(self, long_df, long_variables, vector_type):
variables = {key: long_df[val] for key, val in long_variables.items()}
if vector_type == "numpy":
variables = {key: val.to_numpy() for key, val in variables.items()}
elif vector_type == "list":
variables = {key: val.to_list() for key, val in variables.items()}
p = PlotData(None, variables)
assert list(p.names) == list(long_variables)
if vector_type == "series":
assert p.source_vars is variables
assert p.names == p.ids == {key: val.name for key, val in variables.items()}
else:
assert p.names == {key: None for key in variables}
assert p.ids == {key: id(val) for key, val in variables.items()}
for key, val in long_variables.items():
if vector_type == "series":
assert_vector_equal(p.frame[key], long_df[val])
else:
assert_array_equal(p.frame[key], long_df[val])
def test_none_as_variable_value(self, long_df):
p = PlotData(long_df, {"x": "z", "y": None})
assert list(p.frame.columns) == ["x"]
assert p.names == p.ids == {"x": "z"}
def test_frame_and_vector_mismatched_lengths(self, long_df):
vector = np.arange(len(long_df) * 2)
with pytest.raises(ValueError):
PlotData(long_df, {"x": "x", "y": vector})
@pytest.mark.parametrize(
"arg", [{}, pd.DataFrame()],
)
def test_empty_data_input(self, arg):
p = PlotData(arg, {})
assert p.frame.empty
assert not p.names
if not isinstance(arg, pd.DataFrame):
p = PlotData(None, dict(x=arg, y=arg))
assert p.frame.empty
assert not p.names
def test_index_alignment_series_to_dataframe(self):
x = [1, 2, 3]
x_index = pd.Index(x, dtype=int)
y_values = [3, 4, 5]
y_index = pd.Index(y_values, dtype=int)
y = pd.Series(y_values, y_index, name="y")
data = pd.DataFrame(dict(x=x), index=x_index)
p = PlotData(data, {"x": "x", "y": y})
x_col_expected = pd.Series([1, 2, 3, np.nan, np.nan], np.arange(1, 6))
y_col_expected = pd.Series([np.nan, np.nan, 3, 4, 5], np.arange(1, 6))
assert_vector_equal(p.frame["x"], x_col_expected)
assert_vector_equal(p.frame["y"], y_col_expected)
def test_index_alignment_between_series(self):
x_index = [1, 2, 3]
x_values = [10, 20, 30]
x = pd.Series(x_values, x_index, name="x")
y_index = [3, 4, 5]
y_values = [300, 400, 500]
y = pd.Series(y_values, y_index, name="y")
p = PlotData(None, {"x": x, "y": y})
idx_expected = [1, 2, 3, 4, 5]
x_col_expected = pd.Series([10, 20, 30, np.nan, np.nan], idx_expected)
y_col_expected = pd.Series([np.nan, np.nan, 300, 400, 500], idx_expected)
assert_vector_equal(p.frame["x"], x_col_expected)
assert_vector_equal(p.frame["y"], y_col_expected)
def test_key_not_in_data_raises(self, long_df):
var = "x"
key = "what"
msg = f"Could not interpret value `{key}` for `{var}`. An entry with this name"
with pytest.raises(ValueError, match=msg):
PlotData(long_df, {var: key})
def test_key_with_no_data_raises(self):
var = "x"
key = "what"
msg = f"Could not interpret value `{key}` for `{var}`. Value is a string,"
with pytest.raises(ValueError, match=msg):
PlotData(None, {var: key})
def test_data_vector_different_lengths_raises(self, long_df):
vector = np.arange(len(long_df) - 5)
msg = "Length of ndarray vectors must match length of `data`"
with pytest.raises(ValueError, match=msg):
PlotData(long_df, {"y": vector})
def test_undefined_variables_raise(self, long_df):
with pytest.raises(ValueError):
PlotData(long_df, dict(x="not_in_df"))
with pytest.raises(ValueError):
PlotData(long_df, dict(x="x", y="not_in_df"))
with pytest.raises(ValueError):
PlotData(long_df, dict(x="x", y="y", color="not_in_df"))
def test_contains_operation(self, long_df):
p = PlotData(long_df, {"x": "y", "color": long_df["a"]})
assert "x" in p
assert "y" not in p
assert "color" in p
def test_join_add_variable(self, long_df):
v1 = {"x": "x", "y": "f"}
v2 = {"color": "a"}
p1 = PlotData(long_df, v1)
p2 = p1.join(None, v2)
for var, key in dict(**v1, **v2).items():
assert var in p2
assert p2.names[var] == key
assert_vector_equal(p2.frame[var], long_df[key])
def test_join_replace_variable(self, long_df):
v1 = {"x": "x", "y": "y"}
v2 = {"y": "s"}
p1 = PlotData(long_df, v1)
p2 = p1.join(None, v2)
variables = v1.copy()
variables.update(v2)
for var, key in variables.items():
assert var in p2
assert p2.names[var] == key
assert_vector_equal(p2.frame[var], long_df[key])
def test_join_remove_variable(self, long_df):
variables = {"x": "x", "y": "f"}
drop_var = "y"
p1 = PlotData(long_df, variables)
p2 = p1.join(None, {drop_var: None})
assert drop_var in p1
assert drop_var not in p2
assert drop_var not in p2.frame
assert drop_var not in p2.names
def test_join_all_operations(self, long_df):
v1 = {"x": "x", "y": "y", "color": "a"}
v2 = {"y": "s", "size": "s", "color": None}
p1 = PlotData(long_df, v1)
p2 = p1.join(None, v2)
for var, key in v2.items():
if key is None:
assert var not in p2
else:
assert p2.names[var] == key
assert_vector_equal(p2.frame[var], long_df[key])
def test_join_all_operations_same_data(self, long_df):
v1 = {"x": "x", "y": "y", "color": "a"}
v2 = {"y": "s", "size": "s", "color": None}
p1 = PlotData(long_df, v1)
p2 = p1.join(long_df, v2)
for var, key in v2.items():
if key is None:
assert var not in p2
else:
assert p2.names[var] == key
assert_vector_equal(p2.frame[var], long_df[key])
def test_join_add_variable_new_data(self, long_df):
d1 = long_df[["x", "y"]]
d2 = long_df[["a", "s"]]
v1 = {"x": "x", "y": "y"}
v2 = {"color": "a"}
p1 = PlotData(d1, v1)
p2 = p1.join(d2, v2)
for var, key in dict(**v1, **v2).items():
assert p2.names[var] == key
assert_vector_equal(p2.frame[var], long_df[key])
def test_join_replace_variable_new_data(self, long_df):
d1 = long_df[["x", "y"]]
d2 = long_df[["a", "s"]]
v1 = {"x": "x", "y": "y"}
v2 = {"x": "a"}
p1 = PlotData(d1, v1)
p2 = p1.join(d2, v2)
variables = v1.copy()
variables.update(v2)
for var, key in variables.items():
assert p2.names[var] == key
assert_vector_equal(p2.frame[var], long_df[key])
def test_join_add_variable_different_index(self, long_df):
d1 = long_df.iloc[:70]
d2 = long_df.iloc[30:]
v1 = {"x": "a"}
v2 = {"y": "z"}
p1 = PlotData(d1, v1)
p2 = p1.join(d2, v2)
(var1, key1), = v1.items()
(var2, key2), = v2.items()
assert_vector_equal(p2.frame.loc[d1.index, var1], d1[key1])
assert_vector_equal(p2.frame.loc[d2.index, var2], d2[key2])
assert p2.frame.loc[d2.index.difference(d1.index), var1].isna().all()
assert p2.frame.loc[d1.index.difference(d2.index), var2].isna().all()
def test_join_replace_variable_different_index(self, long_df):
d1 = long_df.iloc[:70]
d2 = long_df.iloc[30:]
var = "x"
k1, k2 = "a", "z"
v1 = {var: k1}
v2 = {var: k2}
p1 = PlotData(d1, v1)
p2 = p1.join(d2, v2)
(var1, key1), = v1.items()
(var2, key2), = v2.items()
assert_vector_equal(p2.frame.loc[d2.index, var], d2[k2])
assert p2.frame.loc[d1.index.difference(d2.index), var].isna().all()
def test_join_subset_data_inherit_variables(self, long_df):
sub_df = long_df[long_df["a"] == "b"]
var = "y"
p1 = PlotData(long_df, {var: var})
p2 = p1.join(sub_df, None)
assert_vector_equal(p2.frame.loc[sub_df.index, var], sub_df[var])
assert p2.frame.loc[long_df.index.difference(sub_df.index), var].isna().all()
def test_join_multiple_inherits_from_orig(self, rng):
d1 = pd.DataFrame(dict(a=rng.normal(0, 1, 100), b=rng.normal(0, 1, 100)))
d2 = pd.DataFrame(dict(a=rng.normal(0, 1, 100)))
p = PlotData(d1, {"x": "a"}).join(d2, {"y": "a"}).join(None, {"y": "a"})
assert_vector_equal(p.frame["x"], d1["a"])
assert_vector_equal(p.frame["y"], d1["a"])
def test_bad_type(self, flat_list):
err = "Data source must be a DataFrame or Mapping"
with pytest.raises(TypeError, match=err):
PlotData(flat_list, {})
@pytest.mark.skipif(
condition=not hasattr(pd.api, "interchange"),
reason="Tests behavior assuming support for dataframe interchange"
)
def test_data_interchange(self, mock_long_df, long_df):
variables = {"x": "x", "y": "z", "color": "a"}
p = PlotData(mock_long_df, variables)
for var, col in variables.items():
assert_vector_equal(p.frame[var], long_df[col])
p = PlotData(mock_long_df, {**variables, "color": long_df["a"]})
for var, col in variables.items():
assert_vector_equal(p.frame[var], long_df[col])
@pytest.mark.skipif(
condition=not hasattr(pd.api, "interchange"),
reason="Tests behavior assuming support for dataframe interchange"
)
def test_data_interchange_failure(self, mock_long_df):
mock_long_df._data = None # Break __dataframe__()
with pytest.raises(RuntimeError, match="Encountered an exception"):
PlotData(mock_long_df, {"x": "x"})
@pytest.mark.skipif(
condition=hasattr(pd.api, "interchange"),
reason="Tests graceful failure without support for dataframe interchange"
)
def test_data_interchange_support_test(self, mock_long_df):
with pytest.raises(TypeError, match="Support for non-pandas DataFrame"):
PlotData(mock_long_df, {"x": "x"})
| TestPlotData |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-vowels-in-a-substring-of-given-length.py | {
"start": 29,
"end": 417
} | class ____(object):
def maxVowels(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
VOWELS = set("aeiou")
result = curr = 0
for i, c in enumerate(s):
curr += c in VOWELS
if i >= k:
curr -= s[i-k] in VOWELS
result = max(result, curr)
return result
| Solution |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_profiling_profile.py | {
"start": 129,
"end": 495
} | class ____(APITestCase):
endpoint = "sentry-api-0-project-profiling-profile"
def setUp(self) -> None:
self.login_as(user=self.user)
def test_feature_flag_disabled(self) -> None:
response = self.get_response(self.project.organization.slug, self.project.id, str(uuid4()))
assert response.status_code == 404
| ProjectProfilingProfileTest |
python | spack__spack | lib/spack/spack/modules/common.py | {
"start": 38032,
"end": 38162
} | class ____(ModulesError, RuntimeError):
"""Raised if the template for a module file was not found."""
| ModulesTemplateNotFoundError |
python | jina-ai__jina | jina/excepts.py | {
"start": 1145,
"end": 1278
} | class ____(Exception, BaseJinaException):
"""A wrongly defined client, can not communicate with jina server correctly."""
| BadClient |
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_styles03.py | {
"start": 380,
"end": 4044
} | class ____(unittest.TestCase):
"""
Test assembling a complete Styles file.
"""
def test_assemble_xml_file(self):
"""Test for number format and font styles."""
self.maxDiff = None
fh = StringIO()
style = Styles()
style._set_filehandle(fh)
workbook = Workbook()
workbook.add_format({"num_format": 2})
workbook.add_format({"num_format": 2, "bold": 1})
workbook.add_format({"num_format": "0.0"})
workbook._set_default_xf_indices()
workbook._prepare_format_properties()
style._set_style_properties(
[
workbook.xf_formats,
workbook.palette,
workbook.font_count,
workbook.num_formats,
workbook.border_count,
workbook.fill_count,
workbook.custom_colors,
workbook.dxf_formats,
workbook.has_comments,
]
)
style._assemble_xml_file()
workbook.fileclosed = 1
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<numFmts count="1">
<numFmt numFmtId="164" formatCode="0.0"/>
</numFmts>
<fonts count="2">
<font>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
<font>
<b/>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
</fonts>
<fills count="2">
<fill>
<patternFill patternType="none"/>
</fill>
<fill>
<patternFill patternType="gray125"/>
</fill>
</fills>
<borders count="1">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
</borders>
<cellStyleXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0"/>
</cellStyleXfs>
<cellXfs count="4">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>
<xf numFmtId="2" fontId="0" fillId="0" borderId="0" xfId="0" applyNumberFormat="1"/>
<xf numFmtId="2" fontId="1" fillId="0" borderId="0" xfId="0" applyNumberFormat="1" applyFont="1"/>
<xf numFmtId="164" fontId="0" fillId="0" borderId="0" xfId="0" applyNumberFormat="1"/>
</cellXfs>
<cellStyles count="1">
<cellStyle name="Normal" xfId="0" builtinId="0"/>
</cellStyles>
<dxfs count="0"/>
<tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/>
</styleSheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleStyles |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/application.py | {
"start": 581,
"end": 1491
} | class ____(LoginRequiredMixin, CreateView):
"""
View used to register a new Application for the request.user
"""
template_name = "oauth2_provider/application_registration_form.html"
def get_form_class(self):
"""
Returns the form class for the application model
"""
return modelform_factory(
get_application_model(),
fields=(
"name",
"client_id",
"client_secret",
"hash_client_secret",
"client_type",
"authorization_grant_type",
"redirect_uris",
"post_logout_redirect_uris",
"allowed_origins",
"algorithm",
),
)
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
| ApplicationRegistration |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.