language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_integrations.py | {
"start": 120,
"end": 4313
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-integrations"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.integration = self.create_integration(
organization=self.organization,
provider="example",
name="Example",
external_id="example:1",
)
self.msteams_integration = self.create_integration(
organization=self.organization,
provider="msteams",
name="MS Teams",
external_id="msteams:1",
)
self.opsgenie = self.create_integration(
organization=self.organization,
provider="opsgenie",
name="Opsgenie",
external_id="opsgenie:1",
)
self.slack_integration = self.create_integration(
organization=self.organization,
provider="slack",
name="Slack",
external_id="slack:1",
)
def test_simple(self) -> None:
response = self.get_success_response(self.organization.slug)
assert len(response.data) == 4
assert response.data[0]["id"] == str(self.integration.id)
assert "configOrganization" in response.data[0]
def test_no_config(self) -> None:
response = self.get_success_response(self.organization.slug, qs_params={"includeConfig": 0})
assert "configOrganization" not in response.data[0]
def test_feature_filters(self) -> None:
response = self.get_success_response(
self.organization.slug, qs_params={"features": "issue_basic"}
)
assert response.data[0]["id"] == str(self.integration.id)
response = self.get_success_response(
self.organization.slug, qs_params={"features": "codeowners"}
)
assert response.data == []
def test_provider_key(self) -> None:
response = self.get_success_response(
self.organization.slug, qs_params={"providerKey": "example"}
)
assert response.data[0]["id"] == str(self.integration.id)
response = self.get_success_response(
self.organization.slug, qs_params={"provider_key": "example"}
)
assert response.data[0]["id"] == str(self.integration.id)
response = self.get_success_response(
self.organization.slug, qs_params={"provider_key": "vercel"}
)
assert response.data == []
def test_integration_type(self) -> None:
response = self.get_success_response(
self.organization.slug, qs_params={"integrationType": "messaging"}
)
assert len(response.data) == 2
assert response.data[0]["id"] == str(self.msteams_integration.id)
assert response.data[1]["id"] == str(self.slack_integration.id)
response = self.get_success_response(
self.organization.slug, qs_params={"integrationType": "on_call_scheduling"}
)
assert len(response.data) == 1
assert response.data[0]["id"] == str(self.opsgenie.id)
response = self.get_error_response(
self.organization.slug, qs_params={"integrationType": "third_party"}
)
assert response.data == {"detail": "Invalid integration type"}
assert response.status_code == 400
def test_provider_key_and_integration_type(self) -> None:
response = self.get_success_response(
self.organization.slug,
qs_params={"providerKey": "slack", "integrationType": "messaging"},
)
assert len(response.data) == 1
assert response.data[0]["id"] == str(self.slack_integration.id)
response = self.get_success_response(
self.organization.slug,
qs_params={"providerKey": "vercel", "integrationType": "messaging"},
)
assert response.data == []
response = self.get_error_response(
self.organization.slug,
qs_params={"providerKey": "slack", "integrationType": "third_party"},
)
assert response.data == {"detail": "Invalid integration type"}
assert response.status_code == 400
| OrganizationIntegrationsListTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/unit_tests/integration/test_assignees.py | {
"start": 820,
"end": 8570
} | class ____(TestCase):
def setUp(self) -> None:
"""Base setup for all tests. Add responses for:
1. rate limit checker
2. repositories
3. branches
"""
self.r_mock = HttpMocker()
self.r_mock.__enter__()
self.r_mock.get(
HttpRequest(
url="https://api.github.com/rate_limit",
query_params={},
headers={
"Accept": "application/vnd.github+json",
"X-GitHub-Api-Version": "2022-11-28",
"Authorization": "token GITHUB_TEST_TOKEN",
},
),
HttpResponse(
json.dumps(
{
"resources": {
"core": {"limit": 5000, "used": 0, "remaining": 5000, "reset": 5070908800},
"graphql": {"limit": 5000, "used": 0, "remaining": 5000, "reset": 5070908800},
}
}
),
200,
),
)
self.r_mock.get(
HttpRequest(
url=f"https://api.github.com/repos/{_CONFIG.get('repositories')[0]}",
query_params={"per_page": 100},
),
HttpResponse(json.dumps({"full_name": "airbytehq/mock-test-0", "default_branch": "master"}), 200),
)
self.r_mock.get(
HttpRequest(
url=f"https://api.github.com/repos/{_CONFIG.get('repositories')[1]}",
query_params={"per_page": 100},
),
HttpResponse(json.dumps({"full_name": "airbytehq/mock-test-1", "default_branch": "master"}), 200),
)
self.r_mock.get(
HttpRequest(
url=f"https://api.github.com/repos/{_CONFIG.get('repositories')[2]}",
query_params={"per_page": 100},
),
HttpResponse(json.dumps({"full_name": "airbytehq/mock-test-2", "default_branch": "master"}), 200),
)
def teardown(self):
"""Stops and resets HttpMocker instance."""
self.r_mock.__exit__()
def test_read_full_refresh_emits_per_partition_state(self):
"""Ensure http integration and per-partition state is emitted correctly"""
self.r_mock.get(
HttpRequest(
url=f"https://api.github.com/repos/{_CONFIG.get('repositories')[0]}/assignees",
query_params={"per_page": 100},
),
HttpResponse(json.dumps(find_template("assignees", __file__)), 200),
)
self.r_mock.get(
HttpRequest(
url=f"https://api.github.com/repos/{_CONFIG.get('repositories')[1]}/assignees",
query_params={"per_page": 100},
),
HttpResponse(json.dumps(find_template("assignees", __file__)), 200),
)
self.r_mock.get(
HttpRequest(
url=f"https://api.github.com/repos/{_CONFIG.get('repositories')[2]}/assignees",
query_params={"per_page": 100},
),
HttpResponse(json.dumps(find_template("assignees", __file__)), 200),
)
per_partition_state_0 = {"partition": {"repository": "airbytehq/mock-test-0"}, "cursor": {"__ab_full_refresh_sync_complete": True}}
per_partition_state_1 = {"partition": {"repository": "airbytehq/mock-test-1"}, "cursor": {"__ab_full_refresh_sync_complete": True}}
per_partition_state_2 = {"partition": {"repository": "airbytehq/mock-test-2"}, "cursor": {"__ab_full_refresh_sync_complete": True}}
source = SourceGithub()
actual_messages = read(source, config=_CONFIG, catalog=_create_catalog())
assert len(actual_messages.records) == 6
# Validates that each time we sync a parent partition, the size of the per-partition state is increasing for the final
# state of each parent record
assert len(actual_messages.state_messages) == 3
actual_state_after_first_partition = actual_messages.state_messages[0].state.stream.stream_state.model_dump()
assert len(actual_state_after_first_partition.get("states")) == 1
actual_state_after_second_partition = actual_messages.state_messages[1].state.stream.stream_state.model_dump()
assert len(actual_state_after_second_partition.get("states")) == 2
actual_state_after_third_partition = actual_messages.state_messages[2].state.stream.stream_state.model_dump()
assert len(actual_state_after_third_partition.get("states")) == 3
# Validate that the final set of per-partition states includes the terminal value for each successful parent
final_list_of_per_partition_state = actual_state_after_third_partition.get("states")
assert per_partition_state_0 in final_list_of_per_partition_state
assert per_partition_state_1 in final_list_of_per_partition_state
assert per_partition_state_2 in final_list_of_per_partition_state
def test_read_full_refresh_emits_per_partition_state(self):
"""Ensure that incoming RFR state skips parent records from state that have already been synced on a prior attempt"""
self.r_mock.get(
HttpRequest(
url=f"https://api.github.com/repos/{_CONFIG.get('repositories')[0]}/assignees",
query_params={"per_page": 100},
),
HttpResponse(json.dumps(find_template("assignees", __file__)), 200),
)
self.r_mock.get(
HttpRequest(
url=f"https://api.github.com/repos/{_CONFIG.get('repositories')[1]}/assignees",
query_params={"per_page": 100},
),
HttpResponse(json.dumps(find_template("assignees", __file__)), 200),
)
self.r_mock.get(
HttpRequest(
url=f"https://api.github.com/repos/{_CONFIG.get('repositories')[2]}/assignees",
query_params={"per_page": 100},
),
HttpResponse(json.dumps(find_template("assignees", __file__)), 200),
)
per_partition_state_0 = {"partition": {"repository": "airbytehq/mock-test-0"}, "cursor": {"__ab_full_refresh_sync_complete": True}}
per_partition_state_1 = {"partition": {"repository": "airbytehq/mock-test-1"}, "cursor": {"__ab_full_refresh_sync_complete": True}}
per_partition_state_2 = {"partition": {"repository": "airbytehq/mock-test-2"}, "cursor": {"__ab_full_refresh_sync_complete": True}}
incoming_state = (
StateBuilder()
.with_stream_state(
"assignees",
{
"states": [
{"partition": {"repository": "airbytehq/mock-test-0"}, "cursor": {"__ab_full_refresh_sync_complete": True}},
{"partition": {"repository": "airbytehq/mock-test-1"}, "cursor": {"__ab_full_refresh_sync_complete": True}},
]
},
)
.build()
)
source = SourceGithub()
actual_messages = read(source, config=_CONFIG, catalog=_create_catalog(), state=incoming_state)
assert len(actual_messages.records) == 2
# There should only be on state message since the first two parents were already successfully synced
assert len(actual_messages.state_messages) == 1
final_list_of_per_partition_state = actual_messages.state_messages[0].state.stream.stream_state.states
assert per_partition_state_0 in final_list_of_per_partition_state
assert per_partition_state_1 in final_list_of_per_partition_state
assert per_partition_state_2 in final_list_of_per_partition_state
| AssigneesTest |
python | ray-project__ray | python/ray/experimental/channel/serialization_context.py | {
"start": 234,
"end": 10055
} | class ____:
def __init__(self):
# If true, then tensors found in the data to serialize are extracted
# and the caller should send them through an external transport.
self._use_external_transport: bool = False
# If _use_external_transport is True, then these are
# the tensors that should be sent or received
# out-of-band, through the external transport.
self._out_of_band_tensors: List["torch.Tensor"] = []
# During serialization, tensors sent out-of-band are replaced with
# integer placeholders. This tracks the set of placeholders seen.
self._deserialized_tensor_placeholders: Set[int] = set()
# Buffer for transferring data between tasks in the same worker process.
# The key is the channel ID, and the value is the data. We don't use a
# lock when reading/writing the buffer because a DAG node actor will only
# execute one task at a time in `do_exec_tasks`. It will not execute multiple
# Ray tasks on a single actor simultaneously.
self.intra_process_channel_buffers: Dict[str, Any] = {}
# The number of readers for each channel. When the number of readers
# reaches 0, remove the data from the buffer.
self.channel_id_to_num_readers: Dict[str, int] = {}
def set_target_device(self, device: Device) -> None:
self._target_device = device
def set_data(self, channel_id: str, value: Any, num_readers: int) -> None:
assert num_readers > 0, "num_readers must be greater than 0."
assert (
channel_id not in self.intra_process_channel_buffers
), f"Channel {channel_id} already exists in the buffer."
assert (
channel_id not in self.channel_id_to_num_readers
), f"Channel {channel_id} already exists in the channel_id_to_num_readers."
self.intra_process_channel_buffers[channel_id] = value
self.channel_id_to_num_readers[channel_id] = num_readers
def has_data(self, channel_id: str) -> bool:
return channel_id in self.intra_process_channel_buffers
def get_data(self, channel_id: str) -> Any:
assert (
channel_id in self.intra_process_channel_buffers
), f"Channel {channel_id} does not exist in the buffer."
assert (
channel_id in self.channel_id_to_num_readers
), f"Channel {channel_id} does not exist in the channel_id_to_num_readers."
self.channel_id_to_num_readers[channel_id] -= 1
if self.channel_id_to_num_readers[channel_id] == 0:
# All readers have read the data, so we can remove it.
self.channel_id_to_num_readers.pop(channel_id)
return self.intra_process_channel_buffers.pop(channel_id)
return self.intra_process_channel_buffers[channel_id]
def reset_data(self, channel_id: str) -> None:
self.intra_process_channel_buffers.pop(channel_id, None)
self.channel_id_to_num_readers.pop(channel_id, None)
def set_use_external_transport(self, use_external_transport: bool) -> None:
self._use_external_transport = use_external_transport
@property
def use_external_transport(self) -> bool:
return self._use_external_transport
def reset_out_of_band_tensors(
self, tensors: List["torch.Tensor"]
) -> Tuple[List["torch.Tensor"], Set[int]]:
"""
Return and reset the out-of-band tensors and all tensor placeholders
that were deserialized since the last call to reset.
"""
prev_tensors = self._out_of_band_tensors
deserialized_tensor_placeholders = self._deserialized_tensor_placeholders
self._out_of_band_tensors = tensors
self._deserialized_tensor_placeholders = set()
return prev_tensors, deserialized_tensor_placeholders
def serialize_tensor(
self, tensor: "torch.Tensor"
) -> Union[int, Tuple["np.ndarray", "torch.dtype", str]]:
from ray.experimental.channel import ChannelContext
ctx = ChannelContext.get_current()
if self._use_external_transport and (
ctx._torch_device is None or ctx._torch_device == tensor.device
):
# External transport is enabled and we found a tensor that matches
# our device. Add the actual tensor to a buffer. The buffer of
# tensors should later be popped by the caller and sent via
# external transport.
self._out_of_band_tensors.append(tensor)
# Return a placeholder.
return len(self._out_of_band_tensors) - 1
return self.serialize_to_numpy_or_scalar(tensor)
def serialize_to_numpy_or_scalar(
self, tensor: "torch.Tensor"
) -> Tuple[Union["np.ndarray", Any], "torch.dtype", str]:
"""
Serialize a tensor to a numpy array,
or a scalar when the tensor is 0-dim.
"""
import torch
tensor_device_type = tensor.device.type
# Transfer through Ray's shared memory store for now.
# TODO(swang): This requires two copies, one to transfer from GPU to
# CPU and another from CPU to shared memory. Ideally we should elide
# the first copy and memcpy directly from GPU to the shared memory
# buffer.
if tensor_device_type != "cpu":
tensor = tensor.to("cpu")
# Numpy does not have an equivalent dtype for all torch dtypes, so
# instead of casting directly to numpy:
# 1) for non-scalar tensors, we first use a view with a common dtype (uint8)
# and then view as numpy array.
# 2) for scalar tensors, we cannot use a uint8 view when the size differs,
# so we save the original item and type information.
if tensor.dim() > 0:
return (tensor.view(torch.uint8).numpy(), tensor.dtype, tensor_device_type)
else:
return (tensor.item(), tensor.dtype, tensor_device_type)
def deserialize_tensor(
self,
val: Union[Tuple["np.ndarray", "torch.dtype", str], int],
target_device: Device,
):
# Found a placeholder for a tensor that was serialized via accelerator.
# Replace it with the corresponding deserialized tensor.
if isinstance(val, int):
placeholder = val
self._deserialized_tensor_placeholders.add(placeholder)
assert placeholder < len(self._out_of_band_tensors), (
"placeholder",
placeholder,
"out_of_band_tensors",
self._out_of_band_tensors,
)
tensor = self._out_of_band_tensors[placeholder]
if target_device == Device.CPU:
tensor = tensor.to("cpu")
return tensor
np_array, dtype, tensor_device_type = val
return self.deserialize_from_numpy_or_scalar(
np_array, dtype, tensor_device_type, target_device
)
def deserialize_from_numpy_or_scalar(
self,
np_array: Union["np.ndarray", Any],
dtype: "torch.dtype",
tensor_device_type: str,
target_device: Device,
):
import numpy as np
import torch
if target_device == Device.DEFAULT:
target_device_type = tensor_device_type
elif target_device in [Device.GPU, Device.CUDA]:
target_device_type = "cuda"
else:
target_device_type = target_device.value
# TODO(swang): Support local P2P transfers if available.
if target_device_type != "cpu":
def convert_numpy_to_tensor(np_array):
if not isinstance(np_array, np.ndarray):
# For scalar tensors, create the 0-dim tensor.
return torch.tensor(
np_array, device=target_device_type, dtype=dtype
)
else:
# For non-scalar tensors, view as the original dtype.
# It does zero-copy convert np_array inside shared memory to
# a tensor. Since we move data to GPU immediately, it is safe.
cpu_tensor = torch.from_numpy(np_array).view(dtype)
return cpu_tensor.to(device=target_device_type)
global _TORCH_WARNING_FILTER_ACTIVATE
# filtering warning messages would be the bottleneck for
# deserializing torch tensors. Since the warning only prompts once,
# we would only deal with it for the first time.
if _TORCH_WARNING_FILTER_ACTIVATE:
with warnings.catch_warnings():
# Since np_array.is_writable is False (it is set by Ray),
# this raises a warning. Suppress it.
warnings.filterwarnings(
"ignore",
category=UserWarning,
message="The given NumPy array is not writable",
)
gpu_tensor = convert_numpy_to_tensor(np_array)
_TORCH_WARNING_FILTER_ACTIVATE = False
else:
gpu_tensor = convert_numpy_to_tensor(np_array)
return gpu_tensor
# TODO(swang): Use zero-copy from_numpy() if np_array.flags.writeable
# is True. This is safe to set when deserializing np_array if the
# upstream task has num_readers=1.
if not isinstance(np_array, np.ndarray):
# For scalar tensors, create the 0-dim tensor.
return torch.tensor(np_array, device=target_device_type, dtype=dtype)
else:
# For non-scalar tensors, view as the original dtype.
return torch.tensor(np_array, device=target_device_type).view(dtype)
| _SerializationContext |
python | django__django | django/contrib/postgres/aggregates/statistics.py | {
"start": 1277,
"end": 1339
} | class ____(StatAggregate):
function = "REGR_SLOPE"
| RegrSlope |
python | doocs__leetcode | solution/2300-2399/2390.Removing Stars From a String/Solution.py | {
"start": 0,
"end": 222
} | class ____:
def removeStars(self, s: str) -> str:
ans = []
for c in s:
if c == '*':
ans.pop()
else:
ans.append(c)
return ''.join(ans)
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/test_utils.py | {
"start": 10623,
"end": 11540
} | class ____(RunLauncher, ConfigurableClass):
def __init__(self, inst_data: Optional[ConfigurableClassData] = None):
self._inst_data = inst_data
super().__init__()
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@classmethod
def config_type(cls) -> Mapping[str, Any]:
return {}
@classmethod
def from_config_value(
cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]
) -> Self:
return cls(inst_data=inst_data)
def launch_run(self, context) -> NoReturn:
raise NotImplementedError("The entire purpose of this is to throw on launch")
def join(self, timeout: float = 30) -> None:
"""Nothing to join on since all executions are synchronous."""
def terminate(self, run_id):
check.not_implemented("Termination not supported")
| ExplodingRunLauncher |
python | pytorch__pytorch | torchgen/model.py | {
"start": 106289,
"end": 112143
} | class ____:
view: NativeFunction
# Note: the {view}_copy operator is optional because we currently don't generate copy variants
# for all view ops. Notably, we don't generate them for CompositeImplicitAutograd views
# (we already get them "for free" through decomposition)
view_copy: NativeFunction | None
# view_inplace ops are also optional, but every view_inplace op should have out-of-place variant.
view_inplace: NativeFunction | None
def __post_init__(self) -> None:
assert self.view.is_view_op
if self.view_copy is None:
assert not gets_generated_view_copy(self.view), (
f"{str(self.view.func.name)} appears to be a new operator that aliases its inputs."
" The codegen expects you to add a corresponding operator to native_functions.yaml:"
f" {get_view_copy_name(self.view)!s}."
" See Note [view_copy NativeFunctions] for details."
)
else:
assert self.view_copy.func.name.name.base.endswith(("_copy", "_scatter"))
assert self.view.func.signature() == self.view_copy.func.signature(
strip_view_copy_name=True,
)
assert "view_copy" in self.view_copy.tags, (
f"{str(self.view_copy.func.name), str(self.view.tags)} appears to be a view_copy operator. The codegen expects"
" view_copy operators to be annotated with the 'view_copy' tag in native_functions.yaml."
" See Note [view_copy NativeFunction] for details."
)
if self.view_inplace is not None:
assert self.view.func.signature() == self.view_inplace.func.signature()
if self.view.has_composite_implicit_autograd_kernel:
if self.view_inplace is not None:
assert self.view_inplace.has_composite_implicit_autograd_kernel, (
f"{str(self.view.func.name)} and {str(self.view_inplace.func.name)} must either"
" both have CompositeImplicitAutograd kernels, or both not have composite kernels."
)
if self.view.has_composite_implicit_autograd_nested_tensor_kernel:
if self.view_inplace is not None:
assert self.view_inplace.has_composite_implicit_autograd_nested_tensor_kernel, (
f"{str(self.view.func.name)} and {str(self.view_inplace.func.name)} must either"
" both have CompositeImplicitAutogradNestedTensor kernels, or both not have composite kernels."
)
def functions(self, *, include_copy: bool = True) -> Iterator[NativeFunction]:
yield self.view
if self.view_inplace is not None:
yield self.view_inplace
if self.view_copy is not None and include_copy:
yield self.view_copy
@property
def root_name(self) -> str:
return self.view.root_name
@property
def composite(self) -> bool:
# We currently assert that the "group" is consistent.
# If the view op is composite, then its view_inplace op is too.
return self.view.has_composite_implicit_autograd_kernel
def gets_generated_view_copy(f: NativeFunction) -> bool:
# Only aliasing (view) operators get a copy variant.
if not f.is_view_op:
return False
# We don't need to bother generating copy variants for CompositeImplicitAutograd ops,
# because we can let them decompose into base view ops.
if f.has_composite_implicit_autograd_kernel:
return False
# We also don't need to generate copy variants for inplace views.
if "inplace_view" in f.tags:
return False
# Assume ops ending in _inverse have manually-defined copy variants
# (e.g. slice_inverse() has the copy variant slice_scatter()).
# We -could- probably generate these as well, but the codegen will be
# slightly different, and hand-writing these few kernels keeps codegen
# complexity lower.
if f.func.name.name.base.endswith("_inverse"):
return False
return True
# Given a NativeFunction that corresponds to a view op,
# returns the OperatorName of the corresponding "copy" variant of the op.
def get_view_copy_name(f: NativeFunction) -> OperatorName:
# Right now, when asking for a view op's corresponding "view_copy" name
# we assert for sanity that the op is allowed to have a generated view_copy variant.
# (We can do this because "gets_generated_view_copy()" tell us which ops get a generated view_copy op).
# However, narrow_copy() already exists as an op directly in native_functions.yaml.
# I'm hardcoding narrow_copy here for now to maintain the assert,
# But we could also just get rid of the assert.
list_of_ops_with_explicit_view_copy_operators = ["narrow"]
if str(f.func.name) not in list_of_ops_with_explicit_view_copy_operators:
assert gets_generated_view_copy(f)
base_name = f"{f.func.name.name.base}_copy"
view_copy_name = OperatorName(
name=BaseOperatorName(
base=base_name, inplace=False, dunder_method=f.func.name.name.dunder_method
),
overload_name=f.func.name.overload_name,
)
return view_copy_name
# Helper functions for parsing argument lists (both inputs and returns)
def parse_returns(return_decl: str) -> tuple[Return, ...]:
"""
Input: '()'
Output: []
"""
if return_decl == "()":
return ()
if return_decl[0] == "(" and return_decl[-1] == ")":
return_decl = return_decl[1:-1]
return tuple(Return.parse(arg) for arg in return_decl.split(", "))
# A Precompute instance consists of a map from kernel argument name
# to the list of Argument instances that should replace that
# kernel argument in the impl function.
@dataclass(frozen=True)
| NativeFunctionsViewGroup |
python | openai__openai-python | src/openai/types/responses/response_mcp_list_tools_failed_event.py | {
"start": 208,
"end": 604
} | class ____(BaseModel):
item_id: str
"""The ID of the MCP tool call item that failed."""
output_index: int
"""The index of the output item that failed."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.mcp_list_tools.failed"]
"""The type of the event. Always 'response.mcp_list_tools.failed'."""
| ResponseMcpListToolsFailedEvent |
python | has2k1__plotnine | plotnine/guides/guide_colorbar.py | {
"start": 938,
"end": 13533
} | class ____(guide):
"""
Guide colorbar
Notes
-----
To correctly place a rasterized colorbar when saving the plot as an `svg`
or `pdf`, you should set the `dpi` to 72 i.e. `theme(dpi=72)`{.py}.
"""
nbin: Optional[int] = None
"""
Number of bins for drawing a colorbar. A larger value yields
a smoother colorbar
"""
display: Literal["gradient", "rectangles", "raster"] = "gradient"
"""How to render the colorbar."""
alpha: Optional[float] = None
"""
Opacity (in the range `[0, 1]`) of the colorbar. The default
`None`, is to use the opacity of the plot.
"""
draw_ulim: bool = True
"""Whether to show the upper limit tick marks."""
draw_llim: bool = True
"""Whether to show the lower limit tick marks. """
# Non-Parameter Attributes
available_aes: set[str] = field(
init=False, default_factory=lambda: {"colour", "color", "fill"}
)
def __post_init__(self):
self._elements_cls = GuideElementsColorbar
self.elements: GuideElementsColorbar
if self.nbin is None:
self.nbin = 300 # if self.display == "gradient" else 300
def train(self, scale: scale, aesthetic=None):
self.nbin = cast("int", self.nbin)
self.title = cast("str", self.title)
if not isinstance(scale, scale_continuous):
warn("colorbar guide needs continuous scales", PlotnineWarning)
return None
if aesthetic is None:
aesthetic = scale.aesthetics[0]
# Do nothing if scales are inappropriate
if set(scale.aesthetics) & self.available_aes == 0:
warn("colorbar guide needs appropriate scales.", PlotnineWarning)
return None
# value = breaks (numeric) is used for determining the
# position of ticks
limits = scale.final_limits
breaks = scale.get_bounded_breaks()
if not len(breaks):
return None
self.key = pd.DataFrame(
{
aesthetic: scale.map(breaks),
"label": scale.get_labels(breaks),
"value": breaks,
}
)
bar = np.linspace(limits[0], limits[1], self.nbin)
self.bar = pd.DataFrame({"color": scale.map(bar), "value": bar})
labels = " ".join(str(x) for x in self.key["label"])
info = "\n".join(
[
self.title,
labels,
" ".join(self.bar["color"].tolist()),
self.__class__.__name__,
]
)
self.hash = hashlib.sha256(info.encode("utf-8")).hexdigest()
return self
def merge(self, other):
"""
Simply discards the other guide
"""
return self
def create_geoms(self):
"""
Return self if colorbar will be drawn and None if not
This guide is not geom based
"""
for l in self.plot_layers:
exclude = set()
if isinstance(l.show_legend, dict):
l.show_legend = rename_aesthetics(l.show_legend)
exclude = {ae for ae, val in l.show_legend.items() if not val}
elif l.show_legend not in (None, True):
continue
matched = self.legend_aesthetics(l)
# layer uses guide
if set(matched) - exclude:
break
# no break, no layer uses this guide
else:
return None
return self
def draw(self):
"""
Draw guide
Returns
-------
out : matplotlib.offsetbox.Offsetbox
A drawing of this legend
"""
from matplotlib.offsetbox import (
HPacker,
TextArea,
VPacker,
)
from matplotlib.transforms import IdentityTransform
from .._mpl.offsetbox import DPICorAuxTransformBox
self.theme = cast("theme", self.theme)
obverse = slice(0, None)
reverse = slice(None, None, -1)
nbars = len(self.bar)
elements = self.elements
raster = self.display == "raster"
colors = self.bar["color"].tolist()
labels = self.key["label"].tolist()
targets = self.theme.targets
# .5 puts the ticks in the middle of the bars when
# raster=False. So when raster=True the ticks are
# in between interpolation points and the matching is
# close though not exactly right.
_from = self.bar["value"].min(), self.bar["value"].max()
tick_locations = (
rescale(self.key["value"], (0.5, nbars - 0.5), _from)
* elements.key_height
/ nbars
)
# With many bins, the ticks approach the edges of the colorbar.
# This may look odd if there is a border and the top & bottom ticks
# partly overlap the border only because of floating point arithmetic.
# This eliminates some of those cases so that user does no have to
# use llim and ulim
if nbars >= 150 and len(tick_locations) >= 2:
tick_locations = [
np.floor(tick_locations[0]),
*np.round(tick_locations[1:-1]),
np.ceil(tick_locations[-1]),
]
if self.reverse:
colors = colors[::-1]
labels = labels[::-1]
tick_locations = elements.key_height - tick_locations[::-1]
auxbox = DPICorAuxTransformBox(IdentityTransform())
# title
title = cast("str", self.title)
props = {"ha": elements.title.ha, "va": elements.title.va}
title_box = TextArea(title, textprops=props)
targets.legend_title = title_box._text # type: ignore
# labels
if not self.elements.text.is_blank:
texts = add_labels(auxbox, labels, tick_locations, elements)
targets.legend_text_colorbar = texts
# colorbar
if self.display == "rectangles":
add_segmented_colorbar(auxbox, colors, elements)
else:
add_gradient_colorbar(auxbox, colors, elements, raster)
# ticks
visible = slice(
None if self.draw_llim else 1,
None if self.draw_ulim else -1,
)
coll = add_ticks(auxbox, tick_locations[visible], elements)
targets.legend_ticks = coll
# frame
frame = add_frame(auxbox, elements)
targets.legend_frame = frame
# title + colorbar(with labels)
lookup: dict[Side, tuple[type[PackerBase], slice]] = {
"right": (HPacker, reverse),
"left": (HPacker, obverse),
"bottom": (VPacker, reverse),
"top": (VPacker, obverse),
}
packer, slc = lookup[elements.title_position]
if elements.title.is_blank:
children: list[Artist] = [auxbox]
else:
children = [title_box, auxbox][slc]
box = packer(
children=children,
sep=elements.title.margin,
align=elements.title.align,
pad=0,
)
return box
guide_colourbar = guide_colorbar
def add_gradient_colorbar(
auxbox: AuxTransformBox,
colors: Sequence[str],
elements: GuideElementsColorbar,
raster: bool = False,
):
"""
Add an interpolated gradient colorbar to DrawingArea
"""
from matplotlib.collections import QuadMesh
from matplotlib.colors import ListedColormap
# Special case that arises due to not so useful
# aesthetic mapping.
if len(colors) == 1:
colors = [colors[0], colors[0]]
# Number of horizontal edges(breaks) in the grid
# No need to create more nbreak than colors, provided
# no. of colors = no. of breaks = no. of cmap colors
# the shading does a perfect interpolation
nbreak = len(colors)
if elements.is_vertical:
colorbar_height = elements.key_height
colorbar_width = elements.key_width
mesh_width = 1
mesh_height = nbreak - 1
linewidth = colorbar_height / mesh_height
# Construct rectangular meshgrid
# The values(Z) at each vertex are just the
# normalized (onto [0, 1]) vertical distance
x = np.array([0, colorbar_width])
y = np.arange(0, nbreak) * linewidth
X, Y = np.meshgrid(x, y)
Z = Y / y.max()
else:
colorbar_width = elements.key_height
colorbar_height = elements.key_width
mesh_width = nbreak - 1
mesh_height = 1
linewidth = colorbar_width / mesh_width
x = np.arange(0, nbreak) * linewidth
y = np.array([0, colorbar_height])
X, Y = np.meshgrid(x, y)
Z = X / x.max()
# As a 3D (mesh_width x mesh_height x 2) coordinates array
coordinates = np.stack([X, Y], axis=-1)
cmap = ListedColormap(colors)
coll = QuadMesh(
coordinates,
antialiased=False,
shading="gouraud",
cmap=cmap,
array=Z.ravel(),
rasterized=raster,
)
auxbox.add_artist(coll)
def add_segmented_colorbar(
    auxbox: AuxTransformBox,
    colors: Sequence[str],
    elements: GuideElementsColorbar,
):
    """
    Add a 'non-rastered' colorbar to the AuxTransformBox.

    One rectangle (quad) is created per colour, so the bar is a run
    of discrete segments rather than an interpolated gradient.
    """
    from matplotlib.collections import PolyCollection

    n = len(colors)
    # The bar runs along key_height and is key_width across,
    # regardless of orientation.
    step = elements.key_height / n
    breadth = elements.key_width

    verts = []
    for i in range(n):
        lo = i * step
        hi = lo + step
        if elements.is_vertical:
            # Segments stack upwards; x spans the full breadth.
            verts.append(((0, lo), (0, hi), (breadth, hi), (breadth, lo)))
        else:
            # Segments run rightwards; y spans the full breadth.
            verts.append(((lo, 0), (lo, breadth), (hi, breadth), (hi, 0)))

    segments = PolyCollection(
        verts,
        facecolors=colors,
        linewidth=0,
        antialiased=False,
    )
    auxbox.add_artist(segments)
def add_ticks(auxbox, locations, elements) -> LineCollection:
    """
    Add tick marks to the colorbar.

    Two short segments are drawn for each location: one growing
    inwards from either edge of the colorbar.
    """
    from matplotlib.collections import LineCollection

    frac = elements.ticks_length
    # Tick endpoints across the bar's breadth: the ticks cover
    # [0, frac] and [1 - frac, 1] of the key width.
    inner_lo, inner_hi, outer_lo, outer_hi = (
        np.array([0.0, frac, 1 - frac, 1]) * elements.key_width
    )

    if elements.is_vertical:
        segments = [
            seg
            for loc in locations
            for seg in (
                ((inner_lo, loc), (inner_hi, loc)),
                ((outer_lo, loc), (outer_hi, loc)),
            )
        ]
    else:
        segments = [
            seg
            for loc in locations
            for seg in (
                ((loc, inner_lo), (loc, inner_hi)),
                ((loc, outer_lo), (loc, outer_hi)),
            )
        ]

    ticks = LineCollection(segments)
    auxbox.add_artist(ticks)
    return ticks
def add_labels(
    auxbox: AuxTransformBox,
    labels: Sequence[str],
    ys: Sequence[float],
    elements: GuideElementsColorbar,
) -> list[Text]:
    """
    Return the Texts added to the auxbox.
    """
    from matplotlib.text import Text

    n = len(labels)
    gap = elements.text.margin
    # Label offsets just outside the bar on either side.
    near = -gap
    far = elements.key_width + gap

    # The horizontal and vertical alignments are set in the theme
    # or dynamically calculated in GuideElements and added to the
    # themeable properties dict.
    if elements.is_vertical:
        x_off = far if elements.text_position == "right" else near
        xs = [x_off] * n
    else:
        # For a horizontal bar the given positions run along x and
        # the labels sit at a constant y offset.
        xs = ys
        y_off = near if elements.text_position == "bottom" else far
        ys = [y_off] * n

    texts: list[Text] = []
    for x, y, label, ha, va in zip(xs, ys, labels, elements.has(n), elements.vas(n)):
        txt = Text(x, y, label, ha=ha, va=va)
        auxbox.add_artist(txt)
        texts.append(txt)
    return texts
def add_frame(auxbox, elements):
    """
    Add a rectangular frame around the colorbar.
    """
    from matplotlib.patches import Rectangle
    # from .._mpl.patches import InsideStrokedRectangle as Rectangle

    w, h = elements.key_width, elements.key_height
    # For a horizontal colorbar the key dimensions are transposed.
    if elements.is_horizontal:
        w, h = h, w

    frame = Rectangle((0, 0), w, h, facecolor="none")
    auxbox.add_artist(frame)
    return frame
| guide_colorbar |
python | eventlet__eventlet | tests/queue_test.py | {
"start": 7988,
"end": 10594
} | class ____(tests.LimitedTestCase):
def test_put_nowait_simple(self):
hub = hubs.get_hub()
result = []
q = eventlet.Queue(1)
hub.schedule_call_global(0, store_result, result, q.put_nowait, 2)
hub.schedule_call_global(0, store_result, result, q.put_nowait, 3)
eventlet.sleep(0)
eventlet.sleep(0)
assert len(result) == 2, result
assert result[0] is None, result
assert isinstance(result[1], queue.Full), result
def test_get_nowait_simple(self):
hub = hubs.get_hub()
result = []
q = eventlet.Queue(1)
q.put(4)
hub.schedule_call_global(0, store_result, result, q.get_nowait)
hub.schedule_call_global(0, store_result, result, q.get_nowait)
eventlet.sleep(0)
assert len(result) == 2, result
assert result[0] == 4, result
assert isinstance(result[1], queue.Empty), result
# get_nowait must work from the mainloop
def test_get_nowait_unlock(self):
hub = hubs.get_hub()
result = []
q = eventlet.Queue(0)
p = eventlet.spawn(q.put, 5)
assert q.empty(), q
assert q.full(), q
eventlet.sleep(0)
assert q.empty(), q
assert q.full(), q
hub.schedule_call_global(0, store_result, result, q.get_nowait)
eventlet.sleep(0)
assert q.empty(), q
assert q.full(), q
assert result == [5], result
# TODO add ready to greenthread
# assert p.ready(), p
assert p.dead, p
assert q.empty(), q
# put_nowait must work from the mainloop
def test_put_nowait_unlock(self):
hub = hubs.get_hub()
result = []
q = eventlet.Queue(0)
eventlet.spawn(q.get)
assert q.empty(), q
assert q.full(), q
eventlet.sleep(0)
assert q.empty(), q
assert q.full(), q
hub.schedule_call_global(0, store_result, result, q.put_nowait, 10)
# TODO ready method on greenthread
# assert not p.ready(), p
eventlet.sleep(0)
assert result == [None], result
# TODO ready method
# assert p.ready(), p
assert q.full(), q
assert q.empty(), q
def test_wait_except(self):
# https://github.com/eventlet/eventlet/issues/407
q = eventlet.Queue()
def get():
q.get()
raise KeyboardInterrupt
eventlet.spawn(get)
eventlet.sleep()
with tests.assert_raises(KeyboardInterrupt):
q.put(None)
eventlet.sleep()
| TestNoWait |
python | langchain-ai__langchain | libs/core/langchain_core/callbacks/manager.py | {
"start": 53019,
"end": 65324
} | class ____(BaseCallbackManager):
"""Async callback manager that handles callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
run_id: UUID | None = None,
**kwargs: Any,
) -> list[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized: The serialized LLM.
prompts: The list of prompts.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.
Returns:
The list of async callback managers, one for each LLM Run corresponding to
each prompt.
"""
inline_tasks = []
non_inline_tasks = []
inline_handlers = [handler for handler in self.handlers if handler.run_inline]
non_inline_handlers = [
handler for handler in self.handlers if not handler.run_inline
]
managers = []
for prompt in prompts:
if run_id is not None:
run_id_ = run_id
run_id = None
else:
run_id_ = uuid.uuid4()
if inline_handlers:
inline_tasks.append(
ahandle_event(
inline_handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
else:
non_inline_tasks.append(
ahandle_event(
non_inline_handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
# Run inline tasks sequentially
for inline_task in inline_tasks:
await inline_task
# Run non-inline tasks concurrently
if non_inline_tasks:
await asyncio.gather(*non_inline_tasks)
return managers
async def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
run_id: UUID | None = None,
**kwargs: Any,
) -> list[AsyncCallbackManagerForLLMRun]:
"""Async run when LLM starts running.
Args:
serialized: The serialized LLM.
messages: The list of messages.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.
Returns:
The list of async callback managers, one for each LLM Run corresponding to
each inner message list.
"""
inline_tasks = []
non_inline_tasks = []
managers = []
for message_list in messages:
if run_id is not None:
run_id_ = run_id
run_id = None
else:
run_id_ = uuid.uuid4()
for handler in self.handlers:
task = ahandle_event(
[handler],
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
if handler.run_inline:
inline_tasks.append(task)
else:
non_inline_tasks.append(task)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
# Run inline tasks sequentially
for task in inline_tasks:
await task
# Run non-inline tasks concurrently
if non_inline_tasks:
await asyncio.gather(*non_inline_tasks)
return managers
async def on_chain_start(
self,
serialized: dict[str, Any] | None,
inputs: dict[str, Any] | Any,
run_id: UUID | None = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Async run when chain starts running.
Args:
serialized: The serialized chain.
inputs: The inputs to the chain.
run_id: The ID of the run.
**kwargs: Additional keyword arguments.
Returns:
The async callback manager for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@override
async def on_tool_start(
self,
serialized: dict[str, Any] | None,
input_str: str,
run_id: UUID | None = None,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when the tool starts running.
Args:
serialized: The serialized tool.
input_str: The input to the tool.
run_id: The ID of the run.
parent_run_id: The ID of the parent run.
**kwargs: Additional keyword arguments.
Returns:
The async callback manager for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_custom_event(
self,
name: str,
data: Any,
run_id: UUID | None = None,
**kwargs: Any,
) -> None:
"""Dispatch an adhoc event to the handlers (async version).
This event should NOT be used in any internal LangChain code. The event
is meant specifically for users of the library to dispatch custom
events that are tailored to their application.
Args:
name: The name of the adhoc event.
data: The data for the adhoc event.
run_id: The ID of the run.
Raises:
ValueError: If additional keyword arguments are passed.
"""
if not self.handlers:
return
if run_id is None:
run_id = uuid.uuid4()
if kwargs:
msg = (
"The dispatcher API does not accept additional keyword arguments."
"Please do not pass any additional keyword arguments, instead "
"include them in the data field."
)
raise ValueError(msg)
await ahandle_event(
self.handlers,
"on_custom_event",
"ignore_custom_event",
name,
data,
run_id=run_id,
tags=self.tags,
metadata=self.metadata,
)
@override
async def on_retriever_start(
self,
serialized: dict[str, Any] | None,
query: str,
run_id: UUID | None = None,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> AsyncCallbackManagerForRetrieverRun:
"""Run when the retriever starts running.
Args:
serialized: The serialized retriever.
query: The query.
run_id: The ID of the run.
parent_run_id: The ID of the parent run.
**kwargs: Additional keyword arguments.
Returns:
The async callback manager for the retriever run.
"""
if run_id is None:
run_id = uuid.uuid4()
await ahandle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False, # noqa: FBT001,FBT002
inheritable_tags: list[str] | None = None,
local_tags: list[str] | None = None,
inheritable_metadata: dict[str, Any] | None = None,
local_metadata: dict[str, Any] | None = None,
) -> AsyncCallbackManager:
"""Configure the async callback manager.
Args:
inheritable_callbacks: The inheritable callbacks.
local_callbacks: The local callbacks.
verbose: Whether to enable verbose mode.
inheritable_tags: The inheritable tags.
local_tags: The local tags.
inheritable_metadata: The inheritable metadata.
local_metadata: The local metadata.
Returns:
The configured async callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
verbose=verbose,
)
| AsyncCallbackManager |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-kaltura/llama_index/readers/kaltura_esearch/base.py | {
"start": 262,
"end": 12206
} | class ____(BaseReader):
"""Kaltura eSearch API Reader."""
def __init__(
self,
partner_id: int = 0,
api_secret: str = "INSERT_YOUR_ADMIN_SECRET",
user_id: str = "INSERT_YOUR_USER_ID",
ks_type: int = 2,
ks_expiry: int = 86400,
ks_privileges: str = "disableentitlement",
kaltura_api_endpoint: str = "https://cdnapi-ev.kaltura.com/",
request_timeout: int = 500,
should_log_api_calls: bool = False,
) -> None:
"""
Initialize a new instance of KalturaESearchReader.
Args:
partner_id (int): The Kaltura Account ID. Default is 0.
api_secret (str): The Kaltura API Admin Secret. Default is "INSERT_YOUR_ADMIN_SECRET".
user_id (str): User ID for executing and logging all API actions under. Default is "INSERT_YOUR_USER_ID".
ks_type (int): Type of Kaltura Session. Default is 2.
ks_expiry (int): Validity of the Kaltura session in seconds. Default is 86400.
ks_privileges (str): Kaltura session privileges. Default is "disableentitlement".
kaltura_api_endpoint (str): The Kaltura API endpoint. Default is "https://cdnapi-ev.kaltura.com/".
request_timeout (int): API request timeout in seconds. Default is 500.
should_log_api_calls (bool): Boolean value determining whether to log Kaltura requests. Default is False.
"""
self.partner_id = partner_id
self.api_secret = api_secret
self.user_id = user_id
self.ks_type = ks_type
self.ks_expiry = ks_expiry
self.ks_privileges = ks_privileges
self.kaltura_api_endpoint = kaltura_api_endpoint
self.request_timeout = request_timeout
self.should_log_api_calls = should_log_api_calls
# Kaltura libraries will be loaded when they are needed
self._kaltura_loaded = False
def _load_kaltura(self):
"""Load Kaltura libraries and initialize the Kaltura client."""
from KalturaClient import KalturaClient
from KalturaClient.Base import IKalturaLogger, KalturaConfiguration
from KalturaClient.Plugins.Core import KalturaSessionType
class KalturaLogger(IKalturaLogger):
def log(self, msg):
logging.info(msg)
try:
self.config = KalturaConfiguration()
self.config.requestTimeout = self.request_timeout
self.config.serviceUrl = self.kaltura_api_endpoint
if self.should_log_api_calls:
self.config.setLogger(KalturaLogger())
self.client = KalturaClient(self.config)
if self.ks_type is None:
self.ks_type = KalturaSessionType.ADMIN
self.ks = self.client.generateSessionV2(
self.api_secret,
self.user_id,
self.ks_type,
self.partner_id,
self.ks_expiry,
self.ks_privileges,
)
self.client.setKs(self.ks)
self._kaltura_loaded = True
except Exception:
logger.error("Kaltura Auth failed, check your credentials")
def _load_from_search_params(
self, search_params, with_captions: bool = True, max_entries: int = 10
) -> List[Dict[str, Any]]:
"""
Load search parameters and returns a list of entries.
Args:
search_params: Search parameters for Kaltura eSearch.
with_captions (bool): If True, the entries will include captions.
max_entries (int): Maximum number of entries to return.
Returns:
list: A list of entries as dictionaries,
if captions required entry_info will include all metadata and text will include transcript,
otherwise info is just entry_id and text is all metadata.
"""
from KalturaClient.Plugins.Core import KalturaPager
try:
entries = []
pager = KalturaPager()
pager.pageIndex = 1
pager.pageSize = max_entries
response = self.client.elasticSearch.eSearch.searchEntry(
search_params, pager
)
for search_result in response.objects:
entry = search_result.object
items_data = search_result.itemsData
entry_info = {
"entry_id": str(entry.id),
"entry_name": str(entry.name),
"entry_description": str(entry.description or ""),
"entry_media_type": int(entry.mediaType.value or 0),
"entry_media_date": int(entry.createdAt or 0),
"entry_ms_duration": int(entry.msDuration or 0),
"entry_last_played_at": int(entry.lastPlayedAt or 0),
"entry_application": str(entry.application or ""),
"entry_tags": str(entry.tags or ""),
"entry_reference_id": str(entry.referenceId or ""),
}
if with_captions:
caption_search_result = items_data[0].items[0]
if hasattr(caption_search_result, "captionAssetId"):
# TODO: change this to fetch captions per language, or as for a specific language code
caption_asset_id = caption_search_result.captionAssetId
entry_dict = {
"video_transcript": self._get_json_transcript(
caption_asset_id
)
}
else:
entry_dict = entry_info.copy()
entry_info = {"entry_id": str(entry.id)}
else:
entry_dict = entry_info.copy()
entry_info = {"entry_id": str(entry.id)}
entry_doc = Document(text=json.dumps(entry_dict), extra_info=entry_info)
entries.append(entry_doc)
return entries
except Exception as e:
if e.code == "INVALID_KS":
raise ValueError(f"Kaltura Auth failed, check your credentials: {e}")
logger.error(f"An error occurred while loading with search params: {e}")
return []
def _get_json_transcript(self, caption_asset_id):
"""
Fetch json transcript/captions from a given caption_asset_id.
Args:
caption_asset_id: The ID of the caption asset that includes the captions to fetch json transcript for
Returns:
A JSON transcript of the captions, or an empty dictionary if none found or an error occurred.
"""
# TODO: change this to fetch captions per language, or as for a specific language code
try:
cap_json_url = self.client.caption.captionAsset.serveAsJson(
caption_asset_id
)
return requests.get(cap_json_url).json()
except Exception as e:
logger.error(f"An error occurred while getting captions: {e}")
return {}
def load_data(
self,
search_params: Any = None,
search_operator_and: bool = True,
free_text: Optional[str] = None,
category_ids: Optional[str] = None,
with_captions: bool = True,
max_entries: int = 5,
) -> List[Dict[str, Any]]:
"""
Load data from the Kaltura based on search parameters.
The function returns a list of dictionaries.
Each dictionary represents a media entry, where the keys are strings (field names) and the values can be of any type.
Args:
search_params: search parameters of type KalturaESearchEntryParams with pre-set search queries. If not provided, the other parameters will be used to construct the search query.
search_operator_and: if True, the constructed search query will have AND operator between query filters, if False, the operator will be OR.
free_text: if provided, will be used as the free text query of the search in Kaltura.
category_ids: if provided, will only search for entries that are found inside these category ids.
withCaptions: determines whether or not to also download captions/transcript contents from Kaltura.
maxEntries: sets the maximum number of entries to pull from Kaltura, between 0 to 500 (max pageSize in Kaltura).
Returns:
List[Dict[str, Any]]: A list of dictionaries representing Kaltura Media Entries with the following fields:
entry_id:str, entry_name:str, entry_description:str, entry_captions:JSON,
entry_media_type:int, entry_media_date:int, entry_ms_duration:int, entry_last_played_at:int,
entry_application:str, entry_tags:str, entry_reference_id:str.
If with_captions is False, it sets entry_info to only include the entry_id and entry_dict to include all other entry information.
If with_captions is True, it sets entry_info to include all entry information and entry_dict to only include the entry transcript fetched via self._get_captions(items_data).
"""
from KalturaClient.Plugins.ElasticSearch import (
KalturaCategoryEntryStatus,
KalturaESearchCaptionFieldName,
KalturaESearchCaptionItem,
KalturaESearchCategoryEntryFieldName,
KalturaESearchCategoryEntryItem,
KalturaESearchEntryOperator,
KalturaESearchEntryParams,
KalturaESearchItemType,
KalturaESearchOperatorType,
KalturaESearchUnifiedItem,
)
# Load and initialize the Kaltura client
if not self._kaltura_loaded:
self._load_kaltura()
# Validate input parameters:
if search_params is None:
search_params = KalturaESearchEntryParams()
# Create an AND/OR relationship between the following search queries -
search_params.searchOperator = KalturaESearchEntryOperator()
if search_operator_and:
search_params.searchOperator.operator = (
KalturaESearchOperatorType.AND_OP
)
else:
search_params.searchOperator.operator = KalturaESearchOperatorType.OR_OP
search_params.searchOperator.searchItems = []
# Find only entries that have captions -
if with_captions:
caption_item = KalturaESearchCaptionItem()
caption_item.fieldName = KalturaESearchCaptionFieldName.CONTENT
caption_item.itemType = KalturaESearchItemType.EXISTS
search_params.searchOperator.searchItems.append(caption_item)
# Find only entries that are inside these category IDs -
if category_ids is not None:
category_item = KalturaESearchCategoryEntryItem()
category_item.categoryEntryStatus = KalturaCategoryEntryStatus.ACTIVE
category_item.fieldName = KalturaESearchCategoryEntryFieldName.FULL_IDS
category_item.addHighlight = False
category_item.itemType = KalturaESearchItemType.EXACT_MATCH
category_item.searchTerm = category_ids
search_params.searchOperator.searchItems.append(category_item)
# Find only entries that has this freeText found in them -
if free_text is not None:
unified_item = KalturaESearchUnifiedItem()
unified_item.searchTerm = free_text
unified_item.itemType = KalturaESearchItemType.PARTIAL
search_params.searchOperator.searchItems.append(unified_item)
return self._load_from_search_params(search_params, with_captions, max_entries)
| KalturaESearchReader |
python | google__pytype | pytype/errors/error_types.py | {
"start": 5107,
"end": 5274
} | class ____(Exception):
def __init__(self, left_type, other_type):
super().__init__()
self.left_type = left_type
self.other_type = other_type
| ProtocolError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 562865,
"end": 563366
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of DeleteVerifiableDomain"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "owner")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
owner = sgqlc.types.Field("VerifiableDomainOwner", graphql_name="owner")
"""The owning account from which the domain was deleted."""
| DeleteVerifiableDomainPayload |
python | encode__django-rest-framework | tests/test_encoders.py | {
"start": 355,
"end": 420
} | class ____:
def tolist(self):
return [1, 2, 3]
| MockList |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 191044,
"end": 194834
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
def _path(self, name):
base = "tensorflow/core/lib/webp/testdata/"
return os.path.join(base, name)
@parameterized.named_parameters([
("_rgbNoise", "RGB_noise_large_pixels_115x115.webp", (1, 115, 115, 3)),
("_lossless", "lossless_raw.webp", (1, 32, 32, 3)),
("_alpha", "lossy_alpha1.webp", (1, 307, 1000, 4)),
])
def testRegularFile(self, filename, expected_dimensions):
# Read a real WebP image, via both APIs and check they're equal.
with self.cached_session():
webp = io_ops.read_file(self._path(filename))
image0 = image_ops.decode_webp(webp)
image1 = image_ops.decode_image(webp)
webp, image0, image1 = self.evaluate([webp, image0, image1])
self.assertEqual(image0.shape, expected_dimensions)
self.assertAllEqual(image0, image1)
def testAnimation(self):
# Read a WebP animation file, via both APIs and check they're equal.
with self.cached_session():
webp = io_ops.read_file(self._path("bouncy_ball.webp"))
expected_dimensions = (15, 450, 450, 4)
image0 = image_ops.decode_webp(webp)
image1 = image_ops.decode_image(webp, expand_animations=True)
webp, image0, image1 = self.evaluate([webp, image0, image1])
self.assertEqual(image0.shape, expected_dimensions)
self.assertAllEqual(image0, image1)
def testAnimationFrame0(self):
# Read a WebP animation file, via both APIs, but drop
# animation. Compare frame 0.
with self.cached_session():
webp = io_ops.read_file(self._path("bouncy_ball.webp"))
expected_anim_dimensions = (15, 450, 450, 4)
expected_still_dimensions = (450, 450, 4)
# decode_webp will return all the frames, but we should get the
# same frame 0 in both cases.
image0 = image_ops.decode_webp(webp)
image1 = image_ops.decode_image(webp, expand_animations=False)
webp, image0, image1 = self.evaluate([webp, image0, image1])
self.assertEqual(image0.shape, expected_anim_dimensions)
self.assertEqual(image1.shape, expected_still_dimensions)
# Compare frame0 of image0 to image1.
self.assertAllEqual(image0[0, ...], image1)
def testChannelsArg(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
webp = io_ops.read_file(
self._path("RGB_noise_large_pixels_115x115.webp")
)
for channels in 0, 3, 4:
image = image_ops.decode_webp(webp, channels=channels)
self.assertEqual(
image.get_shape().as_list(), [None, None, None, channels or None]
)
def testInvalidChannels(self):
with self.cached_session():
webp = io_ops.read_file(self._path("RGB_noise_large_pixels_115x115.webp"))
# DecodeImage supports grayscale, but WebP does not.
message = "WebP only supports 3 or 4 channels"
with self.assertRaisesRegex(
(errors.InvalidArgumentError, ValueError), message
):
op = image_ops.decode_webp(webp, channels=1)
self.evaluate(op)
@parameterized.named_parameters(
[("_int8", np.int8), ("_int16", np.int16), ("_float32", np.float32)]
)
def testUnsupportedDtypes(self, dtype):
with self.cached_session():
webp = io_ops.read_file(self._path("RGB_noise_large_pixels_115x115.webp"))
message = "WebP only supports uint8"
with self.assertRaisesRegex(
(errors.InvalidArgumentError, ValueError), message
):
# Note: we're testing with decode_image, since decode_webp
# *statically* does not support anything other than uint8.
op = image_ops.decode_image(webp, dtype=dtype)
self.evaluate(op)
| WebpTest |
python | huggingface__transformers | src/transformers/models/jamba/modeling_jamba.py | {
"start": 30322,
"end": 32071
} | class ____(nn.Module):
"""
This implementation is
strictly equivalent to standard MoE with full capacity (no
dropped tokens). It's faster since it formulates MoE operations
in terms of block-sparse operations to accommodate imbalanced
assignments of tokens to experts, whereas standard MoE either
(1) drop tokens at the cost of reduced performance or (2) set
capacity factor to number of experts and thus waste computation
and memory on padding.
"""
def __init__(self, config: JambaConfig):
super().__init__()
self.hidden_dim = config.hidden_size
self.ffn_dim = config.intermediate_size
self.num_experts = config.num_experts
self.top_k = config.num_experts_per_tok
self.router = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
self.experts = JambaExperts(config)
def route_tokens_to_experts(self, hidden_states, router_logits):
routing_weights = torch.nn.functional.softmax(router_logits, dim=-1, dtype=torch.float)
top_k_weights, top_k_index = torch.topk(routing_weights, self.top_k, dim=-1)
return top_k_index, top_k_weights.to(hidden_states.dtype)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_dim)
router_logits = self.router(hidden_states)
top_k_index, top_k_weights = self.route_tokens_to_experts(hidden_states, router_logits)
hidden_states = self.experts(hidden_states, top_k_index, top_k_weights)
hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
return hidden_states
| JambaSparseMoeBlock |
python | doocs__leetcode | solution/0500-0599/0576.Out of Boundary Paths/Solution.py | {
"start": 0,
"end": 601
} | class ____:
def findPaths(
self, m: int, n: int, maxMove: int, startRow: int, startColumn: int
) -> int:
@cache
def dfs(i: int, j: int, k: int) -> int:
if not 0 <= i < m or not 0 <= j < n:
return int(k >= 0)
if k <= 0:
return 0
ans = 0
for a, b in pairwise(dirs):
x, y = i + a, j + b
ans = (ans + dfs(x, y, k - 1)) % mod
return ans
mod = 10**9 + 7
dirs = (-1, 0, 1, 0, -1)
return dfs(startRow, startColumn, maxMove)
| Solution |
python | encode__starlette | starlette/responses.py | {
"start": 9519,
"end": 9665
} | class ____(Exception):
def __init__(self, content: str = "Malformed range header.") -> None:
self.content = content
| MalformedRangeHeader |
python | tensorflow__tensorflow | tensorflow/python/training/optimizer.py | {
"start": 3709,
"end": 4176
} | class ____(metaclass=abc.ABCMeta):
"""Interface for abstracting over variables in the optimizers."""
@abc.abstractmethod
def target(self):
"""Returns the optimization target for this variable."""
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def update_op(self, optimizer, g):
"""Returns the update ops for updating the variable."""
raise NotImplementedError("Calling an abstract method.")
| _OptimizableVariable |
python | catalyst-team__catalyst | tests/benchmarks/test_benchmark.py | {
"start": 520,
"end": 624
} | class ____(str, enum.Enum):
"""RunModes."""
catalyst = "catalyst"
pytorch = "pytorch"
| RunMode |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/claude/diagnostics.py | {
"start": 688,
"end": 12469
} | class ____:
"""Central diagnostics service for scaffold branch operations."""
def __init__(
self,
level: DiagnosticsLevel = "off",
output_dir: Optional[Path] = None,
correlation_id: Optional[str] = None,
):
self.level = level
self.correlation_id = correlation_id or str(uuid.uuid4())
self.output_dir = output_dir or Path(tempfile.gettempdir()) / "dg" / "diagnostics"
self.entries: list[DiagnosticsEntry] = []
self._output_file: Optional[Path] = None
# Ensure output directory exists and create output file if diagnostics are enabled
if self.level != "off":
self.output_dir.mkdir(parents=True, exist_ok=True)
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"scaffold_diagnostics_{timestamp}_{self.correlation_id[:8]}.jsonl"
self._output_file = self.output_dir / filename
# Initialize file with session metadata as first line
session_metadata = {
"type": "session_start",
"correlation_id": self.correlation_id,
"timestamp": datetime.now().isoformat(),
"level": self.level,
}
with self._output_file.open("w") as f:
f.write(json.dumps(session_metadata) + "\n")
@property
def output_file(self) -> Optional[Path]:
"""Get the current output file path."""
return self._output_file
def _should_log(self, entry_level: DiagnosticsLevel) -> bool:
"""Check if entry should be logged based on current diagnostics level."""
if self.level == "off":
return False
# Define hierarchy as mapping for cleaner logic
level_priority = {"error": 0, "info": 1, "debug": 2}
current_priority = level_priority.get(self.level)
entry_priority = level_priority.get(entry_level)
# Both levels must be valid for logging to proceed
if current_priority is None or entry_priority is None:
return False
return entry_priority <= current_priority
def log(
self,
*,
level: DiagnosticsLevel,
category: str,
message: str,
data: Optional[dict[str, Any]] = None,
) -> None:
"""Log a diagnostics entry."""
if not self._should_log(level):
return
entry = DiagnosticsEntry(
correlation_id=self.correlation_id,
timestamp=datetime.now().isoformat(),
level=level,
category=category,
message=message,
data=data or {},
)
self.entries.append(entry)
# Stream entry to file immediately if file exists
if self._output_file and self._output_file.exists():
self._append_entry_to_file(entry)
def _append_entry_to_file(self, entry: DiagnosticsEntry) -> None:
"""Append a single entry to the JSONL file."""
if not self._output_file:
return
try:
# Create entry dict and append as single line
entry_dict = {
"type": "entry",
"correlation_id": entry.correlation_id,
"timestamp": entry.timestamp,
"level": entry.level,
"category": entry.category,
"message": entry.message,
"data": entry.data,
}
# Append as new line to JSONL file
with self._output_file.open("a") as f:
f.write(json.dumps(entry_dict) + "\n")
except Exception:
# If append fails, fall back to recreating the file
self._recreate_file_with_entries()
def _recreate_file_with_entries(self) -> None:
"""Recreate the diagnostics file with all current entries in JSONL format."""
if not self._output_file:
return
with self._output_file.open("w") as f:
# Write session metadata first
session_metadata = {
"type": "session_start",
"correlation_id": self.correlation_id,
"timestamp": self.entries[0].timestamp
if self.entries
else datetime.now().isoformat(),
"level": self.level,
}
f.write(json.dumps(session_metadata) + "\n")
# Write all entries as individual lines
for entry in self.entries:
entry_dict = {
"type": "entry",
"correlation_id": entry.correlation_id,
"timestamp": entry.timestamp,
"level": entry.level,
"category": entry.category,
"message": entry.message,
"data": entry.data,
}
f.write(json.dumps(entry_dict) + "\n")
def log_ai_interaction(self, interaction: AIInteraction) -> None:
"""Log an AI interaction."""
if not self._should_log("info"):
return
self.log(
level="info",
category="ai_interaction",
message="Claude API interaction",
data={
"prompt_length": len(interaction.prompt),
"response_length": len(interaction.response),
"token_count": interaction.token_count,
"allowed_tools": interaction.allowed_tools,
"duration_ms": interaction.duration_ms,
},
)
def log_context_gathering(self, context: ContextGathering) -> None:
"""Log context gathering operations."""
if not self._should_log("debug"):
return
self.log(
level="debug",
category="context_gathering",
message="Project context analysis",
data={
"files_count": len(context.files_analyzed),
"files_analyzed": context.files_analyzed,
"patterns_detected": context.patterns_detected,
"decisions_made": context.decisions_made,
},
)
def log_performance(self, metrics: PerformanceMetrics) -> None:
"""Log performance metrics."""
if not self._should_log("debug"):
return
self.log(
level="debug",
category="performance",
message=f"Operation timing: {metrics.operation}",
data={
"operation": metrics.operation,
"phase": metrics.phase,
"duration_ms": metrics.duration_ms,
},
)
@contextmanager
def time_operation(self, operation: str, phase: str) -> Generator[None, None, None]:
"""Context manager for timing operations and logging performance metrics.
Args:
operation: Name of the operation being timed
phase: Phase or category of the operation
Yields:
None
"""
start_time = perf_counter()
try:
yield
finally:
duration_ms = (perf_counter() - start_time) * 1000
metrics = PerformanceMetrics(
correlation_id=self.correlation_id,
timestamp=datetime.now().isoformat(),
operation=operation,
duration_ms=duration_ms,
phase=phase,
)
self.log_performance(metrics)
@contextmanager
def claude_operation(
self, *, operation_name: str, error_code: str, error_message: str, **additional_context: Any
) -> Generator[None, None, None]:
"""Context manager for Claude operations that handles timing and comprehensive logging.
Automatically logs operation start, success, and errors with consistent formatting.
Args:
operation_name: Name of the operation for diagnostics
error_code: Specific error code to log on failure
error_message: Human-readable error message
**additional_context: Additional context to include in error logs
Raises:
Re-raises any exception after logging it
"""
# Log operation start
self.info(
category=f"{operation_name}_start",
message=f"Starting {operation_name}",
)
start_time = perf_counter()
try:
yield
# Log successful completion
duration_ms = (perf_counter() - start_time) * 1000
self.info(
category=f"{operation_name}_success",
message=f"Successfully completed {operation_name}",
data={"duration_ms": duration_ms},
)
except Exception as e:
duration_ms = (perf_counter() - start_time) * 1000
self.error(
category=error_code,
message=error_message,
data={
"error_type": type(e).__name__,
"error_message": str(e),
"duration_ms": duration_ms,
**additional_context,
},
)
raise
def flush(self) -> Optional[Path]:
"""Finalize the diagnostics file with session end timestamp."""
if self.level == "off" or not self._output_file:
return None
# Return None if no entries to flush
if not self.entries:
return None
# Add session_end timestamp as final line in JSONL file
if self._output_file.exists():
try:
session_end = {
"type": "session_end",
"correlation_id": self.correlation_id,
"timestamp": datetime.now().isoformat(),
}
with self._output_file.open("a") as f:
f.write(json.dumps(session_end) + "\n")
# Clear entries after successful flush
self.entries.clear()
return self._output_file
except Exception:
# If append fails, recreate file and add session_end
self._recreate_file_with_entries()
session_end = {
"type": "session_end",
"correlation_id": self.correlation_id,
"timestamp": datetime.now().isoformat(),
}
with self._output_file.open("a") as f:
f.write(json.dumps(session_end) + "\n")
self.entries.clear()
return self._output_file
return self._output_file
def error(self, *, category: str, message: str, data: Optional[dict[str, Any]] = None) -> None:
"""Log an error-level entry."""
self.log(level="error", category=category, message=message, data=data)
def info(self, *, category: str, message: str, data: Optional[dict[str, Any]] = None) -> None:
"""Log an info-level entry."""
self.log(level="info", category=category, message=message, data=data)
def debug(self, *, category: str, message: str, data: Optional[dict[str, Any]] = None) -> None:
"""Log a debug-level entry."""
self.log(level="debug", category=category, message=message, data=data)
def create_claude_diagnostics_service(
level: DiagnosticsLevel = "off",
output_dir: Optional[Union[str, Path]] = None,
correlation_id: Optional[str] = None,
) -> ClaudeDiagnostics:
"""Create a new Claude diagnostics service instance."""
output_path = Path(output_dir) if output_dir else None
return ClaudeDiagnostics(
level=level,
output_dir=output_path,
correlation_id=correlation_id,
)
| ClaudeDiagnostics |
python | spyder-ide__spyder | spyder/utils/color_system.py | {
"start": 1997,
"end": 2170
} | class ____:
"""
Colors for the Python and Spyder logos.
"""
B10 = '#3775a9'
B20 = '#ffd444'
B30 = '#414141'
B40 = '#fafafa'
B50 = '#8c0000' | Logos |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/serdes/serdes.py | {
"start": 19060,
"end": 20968
} | class ____:
"""values are unpacked bottom up."""
def __init__(self):
self.observed_unknown_serdes_values: set[UnknownSerdesValue] = set()
def assert_no_unknown_values(self, obj: UnpackedValue) -> PackableValue:
if isinstance(obj, UnknownSerdesValue):
raise DeserializationError(
f"{obj.message}\nThis error can occur due to version skew, verify processes are"
" running expected versions."
)
elif isinstance(obj, (list, set, frozenset)):
for inner in obj:
self.assert_no_unknown_values(inner)
elif isinstance(obj, dict):
for v in obj.values():
self.assert_no_unknown_values(v)
return cast("PackableValue", obj)
def observe_unknown_value(self, val: "UnknownSerdesValue") -> "UnknownSerdesValue":
self.observed_unknown_serdes_values.add(val)
return val
def clear_ignored_unknown_values(self, obj: T) -> T:
if isinstance(obj, UnknownSerdesValue):
self.observed_unknown_serdes_values.discard(obj)
self.clear_ignored_unknown_values(obj.value)
elif isinstance(obj, (list, set, frozenset)):
for inner in obj:
self.clear_ignored_unknown_values(inner)
elif isinstance(obj, dict):
for v in obj.values():
self.clear_ignored_unknown_values(v)
return obj
def finalize_unpack(self, unpacked: UnpackedValue) -> PackableValue:
if self.observed_unknown_serdes_values:
message = ",".join(v.message for v in self.observed_unknown_serdes_values)
raise DeserializationError(
f"{message}\nThis error can occur due to version skew, verify processes are"
" running expected versions."
)
return cast("PackableValue", unpacked)
| UnpackContext |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/datastore.py | {
"start": 17286,
"end": 19385
} | class ____(GoogleCloudBaseOperator):
"""
Roll back a transaction.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreRollbackOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback
:param transaction: the transaction to roll back.
:param project_id: Google Cloud project ID against which to make the request.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"transaction",
"impersonation_chain",
)
def __init__(
self,
*,
transaction: str,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.transaction = transaction
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.rollback(
transaction=self.transaction,
project_id=self.project_id,
)
| CloudDatastoreRollbackOperator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol17.py | {
"start": 1496,
"end": 1684
} | class ____(Protocol[_T1_contra]):
# This should generate an error because a contravariant TypeVar
# should not be used as a return type.
def m1(self) -> _T1_contra: ...
| Protocol7 |
python | pypa__hatch | tests/backend/metadata/test_hatch.py | {
"start": 6265,
"end": 7131
} | class ____:
def test_default(self, isolation):
config = {}
metadata = HatchMetadata(str(isolation), config, None)
assert metadata.metadata.allow_ambiguous_features is metadata.metadata.allow_ambiguous_features is False
def test_not_boolean(self, isolation):
config = {"metadata": {"allow-ambiguous-features": 9000}}
metadata = HatchMetadata(str(isolation), config, None)
with pytest.raises(TypeError, match="Field `tool.hatch.metadata.allow-ambiguous-features` must be a boolean"):
_ = metadata.metadata.allow_ambiguous_features
def test_correct(self, isolation):
config = {"metadata": {"allow-ambiguous-features": True}}
metadata = HatchMetadata(str(isolation), config, None)
assert metadata.metadata.allow_ambiguous_features is True
| TestMetadataAllowAmbiguousFeatures |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/custom_nest_trace_type.py | {
"start": 1027,
"end": 5425
} | class ____(trace.TraceType):
"""Represents the TraceType of a class implmenting the CustomNestProtocol."""
def __init__(
self,
value_type: Type[Any],
metadata: Any,
components: PythonTuple[trace.TraceType],
):
if not issubclass(value_type, custom_nest_protocol.CustomNestProtocol):
raise ValueError(f"{value_type!r} does not implement CustomNestProtocol.")
self.value_type = value_type
self.metadata = metadata
self.components = components
def is_subtype_of(self, other: trace.TraceType) -> bool:
if not self._is_same_trace_type(other):
return False
for c_self, c_other in zip(self.components, other.components): # pytype: disable=attribute-error
if not c_self.is_subtype_of(c_other):
return False
return True
def most_specific_common_supertype(
self, others: Sequence[trace.TraceType]
) -> Optional["CustomNestTraceType"]:
for other in others:
if not self._is_same_trace_type(other):
return None
others_components = [other.components for other in others] # pytype: disable=attribute-error
supertyped_components = tuple(
self_component.most_specific_common_supertype(others_component)
for self_component, *others_component in zip(
self.components, *others_components
)
)
return CustomNestTraceType(
self.value_type, self.metadata, supertyped_components
)
def __eq__(self, other: trace.TraceType) -> bool:
return (
isinstance(other, CustomNestTraceType)
and self.value_type == other.value_type
and self.metadata == other.metadata
and self.components == other.components
)
def __hash__(self) -> int:
# The hash computation doesn't use self.metadata, so unhashable metadata can
# be used. The `self.__eq__` method is used instead to differentiate between
# two objects with the same components but different metadata.
return hash((self.value_type, self.components))
def __repr__(self) -> str:
return (
f"{self.__class__.__name__} [metadata={self.metadata!r}, "
f"components={self.components!r}]"
)
def placeholder_value(self, placeholder_context: Any) -> Any:
components_placeholder_value = tuple(
c.placeholder_value(placeholder_context) for c in self.components
)
return self.value_type.__tf_unflatten__(
self.metadata, components_placeholder_value
)
def to_tensors(self, value: Any) -> PythonList[Any]:
if not isinstance(value, self.value_type):
raise TypeError(f"{value!r} is not of type {self.value_type}.")
_, value_components = value.__tf_flatten__()
flattened_values = []
for value_comp, type_comp in zip(value_components, self.components):
flattened_values.extend(type_comp.to_tensors(value_comp))
return flattened_values
def from_tensors(self, tensors: Iterator[Any]) -> Any:
return self.value_type.__tf_unflatten__(
self.metadata, tuple(c.from_tensors(tensors) for c in self.components)
)
def flatten(self) -> PythonList[trace.TraceType]:
flat_list = []
for c in self.components:
flat_list.extend(c.flatten())
return flat_list
def cast(self, value: Any, casting_context: Any) -> Any:
if not isinstance(value, self.value_type):
raise TypeError(f"[{value!r}] is not of type {self.value_type}.")
value_metadata, value_components = value.__tf_flatten__()
if self.metadata != value_metadata:
raise ValueError(
f"Metadata mismatch: [{self.metadata!r}] != [{value_metadata!r}]."
)
if len(self.components) != len(value_components):
raise ValueError(
f"Lengths of components mismatch: {len(self.components)} != "
f"{len(value_components)}."
)
casted_value_components, was_casted = util.cast_and_return_whether_casted(
self.components, value_components, casting_context
)
if was_casted:
return self.value_type.__tf_unflatten__(
self.metadata, casted_value_components
)
else:
return value
def _is_same_trace_type(self, other: trace.TraceType) -> bool:
return (
isinstance(other, CustomNestTraceType)
and self.value_type == other.value_type
and self.metadata == other.metadata
and len(self.components) == len(other.components)
)
| CustomNestTraceType |
python | scrapy__scrapy | tests/test_spidermiddleware.py | {
"start": 7443,
"end": 7691
} | class ____:
def process_spider_output(self, response, result):
yield from result
async def process_spider_output_async(self, response, result):
async for r in result:
yield r
| ProcessSpiderOutputUniversalMiddleware |
python | redis__redis-py | redis/commands/core.py | {
"start": 250215,
"end": 250633
} | class ____(
AsyncBasicKeyCommands,
AsyncHyperlogCommands,
AsyncHashCommands,
AsyncGeoCommands,
AsyncListCommands,
AsyncScanCommands,
AsyncSetCommands,
AsyncStreamCommands,
AsyncSortedSetCommands,
):
"""
A class containing all of the implemented data access redis commands.
This class is to be used as a mixin for asynchronous Redis clients.
"""
| AsyncDataAccessCommands |
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 329560,
"end": 333626
} | class ____:
"""link or outline destination details"""
def __init__(self, obj, rlink, document=None):
isExt = obj.is_external
isInt = not isExt
self.dest = ""
self.file_spec = ""
self.flags = 0
self.is_map = False
self.is_uri = False
self.kind = LINK_NONE
self.lt = Point(0, 0)
self.named = dict()
self.new_window = ""
self.page = obj.page
self.rb = Point(0, 0)
self.uri = obj.uri
def uri_to_dict(uri):
items = self.uri[1:].split('&')
ret = dict()
for item in items:
eq = item.find('=')
if eq >= 0:
ret[item[:eq]] = item[eq+1:]
else:
ret[item] = None
return ret
def unescape(name):
"""Unescape '%AB' substrings to chr(0xAB)."""
split = name.replace("%%", "%25") # take care of escaped '%'
split = split.split("%")
newname = split[0]
for item in split[1:]:
piece = item[:2]
newname += chr(int(piece, base=16))
newname += item[2:]
return newname
if rlink and not self.uri.startswith("#"):
self.uri = f"#page={rlink[0] + 1}&zoom=0,{_format_g(rlink[1])},{_format_g(rlink[2])}"
if obj.is_external:
self.page = -1
self.kind = LINK_URI
if not self.uri:
self.page = -1
self.kind = LINK_NONE
if isInt and self.uri:
self.uri = self.uri.replace("&zoom=nan", "&zoom=0")
if self.uri.startswith("#"):
self.kind = LINK_GOTO
m = re.match('^#page=([0-9]+)&zoom=([0-9.]+),(-?[0-9.]+),(-?[0-9.]+)$', self.uri)
if m:
self.page = int(m.group(1)) - 1
self.lt = Point(float((m.group(3))), float(m.group(4)))
self.flags = self.flags | LINK_FLAG_L_VALID | LINK_FLAG_T_VALID
else:
m = re.match('^#page=([0-9]+)$', self.uri)
if m:
self.page = int(m.group(1)) - 1
else:
self.kind = LINK_NAMED
m = re.match('^#nameddest=(.*)', self.uri)
assert document
if document and m:
named = unescape(m.group(1))
self.named = document.resolve_names().get(named)
if self.named is None:
# document.resolve_names() does not contain an
# entry for `named` so use an empty dict.
self.named = dict()
self.named['nameddest'] = named
else:
self.named = uri_to_dict(self.uri[1:])
else:
self.kind = LINK_NAMED
self.named = uri_to_dict(self.uri)
if obj.is_external:
if not self.uri:
pass
elif self.uri.startswith("file:"):
self.file_spec = self.uri[5:]
if self.file_spec.startswith("//"):
self.file_spec = self.file_spec[2:]
self.is_uri = False
self.uri = ""
self.kind = LINK_LAUNCH
ftab = self.file_spec.split("#")
if len(ftab) == 2:
if ftab[1].startswith("page="):
self.kind = LINK_GOTOR
self.file_spec = ftab[0]
self.page = int(ftab[1].split("&")[0][5:]) - 1
elif ":" in self.uri:
self.is_uri = True
self.kind = LINK_URI
else:
self.is_uri = True
self.kind = LINK_LAUNCH
assert isinstance(self.named, dict)
| linkDest |
python | jina-ai__jina | jina/serve/runtimes/servers/load_balancer.py | {
"start": 77,
"end": 2225
} | class ____(BaseServer):
"""Base FastAPI server. Implement this abstract class in-case you want to build a fastapi-based server by
implementing the `app` property. This property should return a fastapi app. The base Gateway will handle starting
a server and serving the application using that server."""
def __init__(self, **kwargs):
"""Initialize the LoadBalancingServer
:param kwargs: keyword args
"""
super().__init__(**kwargs)
# get server list from args
self._server_exit = False
async def handle_request(self, request):
"""Method called to handle requests coming to the LoadBalancer
:param request: request to handle
:return: the response to the request
"""
return await self._request_handler._load_balance(request)
async def setup_server(self):
"""
Initialize and return server
"""
self.logger.debug(f'Setting up LoadBalancer server')
self.app = web.Application()
self.app.router.add_route('*', '/{path:.*}', self.handle_request)
self.logger.debug(f'LoadBalancer server setup successful')
async def run_server(self):
"""Run HTTP server forever"""
await web._run_app(
app=self.app,
host=self.host,
port=self.port,
)
async def shutdown(self):
"""Shutdown the server and free other allocated resources, e.g, streamer object, health check service, ..."""
self.logger.debug(f'Shutting down server')
self._server_exit = True
await super().shutdown()
await self._request_handler.close()
self.logger.debug(f'Server shutdown finished')
@property
def _should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self._server_exit
@property
def should_exit(self):
"""Property describing if server is ready to exit
:return: boolean indicating if Server ready to exit
"""
return self._should_exit
| LoadBalancingServer |
python | pydata__xarray | xarray/tests/test_datatree.py | {
"start": 8889,
"end": 10996
} | class ____:
def test_getitem_node(self) -> None:
folder1 = DataTree.from_dict(
{
"/results/highres": DataTree(),
}
)
assert folder1["results"].name == "results"
assert folder1["results/highres"].name == "highres"
def test_getitem_self(self) -> None:
dt = DataTree()
assert dt["."] is dt
def test_getitem_single_data_variable(self) -> None:
data = xr.Dataset({"temp": [0, 50]})
results = DataTree(name="results", dataset=data)
assert_identical(results["temp"], data["temp"])
def test_getitem_single_data_variable_from_node(self) -> None:
data = xr.Dataset({"temp": [0, 50]})
folder1 = DataTree.from_dict(
{
"/results/highres": data,
}
)
assert_identical(folder1["results/highres/temp"], data["temp"])
def test_getitem_nonexistent_node(self) -> None:
folder1 = DataTree.from_dict({"/results": DataTree()}, name="folder1")
with pytest.raises(KeyError):
folder1["results/highres"]
def test_getitem_nonexistent_variable(self) -> None:
data = xr.Dataset({"temp": [0, 50]})
results = DataTree(name="results", dataset=data)
with pytest.raises(KeyError):
results["pressure"]
@pytest.mark.xfail(reason="Should be deprecated in favour of .subset")
def test_getitem_multiple_data_variables(self) -> None:
data = xr.Dataset({"temp": [0, 50], "p": [5, 8, 7]})
results = DataTree(name="results", dataset=data)
assert_identical(results[["temp", "p"]], data[["temp", "p"]]) # type: ignore[index]
@pytest.mark.xfail(
reason="Indexing needs to return whole tree (GH https://github.com/xarray-contrib/datatree/issues/77)"
)
def test_getitem_dict_like_selection_access_to_dataset(self) -> None:
data = xr.Dataset({"temp": [0, 50]})
results = DataTree(name="results", dataset=data)
assert_identical(results[{"temp": 1}], data[{"temp": 1}]) # type: ignore[index]
| TestGetItem |
python | cherrypy__cherrypy | cherrypy/lib/cpstats.py | {
"start": 10077,
"end": 11781
} | class ____(object):
"""Wraps a file-like object, counting the number of bytes read."""
def __init__(self, rfile):
"""Initialize a read byte counter."""
self.rfile = rfile
self.bytes_read = 0
def read(self, size=-1):
"""Read from file, counting bytes."""
data = self.rfile.read(size)
self.bytes_read += len(data)
return data
def readline(self, size=-1):
"""Read a line from file, counting bytes."""
data = self.rfile.readline(size)
self.bytes_read += len(data)
return data
def readlines(self, sizehint=0):
"""Read a list of lines from file, counting bytes."""
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def close(self):
"""Close the underlying file object."""
self.rfile.close()
def __iter__(self):
"""Make a file reader iterator."""
return self
def next(self):
"""Return next portion of bytes from the iterated file."""
data = self.rfile.next()
self.bytes_read += len(data)
return data
def average_uriset_time(s):
"""Compute average request processing time within a URI set."""
return s['Count'] and (s['Sum'] / s['Count']) or 0
def _get_threading_ident():
"""Discover the current thread identifier."""
if sys.version_info >= (3, 3):
return threading.get_ident()
return threading._get_ident()
| ByteCountWrapper |
python | doocs__leetcode | solution/3500-3599/3597.Partition String/Solution2.py | {
"start": 532,
"end": 895
} | class ____:
def partitionString(self, s: str) -> List[str]:
hashing = Hashing(s)
vis = set()
l = 1
ans = []
for r, c in enumerate(s, 1):
x = hashing.query(l, r)
if x not in vis:
vis.add(x)
ans.append(s[l - 1 : r])
l = r + 1
return ans
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-sigma/dagster_sigma/resource.py | {
"start": 31589,
"end": 33207
} | class ____(StateBackedDefinitionsLoader[SigmaOrganizationData]):
organization: SigmaOrganization
translator: DagsterSigmaTranslator
snapshot: Optional[RepositoryLoadData]
sigma_filter: Optional[SigmaFilter] = None
fetch_column_data: bool = True
fetch_lineage_data: bool = True
@property
def defs_key(self) -> str:
return f"{SIGMA_RECON_DATA_PREFIX}{self.organization.client_id}"
def fetch_state(self) -> SigmaOrganizationData:
if self.snapshot and self.defs_key in self.snapshot.reconstruction_metadata:
return deserialize_value(self.snapshot.reconstruction_metadata[self.defs_key]) # type: ignore
return asyncio.run(
self.organization.build_organization_data(
sigma_filter=self.sigma_filter,
fetch_column_data=self.fetch_column_data,
fetch_lineage_data=self.fetch_lineage_data,
)
)
def defs_from_state(self, state: SigmaOrganizationData) -> Definitions:
translator_data_workbooks = [
SigmaWorkbookTranslatorData(workbook=workbook, organization_data=state)
for workbook in state.workbooks
]
translator_data_datasets = [
SigmaDatasetTranslatorData(dataset=dataset, organization_data=state)
for dataset in state.datasets
]
asset_specs = [
_get_translator_spec_assert_keys_match(self.translator, obj)
for obj in [*translator_data_workbooks, *translator_data_datasets]
]
return Definitions(assets=asset_specs)
| SigmaOrganizationDefsLoader |
python | walkccc__LeetCode | solutions/1326. Minimum Number of Taps to Open to Water a Garden/1326.py | {
"start": 0,
"end": 436
} | class ____:
def minTaps(self, n: int, ranges: list[int]) -> int:
nums = [0] * (n + 1)
for i, range_ in enumerate(ranges):
l = max(0, i - range_)
r = min(n, range_ + i)
nums[l] = max(nums[l], r - l)
ans = 0
end = 0
farthest = 0
for i in range(n):
farthest = max(farthest, i + nums[i])
if i == end:
ans += 1
end = farthest
return ans if end == n else -1
| Solution |
python | donnemartin__system-design-primer | solutions/object_oriented_design/call_center/call_center.py | {
"start": 1502,
"end": 1756
} | class ____(Employee):
def __init__(self, employee_id, name):
super(Operator, self).__init__(employee_id, name, Rank.DIRECTOR)
def escalate_call(self):
raise NotImplementedError('Directors must be able to handle any call')
| Director |
python | huggingface__transformers | src/transformers/models/xlnet/modeling_xlnet.py | {
"start": 87759,
"end": 98372
} | class ____(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.transformer = XLNetModel(config)
self.start_logits = XLNetPoolerStartLogits(config)
self.end_logits = XLNetPoolerEndLogits(config)
self.answer_class = XLNetPoolerAnswerClass(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
mems: Optional[torch.Tensor] = None,
perm_mask: Optional[torch.Tensor] = None,
target_mapping: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
input_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
is_impossible: Optional[torch.Tensor] = None,
cls_index: Optional[torch.Tensor] = None,
p_mask: Optional[torch.Tensor] = None,
use_mems: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs, # delete when `use_cache` is removed in XLNetModel
) -> Union[tuple, XLNetForQuestionAnsweringOutput]:
r"""
mems (`list[torch.FloatTensor]` of length `config.n_layers`):
Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential
decoding. The token ids which have their past given to this model should not be passed as `input_ids` as
they have already been computed.
`use_mems` has to be set to `True` to make use of `mems`.
perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*):
Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`:
- if `perm_mask[k, i, j] = 0`, i attend to j in batch k;
- if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k.
If not set, each token attends to all the others (full bidirectional attention). Only used during
pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*):
Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is
on the j-th token. Only used during pretraining for partial prediction or for sequential decoding
(generation).
input_mask (`torch.FloatTensor` of shape `batch_size, sequence_length`, *optional*):
Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for
real tokens and 1 for padding which is kept for compatibility with the original code base.
Mask values selected in `[0, 1]`:
- 1 for tokens that are **masked**,
- 0 for tokens that are **not masked**.
You can only uses one of `input_mask` and `attention_mask`.
is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels whether a question has an answer or no answer (SQuAD 2.0)
cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the classification token to use as input for computing plausibility of the
answer.
p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be
masked. 0.0 mean token is not masked.
use_mems (`bool`, *optional*):
Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the hidden
states from previous forward passes to compute attention, which can significantly improve performance for
sequential decoding tasks.
Example:
```python
>>> from transformers import AutoTokenizer, XLNetForQuestionAnswering
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
>>> model = XLNetForQuestionAnswering.from_pretrained("xlnet/xlnet-base-cased")
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
... 0
... ) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
inputs_embeds=inputs_embeds,
use_mems=use_mems,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
hidden_states = transformer_outputs[0]
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
outputs = transformer_outputs[1:] # Keep mems, hidden states, attentions if there are in it
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
if not return_dict:
return (total_loss,) + transformer_outputs[1:]
else:
return XLNetForQuestionAnsweringOutput(
loss=total_loss,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum(
"blh,bl->bh", hidden_states, start_log_probs
) # get the representation of START as weighted sum of hidden states
cls_logits = self.answer_class(
hidden_states, start_states=start_states, cls_index=cls_index
) # Shape (batch size,): one single `cls_logits` for each sample
if not return_dict:
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
return outputs + transformer_outputs[1:]
else:
return XLNetForQuestionAnsweringOutput(
start_top_log_probs=start_top_log_probs,
start_top_index=start_top_index,
end_top_log_probs=end_top_log_probs,
end_top_index=end_top_index,
cls_logits=cls_logits,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
__all__ = [
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
]
| XLNetForQuestionAnswering |
python | squidfunk__mkdocs-material | material/plugins/social/layout.py | {
"start": 2990,
"end": 3338
} | class ____(Config):
content = Type(str, default = "")
align = Choice(Origin, default = "start top")
overflow = Choice(Overflow, default = "truncate")
color = Type(str, default = "")
line = SubConfig(Line)
font = SubConfig(Font)
# -----------------------------------------------------------------------------
# Layer
| Typography |
python | jazzband__django-oauth-toolkit | tests/test_client_credential.py | {
"start": 5912,
"end": 7310
} | class ____(BaseTest):
def test_client_resource_password_based(self):
"""
Request an access token using Resource Owner Password Based flow
"""
self.application.delete()
self.application = Application.objects.create(
name="test_client_credentials_app",
user=self.dev_user,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_PASSWORD,
client_secret=CLEARTEXT_SECRET,
)
token_request_data = {"grant_type": "password", "username": "test_user", "password": "123456"}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
access_token = content["access_token"]
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = ResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a protected resource")
| TestClientResourcePasswordBased |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 15848,
"end": 26409
} | class ____(_TestJointOps, __TestCase):
thetype = set
basetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
self.assertRaises(TypeError, s.__init__, s, 2)
self.assertRaises(TypeError, s.__init__, 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_set_literal(self):
s = set([1,2,3])
t = {1,2,3}
self.assertEqual(s, t)
def test_set_literal_insertion_order(self):
# SF Issue #26020 -- Expect left to right insertion
s = {1, 1.0, True}
self.assertEqual(len(s), 1)
stored_value = s.pop()
self.assertEqual(type(stored_value), int)
def test_set_literal_evaluation_order(self):
# Expect left to right expression evaluation
events = []
def record(obj):
events.append(obj)
s = {record(1), record(2), record(3)}
self.assertEqual(events, [1, 2, 3])
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
self.assertEqual(type(dup), self.basetype)
def test_add(self):
self.s.add('Q')
self.assertIn('Q', self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assertNotIn('a', self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
self.assertRaises(TypeError, self.s.remove, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.remove(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# https://bugs.python.org/issue1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError as e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError as e:
self.assertTrue(e.args[0] is key,
"KeyError should be {0}, not {1}".format(key,
e.args[0]))
else:
self.fail()
def test_discard(self):
self.s.discard('a')
self.assertNotIn('a', self.s)
self.s.discard('Q')
self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
def test_pop(self):
for i in range(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, [[]])
for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, [[]])
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
self.assertEqual(s.intersection_update(C(p), C(t)), None)
self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('cdc'), C('aba'))
self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
def test_weakref(self):
s = self.thetype('gallahad')
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
support.gc_collect() # For PyPy or other GCs.
self.assertRaises(ReferenceError, str, p)
def test_rich_compare(self):
with torch._dynamo.error_on_graph_break(False):
class TestRichSetCompare:
def __gt__(self, some_set):
self.gt_called = True
return False
def __lt__(self, some_set):
self.lt_called = True
return False
def __ge__(self, some_set):
self.ge_called = True
return False
def __le__(self, some_set):
self.le_called = True
return False
# This first tries the builtin rich set comparison, which doesn't know
# how to handle the custom object. Upon returning NotImplemented, the
# corresponding comparison on the right object is invoked.
myset = {1, 2, 3}
myobj = TestRichSetCompare()
myset < myobj
self.assertTrue(myobj.gt_called)
myobj = TestRichSetCompare()
myset > myobj
self.assertTrue(myobj.lt_called)
myobj = TestRichSetCompare()
myset <= myobj
self.assertTrue(myobj.ge_called)
myobj = TestRichSetCompare()
myset >= myobj
self.assertTrue(myobj.le_called)
| TestSet |
python | gevent__gevent | src/greentest/3.14/test_ssl.py | {
"start": 10108,
"end": 34930
} | class ____(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
ssl.OP_SINGLE_ECDH_USE
ssl.OP_NO_COMPRESSION
self.assertEqual(ssl.HAS_SNI, True)
self.assertEqual(ssl.HAS_ECDH, True)
self.assertEqual(ssl.HAS_TLSv1_2, True)
self.assertEqual(ssl.HAS_TLSv1_3, True)
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_options(self):
# gh-106687: SSL options values are unsigned integer (uint64_t)
for name in dir(ssl):
if not name.startswith('OP_'):
continue
with self.subTest(option=name):
value = getattr(ssl, name)
self.assertGreaterEqual(value, 0, f"ssl.{name}")
def test_ssl_types(self):
ssl_types = [
_ssl._SSLContext,
_ssl._SSLSocket,
_ssl.MemoryBIO,
_ssl.Certificate,
_ssl.SSLSession,
_ssl.SSLError,
]
for ssl_type in ssl_types:
with self.subTest(ssl_type=ssl_type):
with self.assertRaisesRegex(TypeError, "immutable type"):
ssl_type.value = None
support.check_disallow_instantiation(self, _ssl.Certificate)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS_CLIENT
self.assertEqual(repr(proto), '<_SSLMethod.PROTOCOL_TLS_CLIENT: %r>' % proto.value)
self.assertEqual(str(proto), str(proto.value))
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
def test_parse_cert(self):
self.maxDiff = None
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 1.1.1
self.assertGreaterEqual(n, 0x10101000)
# < 4.0
self.assertLess(n, 0x40000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 1)
self.assertLess(major, 4)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
libressl_ver = f"LibreSSL {major:d}"
if major >= 3:
# 3.x uses 0xMNN00PP0L
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{patch:d}"
else:
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{fix:d}"
self.assertStartsWith(
s, (openssl_ver, libressl_ver, "AWS-LC"),
(t, hex(n))
)
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with warnings_helper.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_openssl111_deprecations(self):
options = [
ssl.OP_NO_TLSv1,
ssl.OP_NO_TLSv1_1,
ssl.OP_NO_TLSv1_2,
ssl.OP_NO_TLSv1_3
]
protocols = [
ssl.PROTOCOL_TLSv1,
ssl.PROTOCOL_TLSv1_1,
ssl.PROTOCOL_TLSv1_2,
ssl.PROTOCOL_TLS
]
versions = [
ssl.TLSVersion.SSLv3,
ssl.TLSVersion.TLSv1,
ssl.TLSVersion.TLSv1_1,
]
for option in options:
with self.subTest(option=option):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.options |= option
self.assertEqual(
'ssl.OP_NO_SSL*/ssl.OP_NO_TLS* options are deprecated',
str(cm.warning)
)
for protocol in protocols:
if not has_tls_protocol(protocol):
continue
with self.subTest(protocol=protocol):
with self.assertWarns(DeprecationWarning) as cm:
ssl.SSLContext(protocol)
self.assertEqual(
f'ssl.{protocol.name} is deprecated',
str(cm.warning)
)
for version in versions:
if not has_tls_version(version):
continue
with self.subTest(version=version):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.minimum_version = version
version_text = '%s.%s' % (version.__class__.__name__, version.name)
self.assertEqual(
f'ssl.{version_text} is deprecated',
str(cm.warning)
)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"certdata", certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (frozenset, set, bool))
if isinstance(trust, (frozenset, set)):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = socket_helper.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
def test_read_write_zero(self):
# empty reads and writes now work, bpo-42854, bpo-31711
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.send(b""), 0)
| BasicSocketTests |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 58074,
"end": 58332
} | class ____(PrefectBaseModel):
"""A representation of an installed Prefect integration."""
name: str = Field(description="The name of the Prefect integration.")
version: str = Field(description="The version of the Prefect integration.")
| Integration |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/app_blur.py | {
"start": 113,
"end": 668
} | class ____(App[None]):
CSS = """
Screen {
align: center middle;
}
Input {
width: 50%;
margin-bottom: 1;
&:focus {
width: 75%;
border: thick green;
background: pink;
}
}
"""
def compose(self) -> ComposeResult:
yield Input("This should be the blur style")
yield Input("This should also be the blur style")
def on_mount(self) -> None:
self.post_message(AppBlur())
if __name__ == "__main__":
AppBlurApp().run()
| AppBlurApp |
python | PrefectHQ__prefect | tests/client/test_prefect_client.py | {
"start": 88053,
"end": 99385
} | class ____:
@pytest.fixture
def automation(self):
return AutomationCore(
name="test-automation",
trigger=EventTrigger(
match={"flow_run_id": "123"},
posture=Posture.Reactive,
threshold=1,
within=0,
),
actions=[],
)
async def test_create_automation(self, cloud_client, automation: AutomationCore):
with respx.mock(
base_url=PREFECT_CLOUD_API_URL.value(), using="httpx"
) as router:
created_automation = automation.model_dump(mode="json")
created_automation["id"] = str(uuid4())
create_route = router.post("/automations/").mock(
return_value=httpx.Response(200, json=created_automation)
)
automation_id = await cloud_client.create_automation(automation)
assert create_route.called
assert json.loads(
create_route.calls[0].request.content
) == automation.model_dump(mode="json")
assert automation_id == UUID(created_automation["id"])
async def test_read_automation(self, cloud_client, automation: AutomationCore):
with respx.mock(
base_url=PREFECT_CLOUD_API_URL.value(), using="httpx"
) as router:
created_automation = automation.model_dump(mode="json")
created_automation["id"] = str(uuid4())
created_automation_id = created_automation["id"]
read_route = router.get(f"/automations/{created_automation_id}").mock(
return_value=httpx.Response(200, json=created_automation)
)
read_automation = await cloud_client.read_automation(created_automation_id)
assert read_route.called
assert read_automation.id == UUID(created_automation["id"])
async def test_read_automation_not_found(
self, cloud_client, automation: AutomationCore
):
with respx.mock(
base_url=PREFECT_CLOUD_API_URL.value(), using="httpx"
) as router:
created_automation = automation.model_dump(mode="json")
created_automation["id"] = str(uuid4())
created_automation_id = created_automation["id"]
read_route = router.get(f"/automations/{created_automation_id}").mock(
return_value=httpx.Response(404)
)
with pytest.raises(prefect.exceptions.PrefectHTTPStatusError, match="404"):
await cloud_client.read_automation(created_automation_id)
assert read_route.called
async def test_read_automations_by_name(
self, cloud_client, automation: AutomationCore
):
with respx.mock(
base_url=PREFECT_CLOUD_API_URL.value(), using="httpx"
) as router:
created_automation = automation.model_dump(mode="json")
created_automation["id"] = str(uuid4())
read_route = router.post("/automations/filter").mock(
return_value=httpx.Response(200, json=[created_automation])
)
read_automation = await cloud_client.read_automations_by_name(
automation.name
)
assert read_route.called
assert len(read_automation) == 1
assert read_automation[0].id == UUID(created_automation["id"])
assert (
read_automation[0].name == automation.name == created_automation["name"]
)
@pytest.fixture
def automation2(self):
return AutomationCore(
name="test-automation",
trigger=EventTrigger(
match={"flow_run_id": "234"},
posture=Posture.Reactive,
threshold=1,
within=0,
),
actions=[],
)
async def test_read_automations_by_name_multiple_same_name(
self, cloud_client, automation: AutomationCore, automation2: AutomationCore
):
with respx.mock(
base_url=PREFECT_CLOUD_API_URL.value(), using="httpx"
) as router:
created_automation = automation.model_dump(mode="json")
created_automation["id"] = str(uuid4())
created_automation2 = automation2.model_dump(mode="json")
created_automation2["id"] = str(uuid4())
read_route = router.post("/automations/filter").mock(
return_value=httpx.Response(
200, json=[created_automation, created_automation2]
)
)
read_automation = await cloud_client.read_automations_by_name(
automation.name
)
assert read_route.called
assert len(read_automation) == 2, (
"Expected two automations with the same name"
)
assert all(
[
automation.name == created_automation["name"]
for automation in read_automation
]
), "Expected all automations to have the same name"
async def test_read_automations_by_name_not_found(
self, cloud_client, automation: AutomationCore
):
with respx.mock(
base_url=PREFECT_CLOUD_API_URL.value(), using="httpx"
) as router:
created_automation = automation.model_dump(mode="json")
created_automation["id"] = str(uuid4())
created_automation["name"] = "nonexistent"
read_route = router.post("/automations/filter").mock(
return_value=httpx.Response(200, json=[])
)
nonexistent_automation = await cloud_client.read_automations_by_name(
name="nonexistent"
)
assert read_route.called
assert nonexistent_automation == []
async def test_delete_owned_automations(self, cloud_client):
with respx.mock(
base_url=PREFECT_CLOUD_API_URL.value(), using="httpx"
) as router:
resource_id = f"prefect.deployment.{uuid4()}"
delete_route = router.delete(f"/automations/owned-by/{resource_id}").mock(
return_value=httpx.Response(204)
)
await cloud_client.delete_resource_owned_automations(resource_id)
assert delete_route.called
async def test_server_error_does_not_raise_on_client():
async def raise_error():
raise ValueError("test")
with temporary_settings(
{PREFECT_SERVER_DOCKET_NAME: f"test-docket-{uuid4().hex[:8]}"}
):
app = create_app(ephemeral=True)
app.api_app.add_api_route("/raise_error", raise_error)
async with PrefectClient(
api=app,
) as client:
with pytest.raises(prefect.exceptions.HTTPStatusError, match="500"):
await client._client.get("/raise_error")
async def test_prefect_client_follow_redirects():
with temporary_settings(
{PREFECT_SERVER_DOCKET_NAME: f"test-docket-{uuid4().hex[:8]}"}
):
app = create_app(ephemeral=True)
httpx_settings = {"follow_redirects": True}
async with PrefectClient(api=app, httpx_settings=httpx_settings) as client:
assert client._client.follow_redirects is True
httpx_settings = {"follow_redirects": False}
async with PrefectClient(api=app, httpx_settings=httpx_settings) as client:
assert client._client.follow_redirects is False
# follow redirects by default
with temporary_settings({PREFECT_TESTING_UNIT_TEST_MODE: False}):
async with PrefectClient(api=app) as client:
assert client._client.follow_redirects is True
# do not follow redirects by default during unit tests
async with PrefectClient(api=app) as client:
assert client._client.follow_redirects is False
async def test_global_concurrency_limit_create(prefect_client):
# Test for both `integer` and `float` slot_delay_per_second
for slot_decay_per_second in [1, 1.2]:
global_concurrency_limit_name = f"global-create-test-{slot_decay_per_second}"
response_uuid = await prefect_client.create_global_concurrency_limit(
GlobalConcurrencyLimitCreate(
name=global_concurrency_limit_name,
limit=42,
slot_decay_per_second=slot_decay_per_second,
)
)
concurrency_limit = await prefect_client.read_global_concurrency_limit_by_name(
name=global_concurrency_limit_name
)
assert concurrency_limit.id == response_uuid
assert concurrency_limit.slot_decay_per_second == slot_decay_per_second
async def test_global_concurrency_limit_delete(prefect_client):
await prefect_client.create_global_concurrency_limit(
GlobalConcurrencyLimitCreate(name="global-delete-test", limit=42)
)
assert len(await prefect_client.read_global_concurrency_limits()) == 1
await prefect_client.delete_global_concurrency_limit_by_name(
name="global-delete-test"
)
assert len(await prefect_client.read_global_concurrency_limits()) == 0
with pytest.raises(prefect.exceptions.ObjectNotFound):
await prefect_client.delete_global_concurrency_limit_by_name(
name="global-delete-test"
)
async def test_global_concurrency_limit_update_with_integer(prefect_client):
# Test for both `integer` and `float` slot_delay_per_second
for index, slot_decay_per_second in enumerate([1, 1.2]):
created_global_concurrency_limit_name = (
f"global-update-test-{slot_decay_per_second}"
)
updated_global_concurrency_limit_name = (
f"global-create-test-{slot_decay_per_second}-new"
)
await prefect_client.create_global_concurrency_limit(
GlobalConcurrencyLimitCreate(
name=created_global_concurrency_limit_name,
limit=42,
slot_decay_per_second=slot_decay_per_second,
)
)
await prefect_client.update_global_concurrency_limit(
name=created_global_concurrency_limit_name,
concurrency_limit=GlobalConcurrencyLimitUpdate(
limit=1, name=updated_global_concurrency_limit_name
),
)
assert len(await prefect_client.read_global_concurrency_limits()) == index + 1
assert (
await prefect_client.read_global_concurrency_limit_by_name(
name=updated_global_concurrency_limit_name
)
).limit == 1
assert (
await prefect_client.read_global_concurrency_limit_by_name(
name=updated_global_concurrency_limit_name
)
).slot_decay_per_second == slot_decay_per_second
with pytest.raises(prefect.exceptions.ObjectNotFound):
await prefect_client.update_global_concurrency_limit(
name=created_global_concurrency_limit_name,
concurrency_limit=GlobalConcurrencyLimitUpdate(limit=1),
)
async def test_global_concurrency_limit_read_nonexistent_by_name(prefect_client):
with pytest.raises(prefect.exceptions.ObjectNotFound):
await prefect_client.read_global_concurrency_limit_by_name(name="not-here")
| TestAutomations |
python | huggingface__transformers | src/transformers/models/jetmoe/modeling_jetmoe.py | {
"start": 9819,
"end": 12024
} | class ____(nn.Module):
"""
A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.
Args:
config:
Configuration object with model hyperparameters.
"""
def __init__(self, config: JetMoeConfig):
super().__init__()
self.input_size = config.hidden_size
self.hidden_size = config.intermediate_size
self.activation = ACT2FN[config.activation_function]
self.bias = torch.nn.Parameter(torch.empty(self.input_size))
self.input_linear = JetMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)
self.output_linear = JetMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)
self.router = JetMoeTopKGating(
input_size=self.input_size,
num_experts=config.num_local_experts,
top_k=config.num_experts_per_tok,
)
def forward(self, layer_input):
"""
Forward pass of the mixture of experts layer.
Args:
layer_input (Tensor):
Input tensor.
Returns:
Tensor:
Output tensor.
Tensor:
Router logits.
"""
bsz, length, emb_size = layer_input.size()
layer_input = layer_input.reshape(-1, emb_size)
_, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)
expert_inputs = layer_input[batch_index]
hidden_states = self.input_linear(expert_inputs, expert_size)
chunked_hidden_states = hidden_states.chunk(2, dim=-1)
hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]
expert_outputs = self.output_linear(hidden_states, expert_size)
expert_outputs = expert_outputs * batch_gates[:, None]
zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)
layer_output = zeros.index_add(0, batch_index, expert_outputs)
layer_output = layer_output.view(bsz, length, self.input_size)
layer_output = layer_output + self.bias
return layer_output
| JetMoeMoE |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/arguments_of_correct_type.py | {
"start": 178,
"end": 982
} | class ____(ValidationRule):
def enter_Argument(self, node, key, parent, path, ancestors):
arg_def = self.context.get_argument()
if arg_def:
errors = is_valid_literal_value(arg_def.type, node.value)
if errors:
self.context.report_error(GraphQLError(
self.bad_value_message(node.name.value, arg_def.type,
print_ast(node.value), errors),
[node.value]
))
return False
@staticmethod
def bad_value_message(arg_name, type, value, verbose_errors):
message = (u'\n' + u'\n'.join(verbose_errors)) if verbose_errors else ''
return 'Argument "{}" has invalid value {}.{}'.format(arg_name, value, message)
| ArgumentsOfCorrectType |
python | keras-team__keras | integration_tests/dataset_tests/reuters_test.py | {
"start": 91,
"end": 1953
} | class ____(testing.TestCase):
def test_load_data_default(self):
(x_train, y_train), (x_test, y_test) = reuters.load_data()
# Check types
self.assertIsInstance(x_train, np.ndarray)
self.assertIsInstance(y_train, np.ndarray)
self.assertIsInstance(x_test, np.ndarray)
self.assertIsInstance(y_test, np.ndarray)
# Check shapes
self.assertGreater(len(x_train), 0)
self.assertEqual(len(x_train), len(y_train))
self.assertGreater(len(x_test), 0)
self.assertEqual(len(x_test), len(y_test))
def test_num_words(self):
# Only consider the top 1000 words
(x_train, _), _ = reuters.load_data(num_words=1000)
# Ensure no word index exceeds 999 (0-based indexing)
max_index = max(max(sequence) for sequence in x_train if sequence)
self.assertLessEqual(max_index, 999)
def test_skip_top(self):
# Skip the top 10 most frequent words
(x_train, _), _ = reuters.load_data(skip_top=10, num_words=1000)
# Assuming 1 is among top 10, check if it's skipped
self.assertNotIn(1, x_train[0])
def test_maxlen(self):
# Only consider sequences shorter than 50
(x_train, _), _ = reuters.load_data(maxlen=50)
self.assertTrue(all(len(seq) <= 50 for seq in x_train))
def test_get_word_index(self):
word_index = reuters.get_word_index()
self.assertIsInstance(word_index, dict)
# Check if word_index contains specific known words
self.assertIn("the", word_index)
def test_get_label_names(self):
label_names = reuters.get_label_names()
self.assertIsInstance(label_names, tuple)
# Check if the tuple contains specific known labels
self.assertIn("earn", label_names)
self.assertIn("acq", label_names)
| ReutersLoadDataTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 139603,
"end": 140226
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("repository_id", "limit", "expiry", "client_mutation_id")
repository_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="repositoryId"
)
limit = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryInteractionLimit), graphql_name="limit"
)
expiry = sgqlc.types.Field(RepositoryInteractionLimitExpiry, graphql_name="expiry")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| SetRepositoryInteractionLimitInput |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 50253,
"end": 50835
} | class ____(Interface):
"""Class which provides code introspection capability associated with an
action. The ParserInfo class used by ZCML implements the same interface.
"""
file = Attribute('Filename of action-invoking code as a string')
line = Attribute(
'Starting line number in file (as an integer) of action-invoking code.'
'This will be ``None`` if the value could not be determined.'
)
def __str__():
"""Return a representation of the action information (including
source code from file, if possible)"""
| IActionInfo |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_property_types.py | {
"start": 6281,
"end": 6551
} | class ____(_ItemProperties):
obj_type: Literal['type']
_obj___name__: str | None
_obj___qualname__: str | None
_obj___value__: str # The aliased annotation
@property
def _groupwise_order_key(self) -> int:
return 70
| _TypeStatementProperties |
python | huggingface__transformers | src/transformers/models/fuyu/configuration_fuyu.py | {
"start": 919,
"end": 8839
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`FuyuForCausalLM`]. It is used to instantiate an
Fuyu model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the
[adept/fuyu-8b](https://huggingface.co/adept/fuyu-8b).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 262144):
Vocabulary size of the Fuyu model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`FuyuForCausalLM`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 16384):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 36):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 64):
Number of attention heads for each attention layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that this model might ever be used with.
image_size (`int`, *optional*, defaults to 300):
The input image size.
patch_size (`int`, *optional*, defaults to 30):
The input vision transformer encoding patch size.
num_channels (`int`, *optional*, defaults to 3):
The input image number of channels.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`. Whether to tie weight embeddings
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie input and output embeddings.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
qk_layernorm (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the Queries and Keys after projecting the hidden states
hidden_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio after applying the MLP to the hidden states.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio after computing the attention scores.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the *beginning-of-sequence* token.
eos_token_id (`Union[int, list[int]]`, *optional*, defaults to 2):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
image_token_id (`int`, *optional*, defaults to 71011):
The id of the image placeholder token.
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize the `language``[`Aut`].
```python
>>> from transformers import FuyuConfig
>>> # Initializing a Fuyu fuyu-7b style configuration
>>> configuration = FuyuConfig()
```"""
model_type = "fuyu"
sub_configs = {"text_config": AutoConfig}
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = 25000.0
def __init__(
self,
vocab_size: Optional[int] = 262144,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 16384,
num_hidden_layers: Optional[int] = 36,
num_attention_heads: Optional[int] = 64,
hidden_act: Optional[str] = "relu2",
max_position_embeddings: Optional[int] = 16384,
image_size: Optional[int] = 300,
patch_size: Optional[int] = 30,
num_channels: Optional[int] = 3,
initializer_range: Optional[float] = 0.02,
layer_norm_eps: Optional[int] = 1e-5,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
qk_layernorm: Optional[bool] = True,
hidden_dropout: Optional[float] = 0.0,
attention_dropout: Optional[float] = 0.0,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
image_token_id: Optional[int] = 71011,
text_config: Optional[dict] = None,
**kwargs,
):
if text_config is None:
text_config = {
"vocab_size": vocab_size,
"max_position_embeddings": max_position_embeddings,
"hidden_size": hidden_size,
"intermediate_size": intermediate_size,
"num_hidden_layers": num_hidden_layers,
"num_attention_heads": num_attention_heads,
"hidden_act": hidden_act,
"initializer_range": initializer_range,
"layer_norm_eps": layer_norm_eps,
"use_cache": use_cache,
"rope_parameters": rope_parameters,
"qk_layernorm": qk_layernorm,
"hidden_dropout": hidden_dropout,
"attention_dropout": attention_dropout,
"pad_token_id": pad_token_id,
"bos_token_id": bos_token_id,
"eos_token_id": eos_token_id,
"tie_word_embeddings": tie_word_embeddings,
}
logger.info("text_config is None. initializing the text model with default values.")
text_model_type = text_config.get("model_type", "persimmon")
self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
self._vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.qk_layernorm = qk_layernorm
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.image_token_id = image_token_id
self.rope_parameters = rope_parameters
kwargs.setdefault("partial_rotary_factor", 0.5) # assign default for BC
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["FuyuConfig"]
| FuyuConfig |
python | apache__airflow | providers/fab/src/airflow/providers/fab/auth_manager/schemas/role_and_permission_schema.py | {
"start": 1047,
"end": 1198
} | class ____(SQLAlchemySchema):
"""Action Schema."""
class Meta:
"""Meta."""
model = Action
name = auto_field()
| ActionSchema |
python | bokeh__bokeh | src/bokeh/core/serialization.py | {
"start": 3077,
"end": 3196
} | class ____(TypedDict):
type: Literal["slice"]
start: int | None
stop: int | None
step: int | None
| SliceRep |
python | jazzband__django-oauth-toolkit | oauth2_provider/contrib/rest_framework/permissions.py | {
"start": 4143,
"end": 6586
} | class ____(BasePermission):
"""
:attr:alternate_required_scopes: dict keyed by HTTP method name with value: iterable alternate scope lists
This fulfills the [Open API Specification (OAS; formerly Swagger)](https://www.openapis.org/)
list of alternative Security Requirements Objects for oauth2 or openIdConnect:
When a list of Security Requirement Objects is defined on the Open API object or Operation Object,
only one of Security Requirement Objects in the list needs to be satisfied to authorize the request.
[1](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securityRequirementObject)
For each method, a list of lists of allowed scopes is tried in order and the first to match succeeds.
@example
required_alternate_scopes = {
'GET': [['read']],
'POST': [['create1','scope2'], ['alt-scope3'], ['alt-scope4','alt-scope5']],
}
TODO: DRY: subclass TokenHasScope and iterate over values of required_scope?
"""
def has_permission(self, request, view):
token = request.auth
if not token:
return False
if hasattr(token, "scope"): # OAuth 2
required_alternate_scopes = self.get_required_alternate_scopes(request, view)
m = request.method.upper()
if m in required_alternate_scopes:
log.debug(
"Required scopes alternatives to access resource: {0}".format(
required_alternate_scopes[m]
)
)
for alt in required_alternate_scopes[m]:
if token.is_valid(alt):
return True
return False
else:
log.warning("no scope alternates defined for method {0}".format(m))
return False
assert False, (
"TokenMatchesOASRequirements requires the"
"`oauth2_provider.rest_framework.OAuth2Authentication` authentication "
"class to be used."
)
def get_required_alternate_scopes(self, request, view):
try:
return getattr(view, "required_alternate_scopes")
except AttributeError:
raise ImproperlyConfigured(
"TokenMatchesOASRequirements requires the view to"
" define the required_alternate_scopes attribute"
)
| TokenMatchesOASRequirements |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/4_Sarsa_lambda_maze/maze_env.py | {
"start": 592,
"end": 4013
} | class ____(tk.Tk, object):
def __init__(self):
super(Maze, self).__init__()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.title('maze')
self.geometry('{0}x{1}'.format(MAZE_W * UNIT, MAZE_H * UNIT))
self._build_maze()
def _build_maze(self):
self.canvas = tk.Canvas(self, bg='white',
height=MAZE_H * UNIT,
width=MAZE_W * UNIT)
# create grids
for c in range(0, MAZE_W * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, MAZE_H * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
self.canvas.create_line(x0, y0, x1, y1)
# create origin
origin = np.array([20, 20])
# hell
hell1_center = origin + np.array([UNIT * 2, UNIT])
self.hell1 = self.canvas.create_rectangle(
hell1_center[0] - 15, hell1_center[1] - 15,
hell1_center[0] + 15, hell1_center[1] + 15,
fill='black')
# hell
hell2_center = origin + np.array([UNIT, UNIT * 2])
self.hell2 = self.canvas.create_rectangle(
hell2_center[0] - 15, hell2_center[1] - 15,
hell2_center[0] + 15, hell2_center[1] + 15,
fill='black')
# create oval
oval_center = origin + UNIT * 2
self.oval = self.canvas.create_oval(
oval_center[0] - 15, oval_center[1] - 15,
oval_center[0] + 15, oval_center[1] + 15,
fill='yellow')
# create red rect
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# pack all
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.5)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# return observation
return self.canvas.coords(self.rect)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0: # up
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1: # down
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2: # right
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3: # left
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent
s_ = self.canvas.coords(self.rect) # next state
# reward function
if s_ == self.canvas.coords(self.oval):
reward = 1
done = True
s_ = 'terminal'
elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]:
reward = -1
done = True
s_ = 'terminal'
else:
reward = 0
done = False
return s_, reward, done
def render(self):
time.sleep(0.05)
self.update()
| Maze |
python | kubernetes-client__python | kubernetes/client/models/v1_node_config_source.py | {
"start": 383,
"end": 3507
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'config_map': 'V1ConfigMapNodeConfigSource'
}
attribute_map = {
'config_map': 'configMap'
}
def __init__(self, config_map=None, local_vars_configuration=None): # noqa: E501
"""V1NodeConfigSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config_map = None
self.discriminator = None
if config_map is not None:
self.config_map = config_map
@property
def config_map(self):
"""Gets the config_map of this V1NodeConfigSource. # noqa: E501
:return: The config_map of this V1NodeConfigSource. # noqa: E501
:rtype: V1ConfigMapNodeConfigSource
"""
return self._config_map
@config_map.setter
def config_map(self, config_map):
"""Sets the config_map of this V1NodeConfigSource.
:param config_map: The config_map of this V1NodeConfigSource. # noqa: E501
:type: V1ConfigMapNodeConfigSource
"""
self._config_map = config_map
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeConfigSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NodeConfigSource):
return True
return self.to_dict() != other.to_dict()
| V1NodeConfigSource |
python | mlflow__mlflow | mlflow/utils/autologging_utils/__init__.py | {
"start": 9014,
"end": 29492
} | class ____:
"""
The BatchMetricsLogger will log metrics in batch against an mlflow run.
If run_id is passed to to constructor then all recording and logging will
happen against that run_id.
If no run_id is passed into constructor, then the run ID will be fetched
from `mlflow.active_run()` each time `record_metrics()` or `flush()` is called; in this
case, callers must ensure that an active run is present before invoking
`record_metrics()` or `flush()`.
"""
def __init__(self, run_id=None, tracking_uri=None, model_id=None):
from mlflow.tracking.client import MlflowClient
self.run_id = run_id
self.model_id = model_id
self.client = MlflowClient(tracking_uri)
# data is an array of Metric objects
self.data = []
self.total_training_time = 0
self.total_log_batch_time = 0
self.previous_training_timestamp = None
def flush(self):
"""
The metrics accumulated by BatchMetricsLogger will be batch logged to an MLflow run.
"""
self._timed_log_batch()
self.data = []
def _timed_log_batch(self):
# Retrieving run_id from active mlflow run when run_id is empty.
current_run_id = mlflow.active_run().info.run_id if self.run_id is None else self.run_id
start = time.time()
metrics_slices = [
self.data[i : i + MAX_METRICS_PER_BATCH]
for i in range(0, len(self.data), MAX_METRICS_PER_BATCH)
]
for metrics_slice in metrics_slices:
self.client.log_batch(run_id=current_run_id, metrics=metrics_slice)
end = time.time()
self.total_log_batch_time += end - start
def _should_flush(self):
target_training_to_logging_time_ratio = 10
if (
self.total_training_time
>= self.total_log_batch_time * target_training_to_logging_time_ratio
):
return True
return False
def record_metrics(self, metrics, step=None):
"""
Submit a set of metrics to be logged. The metrics may not be immediately logged, as this
class will batch them in order to not increase execution time too much by logging
frequently.
Args:
metrics: Dictionary containing key, value pairs of metrics to be logged.
step: The training step that the metrics correspond to.
"""
current_timestamp = time.time()
if self.previous_training_timestamp is None:
self.previous_training_timestamp = current_timestamp
training_time = current_timestamp - self.previous_training_timestamp
self.total_training_time += training_time
# log_batch() requires step to be defined. Therefore will set step to 0 if not defined.
if step is None:
step = 0
for key, value in metrics.items():
self.data.append(
Metric(key, value, int(current_timestamp * 1000), step, model_id=self.model_id)
)
if self._should_flush():
self.flush()
self.previous_training_timestamp = current_timestamp
@contextlib.contextmanager
def batch_metrics_logger(run_id: str | None = None, model_id: str | None = None):
"""
Context manager that yields a BatchMetricsLogger object, which metrics can be logged against.
The BatchMetricsLogger keeps metrics in a list until it decides they should be logged, at
which point the accumulated metrics will be batch logged. The BatchMetricsLogger ensures
that logging imposes no more than a 10% overhead on the training, where the training is
measured by adding up the time elapsed between consecutive calls to record_metrics.
If logging a batch fails, a warning will be emitted and subsequent metrics will continue to
be collected.
Once the context is closed, any metrics that have yet to be logged will be logged.
Args:
run_id: ID of the run that the metrics will be logged to.
model_id: ID of the model that the metrics will be associated with.
"""
batch_metrics_logger = BatchMetricsLogger(run_id, model_id=model_id)
yield batch_metrics_logger
batch_metrics_logger.flush()
def gen_autologging_package_version_requirements_doc(integration_name):
"""
Returns:
A document note string saying the compatibility for the specified autologging
integration's associated package versions.
"""
min_ver, max_ver, pip_release = get_min_max_version_and_pip_release(integration_name)
required_pkg_versions = f"``{min_ver}`` <= ``{pip_release}`` <= ``{max_ver}``"
return (
" .. Note:: Autologging is known to be compatible with the following package versions: "
+ required_pkg_versions
+ ". Autologging may not succeed when used with package versions outside of this range."
+ "\n\n"
)
def _check_and_log_warning_for_unsupported_package_versions(integration_name):
"""
If the package version is not supported for autologging, log a warning message.
Only check the minimum version, not the maximum version. This is because the "maximum" version
in the ml-package-versions.yml is only updated per release and it cannot keep up with the pace
of the package releases. The cross-version tests in MLflow CI runs tests against the latest
available version, not limited to the "maximum" version, so it is safe to assume it supports
up to the latest version.
"""
if (
integration_name in FLAVOR_TO_MODULE_NAME
and not get_autologging_config(integration_name, "disable", True)
and not get_autologging_config(integration_name, "disable_for_unsupported_versions", False)
and not is_flavor_supported_for_associated_package_versions(
integration_name, check_max_version=False
)
):
min_var, _, pip_release = get_min_max_version_and_pip_release(integration_name)
module = importlib.import_module(FLAVOR_TO_MODULE_NAME[integration_name])
_logger.warning(
f"MLflow {integration_name} autologging is known to be compatible with "
f"{min_var} <= {pip_release}, but the installed version is "
f"{module.__version__}. If you encounter errors during autologging, try upgrading "
f"/ downgrading {pip_release} to a compatible version, or try upgrading MLflow.",
)
def autologging_integration(name):
"""
**All autologging integrations should be decorated with this wrapper.**
Wraps an autologging function in order to store its configuration arguments. This enables
patch functions to broadly obey certain configurations (e.g., disable=True) without
requiring specific logic to be present in each autologging integration.
"""
def validate_param_spec(param_spec):
if "disable" not in param_spec or param_spec["disable"].default is not False:
raise Exception(
f"Invalid `autolog()` function for integration '{name}'. `autolog()` functions"
" must specify a 'disable' argument with default value 'False'"
)
elif "silent" not in param_spec or param_spec["silent"].default is not False:
raise Exception(
f"Invalid `autolog()` function for integration '{name}'. `autolog()` functions"
" must specify a 'silent' argument with default value 'False'"
)
def wrapper(_autolog):
param_spec = inspect.signature(_autolog).parameters
validate_param_spec(param_spec)
AUTOLOGGING_INTEGRATIONS[name] = {}
default_params = {param.name: param.default for param in param_spec.values()}
@autologging_conf_lock
def autolog(*args, **kwargs):
config_to_store = dict(default_params)
config_to_store.update(
{param.name: arg for arg, param in zip(args, param_spec.values())}
)
config_to_store.update(kwargs)
AUTOLOGGING_INTEGRATIONS[name] = config_to_store
try:
# Pass `autolog()` arguments to `log_autolog_called` in keyword format to enable
# event loggers to more easily identify important configuration parameters
# (e.g., `disable`) without examining positional arguments. Passing positional
# arguments to `log_autolog_called` is deprecated in MLflow > 1.13.1
AutologgingEventLogger.get_logger().log_autolog_called(name, (), config_to_store)
except Exception:
pass
revert_patches(name)
# If disabling autologging using fluent api, then every active integration's autolog
# needs to be called with disable=True. So do not short circuit and let
# `mlflow.autolog()` invoke all active integrations with disable=True.
if name != "mlflow" and get_autologging_config(name, "disable", True):
return
is_silent_mode = get_autologging_config(name, "silent", False)
# Reroute non-MLflow warnings encountered during autologging enablement to an
# MLflow event logger, and enforce silent mode if applicable (i.e. if the corresponding
# autologging integration was called with `silent=True`)
with (
MlflowEventsAndWarningsBehaviorGlobally(
# MLflow warnings emitted during autologging setup / enablement are likely
# actionable and relevant to the user, so they should be emitted as normal
# when `silent=False`. For reference, see recommended warning and event logging
# behaviors from https://docs.python.org/3/howto/logging.html#when-to-use-logging
reroute_warnings=False,
disable_event_logs=is_silent_mode,
disable_warnings=is_silent_mode,
),
NonMlflowWarningsBehaviorForCurrentThread(
# non-MLflow warnings emitted during autologging setup / enablement are not
# actionable for the user, as they are a byproduct of the autologging
# implementation. Accordingly, they should be rerouted to `logger.warning()`.
# For reference, see recommended warning and event logging
# behaviors from https://docs.python.org/3/howto/logging.html#when-to-use-logging
reroute_warnings=True,
disable_warnings=is_silent_mode,
),
):
_check_and_log_warning_for_unsupported_package_versions(name)
return _autolog(*args, **kwargs)
wrapped_autolog = update_wrapper_extended(autolog, _autolog)
# Set the autologging integration name as a function attribute on the wrapped autologging
# function, allowing the integration name to be extracted from the function. This is used
# during the execution of import hooks for `mlflow.autolog()`.
wrapped_autolog.integration_name = name
if name in FLAVOR_TO_MODULE_NAME:
wrapped_autolog.__doc__ = gen_autologging_package_version_requirements_doc(name) + (
wrapped_autolog.__doc__ or ""
)
return wrapped_autolog
return wrapper
def get_autologging_config(flavor_name, config_key, default_value=None):
"""
Returns a desired config value for a specified autologging integration.
Returns `None` if specified `flavor_name` has no recorded configs.
If `config_key` is not set on the config object, default value is returned.
Args:
flavor_name: An autologging integration flavor name.
config_key: The key for the desired config value.
default_value: The default_value to return.
"""
config = AUTOLOGGING_INTEGRATIONS.get(flavor_name)
if config is not None:
return config.get(config_key, default_value)
else:
return default_value
def autologging_is_disabled(integration_name):
"""Returns a boolean flag of whether the autologging integration is disabled.
Args:
integration_name: An autologging integration flavor name.
"""
explicit_disabled = get_autologging_config(integration_name, "disable", True)
if explicit_disabled:
return True
if (
integration_name in FLAVOR_TO_MODULE_NAME
and get_autologging_config(integration_name, "disable_for_unsupported_versions", False)
and not is_flavor_supported_for_associated_package_versions(integration_name)
):
return True
return False
def is_autolog_supported(integration_name: str) -> bool:
"""
Whether the specified autologging integration is supported by the current environment.
Args:
integration_name: An autologging integration flavor name.
"""
# NB: We don't check for the presence of autolog() function as it requires importing
# the flavor module, which may cause import error or overhead.
return "autologging" in _ML_PACKAGE_VERSIONS.get(integration_name, {})
def get_autolog_function(integration_name: str) -> Callable[..., Any] | None:
"""
Get the autolog() function for the specified integration.
Returns None if the flavor does not have an autolog() function.
"""
flavor_module = importlib.import_module(f"mlflow.{integration_name}")
return getattr(flavor_module, "autolog", None)
@contextlib.contextmanager
def disable_autologging():
"""
Context manager that temporarily disables autologging globally for all integrations upon
entry and restores the previous autologging configuration upon exit.
"""
global _AUTOLOGGING_GLOBALLY_DISABLED
_AUTOLOGGING_GLOBALLY_DISABLED = True
try:
yield
finally:
_AUTOLOGGING_GLOBALLY_DISABLED = False
@contextlib.contextmanager
def disable_discrete_autologging(flavors_to_disable: list[str]) -> None:
"""
Context manager for disabling specific autologging integrations temporarily while another
flavor's autologging is activated. This context wrapper is useful in the event that, for
example, a particular library calls upon another library within a training API that has a
current MLflow autologging integration.
For instance, the transformers library's Trainer class, when running metric scoring,
builds a sklearn model and runs evaluations as part of its accuracy scoring. Without this
temporary autologging disabling, a new run will be generated that contains a sklearn model
that holds no use for tracking purposes as it is only used during the metric evaluation phase
of training.
Args:
flavors_to_disable: A list of flavors that need to be temporarily disabled while
executing another flavor's autologging to prevent spurious run
logging of unrelated models, metrics, and parameters.
"""
enabled_flavors = []
for flavor in flavors_to_disable:
if not autologging_is_disabled(flavor):
enabled_flavors.append(flavor)
autolog_func = getattr(mlflow, flavor)
autolog_func.autolog(disable=True)
yield
for flavor in enabled_flavors:
autolog_func = getattr(mlflow, flavor)
autolog_func.autolog(disable=False)
_training_sessions = []
def _get_new_training_session_class():
"""
Returns a session manager class for nested autologging runs.
Examples
--------
>>> class Parent:
... pass
>>> class Child:
... pass
>>> class Grandchild:
... pass
>>>
>>> _TrainingSession = _get_new_training_session_class()
>>> with _TrainingSession(Parent, False) as p:
... with _SklearnTrainingSession(Child, True) as c:
... with _SklearnTrainingSession(Grandchild, True) as g:
... print(p.should_log(), c.should_log(), g.should_log())
True False False
>>>
>>> with _TrainingSession(Parent, True) as p:
... with _TrainingSession(Child, False) as c:
... with _TrainingSession(Grandchild, True) as g:
... print(p.should_log(), c.should_log(), g.should_log())
True True False
>>>
>>> with _TrainingSession(Child, True) as c1:
... with _TrainingSession(Child, True) as c2:
... print(c1.should_log(), c2.should_log())
True False
"""
# NOTE: The current implementation doesn't guarantee thread-safety, but that's okay for now
# because:
# 1. We don't currently have any use cases for allow_children=True.
# 2. The list append & pop operations are thread-safe, so we will always clear the session stack
# once all _TrainingSessions exit.
class _TrainingSession:
_session_stack = []
def __init__(self, estimator, allow_children=True):
"""A session manager for nested autologging runs.
Args:
estimator: An estimator that this session originates from.
allow_children: If True, allows autologging in child sessions.
If False, disallows autologging in all descendant sessions.
"""
self.allow_children = allow_children
self.estimator = estimator
self._parent = None
def __enter__(self):
if len(_TrainingSession._session_stack) > 0:
self._parent = _TrainingSession._session_stack[-1]
self.allow_children = (
_TrainingSession._session_stack[-1].allow_children and self.allow_children
)
_TrainingSession._session_stack.append(self)
return self
def __exit__(self, tp, val, traceback):
_TrainingSession._session_stack.pop()
def should_log(self):
"""
Returns True when at least one of the following conditions satisfies:
1. This session is the root session.
2. The parent session allows autologging and its estimator differs from this session's
estimator.
"""
for training_session in _TrainingSession._session_stack:
if training_session is self:
break
elif training_session.estimator is self.estimator:
return False
return self._parent is None or self._parent.allow_children
@staticmethod
def is_active():
return len(_TrainingSession._session_stack) != 0
@staticmethod
def get_current_session():
if _TrainingSession.is_active():
return _TrainingSession._session_stack[-1]
return None
_training_sessions.append(_TrainingSession)
return _TrainingSession
def _has_active_training_session():
return any(s.is_active() for s in _training_sessions)
def get_instance_method_first_arg_value(method, call_pos_args, call_kwargs):
"""Get instance method first argument value (exclude the `self` argument).
Args:
method: A `cls.method` object which includes the `self` argument.
call_pos_args: positional arguments excluding the first `self` argument.
call_kwargs: keywords arguments.
"""
if len(call_pos_args) >= 1:
return call_pos_args[0]
else:
param_sig = inspect.signature(method).parameters
first_arg_name = list(param_sig.keys())[1]
assert param_sig[first_arg_name].kind not in [
inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL,
]
return call_kwargs.get(first_arg_name)
def get_method_call_arg_value(arg_index, arg_name, default_value, call_pos_args, call_kwargs):
"""Get argument value for a method call.
Args:
arg_index: The argument index in the function signature. Start from 0.
arg_name: The argument name in the function signature.
default_value: Default argument value.
call_pos_args: The positional argument values in the method call.
call_kwargs: The keyword argument values in the method call.
"""
if arg_name in call_kwargs:
return call_kwargs[arg_name]
elif arg_index < len(call_pos_args):
return call_pos_args[arg_index]
else:
return default_value
| BatchMetricsLogger |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 192003,
"end": 194101
} | class ____:
"""Tests kstest and ks_1samp agree with K-S various sizes, alternatives, modes."""
def _testOne(self, x, alternative, expected_statistic, expected_prob,
mode='auto', decimal=14):
result = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
expected = np.array([expected_statistic, expected_prob])
assert_array_almost_equal(np.array(result), expected, decimal=decimal)
def _test_kstest_and_ks1samp(self, x, alternative, mode='auto', decimal=14):
result = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
result_1samp = stats.ks_1samp(x, stats.norm.cdf,
alternative=alternative, mode=mode)
assert_array_almost_equal(np.array(result), result_1samp, decimal=decimal)
def test_namedtuple_attributes(self):
x = np.linspace(-1, 1, 9)
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.kstest(x, 'norm')
check_named_results(res, attributes)
def test_agree_with_ks_1samp(self):
x = np.linspace(-1, 1, 9)
self._test_kstest_and_ks1samp(x, 'two-sided')
x = np.linspace(-15, 15, 9)
self._test_kstest_and_ks1samp(x, 'two-sided')
x = [-1.23, 0.06, -0.60, 0.17, 0.66, -0.17, -0.08, 0.27, -0.98, -0.99]
self._test_kstest_and_ks1samp(x, 'two-sided')
self._test_kstest_and_ks1samp(x, 'greater', mode='exact')
self._test_kstest_and_ks1samp(x, 'less', mode='exact')
def test_pm_inf_gh20386(self):
# Check that gh-20386 is resolved - `kstest` does not
# return NaNs when both -inf and inf are in sample.
vals = [-np.inf, 0, 1, np.inf]
res = stats.kstest(vals, stats.cauchy.cdf)
ref = stats.kstest(vals, stats.cauchy.cdf, _no_deco=True)
assert np.all(np.isfinite(res))
assert_equal(res, ref)
assert not np.isnan(res.statistic)
assert not np.isnan(res.pvalue)
# missing: no test that uses *args
@make_xp_test_case(stats.ks_1samp)
| TestKSTest |
python | dask__dask | dask/array/_array_expr/random.py | {
"start": 33036,
"end": 36204
} | class ____(IO):
_parameters = [
"rng",
"distribution",
"size",
"chunks",
"extra_chunks",
"args",
"kwargs",
]
_defaults = {"extra_chunks": ()}
@cached_property
def kwargs(self):
return self.operand("kwargs")
@property
def chunks(self):
size = self.operand("size")
chunks = self.operand("chunks")
# shapes = list(
# {
# ar.shape
# for ar in chain(args, kwargs.values())
# if isinstance(ar, (Array, np.ndarray))
# }
# )
# if size is not None:
# shapes.append(size)
shapes = [size]
# broadcast to the final size(shape)
size = broadcast_shapes(*shapes)
return normalize_chunks(
chunks,
size, # ideally would use dtype here
dtype=self.kwargs.get("dtype", np.float64),
)
@cached_property
def _info(self):
sizes = list(product(*self.chunks))
if isinstance(self.rng, Generator):
bitgens = _spawn_bitgens(self.rng._bit_generator, len(sizes))
bitgen_token = tokenize(bitgens)
bitgens = [_bitgen._seed_seq for _bitgen in bitgens]
func_applier = _apply_random_func
gen = type(self.rng._bit_generator)
elif isinstance(self.rng, RandomState):
bitgens = random_state_data(len(sizes), self.rng._numpy_state)
bitgen_token = tokenize(bitgens)
func_applier = _apply_random
gen = self.rng._RandomState
else:
raise TypeError(
"Unknown object type: Not a Generator and Not a RandomState"
)
token = tokenize(bitgen_token, self.size, self.chunks, self.args, self.kwargs)
name = f"{self.distribution}-{token}"
return bitgens, name, sizes, gen, func_applier
@property
def _name(self):
return self._info[1]
@property
def bitgens(self):
return self._info[0]
def _layer(self):
bitgens, name, sizes, gen, func_applier = self._info
keys = product(
[name],
*([range(len(bd)) for bd in self.chunks] + [[0]] * len(self.extra_chunks)),
)
vals = []
# TODO: handle non-trivial args/kwargs (arrays, dask or otherwise)
for bitgen, size in zip(bitgens, sizes):
vals.append(
(
func_applier,
gen,
self.distribution,
bitgen,
size,
self.args,
self.kwargs,
)
)
return dict(zip(keys, vals))
@cached_property
def _meta(self):
bitgens, name, sizes, gen, func_applier = self._info
return func_applier(
gen,
self.distribution,
bitgens[0], # TODO: not sure about this
(0,) * len(self.operand("size")),
self.args,
self.kwargs,
# small_args,
# small_kwargs,
)
| Random |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 998393,
"end": 999141
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for TeamDiscussion."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("TeamDiscussionEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("TeamDiscussion"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| TeamDiscussionConnection |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_stats.py | {
"start": 327,
"end": 4041
} | class ____(APITestCase, OutcomesSnubaTest):
def test_simple(self) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user)
project = self.create_project(organization=org)
project_key = self.create_project_key(project=project)
self.store_outcomes(
{
"org_id": org.id,
"timestamp": before_now(minutes=1),
"project_id": project.id,
"key_id": project_key.id,
"outcome": Outcome.ACCEPTED,
"reason": "none",
"category": DataCategory.ERROR,
"quantity": 1,
},
3,
)
url = reverse("sentry-api-0-organization-stats", args=[org.slug])
response = self.client.get(url)
assert response.status_code == 200, response.content
assert response.data[-1][1] == 3, response.data
for point in response.data[:-1]:
assert point[1] == 0
assert len(response.data) == 24
def test_resolution(self) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user)
project = self.create_project(organization=org)
project_key = self.create_project_key(project=project)
self.store_outcomes(
{
"org_id": org.id,
"timestamp": before_now(hours=1),
"project_id": project.id,
"key_id": project_key.id,
"outcome": Outcome.ACCEPTED,
"reason": "none",
"category": DataCategory.ERROR,
"quantity": 1,
},
3,
)
url = reverse("sentry-api-0-organization-stats", args=[org.slug])
response = self.client.get(f"{url}?resolution=1d")
assert response.status_code == 200, response.content
assert response.data[-1][1] == 3, response.data
assert len(response.data) == 1
def test_resolution_invalid(self) -> None:
self.login_as(user=self.user)
url = reverse("sentry-api-0-organization-stats", args=[self.organization.slug])
response = self.client.get(f"{url}?resolution=lol-nope")
assert response.status_code == 400, response.content
def test_id_filtering(self) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user)
project = self.create_project(
teams=[self.create_team(organization=org, members=[self.user])]
)
url = reverse("sentry-api-0-organization-stats", args=[org.slug])
response = self.client.get(url, {"id": str(project.id), "group": "project"})
assert response.status_code == 200, response.content
assert project.id in response.data
response = self.client.get(url, {"id": str(sys.maxsize), "group": "project"})
assert project.id not in response.data
def test_project_id_only(self) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user)
project = self.create_project(
teams=[self.create_team(organization=org, members=[self.user])]
)
project2 = self.create_project(
teams=[self.create_team(organization=org, members=[self.user])]
)
response = self.client.get(
reverse("sentry-api-0-organization-stats", args=[org.slug]),
{"projectID": str(project.id), "group": "project"},
)
assert response.status_code == 200, response.content
assert project.id in response.data
assert project2.id not in response.data
| OrganizationStatsTest |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | {
"start": 10561,
"end": 11773
} | class ____(Benchmark):
r"""
Mishra 5 objective function.
This class defines the Mishra 5 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Mishra05}}(x) = \left [ \sin^2 ((\cos(x_1) + \cos(x_2))^2)
+ \cos^2 ((\sin(x_1) + \sin(x_2))^2) + x_1 \right ]^2 + 0.01(x_1 + x_2)
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.119829` for :math:`x = [-1.98682, -10]`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO Line 381 in paper
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-1.98682, -10.0]]
self.fglob = -1.019829519930646
def fun(self, x, *args):
self.nfev += 1
return (0.01 * x[0] + 0.1 * x[1]
+ (sin((cos(x[0]) + cos(x[1])) ** 2) ** 2
+ cos((sin(x[0]) + sin(x[1])) ** 2) ** 2 + x[0]) ** 2)
| Mishra05 |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/checkpoint_adapter.py | {
"start": 876,
"end": 2822
} | class ____:
"""API to reshard a checkpoint value during restore.
When a ReshardCallback is attached to a CheckpointPosition, the restored value
of the checkpoint position is resharded based on this callback.
"""
def object_name(self) -> str:
"""Returns the local name of the object being restored.
Override this method when the local name of object is different than in the
checkpoint.
"""
return None
def reshard(
self,
checkpoint_values: List[tensor.Tensor],
shape_and_slice_spec: List[str],
) -> tensor.Tensor:
"""Reshards the checkpoint values as read from the checkpoint file.
Override this to reshard/modify the restored values
Args:
checkpoint_values: The values returned by the restore op, as read from
file.
shape_and_slice_spec: The shape and slice spec required by the caller.
Returns:
List of restored Tensor values after being resharded.
"""
del shape_and_slice_spec # unused
# Default reshard is a trivial one.
if len(checkpoint_values) != 1:
raise ValueError("Default reshard expects a single checkpoint value.")
return checkpoint_values[0]
def update_restore_inputs(
self, checkpoint_key, shape_and_slice_spec
) -> tuple[List[str], List[str]]:
"""Updates the specs to restore op.
Override this method if the arguments to restore op need to be updated as
per the resharding required.
Args:
checkpoint_key: The checkpoint key as requested by the caller
shape_and_slice_spec: The shape and slice spec as requested by caller
Returns:
Tuple of list of checkpoint_keys and specs that the restore op should fetch
as per the resharding requirement. The length of checkpoint keys returned by
this method will match the length of checkpoint_values that are input to
`reshard`.
"""
return ([checkpoint_key], [shape_and_slice_spec])
| ReshardCallback |
python | doocs__leetcode | solution/2500-2599/2592.Maximize Greatness of an Array/Solution.py | {
"start": 0,
"end": 176
} | class ____:
def maximizeGreatness(self, nums: List[int]) -> int:
nums.sort()
i = 0
for x in nums:
i += x > nums[i]
return i
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/tuple7.py | {
"start": 153,
"end": 651
} | class ____(tuple[int, str, int, _T]):
def __new__(cls) -> Self: ...
objA = ClassA[complex]()
(a, b, c, d) = objA
aa1: int = a
bb1: str = b
cc1: int = c
dd1: complex = d
reveal_type(objA[0], expected_text="int")
reveal_type(objA[1], expected_text="str")
reveal_type(objA[2], expected_text="int")
reveal_type(objA[3], expected_text="complex")
# This should generate an error because the trailing
# comma turns the index value into a tuple.
e = objA[0,]
for aaa in objA:
print(aaa)
| ClassA |
python | spyder-ide__spyder | external-deps/spyder-remote-services/spyder_remote_services/services/files/handlers.py | {
"start": 5982,
"end": 6183
} | class ____(BaseFSHandler):
@web.authenticated
@authorized
def get(self):
result = self.fs_isdir(self.get_path_argument("path"))
self.write_json({"isdir": result})
| IsDirHandler |
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 8273,
"end": 8327
} | class ____(XsdUnsignedInt):
pass
| ST_DrawingElementId |
python | tensorflow__tensorflow | tensorflow/python/platform/flags_test.py | {
"start": 1971,
"end": 4562
} | class ____(unittest.TestCase):
def setUp(self):
self.original_flags = flags.FlagValues()
self.wrapped_flags = flags._FlagValuesWrapper(self.original_flags)
flags.DEFINE_string(
'test', 'default', 'test flag', flag_values=self.wrapped_flags)
def test_attribute_overrides(self):
# Test that methods defined in absl.flags.FlagValues are the same as the
# wrapped ones.
self.assertEqual(flags.FLAGS.is_parsed, absl_flags.FLAGS.is_parsed)
def test_getattr(self):
self.assertFalse(self.wrapped_flags.is_parsed())
with test.mock.patch.object(sys, 'argv', new=['program', '--test=new']):
self.assertEqual('new', self.wrapped_flags.test)
self.assertTrue(self.wrapped_flags.is_parsed())
def test_setattr(self):
self.assertEqual('default', self.wrapped_flags.test)
self.wrapped_flags.test = 'new'
self.assertEqual('new', self.wrapped_flags.test)
def test_delattr(self):
del self.wrapped_flags.test
self.assertNotIn('test', self.wrapped_flags)
with self.assertRaises(AttributeError):
_ = self.wrapped_flags.test
def test_dir(self):
self.assertEqual(['test'], dir(self.wrapped_flags))
def test_getitem(self):
self.assertIs(self.original_flags['test'], self.wrapped_flags['test'])
def test_setitem(self):
flag = flags.Flag(flags.ArgumentParser(), flags.ArgumentSerializer(),
'fruit', 'apple', 'the fruit type')
self.wrapped_flags['fruit'] = flag
self.assertIs(self.original_flags['fruit'], self.wrapped_flags['fruit'])
self.assertEqual('apple', self.wrapped_flags.fruit)
def test_len(self):
self.assertEqual(1, len(self.wrapped_flags))
def test_iter(self):
self.assertEqual(['test'], list(self.wrapped_flags))
def test_str(self):
self.assertEqual(str(self.wrapped_flags), str(self.original_flags))
def test_call(self):
self.wrapped_flags(['program', '--test=new'])
self.assertEqual('new', self.wrapped_flags.test)
def test_keyword_arguments(self):
test_cases = (
('old_string', 'default'),
('new_string', 'default'),
('old_integer', 1),
('new_integer', 1),
('old_float', 1.5),
('new_float', 1.5),
('old_bool', True),
('new_bool', True),
('old_boolean', False),
('new_boolean', False),
)
for flag_name, default_value in test_cases:
self.assertEqual(default_value, absl_flags.FLAGS[flag_name].default)
self.assertEqual('docstring', absl_flags.FLAGS[flag_name].help)
if __name__ == '__main__':
unittest.main()
| FlagsTest |
python | django__django | tests/test_runner_apps/simple/tests.py | {
"start": 339,
"end": 447
} | class ____(SimpleTestCase):
def test_1(self):
pass
def test_2(self):
pass
| SimpleCase1 |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/relativity/relativity.py | {
"start": 9220,
"end": 10257
} | class ____(pTypes.GroupParameter):
def __init__(self, **kwds):
defs = dict(name="Grid", autoIncrementName=True, renamable=True, removable=True, children=[
dict(name='Number of Clocks', type='int', value=5, limits=[1, None]),
dict(name='Spacing', type='float', value=1.0, step=0.1),
ClockParam(name='ClockTemplate'),
])
#defs.update(kwds)
pTypes.GroupParameter.__init__(self, **defs)
self.restoreState(kwds, removeChildren=False)
def buildClocks(self):
clocks = {}
template = self.param('ClockTemplate')
spacing = self['Spacing']
for i in range(self['Number of Clocks']):
c = list(template.buildClocks().values())[0]
c.x0 += i * spacing
clocks[self.name() + '%02d' % i] = c
return clocks
def clockNames(self):
return [self.name() + '%02d' % i for i in range(self['Number of Clocks'])]
pTypes.registerParameterType('Grid', GridParam)
| GridParam |
python | doocs__leetcode | solution/3200-3299/3286.Find a Safe Walk Through a Grid/Solution.py | {
"start": 0,
"end": 701
} | class ____:
def findSafeWalk(self, grid: List[List[int]], health: int) -> bool:
m, n = len(grid), len(grid[0])
dist = [[inf] * n for _ in range(m)]
dist[0][0] = grid[0][0]
q = deque([(0, 0)])
dirs = (-1, 0, 1, 0, -1)
while q:
x, y = q.popleft()
for a, b in pairwise(dirs):
nx, ny = x + a, y + b
if (
0 <= nx < m
and 0 <= ny < n
and dist[nx][ny] > dist[x][y] + grid[nx][ny]
):
dist[nx][ny] = dist[x][y] + grid[nx][ny]
q.append((nx, ny))
return dist[-1][-1] < health
| Solution |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_cloud_batch.py | {
"start": 1566,
"end": 5846
} | class ____:
def test_serialization(self, trigger):
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.google.cloud.triggers.cloud_batch.CloudBatchJobFinishedTrigger"
assert kwargs == {
"project_id": PROJECT_ID,
"job_name": JOB_NAME,
"location": LOCATION,
"gcp_conn_id": GCP_CONNECTION_ID,
"polling_period_seconds": POLL_SLEEP,
"timeout": TIMEOUT,
"impersonation_chain": IMPERSONATION_CHAIN,
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.cloud_batch.CloudBatchAsyncHook")
async def test_trigger_on_success_yield_successfully(
self, mock_hook, trigger: CloudBatchJobFinishedTrigger
):
"""
Tests the CloudBatchJobFinishedTrigger fires once the job execution reaches a successful state.
"""
state = JobStatus.State(JobStatus.State.SUCCEEDED)
mock_hook.return_value.get_batch_job.return_value = self._mock_job_with_state(state)
generator = trigger.run()
actual = await generator.asend(None) # type:ignore[attr-defined]
assert (
TriggerEvent(
{
"job_name": JOB_NAME,
"status": "success",
"message": "Job completed",
}
)
== actual
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.cloud_batch.CloudBatchAsyncHook")
async def test_trigger_on_deleted_yield_successfully(
self, mock_hook, trigger: CloudBatchJobFinishedTrigger
):
"""
Tests the CloudBatchJobFinishedTrigger fires once the job execution reaches a successful state.
"""
state = JobStatus.State(JobStatus.State.DELETION_IN_PROGRESS)
mock_hook.return_value.get_batch_job.return_value = self._mock_job_with_state(state)
generator = trigger.run()
actual = await generator.asend(None) # type:ignore[attr-defined]
assert (
TriggerEvent(
{
"job_name": JOB_NAME,
"status": "deleted",
"message": f"Batch job with name {JOB_NAME} is being deleted",
}
)
== actual
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.cloud_batch.CloudBatchAsyncHook")
async def test_trigger_on_deleted_yield_exception(self, mock_hook, trigger: CloudBatchJobFinishedTrigger):
"""
Tests the CloudBatchJobFinishedTrigger fires once the job execution
reaches an state with an error message.
"""
mock_hook.return_value.get_batch_job.side_effect = Exception("Test Exception")
generator = trigger.run()
actual = await generator.asend(None) # type:ignore[attr-defined]
assert (
TriggerEvent(
{
"status": "error",
"message": "Test Exception",
}
)
== actual
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.cloud_batch.CloudBatchAsyncHook")
async def test_trigger_timeout(self, mock_hook, trigger: CloudBatchJobFinishedTrigger):
"""
Tests the CloudBatchJobFinishedTrigger fires once the job execution times out with an error message.
"""
async def _mock_job(job_name):
job = mock.MagicMock()
job.status.state = JobStatus.State.RUNNING
return job
mock_hook.return_value.get_batch_job = _mock_job
generator = trigger.run()
actual = await generator.asend(None) # type:ignore[attr-defined]
assert (
TriggerEvent(
{
"job_name": JOB_NAME,
"status": "timed out",
"message": f"Batch job with name {JOB_NAME} timed out",
}
)
== actual
)
async def _mock_job_with_state(self, state: JobStatus.State):
job: Job = mock.MagicMock()
job.status.state = state
return job
| TestCloudBatchJobFinishedTrigger |
python | pytorch__pytorch | test/test_overrides.py | {
"start": 49662,
"end": 49989
} | class ____(TestCase):
# Regression test for gh-54457
def test_iterator(self):
t = torch.tensor([5, 6, 7]).as_subclass(SubTensor2)
it = iter(t)
self.assertIs(type(next(it)), SubTensor2)
self.assertIs(type(next(it)), SubTensor2)
self.assertIs(type(next(it)), SubTensor2)
| TestIterator |
python | walkccc__LeetCode | solutions/1365. How Many Numbers Are Smaller Than the Current Number/1365.py | {
"start": 0,
"end": 277
} | class ____:
def smallerNumbersThanCurrent(self, nums: list[int]) -> list[int]:
MAX = 100
count = collections.Counter(nums)
for i in range(1, MAX + 1):
count[i] += count[i - 1]
return [0 if num == 0 else count[num - 1]
for num in nums]
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pydoclint/DOC201_google.py | {
"start": 3089,
"end": 3183
} | class ____:
# OK
def __new__(cls) -> 'Spam':
"""New!!"""
return cls()
| Spam |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_inheritance.py | {
"start": 1325,
"end": 1687
} | class ____(fixtures.TestBase, testing.AssertsExecutionResults):
def setup_test(self):
global Base
self.mapper_registry = registry()
Base = self.mapper_registry.generate_base()
def teardown_test(self):
close_all_sessions()
self.mapper_registry.dispose()
Base.metadata.drop_all(testing.db)
| DeclarativeTestBase |
python | getsentry__sentry | src/sentry/metrics/dogstatsd.py | {
"start": 690,
"end": 4573
} | class ____(MetricsBackend):
def __init__(self, prefix: str | None = None, **kwargs: Any) -> None:
# TODO(dcramer): it'd be nice if the initialize call wasn't a global
self.tags = kwargs.pop("tags", None)
kwargs["statsd_disable_buffering"] = False
initialize(**kwargs)
statsd.disable_telemetry()
# When enabled, a background thread will be used to send metric payloads to the Agent.
statsd.enable_background_sender(
sender_queue_size=SENDER_QUEUE_SIZE, sender_queue_timeout=SENDER_QUEUE_TIMEOUT
)
# Applications should call wait_for_pending() before exiting to make sure all pending payloads are sent.
atexit.register(statsd.wait_for_pending)
# Origin detection is enabled after 0.45 by default.
# Disable it since it silently fails.
# Ref: https://github.com/DataDog/datadogpy/issues/764
statsd._container_id = None
super().__init__(prefix=prefix)
def incr(
self,
key: str,
instance: str | None = None,
tags: Tags | None = None,
amount: float | int = 1,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
tags = dict(tags or ())
if self.tags:
tags.update(self.tags)
if instance:
tags["instance"] = instance
tags_list = [f"{k}:{v}" for k, v in tags.items()]
statsd.increment(self._get_key(key), amount, sample_rate=sample_rate, tags=tags_list)
def timing(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
stacklevel: int = 0,
) -> None:
tags = dict(tags or ())
if self.tags:
tags.update(self.tags)
if instance:
tags["instance"] = instance
tags_list = [f"{k}:{v}" for k, v in tags.items()]
statsd.timing(self._get_key(key), value, sample_rate=sample_rate, tags=tags_list)
def gauge(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
tags = dict(tags or ())
if self.tags:
tags.update(self.tags)
if instance:
tags["instance"] = instance
tags_list = [f"{k}:{v}" for k, v in tags.items()]
statsd.gauge(self._get_key(key), value, sample_rate=sample_rate, tags=tags_list)
def distribution(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
# We keep the same implementation for Datadog.
return self.timing(key, value, instance, tags, sample_rate)
def event(
self,
title: str,
message: str,
alert_type: str | None = None,
aggregation_key: str | None = None,
source_type_name: str | None = None,
priority: str | None = None,
instance: str | None = None,
tags: Tags | None = None,
stacklevel: int = 0,
) -> None:
tags = dict(tags or ())
if self.tags:
tags.update(self.tags)
if instance:
tags["instance"] = instance
tags_list = [f"{k}:{v}" for k, v in tags.items()]
statsd.event(
title=title,
message=message,
alert_type=alert_type,
aggregation_key=aggregation_key,
source_type_name=source_type_name,
priority=priority,
tags=tags_list,
hostname=self.host,
)
| DogStatsdMetricsBackend |
python | cython__cython | Demos/benchmarks/bm_raytrace.py | {
"start": 9678,
"end": 11391
} | class ____(SimpleSurface):
def __init__(self, **kwargs):
SimpleSurface.__init__(self, **kwargs)
self.otherColour = kwargs.get('otherColour', (0, 0, 0))
self.checkSize = kwargs.get('checkSize', 1)
def baseColourAt(self, p):
v = p - Point.ZERO
v.scale(1.0 / self.checkSize)
if ((int(abs(v.x) + 0.5)
+ int(abs(v.y) + 0.5)
+ int(abs(v.z) + 0.5)) % 2):
return self.otherColour
else:
return self.baseColour
def bench_raytrace(loops: cython.long, width, height, filename=None, timer=time.perf_counter):
i: cython.long
y: cython.long
t0 = timer()
for i in range(loops):
canvas = Canvas(width, height)
s = Scene()
s.addLight(Point(30, 30, 10))
s.addLight(Point(-10, 100, 30))
s.lookAt(Point(0, 3, 0))
s.addObject(Sphere(Point(1, 3, -10), 2),
SimpleSurface(baseColour=(1, 1, 0)))
for y in range(6):
s.addObject(Sphere(Point(-3 - y * 0.4, 2.3, -5), 0.4),
SimpleSurface(baseColour=(y / 6.0, 1 - y / 6.0, 0.5)))
s.addObject(Halfspace(Point(0, 0, 0), Vector.UP),
CheckerboardSurface())
s.render(canvas)
dt = timer() - t0
if filename:
canvas.write_ppm(filename)
return dt
def run_benchmark(repeat=True, scale: cython.long = 1):
from util import repeat_to_accuracy
from functools import partial
def single_run(scale, timer):
return bench_raytrace(scale, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timer=timer)
return repeat_to_accuracy(single_run, scale=scale, repeat=repeat)[0]
| CheckerboardSurface |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_page_view02.py | {
"start": 315,
"end": 1006
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("page_view02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with print options."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_page_view()
worksheet.set_zoom(75)
# Options to match automatic page setup.
worksheet.set_paper(9)
worksheet.vertical_dpi = 200
worksheet.write("A1", "Foo")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | ansible__ansible | lib/ansible/galaxy/collection/gpg.py | {
"start": 4300,
"end": 4498
} | class ____(GpgBaseError):
"""The signature with the keyid is good, but the signature was made by a revoked key."""
keyid: str
username: str
@dataclass(frozen=True, slots=True)
| GpgRevKeySig |
python | apache__airflow | airflow-core/src/airflow/models/dag_favorite.py | {
"start": 992,
"end": 1327
} | class ____(Base):
"""Association table model linking users to their favorite DAGs."""
__tablename__ = "dag_favorite"
user_id: Mapped[str] = mapped_column(StringID(), primary_key=True)
dag_id: Mapped[str] = mapped_column(
StringID(), ForeignKey("dag.dag_id", ondelete="CASCADE"), primary_key=True
)
| DagFavorite |
python | pytransitions__transitions | transitions/extensions/nesting.py | {
"start": 3668,
"end": 6137
} | class ____(Event):
"""An event type to work with nested states.
This subclass is NOT compatible with simple Machine instances.
"""
def trigger(self, model, *args, **kwargs):
raise RuntimeError("NestedEvent.trigger must not be called directly. Call Machine.trigger_event instead.")
def trigger_nested(self, event_data):
"""Executes all transitions that match the current state,
halting as soon as one successfully completes.
It is up to the machine's configuration of the Event whether processing happens queued (sequentially) or
whether further Events are processed as they occur. NOTE: This should only
be called by HierarchicalMachine instances.
Args:
event_data (NestedEventData): The currently processed event
Returns: boolean indicating whether or not a transition was
successfully executed (True if successful, False if not).
"""
machine = event_data.machine
model = event_data.model
state_tree = machine.build_state_tree(getattr(model, machine.model_attribute), machine.state_cls.separator)
state_tree = reduce(dict.get, machine.get_global_name(join=False), state_tree)
ordered_states = resolve_order(state_tree)
done = set()
event_data.event = self
for state_path in ordered_states:
state_name = machine.state_cls.separator.join(state_path)
if state_name not in done and state_name in self.transitions:
event_data.state = machine.get_state(state_name)
event_data.source_name = state_name
event_data.source_path = copy.copy(state_path)
self._process(event_data)
if event_data.result:
elems = state_path
while elems:
done.add(machine.state_cls.separator.join(elems))
elems.pop()
return event_data.result
def _process(self, event_data):
machine = event_data.machine
machine.callbacks(event_data.machine.prepare_event, event_data)
_LOGGER.debug("%sExecuted machine preparation callbacks before conditions.", machine.name)
for trans in self.transitions[event_data.source_name]:
event_data.transition = trans
event_data.result = trans.execute(event_data)
if event_data.result:
break
| NestedEvent |
python | walkccc__LeetCode | solutions/1955. Count Number of Special Subsequences/1955-3.py | {
"start": 0,
"end": 546
} | class ____:
def countSpecialSubsequences(self, nums: list[int]) -> int:
MOD = 1_000_000_007
n = len(nums)
# dp[j] := the number of increasing subsequences of the numbers so far that
# end in j
dp = [0] * 3
if nums[0] == 0:
dp[0] = 1
for i in range(1, n):
if nums[i] == 0:
dp[0] = dp[0] * 2 + 1
elif nums[i] == 1:
dp[1] = dp[1] * 2 + dp[0]
else: # nums[i] == 2
dp[2] = dp[2] * 2 + dp[1]
for ending in range(3):
dp[ending] %= MOD
return dp[2]
| Solution |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/auth/managers/models/resource_details.py | {
"start": 1464,
"end": 1570
} | class ____:
"""Represents the details of an asset."""
id: str | None = None
@dataclass
| AssetDetails |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-jira/components.py | {
"start": 1097,
"end": 2535
} | class ____(SubstreamPartitionRouter):
"""
We often require certain data to be fully retrieved from the parent stream before we begin requesting data from the child stream.
In this custom component, we execute stream slices twice: first, we retrieve all the parent_stream_fields,
and then we call stream slices again, this time with the previously fetched fields.
"""
def __post_init__(self, parameters: Mapping[str, Any]) -> None:
super().__post_init__(parameters)
fields_parent_stream_config, *parent_stream_configs = self.parent_stream_configs
self.fields_parent_stream_config = fields_parent_stream_config
self.parent_stream_configs = parent_stream_configs
def stream_slices(self) -> Iterable[StreamSlice]:
self.parent_stream_configs, parent_stream_configs = [self.fields_parent_stream_config], self.parent_stream_configs
fields = [s.partition[self.fields_parent_stream_config.partition_field.eval(self.config)] for s in super().stream_slices()]
fields += ["key", "status", "created", "updated"]
self.parent_stream_configs = parent_stream_configs
for stream_slice in super().stream_slices():
stream_slice = StreamSlice(
partition=stream_slice.partition, cursor_slice=stream_slice.cursor_slice, extra_fields={"fields": fields}
)
yield stream_slice
| SprintIssuesSubstreamPartitionRouter |
python | fluentpython__example-code | 10-seq-hacking/vector_v1.py | {
"start": 1751,
"end": 2725
} | class ____:
typecode = 'd'
def __init__(self, components):
self._components = array(self.typecode, components) # <1>
def __iter__(self):
return iter(self._components) # <2>
def __repr__(self):
components = reprlib.repr(self._components) # <3>
components = components[components.find('['):-1] # <4>
return 'Vector({})'.format(components)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(self._components)) # <5>
def __eq__(self, other):
return tuple(self) == tuple(other)
def __abs__(self):
return math.sqrt(sum(x * x for x in self)) # <6>
def __bool__(self):
return bool(abs(self))
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(memv) # <7>
# END VECTOR_V1
| Vector |
python | sanic-org__sanic | examples/request_stream/server.py | {
"start": 249,
"end": 1512
} | class ____(HTTPMethodView):
@stream_decorator
async def post(self, request):
result = ""
while True:
body = await request.stream.get()
if body is None:
break
result += body.decode("utf-8")
return text(result)
@app.post("/stream", stream=True)
async def handler(request):
async def streaming(response):
while True:
body = await request.stream.get()
if body is None:
break
body = body.decode("utf-8").replace("1", "A")
await response.write(body)
return stream(streaming)
@bp.put("/bp_stream", stream=True)
async def bp_handler(request):
result = ""
while True:
body = await request.stream.get()
if body is None:
break
result += body.decode("utf-8").replace("1", "A")
return text(result)
async def post_handler(request):
result = ""
while True:
body = await request.stream.get()
if body is None:
break
result += body.decode("utf-8")
return text(result)
app.blueprint(bp)
app.add_route(SimpleView.as_view(), "/method_view")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000)
| SimpleView |
python | numba__numba | numba/tests/test_target_extension.py | {
"start": 2121,
"end": 3874
} | class ____(CPUCodegen):
# This largely rips off the CPU for ease
_library_class = JITCodeLibrary
def _customize_tm_options(self, options):
# Customize the target machine options.
options["cpu"] = self._get_host_cpu_name()
arch = ll.Target.from_default_triple().name
if arch.startswith("x86"):
reloc_model = "static"
elif arch.startswith("ppc"):
reloc_model = "pic"
else:
reloc_model = "default"
options["reloc"] = reloc_model
options["codemodel"] = "jitdefault"
# Set feature attributes (such as ISA extensions)
# This overrides default feature selection by CPU model above
options["features"] = self._tm_features
# Deal with optional argument to ll.Target.create_target_machine
sig = utils.pysignature(ll.Target.create_target_machine)
if "jit" in sig.parameters:
# Mark that this is making a JIT engine
options["jit"] = True
def _customize_tm_features(self):
# For JIT target, we will use LLVM to get the feature map
return self._get_host_cpu_features()
def _add_module(self, module):
self._engine.add_module(module)
def set_env(self, env_name, env):
"""Set the environment address.
Update the GlobalVariable named *env_name* to the address of *env*.
"""
gvaddr = self._engine.get_global_value_address(env_name)
envptr = (ctypes.c_void_p * 1).from_address(gvaddr)
envptr[0] = ctypes.c_void_p(id(env))
# This is the function registry for the dpu, it just has one registry, this one!
dpu_function_registry = Registry()
# Implement a new context for the DPU target
| JITDPUCodegen |
python | getsentry__sentry | src/sentry/tasks/summaries/utils.py | {
"start": 1176,
"end": 2432
} | class ____:
def __init__(
self, timestamp: float, duration: int, organization: Organization, daily: bool = False
):
self.timestamp = timestamp
self.duration = duration
self.start = to_datetime(timestamp - duration)
self.end = to_datetime(timestamp)
self.organization: Organization = organization
self.projects_context_map: dict[int, ProjectContext | DailySummaryProjectContext] = (
{}
) # { project_id: ProjectContext }
self.project_ownership: dict[int, set[int]] = {} # { user_id: set<project_id> }
self.daily = daily
for project in organization.project_set.all():
if self.daily:
self.projects_context_map[project.id] = DailySummaryProjectContext(project)
else:
self.projects_context_map[project.id] = ProjectContext(project)
def __repr__(self) -> str:
return self.projects_context_map.__repr__()
def is_empty(self):
"""
Returns True if every project context is empty.
"""
return all(
project_ctx.check_if_project_is_empty()
for project_ctx in self.projects_context_map.values()
)
| OrganizationReportContext |
python | jina-ai__jina | tests/unit/serve/executors/test_executor.py | {
"start": 1006,
"end": 20697
} | class ____(Executor):
@requests
def foo(self, docs, **kwargs):
docs.texts = ['foo' for _ in docs]
@requests(on='/bar')
def bar(self, docs, **kwargs):
docs.texts = ['bar' for _ in docs]
@pytest.fixture()
def exposed_port():
port = random_port()
yield port
@pytest.fixture(autouse=False)
def served_exec(request: FixtureRequest, exposed_port):
import threading
import time
def serve_exec(**kwargs):
MyServeExec.serve(**kwargs)
e = threading.Event()
kwargs = {'port': exposed_port, 'stop_event': e}
enable_dynamic_batching = request.param
if enable_dynamic_batching:
kwargs['uses_dynamic_batching'] = {
'/bar': {'preferred_batch_size': 4, 'timeout': 5000}
}
t = threading.Thread(
name='serve-exec',
target=serve_exec,
kwargs=kwargs,
)
t.start()
time.sleep(3) # allow Flow to start
yield
e.set() # set event and stop (unblock) the Flow
t.join()
@pytest.mark.skip('jinahub not available')
@pytest.mark.parametrize('uses', ['jinaai://jina-ai/DummyHubExecutor'])
def test_executor_load_from_hub(uses):
exec = Executor.from_hub(uses, uses_metas={'name': 'hello123'})
da = DocumentArray([Document()])
exec.foo(da)
assert da.texts == ['hello']
assert exec.metas.name == 'hello123'
@pytest.mark.skip('jinahub not available')
def test_executor_import_with_external_dependencies(capsys):
ex = Executor.load_config('../../hubble-executor/config.yml')
assert ex.bar == 123
ex.foo()
out, err = capsys.readouterr()
assert 'hello' in out
def test_executor_with_pymodule_path():
with pytest.raises(FileNotFoundError):
_ = Executor.load_config(
'''
jtype: BaseExecutor
py_modules:
- jina.no_valide.executor
'''
)
ex = Executor.load_config(
'''
jtype: MyExecutor
with:
bar: 123
py_modules:
- unit.serve.executors.dummy_executor
'''
)
assert ex.bar == 123
assert ex.process(DocumentArray([Document()]))[0].text == 'hello world'
def test_flow_uses_with_pymodule_path():
with Flow.load_config(
'''
jtype: Flow
executors:
- uses: unit.serve.executors.dummy_executor.MyExecutor
uses_with:
bar: 123
'''
):
pass
with Flow().add(
uses='unit.serve.executors.dummy_executor.MyExecutor', uses_with={'bar': 123}
):
pass
with pytest.raises(RuntimeFailToStart):
with Flow.load_config(
'''
jtype: Flow
executors:
- uses: jina.no_valide.executor
uses_with:
bar: 123
'''
):
pass
@property
def workspace(self) -> str:
"""
Get the path of the current shard.
:return: returns the workspace of the shard of this Executor.
"""
return os.path.abspath(
self.metas.workspace
or (
os.path.join(self.runtime_args.workspace, self.metas.name)
if self.metas.shard_id == -1
else os.path.join(
self.runtime_args.workspace, self.metas.name, self.metas.shard_id
)
)
)
@pytest.fixture
def shard_id(request):
return request.param
@pytest.fixture
def test_metas_workspace_simple(tmpdir):
metas = get_default_metas()
metas['workspace'] = str(tmpdir)
metas['name'] = 'test'
return metas
@pytest.fixture
def test_bad_metas_workspace(tmpdir):
metas = get_default_metas()
return metas
@pytest.fixture
def test_metas_workspace_replica_pods(tmpdir, shard_id):
metas = get_default_metas()
metas['workspace'] = str(tmpdir)
metas['name'] = 'test'
metas['shard_id'] = shard_id
return metas
def test_executor_workspace_simple(test_metas_workspace_simple):
executor = Executor(metas=test_metas_workspace_simple)
assert executor.workspace == os.path.abspath(
os.path.join(
test_metas_workspace_simple['workspace'],
test_metas_workspace_simple['name'],
)
)
def test_executor_workspace_simple_workspace(tmpdir):
runtime_workspace = os.path.join(tmpdir, 'test2')
workspace = os.path.join(tmpdir, 'some_folder')
name = 'test_meta'
executor = Executor(metas={'name': name, 'workspace': workspace})
assert executor.workspace == os.path.abspath(os.path.join(workspace, name))
executor = Executor(metas={'name': name}, runtime_args={'workspace': workspace})
assert executor.workspace == os.path.abspath(os.path.join(workspace, name))
# metas after runtime_args
executor = Executor(
metas={'name': name, 'workspace': workspace},
runtime_args={'workspace': runtime_workspace},
)
assert executor.workspace == os.path.abspath(os.path.join(runtime_workspace, name))
executor = Executor(
metas={'name': name, 'workspace': workspace},
runtime_args={'shard_id': 1},
)
assert executor.workspace == os.path.abspath(os.path.join(workspace, name, '1'))
executor = Executor(
metas={'name': name},
runtime_args={'workspace': workspace, 'shard_id': 1},
)
assert executor.workspace == os.path.abspath(os.path.join(workspace, name, '1'))
@pytest.mark.parametrize('shard_id', [0, 1, 2], indirect=True)
def test_executor_workspace(test_metas_workspace_replica_pods, shard_id):
executor = Executor(
metas={'name': test_metas_workspace_replica_pods['name']},
runtime_args=test_metas_workspace_replica_pods,
)
assert executor.workspace == os.path.abspath(
os.path.join(
test_metas_workspace_replica_pods['workspace'],
test_metas_workspace_replica_pods['name'],
str(shard_id),
)
)
@pytest.mark.parametrize('shard_id', [None, -1], indirect=True)
def test_executor_workspace_parent_replica_nopea(
test_metas_workspace_replica_pods, shard_id
):
executor = Executor(
metas={'name': test_metas_workspace_replica_pods['name']},
runtime_args=test_metas_workspace_replica_pods,
)
assert executor.workspace == os.path.abspath(
os.path.join(
test_metas_workspace_replica_pods['workspace'],
test_metas_workspace_replica_pods['name'],
)
)
@pytest.mark.parametrize('shard_id', [0, 1, 2], indirect=True)
def test_executor_workspace_parent_noreplica_pod(
test_metas_workspace_replica_pods, shard_id
):
executor = Executor(
metas={'name': test_metas_workspace_replica_pods['name']},
runtime_args=test_metas_workspace_replica_pods,
)
assert executor.workspace == os.path.abspath(
os.path.join(
test_metas_workspace_replica_pods['workspace'],
test_metas_workspace_replica_pods['name'],
str(shard_id),
)
)
@pytest.mark.parametrize('shard_id', [None, -1], indirect=True)
def test_executor_workspace_parent_noreplica_nopea(
test_metas_workspace_replica_pods, shard_id
):
executor = Executor(
metas={'name': test_metas_workspace_replica_pods['name']},
runtime_args=test_metas_workspace_replica_pods,
)
assert executor.workspace == os.path.abspath(
os.path.join(
test_metas_workspace_replica_pods['workspace'],
test_metas_workspace_replica_pods['name'],
)
)
def test_workspace_not_exists(tmpdir):
class MyExec(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do(self, *args, **kwargs):
with open(
os.path.join(self.workspace, 'text.txt'), 'w', encoding='utf-8'
) as f:
f.write('here!')
e = MyExec(metas={'workspace': tmpdir})
e.do()
@pytest.mark.parametrize(
'uses_requests, expected',
[
(None, {'/foo', '/default', '*'}),
({'/nofoo': 'foo'}, {'/nofoo', '/default', '*'}),
({'/nofoo': 'foo', '/new': 'default'}, {'/nofoo', '/new', '*'}),
({'/new': 'default'}, {'/foo', '/new', '*'}),
({'/nofoo': 'foo', '/new': 'all'}, {'/nofoo', '/default', '/new'}),
({'/new': 'all'}, {'/foo', '/default', '/new'}),
],
)
def test_override_requests(uses_requests, expected):
from jina.serve.executors import __dry_run_endpoint__
expected.add(__dry_run_endpoint__)
class OverrideExec(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@requests()
def default(self, *args, **kwargs):
pass
@requests(on='*')
def all(self, *args, **kwargs):
pass
@requests(on='/foo')
def foo(self, *args, **kwargs):
pass
exec = OverrideExec(requests=uses_requests)
assert expected == set(exec.requests.keys())
def test_map_nested():
class NestedExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
def bar(d: Document):
d.text = 'hello'
return d
docs.apply(bar)
return docs
N = 2
da = DocumentArray.empty(N)
exec = NestedExecutor()
da1 = exec.foo(da)
assert da1.texts == ['hello'] * N
@pytest.mark.asyncio
async def test_async():
class AsyncExecutor(Executor):
@requests
async def foo(self, docs: DocumentArray, **kwargs):
for d in docs:
d.text = 'hello'
return docs
N = 2
da = DocumentArray.empty(N)
exec = AsyncExecutor()
da1 = await exec.foo(da)
assert da1.texts == ['hello'] * N
def set_hello(d: Document):
d.text = 'hello'
return d
@pytest.mark.asyncio
async def test_async_apply():
class AsyncExecutor(Executor):
@requests
async def foo(self, docs: DocumentArray, **kwargs):
docs.apply(set_hello)
return docs
N = 2
da = DocumentArray.empty(N)
exec = AsyncExecutor()
da1 = await exec.foo(da)
assert da1.texts == ['hello'] * N
@pytest.mark.parametrize('served_exec', [False, True], indirect=True)
def test_serve(served_exec, exposed_port):
docs = Client(port=exposed_port).post(on='/bar', inputs=DocumentArray.empty(5))
assert docs.texts == ['bar' for _ in docs]
def test_set_workspace(tmpdir):
complete_workspace = os.path.abspath(os.path.join(tmpdir, 'WorkspaceExec', '0'))
with Flow().add(uses=WorkspaceExec, workspace=str(tmpdir)) as f:
resp = f.post(on='/foo', inputs=Document())
assert resp[0].text == complete_workspace
with Flow().add(uses=WorkspaceExec, uses_metas={'workspace': str(tmpdir)}) as f:
resp = f.post(on='/foo', inputs=Document())
assert resp[0].text == complete_workspace
complete_workspace_no_replicas = os.path.abspath(
os.path.join(tmpdir, 'WorkspaceExec')
)
assert (
WorkspaceExec(workspace=str(tmpdir)).workspace == complete_workspace_no_replicas
)
def test_default_workspace(tmpdir):
with Flow().add(uses=WorkspaceExec) as f:
resp = f.post(on='/foo', inputs=Document())
assert resp[0].text
result_workspace = resp[0].text
assert result_workspace == os.path.join(__cache_path__, 'WorkspaceExec', '0')
@pytest.mark.skip('Hub not available')
@pytest.mark.parametrize(
'exec_type',
[Executor.StandaloneExecutorType.EXTERNAL, Executor.StandaloneExecutorType.SHARED],
)
@pytest.mark.parametrize(
'uses',
['jinahub+docker://DummyHubExecutor', 'jinaai+docker://jina-ai/DummyHubExecutor'],
)
def test_to_k8s_yaml(tmpdir, exec_type, uses):
Executor.to_kubernetes_yaml(
output_base_path=tmpdir,
uses=uses,
executor_type=exec_type,
)
with open(
os.path.join(tmpdir, 'executor0', 'executor0.yml'), encoding='utf-8'
) as f:
exec_yaml = list(yaml.safe_load_all(f))[-1]
assert exec_yaml['spec']['template']['spec']['containers'][0][
'image'
].startswith('registry')
if exec_type == Executor.StandaloneExecutorType.SHARED:
assert set(os.listdir(tmpdir)) == {
'executor0',
}
else:
assert set(os.listdir(tmpdir)) == {
'executor0',
'gateway',
}
with open(
os.path.join(tmpdir, 'gateway', 'gateway.yml'), encoding='utf-8'
) as f:
gatewayyaml = list(yaml.safe_load_all(f))[-1]
assert (
gatewayyaml['spec']['template']['spec']['containers'][0]['ports'][0][
'containerPort'
]
== 8080
)
gateway_args = gatewayyaml['spec']['template']['spec']['containers'][0][
'args'
]
assert gateway_args[gateway_args.index('--port') + 1] == '8080'
@pytest.mark.skip('jinahub not available')
@pytest.mark.parametrize(
'exec_type',
[Executor.StandaloneExecutorType.EXTERNAL, Executor.StandaloneExecutorType.SHARED],
)
@pytest.mark.parametrize(
'uses',
['jinaai+docker://jina-ai/DummyHubExecutor'],
)
def test_to_docker_compose_yaml(tmpdir, exec_type, uses):
compose_file = os.path.join(tmpdir, 'compose.yml')
Executor.to_docker_compose_yaml(
output_path=compose_file,
port_expose=2020,
uses=uses,
executor_type=exec_type,
)
with open(compose_file, encoding='utf-8') as f:
services = list(yaml.safe_load_all(f))[0]['services']
assert services['executor0']['image'].startswith('registry')
if exec_type == Executor.StandaloneExecutorType.SHARED:
assert len(services) == 1
else:
assert len(services) == 2
assert services['gateway']['ports'][0] == '2020:2020'
gateway_args = services['gateway']['command']
assert gateway_args[gateway_args.index('--port') + 1] == '2020'
def _create_test_data_message(counter=0):
return list(request_generator('/', DocumentArray([Document(text=str(counter))])))[0]
@pytest.mark.asyncio
async def test_blocking_sync_exec():
SLEEP_TIME = 0.01
REQUEST_COUNT = 100
class BlockingExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
time.sleep(SLEEP_TIME)
for doc in docs:
doc.text = 'BlockingExecutor'
return docs
args = _generate_pod_args(['--uses', 'BlockingExecutor'])
cancel_event = multiprocessing.Event()
def start_runtime(args, cancel_event):
with AsyncNewLoopRuntime(
args, cancel_event=cancel_event, req_handler_cls=WorkerRequestHandler
) as runtime:
runtime.run_forever()
runtime_thread = Process(
target=start_runtime,
args=(args, cancel_event),
daemon=True,
)
runtime_thread.start()
assert BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'{args.host}:{args.port[0]}',
ready_or_shutdown_event=Event(),
)
send_tasks = []
start_time = time.time()
for i in range(REQUEST_COUNT):
send_tasks.append(
asyncio.create_task(
send_request_async(
_create_test_data_message(),
target=f'{args.host}:{args.port[0]}',
timeout=3.0,
)
)
)
results = await asyncio.gather(*send_tasks)
end_time = time.time()
assert all(result.docs.texts == ['BlockingExecutor'] for result in results)
assert end_time - start_time < (REQUEST_COUNT * SLEEP_TIME) * 2.0
cancel_event.set()
runtime_thread.join()
def test_executors_inheritance_binding():
class A(Executor):
@requests(on='/index')
def a(self, **kwargs):
pass
@requests
def default_a(self, **kwargs):
pass
class B(A):
@requests(on='/index')
def b(self, **kwargs):
pass
class C(B):
pass
assert set(A().requests.keys()) == {'/index', '/default', '_jina_dry_run_'}
assert A().requests['/index'].fn == A.a
assert A().requests['/default'].fn == A.default_a
assert set(B().requests.keys()) == {'/index', '/default', '_jina_dry_run_'}
assert B().requests['/index'].fn == B.b
assert B().requests['/default'].fn == A.default_a
assert set(C().requests.keys()) == {'/index', '/default', '_jina_dry_run_'}
assert C().requests['/index'].fn == B.b
assert C().requests['/default'].fn == A.default_a
@pytest.mark.parametrize(
'inputs,expected_values',
[
(
dict(preferred_batch_size=4, timeout=5_000),
dict(preferred_batch_size=4, timeout=5_000, flush_all=False, use_custom_metric=False, custom_metric=None, use_dynamic_batching=True),
),
(
dict(preferred_batch_size=4, timeout=5_000, flush_all=True),
dict(preferred_batch_size=4, timeout=5_000, flush_all=True, use_custom_metric=False, custom_metric=None, use_dynamic_batching=True),
),
(
dict(preferred_batch_size=4),
dict(preferred_batch_size=4, timeout=10_000, flush_all=False, use_custom_metric=False, custom_metric=None, use_dynamic_batching=True),
),
],
)
def test_dynamic_batching(inputs, expected_values):
class MyExec(Executor):
@dynamic_batching(**inputs)
def foo(self, docs, **kwargs):
pass
exec = MyExec()
assert exec.dynamic_batching['foo'] == expected_values
@pytest.mark.parametrize(
'inputs,expected_values',
[
(
dict(preferred_batch_size=4, timeout=5_000),
dict(preferred_batch_size=4, timeout=5_000, flush_all=False, use_custom_metric=False, custom_metric=None, use_dynamic_batching=True),
),
(
dict(preferred_batch_size=4, timeout=5_000, flush_all=True),
dict(preferred_batch_size=4, timeout=5_000, flush_all=True, use_custom_metric=False, custom_metric=None, use_dynamic_batching=True),
),
(
dict(preferred_batch_size=4),
dict(preferred_batch_size=4, timeout=10_000, flush_all=False, use_custom_metric=False, custom_metric=None, use_dynamic_batching=True),
),
],
)
def test_combined_decorators(inputs, expected_values):
class MyExecutor(Executor):
@dynamic_batching(**inputs)
@requests(on='/foo')
def foo(self, docs, **kwargs):
pass
exec = MyExecutor()
assert exec.dynamic_batching['foo'] == expected_values
class MyExecutor2(Executor):
@requests(on='/foo')
@dynamic_batching(**inputs)
def foo(self, docs, **kwargs):
pass
exec = MyExecutor2()
assert exec.dynamic_batching['foo'] == expected_values
def test_write_decorator():
class WriteExecutor(Executor):
@write
@requests(on='/delete')
def delete(self, **kwargs):
pass
@requests(on='/bar')
@write
def bar(self, **kwargs):
pass
@requests(on='/index')
@write()
def index(self, **kwargs):
pass
@write()
@requests(on='/update')
def update(self, **kwargs):
pass
@requests(on='/search')
def search(self, **kwargs):
pass
@requests
def foo(self, **kwargs):
pass
exec = WriteExecutor()
assert set(exec.write_endpoints) == {'/index', '/update', '/delete', '/bar'}
| MyServeExec |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 11070,
"end": 12592
} | class ____(HasExpressionLookup, TypeEngine[int]):
"""A type for ``int`` integers."""
__visit_name__ = "integer"
operator_classes = OperatorClass.INTEGER
if TYPE_CHECKING:
@util.ro_memoized_property
def _type_affinity(self) -> Type[Integer]: ...
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
@property
def python_type(self):
return int
def _resolve_for_literal(self, value):
if value.bit_length() >= 32:
return _BIGINTEGER
else:
return self
def literal_processor(self, dialect):
def process(value):
return str(int(value))
return process
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Date: Date,
Integer: self.__class__,
Numeric: Numeric,
Float: Float,
},
operators.mul: {
Interval: Interval,
Integer: self.__class__,
Numeric: Numeric,
Float: Float,
},
operators.truediv: {
Integer: Numeric,
Numeric: Numeric,
Float: Float,
},
operators.floordiv: {Integer: self.__class__, Numeric: Numeric},
operators.sub: {
Integer: self.__class__,
Numeric: Numeric,
Float: Float,
},
}
| Integer |
python | Textualize__rich | benchmarks/benchmarks.py | {
"start": 314,
"end": 1828
} | class ____:
def setup(self):
self.console = Console(
file=StringIO(), color_system="truecolor", legacy_windows=False
)
self.len_lorem_ipsum = len(snippets.LOREM_IPSUM)
self.text = Text.from_markup(snippets.MARKUP)
def time_wrapping(self):
self.text.wrap(self.console, 12, overflow="fold")
def time_indent_guides(self):
Text(snippets.PYTHON_SNIPPET).with_indent_guides()
def time_fit(self):
Text(snippets.LOREM_IPSUM).fit(12)
def time_split(self):
self.text.split()
def time_divide(self):
Text(snippets.LOREM_IPSUM).divide(range(20, 100, 4))
def time_align_center(self):
Text(snippets.LOREM_IPSUM).align("center", width=self.len_lorem_ipsum * 3)
def time_render(self):
list(self.text.render(self.console))
def time_wrapping_unicode_heavy(self):
Text(snippets.UNICODE_HEAVY_TEXT).wrap(self.console, 12, overflow="fold")
def time_fit_unicode_heavy(self):
Text(snippets.UNICODE_HEAVY_TEXT).fit(12)
def time_split_unicode_heavy(self):
Text(snippets.UNICODE_HEAVY_TEXT).split()
def time_divide_unicode_heavy(self):
self.text.divide(range(20, 100, 4))
def time_align_center_unicode_heavy(self):
Text(snippets.UNICODE_HEAVY_TEXT).align(
"center", width=self.len_lorem_ipsum * 3
)
def time_render_unicode_heavy(self):
list(Text(snippets.UNICODE_HEAVY_TEXT).render(self.console))
| TextSuite |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/blobstore/gcs/main.py | {
"start": 2492,
"end": 3807
} | class ____(blobstore_handlers.BlobstoreDownloadHandler):
def get(self):
# Get the default Cloud Storage Bucket name and create a file name for
# the object in Cloud Storage.
bucket = app_identity.get_default_gcs_bucket_name()
# Cloud Storage file names are in the format /bucket/object.
filename = "/{}/blobstore_serving_demo".format(bucket)
# Create a file in Google Cloud Storage and write something to it.
with cloudstorage.open(filename, "w") as filehandle:
filehandle.write("abcde\n")
# In order to read the contents of the file using the Blobstore API,
# you must create a blob_key from the Cloud Storage file name.
# Blobstore expects the filename to be in the format of:
# /gs/bucket/object
blobstore_filename = "/gs{}".format(filename)
blob_key = blobstore.create_gs_key(blobstore_filename)
# BlobstoreDownloadHandler serves the file from Google Cloud Storage to
# your computer using blob_key.
self.send_blob(blob_key)
app = webapp2.WSGIApplication(
[
("/", CreateAndReadFileHandler),
("/blobstore/read", CreateAndReadFileHandler),
("/blobstore/serve", CreateAndServeFileHandler),
],
debug=True,
)
| CreateAndServeFileHandler |
python | optuna__optuna | optuna/storages/_rdb/alembic/versions/v3.0.0.d.py | {
"start": 805,
"end": 5827
} | class ____(BaseModel):
class TrialValueType(enum.Enum):
FINITE = 1
INF_POS = 2
INF_NEG = 3
__tablename__ = "trial_values"
trial_value_id = sa.Column(sa.Integer, primary_key=True)
value = sa.Column(sa.Float(precision=FLOAT_PRECISION), nullable=True)
value_type = sa.Column(sa.Enum(TrialValueType), nullable=False)
@classmethod
def value_to_stored_repr(
cls,
value: float,
) -> tuple[float | None, TrialValueType]:
if value == float("inf"):
return None, cls.TrialValueType.INF_POS
elif value == float("-inf"):
return None, cls.TrialValueType.INF_NEG
else:
return value, cls.TrialValueType.FINITE
@classmethod
def stored_repr_to_value(cls, value: float | None, float_type: TrialValueType) -> float:
if float_type == cls.TrialValueType.INF_POS:
assert value is None
return float("inf")
elif float_type == cls.TrialValueType.INF_NEG:
assert value is None
return float("-inf")
else:
assert float_type == cls.TrialValueType.FINITE
assert value is not None
return value
def upgrade():
bind = op.get_bind()
inspector = sa.inspect(bind)
column_names = [c["name"] for c in inspector.get_columns("trial_values")]
sa.Enum(TrialValueModel.TrialValueType).create(bind, checkfirst=True)
# MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE <tbl_name>
# ADD COLUMN <col_name> ... DEFAULT "FINITE"', but seemingly Alembic
# does not support such a SQL statement. So first add a column with schema-level
# default value setting, then remove it by `batch_op.alter_column()`.
if "value_type" not in column_names:
with op.batch_alter_table("trial_values") as batch_op:
batch_op.add_column(
sa.Column(
"value_type",
sa.Enum("FINITE", "INF_POS", "INF_NEG", name="trialvaluetype"),
nullable=False,
server_default="FINITE",
),
)
with op.batch_alter_table("trial_values") as batch_op:
batch_op.alter_column(
"value_type",
existing_type=sa.Enum("FINITE", "INF_POS", "INF_NEG", name="trialvaluetype"),
existing_nullable=False,
server_default=None,
)
batch_op.alter_column(
"value",
existing_type=sa.Float(precision=FLOAT_PRECISION),
nullable=True,
)
session = orm.Session(bind=bind)
try:
records = (
session.query(TrialValueModel)
.filter(
sa.or_(
TrialValueModel.value > 1e16,
TrialValueModel.value < -1e16,
)
)
.all()
)
mapping = []
for r in records:
value: float
if np.isclose(r.value, RDB_MAX_FLOAT) or np.isposinf(r.value):
value = float("inf")
elif np.isclose(r.value, RDB_MIN_FLOAT) or np.isneginf(r.value):
value = float("-inf")
else:
value = r.value
(
stored_value,
float_type,
) = TrialValueModel.value_to_stored_repr(value)
mapping.append(
{
"trial_value_id": r.trial_value_id,
"value_type": float_type,
"value": stored_value,
}
)
session.bulk_update_mappings(TrialValueModel, mapping)
session.commit()
except SQLAlchemyError as e:
session.rollback()
raise e
finally:
session.close()
def downgrade():
bind = op.get_bind()
session = orm.Session(bind=bind)
try:
records = session.query(TrialValueModel).all()
mapping = []
for r in records:
if r.value_type == TrialValueModel.TrialValueType.FINITE:
continue
_value = r.value
if r.value_type == TrialValueModel.TrialValueType.INF_POS:
_value = RDB_MAX_FLOAT
else:
_value = RDB_MIN_FLOAT
mapping.append(
{
"trial_value_id": r.trial_value_id,
"value": _value,
}
)
session.bulk_update_mappings(TrialValueModel, mapping)
session.commit()
except SQLAlchemyError as e:
session.rollback()
raise e
finally:
session.close()
with op.batch_alter_table("trial_values", schema=None) as batch_op:
batch_op.drop_column("value_type")
batch_op.alter_column(
"value",
existing_type=sa.Float(precision=FLOAT_PRECISION),
nullable=False,
)
sa.Enum(TrialValueModel.TrialValueType).drop(bind, checkfirst=True)
| TrialValueModel |
python | mlflow__mlflow | tests/spark/test_spark_model_export.py | {
"start": 2621,
"end": 40635
} | class ____(NamedTuple):
model: Any
spark_df: Any
pandas_df: Any
predictions: Any
def _get_spark_session_with_retry(max_tries=3):
conf = pyspark.SparkConf()
for attempt in range(max_tries):
try:
return get_spark_session(conf)
except Exception as e:
if attempt >= max_tries - 1:
raise
_logger.exception(
f"Attempt {attempt} to create a SparkSession failed ({e!r}), retrying..."
)
# Specify `autouse=True` to ensure that a context is created
# before any tests are executed. This ensures that the Hadoop filesystem
# does not create its own SparkContext.
@pytest.fixture(scope="module")
def spark():
if Version(pyspark.__version__) < Version("3.1"):
# A workaround for this issue:
# https://stackoverflow.com/questions/62109276/errorjava-lang-unsupportedoperationexception-for-pyspark-pandas-udf-documenta
spark_home = (
os.environ.get("SPARK_HOME")
if "SPARK_HOME" in os.environ
else os.path.dirname(pyspark.__file__)
)
conf_dir = os.path.join(spark_home, "conf")
os.makedirs(conf_dir, exist_ok=True)
with open(os.path.join(conf_dir, "spark-defaults.conf"), "w") as f:
conf = """
spark.driver.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true"
spark.executor.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true"
"""
f.write(conf)
with _get_spark_session_with_retry() as spark:
yield spark
def iris_pandas_df():
iris = datasets.load_iris()
X = iris.data
y = iris.target
feature_names = ["0", "1", "2", "3"]
df = pd.DataFrame(X, columns=feature_names) # to make spark_udf work
df["label"] = pd.Series(y)
return df
@pytest.fixture(scope="module")
def iris_df(spark):
pdf = iris_pandas_df()
feature_names = list(pdf.drop("label", axis=1).columns)
iris_spark_df = spark.createDataFrame(pdf)
return feature_names, pdf, iris_spark_df
@pytest.fixture(scope="module")
def iris_signature():
return ModelSignature(
inputs=Schema(
[
ColSpec(name="0", type=DataType.double),
ColSpec(name="1", type=DataType.double),
ColSpec(name="2", type=DataType.double),
ColSpec(name="3", type=DataType.double),
]
),
outputs=Schema([ColSpec(type=DataType.double)]),
)
@pytest.fixture(scope="module")
def spark_model_iris(iris_df):
feature_names, iris_pandas_df, iris_spark_df = iris_df
assembler = VectorAssembler(inputCols=feature_names, outputCol="features")
lr = LogisticRegression(maxIter=50, regParam=0.1, elasticNetParam=0.8)
pipeline = Pipeline(stages=[assembler, lr])
# Fit the model
model = pipeline.fit(iris_spark_df)
preds_df = model.transform(iris_spark_df)
preds = [x.prediction for x in preds_df.select("prediction").collect()]
return SparkModelWithData(
model=model, spark_df=iris_spark_df, pandas_df=iris_pandas_df, predictions=preds
)
@pytest.fixture(scope="module")
def spark_model_transformer(iris_df):
feature_names, iris_pandas_df, iris_spark_df = iris_df
assembler = VectorAssembler(inputCols=feature_names, outputCol="features")
# Fit the model
preds_df = assembler.transform(iris_spark_df)
preds = [x.features for x in preds_df.select("features").collect()]
return SparkModelWithData(
model=assembler, spark_df=iris_spark_df, pandas_df=iris_pandas_df, predictions=preds
)
@pytest.fixture(scope="module")
def spark_model_estimator(iris_df):
feature_names, iris_pandas_df, iris_spark_df = iris_df
assembler = VectorAssembler(inputCols=feature_names, outputCol="features")
features_df = assembler.transform(iris_spark_df)
lr = LogisticRegression(maxIter=50, regParam=0.1, elasticNetParam=0.8)
# Fit the model
model = lr.fit(features_df)
preds_df = model.transform(features_df)
preds = [x.prediction for x in preds_df.select("prediction").collect()]
return SparkModelWithData(
model=model, spark_df=features_df, pandas_df=iris_pandas_df, predictions=preds
)
@pytest.fixture
def model_path(tmp_path):
return os.path.join(tmp_path, "model")
@pytest.mark.usefixtures("spark")
def test_hadoop_filesystem(tmp_path):
# copy local dir to and back from HadoopFS and make sure the results match
from mlflow.spark import _HadoopFileSystem as FS
test_dir_0 = os.path.join(tmp_path, "expected")
test_file_0 = os.path.join(test_dir_0, "root", "file_0")
test_dir_1 = os.path.join(test_dir_0, "root", "subdir")
test_file_1 = os.path.join(test_dir_1, "file_1")
os.makedirs(os.path.dirname(test_file_0))
with open(test_file_0, "w") as f:
f.write("test0")
os.makedirs(os.path.dirname(test_file_1))
with open(test_file_1, "w") as f:
f.write("test1")
remote = "/tmp/mlflow/test0"
# File should not be copied in this case
assert os.path.abspath(test_dir_0) == FS.maybe_copy_from_local_file(test_dir_0, remote)
FS.copy_from_local_file(test_dir_0, remote, remove_src=False)
local = os.path.join(tmp_path, "actual")
FS.copy_to_local_file(remote, local, remove_src=True)
assert sorted(os.listdir(os.path.join(local, "root"))) == sorted(
["subdir", "file_0", ".file_0.crc"]
)
assert sorted(os.listdir(os.path.join(local, "root", "subdir"))) == sorted(
["file_1", ".file_1.crc"]
)
# compare the files
with open(os.path.join(test_dir_0, "root", "file_0")) as expected_f:
with open(os.path.join(local, "root", "file_0")) as actual_f:
assert expected_f.read() == actual_f.read()
with open(os.path.join(test_dir_0, "root", "subdir", "file_1")) as expected_f:
with open(os.path.join(local, "root", "subdir", "file_1")) as actual_f:
assert expected_f.read() == actual_f.read()
# make sure we cleanup
assert not os.path.exists(FS._remote_path(remote).toString()) # skip file: prefix
FS.copy_from_local_file(test_dir_0, remote, remove_src=False)
assert os.path.exists(FS._remote_path(remote).toString()) # skip file: prefix
FS.delete(remote)
assert not os.path.exists(FS._remote_path(remote).toString()) # skip file: prefix
def test_model_export(spark_model_iris, model_path, spark_custom_env):
mlflow.spark.save_model(spark_model_iris.model, path=model_path, conda_env=spark_custom_env)
# 1. score and compare reloaded sparkml model
reloaded_model = mlflow.spark.load_model(model_uri=model_path)
preds_df = reloaded_model.transform(spark_model_iris.spark_df)
preds1 = [x.prediction for x in preds_df.select("prediction").collect()]
assert spark_model_iris.predictions == preds1
m = pyfunc.load_model(model_path)
# 2. score and compare reloaded pyfunc
preds2 = m.predict(spark_model_iris.pandas_df)
assert spark_model_iris.predictions == preds2
# 3. score and compare reloaded pyfunc Spark udf
preds3 = score_model_as_udf(model_uri=model_path, pandas_df=spark_model_iris.pandas_df)
assert spark_model_iris.predictions == preds3
assert os.path.exists(MLFLOW_DFS_TMP.get())
def test_model_export_with_signature_and_examples(spark_model_iris, iris_signature):
features_df = spark_model_iris.pandas_df.drop("label", axis=1)
example_ = features_df.head(3)
for signature in (None, iris_signature):
for example in (None, example_):
with TempDir() as tmp:
path = tmp.path("model")
mlflow.spark.save_model(
spark_model_iris.model, path=path, signature=signature, input_example=example
)
mlflow_model = Model.load(path)
if example is None and signature is None:
assert mlflow_model.signature is None
else:
assert mlflow_model.signature == iris_signature
if example is None:
assert mlflow_model.saved_input_example_info is None
else:
assert all((_read_example(mlflow_model, path) == example).all())
def test_model_export_raise_when_example_is_spark_dataframe(spark, spark_model_iris, model_path):
features_df = spark_model_iris.pandas_df.drop("label", axis=1)
example = spark.createDataFrame(features_df.head(3))
with pytest.raises(MlflowException, match="Examples can not be provided as Spark Dataframe."):
mlflow.spark.save_model(spark_model_iris.model, path=model_path, input_example=example)
def test_log_model_with_signature_and_examples(spark_model_iris, iris_signature):
features_df = spark_model_iris.pandas_df.drop("label", axis=1)
example_ = features_df.head(3)
artifact_path = "model"
for signature in (None, iris_signature):
for example in (None, example_):
with mlflow.start_run():
model_info = mlflow.spark.log_model(
spark_model_iris.model,
artifact_path=artifact_path,
signature=signature,
input_example=example,
)
mlflow_model = Model.load(model_info.model_uri)
if example is None and signature is None:
assert mlflow_model.signature is None
else:
assert mlflow_model.signature == iris_signature
if example is None:
assert mlflow_model.saved_input_example_info is None
else:
assert all((_read_example(mlflow_model, model_info.model_uri) == example).all())
def test_estimator_model_export(spark_model_estimator, model_path, spark_custom_env):
mlflow.spark.save_model(
spark_model_estimator.model, path=model_path, conda_env=spark_custom_env
)
# score and compare the reloaded sparkml model
reloaded_model = mlflow.spark.load_model(model_uri=model_path)
preds_df = reloaded_model.transform(spark_model_estimator.spark_df)
preds = [x.prediction for x in preds_df.select("prediction").collect()]
assert spark_model_estimator.predictions == preds
# 2. score and compare reloaded pyfunc
m = pyfunc.load_model(model_path)
preds2 = m.predict(spark_model_estimator.spark_df.toPandas())
assert spark_model_estimator.predictions == preds2
def test_transformer_model_export(spark_model_transformer, model_path, spark_custom_env):
mlflow.spark.save_model(
spark_model_transformer.model, path=model_path, conda_env=spark_custom_env
)
# score and compare the reloaded sparkml model
reloaded_model = mlflow.spark.load_model(model_uri=model_path)
preds_df = reloaded_model.transform(spark_model_transformer.spark_df)
preds = [x.features for x in preds_df.select("features").collect()]
assert spark_model_transformer.predictions == preds
# 2. score and compare reloaded pyfunc
m = pyfunc.load_model(model_path)
preds2 = m.predict(spark_model_transformer.spark_df.toPandas())
assert spark_model_transformer.predictions == preds2
@pytest.mark.skipif(
PYSPARK_VERSION.is_devrelease, reason="this test does not support PySpark dev version."
)
def test_model_deployment(spark_model_iris, model_path, spark_custom_env, monkeypatch):
mlflow.spark.save_model(
spark_model_iris.model,
path=model_path,
conda_env=spark_custom_env,
)
scoring_response = score_model_in_sagemaker_docker_container(
model_uri=model_path,
data=spark_model_iris.pandas_df,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
flavor=mlflow.pyfunc.FLAVOR_NAME,
)
from mlflow.deployments import PredictionsResponse
np.testing.assert_array_almost_equal(
spark_model_iris.predictions,
PredictionsResponse.from_json(scoring_response.content).get_predictions(
predictions_format="ndarray"
),
decimal=4,
)
@pytest.mark.skipif(
"dev" in pyspark.__version__,
reason="The dev version of pyspark built from the source doesn't exist on PyPI or Anaconda",
)
def test_sagemaker_docker_model_scoring_with_default_conda_env(spark_model_iris, model_path):
mlflow.spark.save_model(
spark_model_iris.model, path=model_path, extra_pip_requirements=["/opt/mlflow"]
)
scoring_response = score_model_in_sagemaker_docker_container(
model_uri=model_path,
data=spark_model_iris.pandas_df,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
flavor=mlflow.pyfunc.FLAVOR_NAME,
)
deployed_model_preds = np.array(json.loads(scoring_response.content)["predictions"])
np.testing.assert_array_almost_equal(
deployed_model_preds, spark_model_iris.predictions, decimal=4
)
@pytest.mark.parametrize("should_start_run", [False, True])
@pytest.mark.parametrize("use_dfs_tmpdir", [False, True])
def test_sparkml_model_log(tmp_path, spark_model_iris, should_start_run, use_dfs_tmpdir):
old_tracking_uri = mlflow.get_tracking_uri()
dfs_tmpdir = None if use_dfs_tmpdir else tmp_path.joinpath("test")
try:
tracking_dir = tmp_path.joinpath("mlruns")
mlflow.set_tracking_uri(f"file://{tracking_dir}")
if should_start_run:
mlflow.start_run()
artifact_path = "model"
model_info = mlflow.spark.log_model(
spark_model_iris.model,
artifact_path=artifact_path,
dfs_tmpdir=dfs_tmpdir,
)
reloaded_model = mlflow.spark.load_model(
model_uri=model_info.model_uri, dfs_tmpdir=dfs_tmpdir
)
preds_df = reloaded_model.transform(spark_model_iris.spark_df)
preds = [x.prediction for x in preds_df.select("prediction").collect()]
assert spark_model_iris.predictions == preds
finally:
mlflow.end_run()
mlflow.set_tracking_uri(old_tracking_uri)
@pytest.mark.parametrize(
("registry_uri", "artifact_repo_class"),
[
("databricks-uc", UnityCatalogModelsArtifactRepository),
("databricks", DatabricksModelsArtifactRepository),
],
)
def test_load_spark_model_from_models_uri(
tmp_path, spark_model_estimator, registry_uri, artifact_repo_class
):
model_dir = str(tmp_path.joinpath("spark_model"))
model_name = "mycatalog.myschema.mymodel"
fake_model_version = ModelVersion(name=model_name, version=str(3), creation_timestamp=0)
with (
mock.patch(f"{MODELS_ARTIFACT_REPOSITORY}.get_underlying_uri") as mock_get_underlying_uri,
mock.patch.object(
artifact_repo_class, "download_artifacts", return_value=model_dir
) as mock_download_artifacts,
mock.patch("mlflow.get_registry_uri", return_value=registry_uri),
mock.patch.object(
mlflow.tracking._model_registry.client.ModelRegistryClient,
"get_model_version_by_alias",
return_value=fake_model_version,
) as get_model_version_by_alias_mock,
):
mlflow.spark.save_model(
path=model_dir,
spark_model=spark_model_estimator.model,
)
mock_get_underlying_uri.return_value = "nonexistentscheme://fakeuri"
mlflow.spark.load_model(f"models:/{model_name}/1")
# Assert that we downloaded both the MLmodel file and the whole model itself using
# the models:/ URI
kwargs = (
{"lineage_header_info": None}
if artifact_repo_class is UnityCatalogModelsArtifactRepository
else {}
)
mock_download_artifacts.assert_called_once_with("", None, **kwargs)
mock_download_artifacts.reset_mock()
mlflow.spark.load_model(f"models:/{model_name}@Champion")
mock_download_artifacts.assert_called_once_with("", None, **kwargs)
assert get_model_version_by_alias_mock.called_with(model_name, "Champion")
@pytest.mark.parametrize("should_start_run", [False, True])
@pytest.mark.parametrize("use_dfs_tmpdir", [False, True])
def test_sparkml_estimator_model_log(
tmp_path, spark_model_estimator, should_start_run, use_dfs_tmpdir
):
old_tracking_uri = mlflow.get_tracking_uri()
dfs_tmpdir = None if use_dfs_tmpdir else tmp_path.joinpath("test")
try:
tracking_dir = tmp_path.joinpath("mlruns")
mlflow.set_tracking_uri(f"file://{tracking_dir}")
if should_start_run:
mlflow.start_run()
artifact_path = "model"
model_info = mlflow.spark.log_model(
spark_model_estimator.model,
artifact_path=artifact_path,
dfs_tmpdir=dfs_tmpdir,
)
reloaded_model = mlflow.spark.load_model(
model_uri=model_info.model_uri, dfs_tmpdir=dfs_tmpdir
)
preds_df = reloaded_model.transform(spark_model_estimator.spark_df)
preds = [x.prediction for x in preds_df.select("prediction").collect()]
assert spark_model_estimator.predictions == preds
finally:
mlflow.end_run()
mlflow.set_tracking_uri(old_tracking_uri)
def test_log_model_calls_register_model(tmp_path, spark_model_iris):
artifact_path = "model"
dfs_tmp_dir = tmp_path.joinpath("test")
register_model_patch = mock.patch("mlflow.tracking._model_registry.fluent._register_model")
with mlflow.start_run(), register_model_patch:
model_info = mlflow.spark.log_model(
spark_model_iris.model,
artifact_path=artifact_path,
dfs_tmpdir=dfs_tmp_dir,
registered_model_name="AdsModel1",
)
assert_register_model_called_with_local_model_path(
register_model_mock=mlflow.tracking._model_registry.fluent._register_model,
model_uri=model_info.model_uri,
registered_model_name="AdsModel1",
)
def test_log_model_no_registered_model_name(tmp_path, spark_model_iris):
    """Without ``registered_model_name``, log_model must not register anything.

    Mirrors ``test_log_model_calls_register_model`` but asserts the registry
    helper is never invoked.
    """
    artifact_path = "model"
    # Consistency fix: use pathlib like the sibling test above
    # (was os.path.join, which mixed path styles for the same fixture).
    dfs_tmp_dir = tmp_path.joinpath("test")
    register_model_patch = mock.patch("mlflow.tracking._model_registry.fluent._register_model")
    with mlflow.start_run(), register_model_patch:
        mlflow.spark.log_model(
            spark_model_iris.model,
            artifact_path=artifact_path,
            dfs_tmpdir=dfs_tmp_dir,
        )
        mlflow.tracking._model_registry.fluent._register_model.assert_not_called()
def test_sparkml_model_load_from_remote_uri_succeeds(spark_model_iris, model_path, mock_s3_bucket):
    """A model saved locally and uploaded to S3 can be loaded back from the s3:// URI."""
    mlflow.spark.save_model(spark_model=spark_model_iris.model, path=model_path)

    artifact_root = f"s3://{mock_s3_bucket}"
    artifact_path = "model"
    artifact_repo = S3ArtifactRepository(artifact_root)
    artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)

    model_uri = artifact_root + "/" + artifact_path
    reloaded_model = mlflow.spark.load_model(model_uri=model_uri)
    # The reloaded model must reproduce the original predictions exactly.
    preds_df = reloaded_model.transform(spark_model_iris.spark_df)
    preds = [x.prediction for x in preds_df.select("prediction").collect()]
    assert spark_model_iris.predictions == preds
def test_sparkml_model_save_persists_specified_conda_env_in_mlflow_model_directory(
    spark_model_iris, model_path, spark_custom_env
):
    """A user-supplied conda env file is copied into the saved model directory."""

    def read_yaml(path):
        with open(path) as fh:
            return yaml.safe_load(fh)

    mlflow.spark.save_model(
        spark_model=spark_model_iris.model, path=model_path, conda_env=spark_custom_env
    )
    flavor_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    persisted_env = os.path.join(model_path, flavor_conf[pyfunc.ENV]["conda"])
    assert os.path.exists(persisted_env)
    # Must be a copy inside the model directory, not a reference to the original.
    assert persisted_env != spark_custom_env
    assert read_yaml(persisted_env) == read_yaml(spark_custom_env)
def test_sparkml_model_save_persists_requirements_in_mlflow_model_directory(
    spark_model_iris, model_path, spark_custom_env
):
    """requirements.txt written at save time matches the conda env's pip deps."""
    mlflow.spark.save_model(
        spark_model=spark_model_iris.model, path=model_path, conda_env=spark_custom_env
    )
    persisted_reqs = os.path.join(model_path, "requirements.txt")
    _compare_conda_env_requirements(spark_custom_env, persisted_reqs)
def test_log_model_with_pip_requirements(spark_model_iris, tmp_path):
    """``pip_requirements`` accepts a file path, a list, and -r/-c directives."""
    expected_mlflow_version = _mlflow_major_version_string()
    # Path to a requirements file
    req_file = tmp_path.joinpath("requirements.txt")
    req_file.write_text("a")
    with mlflow.start_run():
        model_info = mlflow.spark.log_model(
            spark_model_iris.model, artifact_path="model", pip_requirements=str(req_file)
        )
    _assert_pip_requirements(model_info.model_uri, [expected_mlflow_version, "a"], strict=True)

    # List of requirements
    with mlflow.start_run():
        model_info = mlflow.spark.log_model(
            spark_model_iris.model, artifact_path="model", pip_requirements=[f"-r {req_file}", "b"]
        )
    _assert_pip_requirements(
        model_info.model_uri, [expected_mlflow_version, "a", "b"], strict=True
    )

    # Constraints file
    with mlflow.start_run():
        model_info = mlflow.spark.log_model(
            spark_model_iris.model, artifact_path="model", pip_requirements=[f"-c {req_file}", "b"]
        )
    _assert_pip_requirements(
        model_info.model_uri,
        [expected_mlflow_version, "b", "-c constraints.txt"],
        ["a"],
        strict=True,
    )
def test_log_model_with_extra_pip_requirements(spark_model_iris, tmp_path):
    """``extra_pip_requirements`` are appended to the flavor's default requirements."""
    expected_mlflow_version = _mlflow_major_version_string()
    default_reqs = mlflow.spark.get_default_pip_requirements()
    # Path to a requirements file
    req_file = tmp_path.joinpath("requirements.txt")
    req_file.write_text("a")
    with mlflow.start_run():
        model_info = mlflow.spark.log_model(
            spark_model_iris.model, artifact_path="model", extra_pip_requirements=str(req_file)
        )
    _assert_pip_requirements(
        model_info.model_uri, [expected_mlflow_version, *default_reqs, "a"]
    )

    # List of requirements
    with mlflow.start_run():
        model_info = mlflow.spark.log_model(
            spark_model_iris.model,
            artifact_path="model",
            extra_pip_requirements=[f"-r {req_file}", "b"],
        )
    _assert_pip_requirements(
        model_info.model_uri, [expected_mlflow_version, *default_reqs, "a", "b"]
    )

    # Constraints file
    with mlflow.start_run():
        model_info = mlflow.spark.log_model(
            spark_model_iris.model,
            artifact_path="model",
            extra_pip_requirements=[f"-c {req_file}", "b"],
        )
    _assert_pip_requirements(
        model_info.model_uri,
        [expected_mlflow_version, *default_reqs, "b", "-c constraints.txt"],
        ["a"],
    )
def test_sparkml_model_save_accepts_conda_env_as_dict(spark_model_iris, model_path):
    """``conda_env`` may be an in-memory dict rather than a file path."""
    env = dict(mlflow.spark.get_default_conda_env())
    env["dependencies"].append("pytest")
    mlflow.spark.save_model(spark_model=spark_model_iris.model, path=model_path, conda_env=env)
    flavor_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    persisted_env = os.path.join(model_path, flavor_conf[pyfunc.ENV]["conda"])
    assert os.path.exists(persisted_env)
    with open(persisted_env) as fh:
        assert yaml.safe_load(fh) == env
def test_sparkml_model_log_persists_specified_conda_env_in_mlflow_model_directory(
    spark_model_iris, model_path, spark_custom_env
):
    """A user-supplied conda env is persisted alongside the *logged* model."""
    artifact_path = "model"
    with mlflow.start_run():
        model_info = mlflow.spark.log_model(
            spark_model_iris.model,
            artifact_path=artifact_path,
            conda_env=spark_custom_env,
        )
    # NOTE(review): rebinds the ``model_path`` fixture argument to the downloaded copy.
    model_path = _download_artifact_from_uri(artifact_uri=model_info.model_uri)
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
    assert os.path.exists(saved_conda_env_path)
    # Must be a copy inside the model directory, not the original file.
    assert saved_conda_env_path != spark_custom_env
    with open(spark_custom_env) as f:
        spark_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path) as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == spark_custom_env_parsed
def test_sparkml_model_log_persists_requirements_in_mlflow_model_directory(
    spark_model_iris, model_path, spark_custom_env
):
    """requirements.txt logged with the model matches the conda env's pip deps."""
    with mlflow.start_run():
        info = mlflow.spark.log_model(
            spark_model_iris.model,
            artifact_path="model",
            conda_env=spark_custom_env,
        )
    downloaded = _download_artifact_from_uri(artifact_uri=info.model_uri)
    _compare_conda_env_requirements(spark_custom_env, os.path.join(downloaded, "requirements.txt"))
def test_sparkml_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
    spark_model_iris, model_path
):
    """Saving without a conda env falls back to the flavor's default pip requirements."""
    mlflow.spark.save_model(spark_model=spark_model_iris.model, path=model_path)
    expected = mlflow.spark.get_default_pip_requirements()
    _assert_pip_requirements(model_path, expected)
def test_sparkml_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
    spark_model_iris,
):
    """Logging without a conda env falls back to the flavor's default pip requirements."""
    with mlflow.start_run():
        info = mlflow.spark.log_model(spark_model_iris.model, artifact_path="model")
    expected = mlflow.spark.get_default_pip_requirements()
    _assert_pip_requirements(info.model_uri, expected)
def test_pyspark_version_is_logged_without_dev_suffix(spark_model_iris):
    """Dev-suffixed pyspark versions are pinned without the suffix; releases are untouched."""
    expected_mlflow_version = _mlflow_major_version_string()
    unsuffixed_version = "2.4.0"
    for dev_suffix in [".dev0", ".dev", ".dev1", "dev.a", ".devb"]:
        with mock.patch("importlib_metadata.version", return_value=unsuffixed_version + dev_suffix):
            with mlflow.start_run():
                model_info = mlflow.spark.log_model(spark_model_iris.model, artifact_path="model")
            # The logged pin must drop the dev suffix.
            _assert_pip_requirements(
                model_info.model_uri, [expected_mlflow_version, f"pyspark=={unsuffixed_version}"]
            )
    for unaffected_version in ["2.0", "2.3.4", "2"]:
        with mock.patch("importlib_metadata.version", return_value=unaffected_version):
            pip_deps = _get_pip_deps(mlflow.spark.get_default_conda_env())
            assert any(x == f"pyspark=={unaffected_version}" for x in pip_deps)
def test_model_is_recorded_when_using_direct_save(spark_model_iris):
    """Direct DFS serialization still records the model in the run's tags."""
    # Patch `is_local_uri` to enforce direct model serialization to DFS.
    with mock.patch("mlflow.spark.is_local_uri", return_value=False), mlflow.start_run():
        mlflow.spark.log_model(spark_model_iris.model, artifact_path="model")
        tags = mlflow.get_run(mlflow.active_run().info.run_id).data.tags
        assert mlflow.utils.mlflow_tags.MLFLOW_LOGGED_MODELS in tags
@pytest.mark.parametrize(
    (
        "artifact_uri",
        "db_runtime_version",
        "mlflowdbfs_disabled",
        "mlflowdbfs_available",
        "dbutils_available",
        "expected_uri",
    ),
    [
        (
            "dbfs:/databricks/mlflow-tracking/a/b",
            "12.0",
            "",
            True,
            True,
            "mlflowdbfs:///artifacts?run_id={}&path=/model/sparkml",
        ),
        (
            "dbfs:/databricks/mlflow-tracking/a/b",
            "12.0",
            "false",
            True,
            True,
            "mlflowdbfs:///artifacts?run_id={}&path=/model/sparkml",
        ),
        (
            "dbfs:/databricks/mlflow-tracking/a/b",
            "12.0",
            "false",
            True,
            False,
            "dbfs:/databricks/mlflow-tracking/a/b/model/sparkml",
        ),
        (
            "dbfs:/databricks/mlflow-tracking/a/b",
            "12.0",
            "",
            False,
            True,
            "dbfs:/databricks/mlflow-tracking/a/b/model/sparkml",
        ),
        (
            "dbfs:/databricks/mlflow-tracking/a/b",
            "",
            "",
            True,
            True,
            "dbfs:/databricks/mlflow-tracking/a/b/model/sparkml",
        ),
        (
            "dbfs:/databricks/mlflow-tracking/a/b",
            "12.0",
            "true",
            True,
            True,
            "dbfs:/databricks/mlflow-tracking/a/b/model/sparkml",
        ),
        ("dbfs:/root/a/b", "12.0", "", True, True, "dbfs:/root/a/b/model/sparkml"),
        ("s3://mybucket/a/b", "12.0", "", True, True, "s3://mybucket/a/b/model/sparkml"),
    ],
)
def test_model_logged_via_mlflowdbfs_when_appropriate(
    monkeypatch,
    spark_model_iris,
    artifact_uri,
    db_runtime_version,
    mlflowdbfs_disabled,
    mlflowdbfs_available,
    dbutils_available,
    expected_uri,
):
    """The model is saved through mlflowdbfs only when runtime version, env flag,
    filesystem availability, and dbutils availability all permit it; otherwise
    the plain artifact URI is used.
    """

    def mock_spark_session_load(path):
        raise Exception("MlflowDbfsClient operation failed!")

    mock_spark_session = mock.Mock()
    mock_read_spark_session = mock.Mock()
    mock_read_spark_session.load = mock_spark_session_load
    # Fix: attach the stubbed reader to the session. Previously these mocks were
    # built but never wired in, so ``session.read.load`` was an auto-created
    # Mock that silently succeeded instead of exercising the failure path
    # (the sibling test below wires it the same way).
    mock_spark_session.read = mock_read_spark_session

    from mlflow.utils.databricks_utils import _get_dbutils as og_getdbutils

    def mock_get_dbutils():
        # _get_dbutils is called during run creation and model logging; to avoid breaking run
        # creation, we only mock the output if _get_dbutils is called during spark model logging
        caller_fn_name = inspect.stack()[1].function
        if caller_fn_name == "_should_use_mlflowdbfs":
            if dbutils_available:
                return mock.Mock()
            else:
                raise Exception("dbutils not available")
        else:
            return og_getdbutils()

    with (
        mock.patch(
            "mlflow.utils._spark_utils._get_active_spark_session", return_value=mock_spark_session
        ),
        mock.patch("mlflow.get_artifact_uri", return_value=artifact_uri),
        mock.patch(
            "mlflow.spark._HadoopFileSystem.is_filesystem_available",
            return_value=mlflowdbfs_available,
        ),
        mock.patch("mlflow.utils.databricks_utils.MlflowCredentialContext", autospec=True),
        mock.patch("mlflow.utils.databricks_utils._get_dbutils", mock_get_dbutils),
        mock.patch.object(spark_model_iris.model, "save") as mock_save,
        mock.patch("mlflow.models.infer_pip_requirements", return_value=[]) as mock_infer,
    ):
        with mlflow.start_run():
            if db_runtime_version:
                monkeypatch.setenv("DATABRICKS_RUNTIME_VERSION", db_runtime_version)
            monkeypatch.setenv("DISABLE_MLFLOWDBFS", mlflowdbfs_disabled)
            mlflow.spark.log_model(spark_model_iris.model, artifact_path="model")
            mock_save.assert_called_once_with(expected_uri.format(mlflow.active_run().info.run_id))

            # Fix: was ``startswith("mflowdbfs")`` (typo), which made this
            # branch unreachable for every parametrization.
            if expected_uri.startswith("mlflowdbfs"):
                # If mlflowdbfs is used, infer_pip_requirements should load the model from the
                # remote model path instead of a local tmp path.
                assert (
                    mock_infer.call_args[0][0]
                    == "dbfs:/databricks/mlflow-tracking/a/b/model/sparkml"
                )
@pytest.mark.parametrize("dummy_read_shows_mlflowdbfs_available", [True, False])
def test_model_logging_uses_mlflowdbfs_if_appropriate_when_hdfs_check_fails(
    monkeypatch, spark_model_iris, dummy_read_shows_mlflowdbfs_available
):
    """When the HDFS availability check raises, fall back to probing via a dummy read.

    A read failing with an MlflowdbfsClient error implies the filesystem exists
    (so mlflowdbfs is used); a "filesystem not found" error implies it does not.
    """

    def mock_spark_session_load(path):
        if dummy_read_shows_mlflowdbfs_available:
            # Filesystem resolved but the dummy path is bogus -> mlflowdbfs exists.
            raise Exception("MlflowdbfsClient operation failed!")
        else:
            raise Exception("mlflowdbfs filesystem not found")

    mock_read_spark_session = mock.Mock()
    mock_read_spark_session.load = mock_spark_session_load
    mock_spark_session = mock.Mock()
    mock_spark_session.read = mock_read_spark_session

    from mlflow.utils.databricks_utils import _get_dbutils as og_getdbutils

    def mock_get_dbutils():
        # _get_dbutils is called during run creation and model logging; to avoid breaking run
        # creation, we only mock the output if _get_dbutils is called during spark model logging
        caller_fn_name = inspect.stack()[1].function
        if caller_fn_name == "_should_use_mlflowdbfs":
            return mock.Mock()
        else:
            return og_getdbutils()

    with (
        mock.patch(
            "mlflow.utils._spark_utils._get_active_spark_session",
            return_value=mock_spark_session,
        ),
        mock.patch(
            "mlflow.get_artifact_uri",
            return_value="dbfs:/databricks/mlflow-tracking/a/b",
        ),
        mock.patch(
            "mlflow.spark._HadoopFileSystem.is_filesystem_available",
            side_effect=Exception("MlflowDbfsClient operation failed!"),
        ),
        mock.patch("mlflow.utils.databricks_utils.MlflowCredentialContext", autospec=True),
        mock.patch(
            "mlflow.utils.databricks_utils._get_dbutils",
            mock_get_dbutils,
        ),
        mock.patch.object(spark_model_iris.model, "save") as mock_save,
    ):
        with mlflow.start_run():
            monkeypatch.setenv("DATABRICKS_RUNTIME_VERSION", "12.0")
            mlflow.spark.log_model(spark_model_iris.model, artifact_path="model")
            run_id = mlflow.active_run().info.run_id
            mock_save.assert_called_once_with(
                f"mlflowdbfs:///artifacts?run_id={run_id}&path=/model/sparkml"
                if dummy_read_shows_mlflowdbfs_available
                else "dbfs:/databricks/mlflow-tracking/a/b/model/sparkml"
            )
def test_log_model_with_code_paths(spark_model_iris):
    """``code_paths`` entries are logged with the model and re-added to sys.path on load."""
    artifact_path = "model"
    with (
        mlflow.start_run(),
        mock.patch(
            "mlflow.spark._add_code_from_conf_to_system_path",
            wraps=_add_code_from_conf_to_system_path,
        ) as add_mock,
    ):
        model_info = mlflow.spark.log_model(
            spark_model_iris.model, artifact_path=artifact_path, code_paths=[__file__]
        )
        _compare_logged_code_paths(__file__, model_info.model_uri, mlflow.spark.FLAVOR_NAME)
        mlflow.spark.load_model(model_info.model_uri)
        # Loading must trigger the code-path injection helper.
        add_mock.assert_called()
def test_virtualenv_subfield_points_to_correct_path(spark_model_iris, model_path):
    """The pyfunc config's ``virtualenv`` entry must reference an existing file."""
    mlflow.spark.save_model(spark_model_iris.model, path=model_path)
    conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    env_file = Path(model_path, conf[pyfunc.ENV]["virtualenv"])
    assert env_file.exists()
    assert env_file.is_file()
def test_model_save_load_with_metadata(spark_model_iris, model_path):
    """Metadata supplied at save time survives a pyfunc round-trip."""
    mlflow.spark.save_model(
        spark_model_iris.model, path=model_path, metadata={"metadata_key": "metadata_value"}
    )
    loaded = mlflow.pyfunc.load_model(model_uri=model_path)
    assert loaded.metadata.metadata["metadata_key"] == "metadata_value"
def test_model_log_with_metadata(spark_model_iris):
    """Metadata supplied at log time survives a pyfunc round-trip."""
    with mlflow.start_run():
        info = mlflow.spark.log_model(
            spark_model_iris.model,
            artifact_path="model",
            metadata={"metadata_key": "metadata_value"},
        )
    loaded = mlflow.pyfunc.load_model(model_uri=info.model_uri)
    assert loaded.metadata.metadata["metadata_key"] == "metadata_value"
# Single-row features-only DataFrame used as the signature-inference example below.
_df_input_example = iris_pandas_df().drop("label", axis=1).iloc[[0]]


@pytest.mark.parametrize(
    "input_example",
    # array and dict input examples are not supported any more as they
    # won't be converted to pandas dataframe when saving example
    [_df_input_example],
)
def test_model_log_with_signature_inference(spark_model_iris, input_example):
    """A model signature is inferred from ``input_example`` when none is supplied."""
    artifact_path = "model"
    with mlflow.start_run():
        model_info = mlflow.spark.log_model(
            spark_model_iris.model, artifact_path=artifact_path, input_example=input_example
        )
    mlflow_model = Model.load(model_info.model_uri)
    input_columns = mlflow_model.signature.inputs.inputs
    assert all(col.type == DataType.double for col in input_columns)
    column_names = [col.name for col in input_columns]
    # Only the DataFrame case is parametrized today; the list branch is kept
    # for a potential re-enable of array examples.
    if isinstance(input_example, list):
        assert column_names == [0, 1, 2, 3]
    else:
        assert column_names == ["0", "1", "2", "3"]
    assert mlflow_model.signature.outputs == Schema([ColSpec(type=DataType.double)])
def test_log_model_with_vector_input_type_signature(spark, spark_model_estimator):
    """A SparkML vector column type in the signature round-trips, and pyfunc scoring works."""
    from pyspark.ml.functions import vector_to_array

    from mlflow.types.schema import SparkMLVector

    model = spark_model_estimator.model
    with mlflow.start_run():
        model_info = mlflow.spark.log_model(
            model,
            artifact_path="model",
            signature=ModelSignature(
                inputs=Schema(
                    [
                        ColSpec(name="features", type=SparkMLVector()),
                    ]
                ),
                outputs=Schema([ColSpec(type=DataType.double)]),
            ),
        )
        model_uri = model_info.model_uri

    # The persisted signature must preserve the vector type.
    model_meta = Model.load(model_uri)
    input_type = model_meta.signature.inputs.input_dict()["features"].type
    assert isinstance(input_type, SparkMLVector)

    pyfunc_model = pyfunc.load_model(model_uri)
    # pyfunc scoring takes arrays, so convert the vector column first.
    infer_data = spark_model_estimator.spark_df.withColumn(
        "features", vector_to_array("features")
    ).toPandas()
    preds = pyfunc_model.predict(infer_data)
    assert spark_model_estimator.predictions == preds
| SparkModelWithData |
python | apache__airflow | providers/dbt/cloud/tests/unit/dbt/cloud/hooks/test_dbt.py | {
"start": 4895,
"end": 50540
} | class ____:
    # TODO: Potential performance issue, converted setup_class to a setup_connections function level fixture
    @pytest.fixture(autouse=True)
    def setup_connections(self, create_connection_without_db):
        """Register the four dbt Cloud connection variants used by these tests."""
        # Connection with ``account_id`` specified
        account_id_conn = Connection(
            conn_id=ACCOUNT_ID_CONN,
            conn_type=DbtCloudHook.conn_type,
            login=str(DEFAULT_ACCOUNT_ID),
            password=TOKEN,
        )

        # Connection with no ``account_id`` specified
        no_account_id_conn = Connection(
            conn_id=NO_ACCOUNT_ID_CONN,
            conn_type=DbtCloudHook.conn_type,
            password=TOKEN,
        )

        # Connection with `host` parameter set
        host_conn = Connection(
            conn_id=SINGLE_TENANT_CONN,
            conn_type=DbtCloudHook.conn_type,
            login=str(DEFAULT_ACCOUNT_ID),
            password=TOKEN,
            host=SINGLE_TENANT_DOMAIN,
        )

        # Connection with a proxy set in extra parameters
        proxy_conn = Connection(
            conn_id=PROXY_CONN,
            conn_type=DbtCloudHook.conn_type,
            login=str(DEFAULT_ACCOUNT_ID),
            password=TOKEN,
            host=SINGLE_TENANT_DOMAIN,
            extra=EXTRA_PROXIES,
        )

        create_connection_without_db(account_id_conn)
        create_connection_without_db(no_account_id_conn)
        create_connection_without_db(host_conn)
        create_connection_without_db(proxy_conn)
@pytest.mark.parametrize(
argnames=("conn_id", "url"),
argvalues=[(ACCOUNT_ID_CONN, BASE_URL), (SINGLE_TENANT_CONN, SINGLE_TENANT_URL)],
ids=["multi-tenant", "single-tenant"],
)
def test_init_hook(self, conn_id, url):
hook = DbtCloudHook(conn_id)
assert hook.auth_type == TokenAuth
assert hook.method == "POST"
assert hook.dbt_cloud_conn_id == conn_id
@pytest.mark.parametrize(
argnames=("conn_id", "url"),
argvalues=[(ACCOUNT_ID_CONN, BASE_URL), (SINGLE_TENANT_CONN, SINGLE_TENANT_URL)],
ids=["multi-tenant", "single-tenant"],
)
def test_tenant_base_url(self, conn_id, url):
hook = DbtCloudHook(conn_id)
hook.get_conn()
assert hook.base_url == url
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    def test_fallback_to_default_account(self, conn_id, account_id):
        """The decorator uses the connection's account id as fallback, or raises without one."""
        hook = DbtCloudHook(conn_id)

        def dbt_cloud_func(_, account_id=None):
            return account_id

        _account_id = account_id or DEFAULT_ACCOUNT_ID

        if conn_id == ACCOUNT_ID_CONN:
            # Both explicit and omitted account ids resolve to the connection default.
            assert fallback_to_default_account(dbt_cloud_func)(hook, account_id=account_id) == _account_id
            assert fallback_to_default_account(dbt_cloud_func)(hook) == _account_id

        if conn_id == NO_ACCOUNT_ID_CONN:
            assert fallback_to_default_account(dbt_cloud_func)(hook, account_id=account_id) == _account_id

            # Without an account id anywhere, the decorator must raise.
            with pytest.raises(AirflowException):
                fallback_to_default_account(dbt_cloud_func)(hook)
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_list_accounts(self, mock_http_run, mock_paginate, conn_id, account_id):
        """``list_accounts`` issues a single GET with no endpoint and no pagination.

        NOTE(review): stacked ``@patch`` decorators inject bottom-most first, so
        these two mock parameter names are swapped relative to the actual mocks;
        harmless here because assertions go through ``hook.run``/``hook._paginate``.
        """
        hook = DbtCloudHook(conn_id)
        hook.list_accounts()

        assert hook.method == "GET"
        hook.run.assert_called_once_with(endpoint=None, data=None, extra_options=None)
        hook._paginate.assert_not_called()
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_get_account(self, mock_paginate, mock_http_run, conn_id, account_id):
        """``get_account`` GETs the v2 account endpoint without pagination."""
        hook = DbtCloudHook(conn_id)
        hook.get_account(account_id=account_id)

        assert hook.method == "GET"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook.run.assert_called_once_with(
            endpoint=f"api/v2/accounts/{_account_id}/", data=None, extra_options=None
        )
        hook._paginate.assert_not_called()
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_list_projects(self, mock_http_run, mock_paginate, conn_id, account_id):
        """``list_projects`` paginates the v3 projects endpoint with no payload."""
        hook = DbtCloudHook(conn_id)
        hook.list_projects(account_id=account_id)

        assert hook.method == "GET"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook.run.assert_not_called()
        hook._paginate.assert_called_once_with(
            endpoint=f"api/v3/accounts/{_account_id}/projects/",
            payload=None,
            proxies=None,
        )
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id", "name_contains"),
        argvalues=[(ACCOUNT_ID_CONN, None, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID, PROJECT_NAME)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_list_projects_with_payload(
        self, mock_http_run, mock_paginate, conn_id, account_id, name_contains
    ):
        """``name_contains`` is forwarded as a ``name__icontains`` filter payload."""
        hook = DbtCloudHook(conn_id)
        hook.list_projects(account_id=account_id, name_contains=name_contains)

        assert hook.method == "GET"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook.run.assert_not_called()
        hook._paginate.assert_called_once_with(
            endpoint=f"api/v3/accounts/{_account_id}/projects/",
            payload={"name__icontains": name_contains} if name_contains else None,
            proxies=None,
        )
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_get_project(self, mock_http_run, mock_paginate, conn_id, account_id):
        """``get_project`` GETs the v3 project endpoint without pagination."""
        hook = DbtCloudHook(conn_id)
        hook.get_project(project_id=PROJECT_ID, account_id=account_id)

        assert hook.method == "GET"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook.run.assert_called_once_with(
            endpoint=f"api/v3/accounts/{_account_id}/projects/{PROJECT_ID}/", data=None, extra_options=None
        )
        hook._paginate.assert_not_called()
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_list_environments(self, mock_http_run, mock_paginate, conn_id, account_id):
        """``list_environments`` paginates the v3 environments endpoint with no payload."""
        hook = DbtCloudHook(conn_id)
        hook.list_environments(project_id=PROJECT_ID, account_id=account_id)

        assert hook.method == "GET"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook.run.assert_not_called()
        hook._paginate.assert_called_once_with(
            endpoint=f"api/v3/accounts/{_account_id}/projects/{PROJECT_ID}/environments/",
            payload=None,
            proxies=None,
        )
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id", "name_contains"),
        argvalues=[(ACCOUNT_ID_CONN, None, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID, ENVIRONMENT_NAME)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_list_environments_with_payload(
        self, mock_http_run, mock_paginate, conn_id, account_id, name_contains
    ):
        """``name_contains`` is forwarded as a ``name__icontains`` filter payload."""
        hook = DbtCloudHook(conn_id)
        hook.list_environments(project_id=PROJECT_ID, account_id=account_id, name_contains=name_contains)

        assert hook.method == "GET"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook.run.assert_not_called()
        hook._paginate.assert_called_once_with(
            endpoint=f"api/v3/accounts/{_account_id}/projects/{PROJECT_ID}/environments/",
            payload={"name__icontains": name_contains} if name_contains else None,
            proxies=None,
        )
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_get_environment(self, mock_http_run, mock_paginate, conn_id, account_id):
        """``get_environment`` GETs the v3 environment endpoint without pagination."""
        hook = DbtCloudHook(conn_id)
        hook.get_environment(project_id=PROJECT_ID, environment_id=ENVIRONMENT_ID, account_id=account_id)

        assert hook.method == "GET"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook.run.assert_called_once_with(
            endpoint=f"api/v3/accounts/{_account_id}/projects/{PROJECT_ID}/environments/{ENVIRONMENT_ID}/",
            data=None,
            extra_options=None,
        )
        hook._paginate.assert_not_called()
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_list_jobs(self, mock_http_run, mock_paginate, conn_id, account_id):
        """``list_jobs`` paginates the v2 jobs endpoint with default (None) filters."""
        hook = DbtCloudHook(conn_id)
        hook.list_jobs(account_id=account_id)

        assert hook.method == "GET"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook._paginate.assert_called_once_with(
            endpoint=f"api/v2/accounts/{_account_id}/jobs/",
            payload={"order_by": None, "project_id": None},
            proxies=None,
        )
        hook.run.assert_not_called()
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_list_jobs_with_payload(self, mock_http_run, mock_paginate, conn_id, account_id):
        """Ordering, project, environment, and name filters are all forwarded in the payload."""
        hook = DbtCloudHook(conn_id)
        hook.list_jobs(
            project_id=PROJECT_ID,
            account_id=account_id,
            order_by="-id",
            environment_id=ENVIRONMENT_ID,
            name_contains=JOB_NAME,
        )

        assert hook.method == "GET"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook._paginate.assert_called_once_with(
            endpoint=f"api/v2/accounts/{_account_id}/jobs/",
            payload={
                "order_by": "-id",
                "project_id": PROJECT_ID,
                "environment_id": ENVIRONMENT_ID,
                "name__icontains": JOB_NAME,
            },
            proxies=None,
        )
        hook.run.assert_not_called()
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_get_job(self, mock_http_run, mock_paginate, conn_id, account_id):
        """``get_job`` GETs the v2 job endpoint without pagination."""
        hook = DbtCloudHook(conn_id)
        hook.get_job(job_id=JOB_ID, account_id=account_id)

        assert hook.method == "GET"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook.run.assert_called_once_with(
            endpoint=f"api/v2/accounts/{_account_id}/jobs/{JOB_ID}", data=None, extra_options=None
        )
        hook._paginate.assert_not_called()
    @patch.object(DbtCloudHook, "list_jobs", return_value=[mock_response_json(DEFAULT_LIST_JOBS_RESPONSE)])
    @patch.object(
        DbtCloudHook,
        "list_environments",
        return_value=[mock_response_json(DEFAULT_LIST_ENVIRONMENTS_RESPONSE)],
    )
    @patch.object(
        DbtCloudHook, "list_projects", return_value=[mock_response_json(DEFAULT_LIST_PROJECTS_RESPONSE)]
    )
    def test_get_job_by_name_returns_response(
        self, mock_list_projects, mock_list_environments, mock_list_jobs
    ):
        """Resolving matching project/environment/job names yields the job payload."""
        hook = DbtCloudHook(ACCOUNT_ID_CONN)
        job_details = hook.get_job_by_name(
            project_name=PROJECT_NAME,
            environment_name=ENVIRONMENT_NAME,
            job_name=JOB_NAME,
            account_id=None,
        )

        assert job_details == DEFAULT_LIST_JOBS_RESPONSE["data"][0]
    @pytest.mark.parametrize(
        argnames=("project_name", "environment_name", "job_name"),
        argvalues=[
            ("dummy_name", ENVIRONMENT_NAME, JOB_NAME),
            (PROJECT_NAME, "dummy_name", JOB_NAME),
            (PROJECT_NAME, ENVIRONMENT_NAME, JOB_NAME.upper()),
            (None, ENVIRONMENT_NAME, JOB_NAME),
            (PROJECT_NAME, "", JOB_NAME),
            ("", "", ""),
        ],
    )
    @patch.object(DbtCloudHook, "list_jobs", return_value=[mock_response_json(DEFAULT_LIST_JOBS_RESPONSE)])
    @patch.object(
        DbtCloudHook,
        "list_environments",
        return_value=[mock_response_json(DEFAULT_LIST_ENVIRONMENTS_RESPONSE)],
    )
    @patch.object(
        DbtCloudHook, "list_projects", return_value=[mock_response_json(DEFAULT_LIST_PROJECTS_RESPONSE)]
    )
    def test_get_job_by_incorrect_name_raises_exception(
        self,
        mock_list_projects,
        mock_list_environments,
        mock_list_jobs,
        project_name,
        environment_name,
        job_name,
    ):
        """Any non-matching (or empty/None) name raises a "Found 0" lookup error.

        Name matching is exact, so a case-changed job name also fails.
        """
        hook = DbtCloudHook(ACCOUNT_ID_CONN)
        with pytest.raises(DbtCloudResourceLookupError, match="Found 0"):
            hook.get_job_by_name(
                project_name=project_name,
                environment_name=environment_name,
                job_name=job_name,
                account_id=None,
            )
    @pytest.mark.parametrize("duplicated", ["projects", "environments", "jobs"])
    def test_get_job_by_duplicate_name_raises_exception(self, duplicated):
        """An ambiguous match at any resolution level raises a "Found 2 <kind>" error."""
        hook = DbtCloudHook(ACCOUNT_ID_CONN)

        # Deep-copy the canned responses so the injected duplicate does not
        # leak into other tests via the module-level constants.
        mock_list_jobs_response = deepcopy(DEFAULT_LIST_JOBS_RESPONSE)
        mock_list_environments_response = deepcopy(DEFAULT_LIST_ENVIRONMENTS_RESPONSE)
        mock_list_projects_response = deepcopy(DEFAULT_LIST_PROJECTS_RESPONSE)
        if duplicated == "projects":
            mock_list_projects_response["data"].append(
                {
                    "id": PROJECT_ID + 1,
                    "name": PROJECT_NAME,
                }
            )
        elif duplicated == "environments":
            mock_list_environments_response["data"].append(
                {
                    "id": ENVIRONMENT_ID + 1,
                    "name": ENVIRONMENT_NAME,
                }
            )
        elif duplicated == "jobs":
            mock_list_jobs_response["data"].append(
                {
                    "id": JOB_ID + 1,
                    "name": JOB_NAME,
                }
            )

        with (
            patch.object(
                DbtCloudHook, "list_jobs", return_value=[mock_response_json(mock_list_jobs_response)]
            ),
            patch.object(
                DbtCloudHook,
                "list_environments",
                return_value=[mock_response_json(mock_list_environments_response)],
            ),
            patch.object(
                DbtCloudHook,
                "list_projects",
                return_value=[mock_response_json(mock_list_projects_response)],
            ),
        ):
            with pytest.raises(DbtCloudResourceLookupError, match=f"Found 2 {duplicated}"):
                hook.get_job_by_name(
                    project_name=PROJECT_NAME,
                    environment_name=ENVIRONMENT_NAME,
                    job_name=JOB_NAME,
                    account_id=None,
                )
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_trigger_job_run(self, mock_http_run, mock_paginate, conn_id, account_id):
        """``trigger_job_run`` POSTs the cause with None overrides by default."""
        hook = DbtCloudHook(conn_id)
        cause = ""
        hook.trigger_job_run(job_id=JOB_ID, cause=cause, account_id=account_id)

        assert hook.method == "POST"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook.run.assert_called_once_with(
            endpoint=f"api/v2/accounts/{_account_id}/jobs/{JOB_ID}/run/",
            data=json.dumps({"cause": cause, "steps_override": None, "schema_override": None}),
            extra_options=None,
        )
        hook._paginate.assert_not_called()
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_trigger_job_run_with_overrides(self, mock_http_run, mock_paginate, conn_id, account_id):
        """Step and schema overrides are serialized into the POST body."""
        hook = DbtCloudHook(conn_id)
        cause = ""
        steps_override = ["dbt test", "dbt run"]
        schema_override = ["other_schema"]
        hook.trigger_job_run(
            job_id=JOB_ID,
            cause=cause,
            account_id=account_id,
            steps_override=steps_override,
            schema_override=schema_override,
        )

        assert hook.method == "POST"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook.run.assert_called_once_with(
            endpoint=f"api/v2/accounts/{_account_id}/jobs/{JOB_ID}/run/",
            data=json.dumps(
                {"cause": cause, "steps_override": steps_override, "schema_override": schema_override}
            ),
            extra_options=None,
        )
        hook._paginate.assert_not_called()
    @pytest.mark.parametrize(
        argnames=("conn_id", "account_id"),
        argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    @patch.object(DbtCloudHook, "run")
    @patch.object(DbtCloudHook, "_paginate")
    def test_trigger_job_run_with_additional_run_configs(
        self, mock_http_run, mock_paginate, conn_id, account_id
    ):
        """``additional_run_config`` entries are merged into the POST body."""
        hook = DbtCloudHook(conn_id)
        cause = ""
        additional_run_config = {"threads_override": 8, "generate_docs_override": False}
        hook.trigger_job_run(
            job_id=JOB_ID, cause=cause, account_id=account_id, additional_run_config=additional_run_config
        )

        assert hook.method == "POST"

        _account_id = account_id or DEFAULT_ACCOUNT_ID
        hook.run.assert_called_once_with(
            endpoint=f"api/v2/accounts/{_account_id}/jobs/{JOB_ID}/run/",
            data=json.dumps(
                {
                    "cause": cause,
                    "steps_override": None,
                    "schema_override": None,
                    "threads_override": 8,
                    "generate_docs_override": False,
                }
            ),
            extra_options=None,
        )
        hook._paginate.assert_not_called()
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_trigger_job_run_with_longer_cause(self, mock_paginate, mock_http_run, conn_id, account_id):
    """An over-long ``cause`` is truncated to ``DBT_CAUSE_MAX_LENGTH`` with a UserWarning.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    cause = "Some cause that is longer than limit. " * 15
    expected_cause = cause[:DBT_CAUSE_MAX_LENGTH]
    # Sanity-check the fixture actually exceeds the limit.
    assert len(cause) > DBT_CAUSE_MAX_LENGTH
    # Only the warning-raising call belongs inside the pytest.warns scope.
    with pytest.warns(
        UserWarning,
        match=f"Cause `{cause}` exceeds limit of {DBT_CAUSE_MAX_LENGTH}"
        f" characters and will be truncated.",
    ):
        hook.trigger_job_run(job_id=JOB_ID, cause=cause, account_id=account_id)
    assert hook.method == "POST"
    # Fall back to the account configured on the connection when none is passed.
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook.run.assert_called_once_with(
        endpoint=f"api/v2/accounts/{_account_id}/jobs/{JOB_ID}/run/",
        data=json.dumps({"cause": expected_cause, "steps_override": None, "schema_override": None}),
        extra_options=None,
    )
    hook._paginate.assert_not_called()
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@pytest.mark.parametrize(
    argnames=("get_job_runs_data", "should_use_rerun"),
    argvalues=[
        ([], False),
        ([{"status": DbtCloudJobRunStatus.QUEUED.value}], False),
        ([{"status": DbtCloudJobRunStatus.STARTING.value}], False),
        ([{"status": DbtCloudJobRunStatus.RUNNING.value}], False),
        ([{"status": DbtCloudJobRunStatus.SUCCESS.value}], False),
        ([{"status": DbtCloudJobRunStatus.ERROR.value}], True),
        ([{"status": DbtCloudJobRunStatus.CANCELLED.value}], False),
    ],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_trigger_job_run_with_retry_from_failure(
    self,
    mock_paginate,
    mock_http_run,
    get_job_runs_data,
    should_use_rerun,
    conn_id,
    account_id,
):
    """``retry_from_failure`` hits ``rerun`` only when the last run ERRORed; otherwise ``run``.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    cause = ""
    retry_from_failure = True
    # Stub the latest-run lookup the hook performs to decide between run/rerun.
    with patch.object(DbtCloudHook, "get_job_runs") as mock_get_job_run_status:
        mock_get_job_run_status.return_value.json.return_value = {"data": get_job_runs_data}
        hook.trigger_job_run(
            job_id=JOB_ID, cause=cause, account_id=account_id, retry_from_failure=retry_from_failure
        )
    assert hook.method == "POST"
    # Fall back to the account configured on the connection when none is passed.
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook._paginate.assert_not_called()
    if should_use_rerun:
        # A failed previous run must be retried through the rerun endpoint with no body.
        hook.run.assert_called_once_with(
            endpoint=f"api/v2/accounts/{_account_id}/jobs/{JOB_ID}/rerun/",
            data=None,
            extra_options=None,
        )
    else:
        hook.run.assert_called_once_with(
            endpoint=f"api/v2/accounts/{_account_id}/jobs/{JOB_ID}/run/",
            data=json.dumps(
                {
                    "cause": cause,
                    "steps_override": None,
                    "schema_override": None,
                }
            ),
            extra_options=None,
        )
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(PROXY_CONN, ACCOUNT_ID)],
    ids=["proxy_connection"],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_trigger_job_run_with_proxy(self, mock_paginate, mock_http_run, conn_id, account_id):
    """Proxies configured on the connection are forwarded via ``extra_options``.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    cause = ""
    hook.trigger_job_run(job_id=JOB_ID, cause=cause, account_id=account_id)
    assert hook.method == "POST"
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook.run.assert_called_once_with(
        endpoint=f"api/v2/accounts/{_account_id}/jobs/{JOB_ID}/run/",
        data=json.dumps({"cause": cause, "steps_override": None, "schema_override": None}),
        extra_options={"proxies": {"https": "http://myproxy:1234"}},
    )
    hook._paginate.assert_not_called()
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_list_job_runs(self, mock_paginate, mock_http_run, conn_id, account_id):
    """Listing runs goes through ``_paginate`` (never ``run``) with empty filters.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    hook.list_job_runs(account_id=account_id)
    assert hook.method == "GET"
    # Fall back to the account configured on the connection when none is passed.
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook.run.assert_not_called()
    hook._paginate.assert_called_once_with(
        endpoint=f"api/v2/accounts/{_account_id}/runs/",
        payload={
            "include_related": None,
            "job_definition_id": None,
            "order_by": None,
        },
        proxies=None,
    )
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_list_job_runs_with_payload(self, mock_paginate, mock_http_run, conn_id, account_id):
    """Filter arguments are passed through to the ``_paginate`` payload unchanged.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    hook.list_job_runs(
        account_id=account_id, include_related=["job"], job_definition_id=JOB_ID, order_by="id"
    )
    assert hook.method == "GET"
    # Fall back to the account configured on the connection when none is passed.
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook.run.assert_not_called()
    hook._paginate.assert_called_once_with(
        endpoint=f"api/v2/accounts/{_account_id}/runs/",
        payload={
            "include_related": ["job"],
            "job_definition_id": JOB_ID,
            "order_by": "id",
        },
        proxies=None,
    )
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@patch.object(DbtCloudHook, "run")
def test_get_job_runs(self, mock_http_run, conn_id, account_id):
    """Fetching job runs issues a single GET against the account's runs collection."""
    dbt_hook = DbtCloudHook(conn_id)
    dbt_hook.get_job_runs(account_id=account_id)
    assert dbt_hook.method == "GET"
    # The connection-level account is used when no explicit one is provided.
    effective_account = account_id or DEFAULT_ACCOUNT_ID
    expected_endpoint = f"api/v2/accounts/{effective_account}/runs/"
    dbt_hook.run.assert_called_once_with(
        endpoint=expected_endpoint, data=None, extra_options=None
    )
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_get_job_run(self, mock_paginate, mock_http_run, conn_id, account_id):
    """A single run is fetched with GET and ``include_related`` defaulting to None.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    hook.get_job_run(run_id=RUN_ID, account_id=account_id)
    assert hook.method == "GET"
    # Fall back to the account configured on the connection when none is passed.
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook.run.assert_called_once_with(
        endpoint=f"api/v2/accounts/{_account_id}/runs/{RUN_ID}/",
        data={"include_related": None},
        extra_options=None,
    )
    hook._paginate.assert_not_called()
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_get_job_run_with_payload(self, mock_paginate, mock_http_run, conn_id, account_id):
    """``include_related`` values are forwarded in the GET payload.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    hook.get_job_run(run_id=RUN_ID, account_id=account_id, include_related=["triggers"])
    assert hook.method == "GET"
    # Fall back to the account configured on the connection when none is passed.
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook.run.assert_called_once_with(
        endpoint=f"api/v2/accounts/{_account_id}/runs/{RUN_ID}/",
        data={"include_related": ["triggers"]},
        extra_options=None,
    )
    hook._paginate.assert_not_called()
# (job_run_status, expected_status, expected_output) triples consumed by the
# ``test_wait_for_job_run_status`` parametrization. ``expected_output`` is
# True/False for a definite poll result, or the sentinel string "timeout" for
# cases where the run never reaches a terminal status and the wait must raise.
wait_for_job_run_status_test_args = [
(DbtCloudJobRunStatus.SUCCESS.value, DbtCloudJobRunStatus.SUCCESS.value, True),
(DbtCloudJobRunStatus.ERROR.value, DbtCloudJobRunStatus.SUCCESS.value, False),
(DbtCloudJobRunStatus.CANCELLED.value, DbtCloudJobRunStatus.SUCCESS.value, False),
(DbtCloudJobRunStatus.RUNNING.value, DbtCloudJobRunStatus.SUCCESS.value, "timeout"),
(DbtCloudJobRunStatus.QUEUED.value, DbtCloudJobRunStatus.SUCCESS.value, "timeout"),
(DbtCloudJobRunStatus.STARTING.value, DbtCloudJobRunStatus.SUCCESS.value, "timeout"),
(DbtCloudJobRunStatus.SUCCESS.value, DbtCloudJobRunStatus.TERMINAL_STATUSES.value, True),
(DbtCloudJobRunStatus.ERROR.value, DbtCloudJobRunStatus.TERMINAL_STATUSES.value, True),
(DbtCloudJobRunStatus.CANCELLED.value, DbtCloudJobRunStatus.TERMINAL_STATUSES.value, True),
]
@pytest.mark.parametrize(
argnames=("job_run_status", "expected_status", "expected_output"),
argvalues=wait_for_job_run_status_test_args,
ids=[
(
f"run_status_{argval[0]}_expected_{argval[1]}"
if isinstance(argval[1], int)
else f"run_status_{argval[0]}_expected_AnyTerminalStatus"
)
for argval in wait_for_job_run_status_test_args
],
)
def test_wait_for_job_run_status(self, job_run_status, expected_status, expected_output, time_machine):
"""Polling returns True/False once a terminal status is seen; otherwise it times out and raises."""
config = {"run_id": RUN_ID, "timeout": 3, "check_interval": 1, "expected_statuses": expected_status}
hook = DbtCloudHook(ACCOUNT_ID_CONN)
# Freeze time to avoid real clock side effects.
time_machine.move_to(timezone.datetime(1970, 1, 1), tick=False)
def fake_sleep(seconds):
# Advance the frozen clock instead of sleeping so the 3-second
# timeout is reached instantly during the polling loop.
time_machine.shift(timedelta(seconds=seconds))
with (
patch.object(DbtCloudHook, "get_job_run_status") as mock_job_run_status,
patch("airflow.providers.dbt.cloud.hooks.dbt.time.sleep", side_effect=fake_sleep),
):
mock_job_run_status.return_value = job_run_status
if expected_output != "timeout":
assert hook.wait_for_job_run_status(**config) == expected_output
else:
# The run never reaches an expected status; the wait must raise
# once the (frozen) clock passes the configured timeout.
with pytest.raises(DbtCloudJobRunException):
hook.wait_for_job_run_status(**config)
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_cancel_job_run(self, mock_paginate, mock_http_run, conn_id, account_id):
    """Cancelling a run POSTs to the run's ``cancel`` endpoint with no body.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    hook.cancel_job_run(run_id=RUN_ID, account_id=account_id)
    assert hook.method == "POST"
    # Fall back to the account configured on the connection when none is passed.
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook.run.assert_called_once_with(
        endpoint=f"api/v2/accounts/{_account_id}/runs/{RUN_ID}/cancel/", data=None, extra_options=None
    )
    hook._paginate.assert_not_called()
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_list_job_run_artifacts(self, mock_paginate, mock_http_run, conn_id, account_id):
    """Artifact listing GETs the run's artifacts endpoint with ``step`` defaulting to None.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    hook.list_job_run_artifacts(run_id=RUN_ID, account_id=account_id)
    assert hook.method == "GET"
    # Fall back to the account configured on the connection when none is passed.
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook.run.assert_called_once_with(
        endpoint=f"api/v2/accounts/{_account_id}/runs/{RUN_ID}/artifacts/",
        data={"step": None},
        extra_options=None,
    )
    hook._paginate.assert_not_called()
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_list_job_run_artifacts_with_payload(self, mock_paginate, mock_http_run, conn_id, account_id):
    """An explicit ``step`` is forwarded in the artifact-listing GET payload.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    hook.list_job_run_artifacts(run_id=RUN_ID, account_id=account_id, step=2)
    assert hook.method == "GET"
    # Fall back to the account configured on the connection when none is passed.
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook.run.assert_called_once_with(
        endpoint=f"api/v2/accounts/{_account_id}/runs/{RUN_ID}/artifacts/",
        data={"step": 2},
        extra_options=None,
    )
    hook._paginate.assert_not_called()
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_get_job_run_artifact(self, mock_paginate, mock_http_run, conn_id, account_id):
    """A single artifact is fetched by path with ``step`` defaulting to None.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    path = "manifest.json"
    hook.get_job_run_artifact(run_id=RUN_ID, path=path, account_id=account_id)
    assert hook.method == "GET"
    # Fall back to the account configured on the connection when none is passed.
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook.run.assert_called_once_with(
        endpoint=f"api/v2/accounts/{_account_id}/runs/{RUN_ID}/artifacts/{path}",
        data={"step": None},
        extra_options=None,
    )
    hook._paginate.assert_not_called()
@pytest.mark.parametrize(
    argnames=("conn_id", "account_id"),
    argvalues=[(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
    ids=["default_account", "explicit_account"],
)
@patch.object(DbtCloudHook, "run")
@patch.object(DbtCloudHook, "_paginate")
def test_get_job_run_artifact_with_payload(self, mock_paginate, mock_http_run, conn_id, account_id):
    """An explicit ``step`` is forwarded when fetching a single artifact by path.

    ``patch`` decorators are applied bottom-up, so the ``_paginate`` mock is
    injected before the ``run`` mock; the parameter order reflects that.
    """
    hook = DbtCloudHook(conn_id)
    path = "manifest.json"
    # Use the local ``path`` variable consistently (it was previously defined
    # but ignored in favor of a duplicated literal).
    hook.get_job_run_artifact(run_id=RUN_ID, path=path, account_id=account_id, step=2)
    assert hook.method == "GET"
    # Fall back to the account configured on the connection when none is passed.
    _account_id = account_id or DEFAULT_ACCOUNT_ID
    hook.run.assert_called_once_with(
        endpoint=f"api/v2/accounts/{_account_id}/runs/{RUN_ID}/artifacts/{path}",
        data={"step": 2},
        extra_options=None,
    )
    hook._paginate.assert_not_called()
@pytest.mark.parametrize(
    argnames="conn_id",
    argvalues=[ACCOUNT_ID_CONN, NO_ACCOUNT_ID_CONN],
    ids=["default_account", "explicit_account"],
)
def test_connection_success(self, requests_mock, conn_id):
    """A 200 response from the base URL reports a successful connection test."""
    requests_mock.get(BASE_URL, status_code=200)
    hook = DbtCloudHook(conn_id)
    status, msg = hook.test_connection()
    assert status is True
    assert msg == "Successfully connected to dbt Cloud."
@pytest.mark.parametrize(
    argnames="conn_id",
    argvalues=[ACCOUNT_ID_CONN, NO_ACCOUNT_ID_CONN],
    ids=["default_account", "explicit_account"],
)
def test_connection_failure(self, requests_mock, conn_id):
    """A 403 response surfaces the status code and reason in the failure message."""
    requests_mock.get(BASE_URL, status_code=403, reason="Authentication credentials were not provided")
    hook = DbtCloudHook(conn_id)
    status, msg = hook.test_connection()
    assert status is False
    assert msg == "403:Authentication credentials were not provided"
@pytest.mark.parametrize(
    argnames="timeout_seconds",
    argvalues=[60, 180, 300],
    ids=["60s", "180s", "300s"],
)
@patch.object(DbtCloudHook, "run_with_advanced_retry")
def test_timeout_passed_to_run_and_get_response(self, mock_run_with_retry, timeout_seconds):
    """Test that timeout is passed to extra_options in _run_and_get_response."""
    hook = DbtCloudHook(ACCOUNT_ID_CONN, timeout_seconds=timeout_seconds)
    mock_run_with_retry.return_value = mock_response_json({"data": {"id": JOB_ID}})
    hook.get_job(job_id=JOB_ID, account_id=DEFAULT_ACCOUNT_ID)
    # Inspect the keyword arguments of the single mocked HTTP call.
    assert mock_run_with_retry.call_args is not None
    passed_options = mock_run_with_retry.call_args.kwargs.get("extra_options")
    assert passed_options is not None
    assert passed_options["timeout"] == timeout_seconds
@pytest.mark.parametrize(
argnames="timeout_seconds",
argvalues=[60, 180, 300],
ids=["60s", "180s", "300s"],
)
@patch.object(DbtCloudHook, "run_with_advanced_retry")
def test_timeout_passed_to_paginate(self, mock_run_with_retry, timeout_seconds):
"""Test that timeout is passed to extra_options in _paginate."""
hook = DbtCloudHook(ACCOUNT_ID_CONN, timeout_seconds=timeout_seconds)
# Single-page response: pagination count equals total_count, so only one call is made.
mock_response = mock_response_json(
{
"data": [{"id": JOB_ID}],
"extra": {"filters": {"limit": 100}, "pagination": {"count": 1, "total_count": 1}},
}
)
mock_run_with_retry.return_value = mock_response
hook.list_jobs(account_id=DEFAULT_ACCOUNT_ID)
# Inspect the keyword arguments of the mocked HTTP call.
call_args = mock_run_with_retry.call_args
assert call_args is not None
extra_options = call_args.kwargs.get("extra_options")
assert extra_options is not None
assert extra_options["timeout"] == timeout_seconds
@pytest.mark.parametrize(
argnames="timeout_seconds",
argvalues=[60, 180, 300],
ids=["60s", "180s", "300s"],
)
@patch.object(DbtCloudHook, "run_with_advanced_retry")
def test_timeout_with_proxies(self, mock_run_with_retry, timeout_seconds):
"""Test that both timeout and proxies are passed to extra_options."""
# PROXY_CONN carries proxy settings; the hook must merge them with the timeout.
hook = DbtCloudHook(PROXY_CONN, timeout_seconds=timeout_seconds)
mock_run_with_retry.return_value = mock_response_json({"data": {"id": JOB_ID}})
hook.get_job(job_id=JOB_ID, account_id=DEFAULT_ACCOUNT_ID)
call_args = mock_run_with_retry.call_args
assert call_args is not None
extra_options = call_args.kwargs.get("extra_options")
assert extra_options is not None
assert extra_options["timeout"] == timeout_seconds
assert "proxies" in extra_options
assert extra_options["proxies"] == EXTRA_PROXIES["proxies"]
@pytest.mark.parametrize(
argnames=("exception", "expected"),
argvalues=[
(requests_exceptions.ConnectionError(), True),
(requests_exceptions.Timeout(), True),
(request_exception_with_status(503), True),
(request_exception_with_status(429), True),
(request_exception_with_status(404), False),
(aiohttp.ClientResponseError(MagicMock(), (), status=500, message=""), True),
(aiohttp.ClientResponseError(MagicMock(), (), status=429, message=""), True),
(aiohttp.ClientResponseError(MagicMock(), (), status=400, message=""), False),
(aiohttp.ClientConnectorError(MagicMock(), OSError()), True),
(TimeoutError(), True),
(ValueError(), False),
],
ids=[
"requests_connection_error",
"requests_timeout",
"requests_status_503",
"requests_status_429",
"requests_status_404",
"aiohttp_status_500",
"aiohttp_status_429",
"aiohttp_status_400",
"aiohttp_connector_error",
"timeout_error",
"value_error",
],
)
def test_retryable_error(self, exception, expected):
"""Connection problems, timeouts, 5xx and 429 are retryable; 4xx (other than 429) and non-HTTP errors are not."""
assert DbtCloudHook._retryable_error(exception) is expected
@pytest.mark.asyncio
@pytest.mark.parametrize(
("error_factory", "retry_qty", "retry_delay"),
[
(
lambda: aiohttp.ClientResponseError(
request_info=AsyncMock(), history=(), status=500, message=""
),
3,
0.1,
),
(
lambda: aiohttp.ClientResponseError(
request_info=AsyncMock(), history=(), status=429, message=""
),
5,
0.1,
),
(lambda: aiohttp.ClientConnectorError(AsyncMock(), OSError("boom")), 2, 0.1),
(lambda: TimeoutError(), 2, 0.1),
],
ids=["aiohttp_500", "aiohttp_429", "connector_error", "timeout"],
)
@patch("airflow.providers.dbt.cloud.hooks.dbt.aiohttp.ClientSession.get")
async def test_get_job_details_retry_with_retryable_errors(
self, get_mock, error_factory, retry_qty, retry_delay
):
"""Retryable errors are retried up to the limit; a final success returns the payload."""
hook = DbtCloudHook(ACCOUNT_ID_CONN, retry_limit=retry_qty, retry_delay=retry_delay)
def fail_cm():
# Async context manager whose __aenter__ raises the parametrized error.
cm = AsyncMock()
cm.__aenter__.side_effect = error_factory()
return cm
# Successful response returned on the final attempt.
ok_resp = AsyncMock()
ok_resp.raise_for_status = MagicMock(return_value=None)
ok_resp.json = AsyncMock(return_value={"data": "Success"})
ok_cm = AsyncMock()
ok_cm.__aenter__.return_value = ok_resp
ok_cm.__aexit__.return_value = AsyncMock()
# retry_qty - 1 failures followed by one success exhausts exactly the retry budget.
all_resp = [fail_cm() for _ in range(retry_qty - 1)]
all_resp.append(ok_cm)
get_mock.side_effect = all_resp
result = await hook.get_job_details(run_id=RUN_ID, account_id=None)
assert result == {"data": "Success"}
assert get_mock.call_count == retry_qty
@pytest.mark.asyncio
@pytest.mark.parametrize(
("error_factory", "expected_exception"),
[
(
lambda: aiohttp.ClientResponseError(
request_info=AsyncMock(), history=(), status=404, message="Not Found"
),
aiohttp.ClientResponseError,
),
(
lambda: aiohttp.ClientResponseError(
request_info=AsyncMock(), history=(), status=400, message="Bad Request"
),
aiohttp.ClientResponseError,
),
(lambda: ValueError("Invalid parameter"), ValueError),
],
ids=["aiohttp_404", "aiohttp_400", "value_error"],
)
@patch("airflow.providers.dbt.cloud.hooks.dbt.aiohttp.ClientSession.get")
async def test_get_job_details_retry_with_non_retryable_errors(
self, get_mock, error_factory, expected_exception
):
"""Non-retryable errors (4xx other than 429, non-HTTP errors) propagate after a single attempt."""
hook = DbtCloudHook(ACCOUNT_ID_CONN, retry_limit=3, retry_delay=0.1)
def fail_cm():
# Async context manager whose __aenter__ raises the parametrized error.
cm = AsyncMock()
cm.__aenter__.side_effect = error_factory()
return cm
get_mock.return_value = fail_cm()
with pytest.raises(expected_exception):
await hook.get_job_details(run_id=RUN_ID, account_id=None)
# No retry should happen despite retry_limit=3.
assert get_mock.call_count == 1
@pytest.mark.asyncio
@pytest.mark.parametrize(
argnames=("error_factory", "expected_exception"),
argvalues=[
(
lambda: aiohttp.ClientResponseError(
request_info=AsyncMock(), history=(), status=503, message="Service Unavailable"
),
aiohttp.ClientResponseError,
),
(
lambda: aiohttp.ClientResponseError(
request_info=AsyncMock(), history=(), status=500, message="Internal Server Error"
),
aiohttp.ClientResponseError,
),
(
lambda: aiohttp.ClientConnectorError(AsyncMock(), OSError("Connection refused")),
aiohttp.ClientConnectorError,
),
(lambda: TimeoutError("Request timeout"), TimeoutError),
],
ids=[
"aiohttp_503_exhausted",
"aiohttp_500_exhausted",
"connector_error_exhausted",
"timeout_exhausted",
],
)
@patch("airflow.providers.dbt.cloud.hooks.dbt.aiohttp.ClientSession.get")
async def test_get_job_details_retry_with_exhausted_retries(
self, get_mock, error_factory, expected_exception
):
"""When every attempt fails with a retryable error, the last error is re-raised after retry_limit tries."""
hook = DbtCloudHook(ACCOUNT_ID_CONN, retry_limit=2, retry_delay=0.1)
def fail_cm():
# Async context manager whose __aenter__ raises the parametrized error.
cm = AsyncMock()
cm.__aenter__.side_effect = error_factory()
return cm
# Two failures — exactly the retry budget — so the error must surface.
get_mock.side_effect = [fail_cm() for _ in range(2)]
with pytest.raises(expected_exception):
await hook.get_job_details(run_id=RUN_ID, account_id=None)
assert get_mock.call_count == 2
| TestDbtCloudHook |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/bug2437/autodoc_dummy_foo.py | {
"start": 0,
"end": 48
} | class ____:
"""Dummy class Foo."""
pass
| Foo |
python | huggingface__transformers | src/transformers/models/owlv2/modeling_owlv2.py | {
"start": 6789,
"end": 10107
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
Classification logits (including no-object) for all queries.
objectness_logits (`torch.FloatTensor` of shape `(batch_size, num_patches, 1)`):
The objectness logits of all image patches. OWL-ViT represents images as a set of image patches where the
total number of patches is (image_size / patch_size)**2.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to retrieve the
unnormalized bounding boxes.
text_embeds (`torch.FloatTensor` of shape `(batch_size, num_max_text_queries, output_dim`):
The text embeddings obtained by applying the projection layer to the pooled output of [`Owlv2TextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes image
embeddings for each patch.
class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
Class embeddings of all image patches. OWLv2 represents images as a set of image patches where the total
number of patches is (image_size / patch_size)**2.
text_model_output (tuple[`BaseModelOutputWithPooling`]):
The output of the [`Owlv2TextModel`].
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`Owlv2VisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
objectness_logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
class_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPooling = None
def to_tuple(self) -> tuple[Any]:
return tuple(
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
for k in self.keys()
)
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`Owlv2ForObjectDetection.image_guided_detection`].
"""
)
# Copied from transformers.models.owlvit.modeling_owlvit.OwlViTImageGuidedObjectDetectionOutput with OwlViT->Owlv2,OWL-ViT->OWLv2
| Owlv2ObjectDetectionOutput |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.