language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/donut/modeling_donut_swin.py | {
"start": 23111,
"end": 28789
} | class ____(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.shift_size = shift_size
self.window_size = config.window_size
self.input_resolution = input_resolution
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.attention = DonutSwinAttention(config, dim, num_heads, window_size=self.window_size)
self.drop_path = DonutSwinDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.intermediate = DonutSwinIntermediate(config, dim)
self.output = DonutSwinOutput(config, dim)
def set_shift_and_window_size(self, input_resolution):
if min(input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = torch_int(0)
self.window_size = (
torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution)
)
def get_attn_mask(self, height, width, dtype, device):
if self.shift_size > 0:
# calculate attention mask for SW-MSA
img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device)
height_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
width_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
count = 0
for height_slice in height_slices:
for width_slice in width_slices:
img_mask[:, height_slice, width_slice, :] = count
count += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
else:
attn_mask = None
return attn_mask
def maybe_pad(self, hidden_states, height, width):
pad_right = (self.window_size - width % self.window_size) % self.window_size
pad_bottom = (self.window_size - height % self.window_size) % self.window_size
pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return hidden_states, pad_values
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
always_partition: Optional[bool] = False,
) -> tuple[torch.Tensor, torch.Tensor]:
if not always_partition:
self.set_shift_and_window_size(input_dimensions)
else:
pass
height, width = input_dimensions
batch_size, _, channels = hidden_states.size()
shortcut = hidden_states
hidden_states = self.layernorm_before(hidden_states)
hidden_states = hidden_states.view(batch_size, height, width, channels)
# pad hidden_states to multiples of window size
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
# cyclic shift
if self.shift_size > 0:
shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_hidden_states = hidden_states
# partition windows
hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
attn_mask = self.get_attn_mask(
height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device
)
attention_outputs = self.attention(hidden_states_windows, attn_mask, output_attentions=output_attentions)
attention_output = attention_outputs[0]
attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
# reverse cyclic shift
if self.shift_size > 0:
attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
attention_windows = shifted_windows
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_windows = attention_windows[:, :height, :width, :].contiguous()
attention_windows = attention_windows.view(batch_size, height * width, channels)
hidden_states = shortcut + self.drop_path(attention_windows)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = hidden_states + self.output(layer_output)
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
# Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->DonutSwin
| DonutSwinLayer |
python | pytorch__pytorch | torchgen/model.py | {
"start": 84158,
"end": 85857
} | class ____:
name: str | None
type: Type
annotation: Annotation | None
@property
def alias_info(self) -> Annotation | None:
return self.annotation
@staticmethod
def parse(arg: str) -> Return:
name: str | None
if " " in arg:
type_and_annot, name = arg.rsplit(" ", 1)
else:
type_and_annot = arg
name = None
match = re.match(r"Tensor\((.+)\)(.*)", type_and_annot)
annotation: Annotation | None
if match:
# If you update this, make sure the __str__ still works too
assert match.group(2) in [
"",
"?",
"[]",
], "unrecognized alias analysis form with Tensor"
type_s = "Tensor" + match.group(2)
annotation = Annotation.parse(match.group(1))
else:
type_s = type_and_annot
annotation = None
type = Type.parse(type_s)
r = Return(
name=name,
type=type,
annotation=annotation,
)
assert str(r) == arg, f"{str(r)} != {arg}"
return r
@property
def is_write(self) -> bool:
return self.annotation is not None and self.annotation.is_write
def __str__(self) -> str:
type = f"{self.type}"
if self.annotation:
assert type in ["Tensor", "Tensor?", "Tensor[]"]
type = type.replace("Tensor", f"Tensor({self.annotation})")
if self.name is None:
return type
else:
return f"{type} {self.name}"
# Represents the self argument for functions that may be methods
@dataclass(frozen=True)
| Return |
python | doocs__leetcode | solution/1600-1699/1649.Create Sorted Array through Instructions/Solution.py | {
"start": 0,
"end": 357
} | class ____:
def __init__(self, n):
self.n = n
self.c = [0] * (n + 1)
def update(self, x: int, v: int):
while x <= self.n:
self.c[x] += v
x += x & -x
def query(self, x: int) -> int:
s = 0
while x:
s += self.c[x]
x -= x & -x
return s
| BinaryIndexedTree |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/config.py | {
"start": 1555,
"end": 1892
} | class ____(graphene.ObjectType):
map_key = graphene.NonNull(GenericScalar)
class Meta:
name = "EvaluationStackMapValueEntry"
def __init__(self, map_key):
super().__init__()
self._map_key = map_key
def resolve_map_key(self, _info):
return self._map_key
| GrapheneEvaluationStackMapValueEntry |
python | walkccc__LeetCode | solutions/2018. Check if Word Can Be Placed In Crossword/2018.py | {
"start": 0,
"end": 402
} | class ____:
def placeWordInCrossword(self, board: list[list[str]], word: str) -> bool:
for x in board, zip(*board):
for row in x:
for token in ''.join(row).split('#'):
for letters in word, word[::-1]:
if len(token) == len(letters):
if all(c in (' ', letter) for c, letter in zip(token, letters)):
return True
return False
| Solution |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_run_cancellation.py | {
"start": 9844,
"end": 19192
} | class ____(RunTerminationTestSuite):
def test_basic_termination(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "infinite_loop_job")
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {"ops": {"loop": {"config": {"file": path}}}},
}
},
)
assert not result.errors
assert result.data
# just test existence
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
assert run_id
# ensure the execution has happened
while not os.path.exists(path):
time.sleep(0.1)
result = execute_dagster_graphql(
graphql_context, RUN_CANCELLATION_QUERY, variables={"runId": run_id}
)
assert result.data["terminatePipelineExecution"]["__typename"] == "TerminateRunSuccess"
def test_force_termination(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "infinite_loop_job")
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {"ops": {"loop": {"config": {"file": path}}}},
}
},
)
assert not result.errors
assert result.data
# just test existence
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
assert run_id
# ensure the execution has happened
while not os.path.exists(path):
time.sleep(0.1)
result = execute_dagster_graphql(
graphql_context,
RUN_CANCELLATION_QUERY,
variables={
"runId": run_id,
"terminatePolicy": "MARK_AS_CANCELED_IMMEDIATELY",
},
)
assert result.data["terminatePipelineExecution"]["__typename"] == "TerminateRunSuccess"
instance = graphql_context.instance
run = instance.get_run_by_id(run_id)
assert run and run.status == DagsterRunStatus.CANCELED
def test_run_not_found(self, graphql_context: WorkspaceRequestContext):
random_run_id = str(uuid4())
result = execute_dagster_graphql(
graphql_context, RUN_CANCELLATION_QUERY, variables={"runId": random_run_id}
)
assert result.data["terminatePipelineExecution"]["__typename"] == "RunNotFoundError"
@pytest.mark.parametrize(
argnames=["new_terminate_method", "terminate_result"],
argvalues=[
[
_return_fail_terminate,
"TerminateRunFailure",
],
[_exception_terminate, "PythonError"],
],
)
def test_terminate_failed(
self, graphql_context: WorkspaceRequestContext, new_terminate_method, terminate_result
):
selector = infer_job_selector(graphql_context, "infinite_loop_job")
with safe_tempfile_path() as path:
old_terminate = graphql_context.instance.run_launcher.terminate
graphql_context.instance.run_launcher.terminate = new_terminate_method
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {"ops": {"loop": {"config": {"file": path}}}},
}
},
)
assert not result.errors
assert result.data
# just test existence
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
# ensure the execution has happened
while not os.path.exists(path):
time.sleep(0.1)
result = execute_dagster_graphql(
graphql_context, RUN_CANCELLATION_QUERY, variables={"runId": run_id}
)
assert result.data["terminatePipelineExecution"]["__typename"] == terminate_result, str(
result.data
)
result = execute_dagster_graphql(
graphql_context,
RUN_CANCELLATION_QUERY,
variables={
"runId": run_id,
"terminatePolicy": "MARK_AS_CANCELED_IMMEDIATELY",
},
)
assert result.data["terminatePipelineExecution"]["__typename"] == "TerminateRunSuccess"
assert result.data["terminatePipelineExecution"]["run"]["runId"] == run_id
graphql_context.instance.run_launcher.terminate = old_terminate
# Clean up the run process on the gRPC server
code_location = graphql_context.code_locations[0]
code_location.client.cancel_execution(CancelExecutionRequest(run_id=run_id)) # type: ignore
run = graphql_context.instance.get_run_by_id(run_id)
assert run and run.status == DagsterRunStatus.CANCELED
def test_run_finished(self, graphql_context: WorkspaceRequestContext):
instance = graphql_context.instance
recon_job = ReconstructableRepository.for_file(
file_relative_path(__file__, "repo.py"),
"test_repo",
).get_reconstructable_job("noop_job")
with execute_job(recon_job, instance=instance) as exec_result:
assert exec_result.success
assert exec_result.run_id
time.sleep(0.05) # guarantee execution finish
result = execute_dagster_graphql(
graphql_context,
RUN_CANCELLATION_QUERY,
variables={"runId": exec_result.run_id},
)
assert result.data["terminatePipelineExecution"]["__typename"] == "TerminateRunFailure"
assert (
"could not be terminated due to having status SUCCESS."
in result.data["terminatePipelineExecution"]["message"]
)
# Still fails even if you change the terminate policy to fail immediately
result = execute_dagster_graphql(
graphql_context,
RUN_CANCELLATION_QUERY,
variables={
"runId": exec_result.run_id,
"terminatePolicy": "MARK_AS_CANCELED_IMMEDIATELY",
},
)
assert result.data["terminatePipelineExecution"]["__typename"] == "TerminateRunFailure"
assert (
"could not be terminated due to having status SUCCESS."
in result.data["terminatePipelineExecution"]["message"]
)
def test_backcompat_termination(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "infinite_loop_job")
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {"ops": {"loop": {"config": {"file": path}}}},
}
},
)
assert not result.errors
assert result.data
# just test existence
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
assert run_id
# ensure the execution has happened
while not os.path.exists(path):
time.sleep(0.1)
result = execute_dagster_graphql(
graphql_context,
BACKCOMPAT_LEGACY_TERMINATE_PIPELINE,
variables={"runId": run_id},
)
assert result.data["terminatePipelineExecution"]["run"]["runId"] == run_id
def test_has_bulk_terminate_permission(self, graphql_context: WorkspaceRequestContext):
result = execute_dagster_graphql(graphql_context, BULK_TERMINATION_PERMISSIONS_QUERY)
assert not result.errors
assert result.data
assert result.data["canBulkTerminate"] is True
| TestRunVariantTermination |
python | pandas-dev__pandas | pandas/core/resample.py | {
"start": 71838,
"end": 72891
} | class ____( # type: ignore[misc]
_GroupByMixin, TimedeltaIndexResampler
):
"""
Provides a resample of a groupby implementation.
"""
@property
def _resampler_cls(self):
return TimedeltaIndexResampler
def get_resampler(obj: Series | DataFrame, **kwds) -> Resampler:
"""
Create a TimeGrouper and return our resampler.
"""
tg = TimeGrouper(obj, **kwds) # type: ignore[arg-type]
return tg._get_resampler(obj)
get_resampler.__doc__ = Resampler.__doc__
def get_resampler_for_grouping(
groupby: GroupBy,
rule,
how=None,
fill_method=None,
limit: int | None = None,
on=None,
**kwargs,
) -> Resampler:
"""
Return our appropriate resampler when grouping as well.
"""
# .resample uses 'on' similar to how .groupby uses 'key'
tg = TimeGrouper(freq=rule, key=on, **kwargs)
resampler = tg._get_resampler(groupby.obj)
return resampler._get_resampler_for_grouping(groupby=groupby, key=tg.key)
@set_module("pandas.api.typing")
| TimedeltaIndexResamplerGroupby |
python | doocs__leetcode | solution/3700-3799/3713.Longest Balanced Substring I/Solution.py | {
"start": 0,
"end": 447
} | class ____:
def longestBalanced(self, s: str) -> int:
n = len(s)
ans = 0
for i in range(n):
cnt = Counter()
mx = v = 0
for j in range(i, n):
cnt[s[j]] += 1
mx = max(mx, cnt[s[j]])
if cnt[s[j]] == 1:
v += 1
if mx * v == j - i + 1:
ans = max(ans, j - i + 1)
return ans
| Solution |
python | ray-project__ray | python/ray/data/_internal/output_buffer.py | {
"start": 1805,
"end": 8415
} | class ____:
"""Generates output blocks of a given size or number of rows given a stream of
inputs.
This class is used to turn a stream of items / blocks of arbitrary size
into a stream of blocks of target max block size or
target max rows per block. The caller should check ``has_next()`` after each
``add()`` call, and call ``next()`` to get the next block when ``has_next()``
returns True.
When all items have been added, the caller must call ``finalize()`` and
then check ``has_next()`` one last time.
Examples:
>>> from ray.data._internal.output_buffer import BlockOutputBuffer
>>> udf = ... # doctest: +SKIP
>>> generator = ... # doctest: +SKIP
>>> # Yield a stream of output blocks.
>>> output_block_size_option = OutputBlockSizeOption(target_max_block_size=500 * 1024 * 1024) # doctest: +SKIP
>>> output = BlockOutputBuffer(output_block_size_option) # doctest: +SKIP
>>> for item in generator(): # doctest: +SKIP
... output.add(item) # doctest: +SKIP
... if output.has_next(): # doctest: +SKIP
... yield output.next() # doctest: +SKIP
>>> output.finalize() # doctest: +SKIP
>>> if output.has_next() # doctest: +SKIP
... yield output.next() # doctest: +SKIP
"""
def __init__(self, output_block_size_option: Optional[OutputBlockSizeOption]):
self._output_block_size_option = output_block_size_option
self._buffer = DelegatingBlockBuilder()
self._finalized = False
self._has_yielded_blocks = False
def add(self, item: Any) -> None:
"""Add a single item to this output buffer."""
assert not self._finalized
self._buffer.add(item)
def add_batch(self, batch: DataBatch) -> None:
"""Add a data batch to this output buffer."""
assert not self._finalized
self._buffer.add_batch(batch)
def add_block(self, block: Block) -> None:
"""Add a data block to this output buffer."""
assert not self._finalized
self._buffer.add_block(block)
def finalize(self) -> None:
"""Must be called once all items have been added."""
assert not self._finalized
self._finalized = True
def _exceeded_buffer_row_limit(self) -> bool:
if self._output_block_size_option.disable_block_shaping:
return False
return (
self._max_num_rows_per_block() is not None
and self._buffer.num_rows() > self._max_num_rows_per_block()
)
def _exceeded_buffer_size_limit(self) -> bool:
if self._output_block_size_option.disable_block_shaping:
return False
return (
self._max_bytes_per_block() is not None
and self._buffer.get_estimated_memory_usage() > self._max_bytes_per_block()
)
def _max_num_rows_per_block(self) -> Optional[int]:
if self._output_block_size_option is None:
return None
if self._output_block_size_option.disable_block_shaping:
return None
return self._output_block_size_option.target_num_rows_per_block
def _max_bytes_per_block(self) -> Optional[int]:
if self._output_block_size_option is None:
return None
if self._output_block_size_option.disable_block_shaping:
return None
return self._output_block_size_option.target_max_block_size
def has_next(self) -> bool:
"""Returns true when a complete output block is produced."""
# TODO remove emitting empty blocks
if self._finalized:
return not self._has_yielded_blocks or self._buffer.num_rows() > 0
elif self._output_block_size_option is None:
# NOTE: When block sizing is disabled, buffer won't be producing
# incrementally, until the whole sequence is ingested. This
# is required to align it with semantic of producing 1 block
# from 1 block of the input
return False
elif self._output_block_size_option.disable_block_shaping:
# When block shaping is disabled, produce blocks immediately
return self._buffer.num_rows() > 0
return self._exceeded_buffer_row_limit() or self._exceeded_buffer_size_limit()
def _exceeded_block_size_slice_limit(self, block: BlockAccessor) -> bool:
# Slice a block to respect the target max block size. We only do this if we are
# more than 50% above the target block size, because this ensures that the last
# block produced will be at least half the target block size.
return (
self._max_bytes_per_block() is not None
and block.size_bytes()
>= MAX_SAFE_BLOCK_SIZE_FACTOR * self._max_bytes_per_block()
)
def _exceeded_block_row_slice_limit(self, block: BlockAccessor) -> bool:
# Slice a block to respect the target max rows per block. We only do this if we
# are more than 50% above the target rows per block, because this ensures that
# the last block produced will be at least half the target row count.
return (
self._max_num_rows_per_block() is not None
and block.num_rows()
>= MAX_SAFE_ROWS_PER_BLOCK_FACTOR * self._max_num_rows_per_block()
)
def next(self) -> Block:
"""Returns the next complete output block."""
assert self.has_next()
block = self._buffer.build()
accessor = BlockAccessor.for_block(block)
block_remainder = None
target_num_rows = None
if self._exceeded_block_row_slice_limit(accessor):
target_num_rows = self._max_num_rows_per_block()
elif self._exceeded_block_size_slice_limit(accessor):
assert accessor.num_rows() > 0, "Block may not be empty"
num_bytes_per_row = accessor.size_bytes() / accessor.num_rows()
target_num_rows = max(
1, math.ceil(self._max_bytes_per_block() / num_bytes_per_row)
)
if target_num_rows is not None and target_num_rows < accessor.num_rows():
block = accessor.slice(0, target_num_rows, copy=False)
block_remainder = accessor.slice(
target_num_rows, accessor.num_rows(), copy=False
)
self._buffer = DelegatingBlockBuilder()
if block_remainder is not None:
self._buffer.add_block(block_remainder)
self._has_yielded_blocks = True
return block
| BlockOutputBuffer |
python | keras-team__keras | keras/src/layers/convolutional/separable_conv1d.py | {
"start": 245,
"end": 6452
} | class ____(BaseSeparableConv):
"""1D separable convolution layer.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output. It then optionally applies an
activation function to produce the final output.
Args:
filters: int, the dimensionality of the output space (i.e. the number
of filters in the pointwise convolution).
kernel_size: int or tuple/list of 1 integers, specifying the size of the
depthwise convolution window.
strides: int or tuple/list of 1 integers, specifying the stride length
of the depthwise convolution. If only one int is specified, the same
stride size will be used for all dimensions. `strides > 1` is
incompatible with `dilation_rate > 1`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input. When `padding="same"` and
`strides=1`, the output has the same size as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
dilation_rate: int or tuple/list of 1 integers, specifying the dilation
rate to use for dilated convolution. If only one int is specified,
the same dilation rate will be used for all dimensions.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `input_channel * depth_multiplier`.
activation: Activation function. If `None`, no activation is applied.
use_bias: bool, if `True`, bias will be added to the output.
depthwise_initializer: An initializer for the depthwise convolution
kernel. If None, then the default initializer (`"glorot_uniform"`)
will be used.
pointwise_initializer: An initializer for the pointwise convolution
kernel. If None, then the default initializer (`"glorot_uniform"`)
will be used.
bias_initializer: An initializer for the bias vector. If None, the
default initializer ('"zeros"') will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used
for norm constraints or value constraints for layer weights). The
function must take as input the unprojected variable and must return
the projected variable (which must have the same shape).
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
Input shape:
- If `data_format="channels_last"`:
A 3D tensor with shape: `(batch_shape, steps, channels)`
- If `data_format="channels_first"`:
A 3D tensor with shape: `(batch_shape, channels, steps)`
Output shape:
- If `data_format="channels_last"`:
A 3D tensor with shape: `(batch_shape, new_steps, filters)`
- If `data_format="channels_first"`:
A 3D tensor with shape: `(batch_shape, filters, new_steps)`
Returns:
A 3D tensor representing
`activation(separable_conv1d(inputs, kernel) + bias)`.
Example:
>>> x = np.random.rand(4, 10, 12)
>>> y = keras.layers.SeparableConv1D(3, 4, 3, 2, activation='relu')(x)
>>> print(y.shape)
(4, 4, 4)
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer="glorot_uniform",
pointwise_initializer="glorot_uniform",
bias_initializer="zeros",
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
rank=1,
depth_multiplier=depth_multiplier,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
pointwise_initializer=pointwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
pointwise_regularizer=pointwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
pointwise_constraint=pointwise_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
| SeparableConv1D |
python | PrefectHQ__prefect | tests/server/models/test_task_run_states.py | {
"start": 9330,
"end": 10069
} | class ____:
async def test_task_run_states(self, session, task_run, task_run_states):
task_run_states_by_task_run_id = (
await models.task_run_states.read_task_run_states(
session=session, task_run_id=task_run.id
)
)
assert len(task_run_states_by_task_run_id) == len(task_run_states)
async def test_task_run_states_filters_by_task_run_id(self, session):
# query for states using a random task run id
task_run_states_by_task_run_id = (
await models.task_run_states.read_task_run_states(
session=session, task_run_id=uuid4()
)
)
assert len(task_run_states_by_task_run_id) == 0
| TestReadTaskRunStates |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_variables.py | {
"start": 12105,
"end": 17791
} | class ____(TestVariableEndpoint):
@pytest.mark.enable_redact
@pytest.mark.parametrize(
("key", "body", "params", "expected_response"),
[
(
TEST_VARIABLE_KEY,
{
"key": TEST_VARIABLE_KEY,
"value": "The new value",
"description": "The new description",
},
None,
{
"key": TEST_VARIABLE_KEY,
"value": "The new value",
"description": "The new description",
"is_encrypted": True,
"team_id": None,
},
),
(
TEST_VARIABLE_KEY,
{
"key": TEST_VARIABLE_KEY,
"value": "The new value",
"description": "The new description",
"team_id": None,
},
{"update_mask": ["value"]},
{
"key": TEST_VARIABLE_KEY,
"value": "The new value",
"description": TEST_VARIABLE_DESCRIPTION,
"is_encrypted": True,
"team_id": None,
},
),
(
TEST_VARIABLE_KEY4,
{
"key": TEST_VARIABLE_KEY4,
"value": "The new value",
"description": "The new description",
},
{"update_mask": ["value"]},
{
"key": TEST_VARIABLE_KEY4,
"value": "The new value",
"description": TEST_VARIABLE_DESCRIPTION4,
"is_encrypted": True,
"team_id": ANY,
},
),
(
TEST_VARIABLE_KEY2,
{
"key": TEST_VARIABLE_KEY2,
"value": "some_other_value",
"description": TEST_VARIABLE_DESCRIPTION2,
},
None,
{
"key": TEST_VARIABLE_KEY2,
"value": "***",
"description": TEST_VARIABLE_DESCRIPTION2,
"is_encrypted": True,
"team_id": None,
},
),
(
TEST_VARIABLE_KEY3,
{
"key": TEST_VARIABLE_KEY3,
"value": '{"password": "new_password"}',
"description": "new description",
},
{"update_mask": ["value", "description"]},
{
"key": TEST_VARIABLE_KEY3,
"value": '{"password": "***"}',
"description": "new description",
"is_encrypted": True,
"team_id": None,
},
),
],
)
def test_patch_should_respond_200(self, test_client, session, key, body, params, expected_response):
self.create_variables()
response = test_client.patch(f"/variables/{key}", json=body, params=params)
assert response.status_code == 200
assert response.json() == expected_response
check_last_log(session, dag_id=None, event="patch_variable", logical_date=None)
def test_patch_with_team_should_respond_200(self, test_client, session, testing_team):
self.create_variables()
body = {
"key": TEST_VARIABLE_KEY,
"value": "The new value",
"description": "The new description",
"team_id": str(testing_team.id),
}
response = test_client.patch(f"/variables/{TEST_VARIABLE_KEY}", json=body)
assert response.status_code == 200
assert response.json() == {
"key": TEST_VARIABLE_KEY,
"value": "The new value",
"description": "The new description",
"is_encrypted": True,
"team_id": str(testing_team.id),
}
check_last_log(session, dag_id=None, event="patch_variable", logical_date=None)
def test_patch_should_respond_400(self, test_client):
response = test_client.patch(
f"/variables/{TEST_VARIABLE_KEY}",
json={"key": "different_key", "value": "some_value", "description": None},
)
assert response.status_code == 400
body = response.json()
assert body["detail"] == "Invalid body, key from request body doesn't match uri parameter"
def test_patch_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.patch(
f"/variables/{TEST_VARIABLE_KEY}",
json={"key": TEST_VARIABLE_KEY, "value": "some_value", "description": None},
)
assert response.status_code == 401
def test_patch_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.patch(
f"/variables/{TEST_VARIABLE_KEY}",
json={"key": TEST_VARIABLE_KEY, "value": "some_value", "description": None},
)
assert response.status_code == 403
def test_patch_should_respond_404(self, test_client):
response = test_client.patch(
f"/variables/{TEST_VARIABLE_KEY}",
json={"key": TEST_VARIABLE_KEY, "value": "some_value", "description": None},
)
assert response.status_code == 404
body = response.json()
assert f"The Variable with key: `{TEST_VARIABLE_KEY}` was not found" == body["detail"]
| TestPatchVariable |
python | getsentry__sentry | tests/sentry/uptime/test_models.py | {
"start": 2354,
"end": 3878
} | class ____(UptimeTestCase):
def setUp(self) -> None:
super().setUp()
self.uptime_subscription = self.create_uptime_subscription(
url="https://santry.io",
)
self.data_source = self.create_data_source(
type=DATA_SOURCE_UPTIME_SUBSCRIPTION,
source_id=str(self.uptime_subscription.id),
)
def test_bulk_get_query_object(self) -> None:
result = UptimeSubscriptionDataSourceHandler.bulk_get_query_object([self.data_source])
assert result[self.data_source.id] == self.uptime_subscription
def test_bulk_get_query_object__incorrect_data_source(self) -> None:
self.ds_with_invalid_subscription_id = self.create_data_source(
type=DATA_SOURCE_UPTIME_SUBSCRIPTION,
source_id="not_uuid",
)
with mock.patch("sentry.uptime.models.logger.exception") as mock_logger:
data_sources = [self.data_source, self.ds_with_invalid_subscription_id]
result = UptimeSubscriptionDataSourceHandler.bulk_get_query_object(data_sources)
assert result[self.data_source.id] == self.uptime_subscription
mock_logger.assert_called_once_with(
"Invalid DataSource.source_id fetching UptimeSubscription",
extra={
"id": self.ds_with_invalid_subscription_id.id,
"source_id": self.ds_with_invalid_subscription_id.source_id,
},
)
| UptimeSubscriptionDataSourceHandlerTest |
python | django__django | tests/forms_tests/widget_tests/test_searchinput.py | {
"start": 69,
"end": 276
} | class ____(WidgetTest):
widget = SearchInput()
def test_render(self):
self.check_html(
self.widget, "search", "", html='<input type="search" name="search">'
)
| SearchInputTest |
python | facebookresearch__faiss | tests/test_index.py | {
"start": 23699,
"end": 25032
} | class ____(unittest.TestCase):
def test_IndexIVFPQ(self):
d = 32
nb = 1000
nt = 1500
nq = 200
(xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
coarse_quantizer = faiss.IndexFlatL2(d)
index = faiss.IndexIVFPQ(coarse_quantizer, d, 32, 8, 8)
index.cp.min_points_per_centroid = 5 # quiet warning
index.train(xt)
index.add(xb)
# invalid nprobe
index.nprobe = 0
k = 10
self.assertRaises(RuntimeError, index.search, xq, k)
# invalid k
index.nprobe = 4
k = -10
self.assertRaises(AssertionError, index.search, xq, k)
# valid params
index.nprobe = 4
k = 10
D, nns = index.search(xq, k)
self.assertEqual(D.shape[0], nq)
self.assertEqual(D.shape[1], k)
def test_IndexFlat(self):
d = 32
nb = 1000
nt = 0
nq = 200
(xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
index = faiss.IndexFlat(d, faiss.METRIC_L2)
index.add(xb)
# invalid k
k = -5
self.assertRaises(AssertionError, index.search, xq, k)
# valid k
k = 5
D, I = index.search(xq, k)
self.assertEqual(D.shape[0], nq)
self.assertEqual(D.shape[1], k)
| TestValidIndexParams |
python | langchain-ai__langchain | libs/standard-tests/langchain_tests/integration_tests/chat_models.py | {
"start": 5230,
"end": 120994
} | class ____(ChatModelTests):
'''Base class for chat model integration tests.
Test subclasses must implement the `chat_model_class` and
`chat_model_params` properties to specify what model to test and its
initialization parameters.
```python
from typing import Type
from langchain_tests.integration_tests import ChatModelIntegrationTests
from my_package.chat_models import MyChatModel
class TestMyChatModelIntegration(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[MyChatModel]:
# Return the chat model class to test here
return MyChatModel
@property
def chat_model_params(self) -> dict:
# Return initialization parameters for the model.
return {"model": "model-001", "temperature": 0}
```
!!! note
API references for individual test methods include troubleshooting tips.
Test subclasses **must** implement the following two properties:
`chat_model_class`: The chat model class to test, e.g., `ChatParrotLink`.
```python
@property
def chat_model_class(self) -> Type[ChatParrotLink]:
return ChatParrotLink
```
`chat_model_params`: Initialization parameters for the chat model.
```python
@property
def chat_model_params(self) -> dict:
return {"model": "bird-brain-001", "temperature": 0}
```
In addition, test subclasses can control what features are tested (such as tool
calling or multi-modality) by selectively overriding the following properties.
Expand to see details:
??? info "`has_tool_calling`"
Boolean property indicating whether the chat model supports tool calling.
By default, this is determined by whether the chat model's `bind_tools` method
is overridden. It typically does not need to be overridden on the test class.
```python "Example override"
@property
def has_tool_calling(self) -> bool:
return True
```
??? info "`tool_choice_value`"
Value to use for tool choice when used in tests.
!!! warning
Deprecated since version 0.3.15.
This property will be removed in version 0.3.20. If a model supports
`tool_choice`, it should accept `tool_choice="any"` and
`tool_choice=<string name of tool>`. If a model does not
support forcing tool calling, override the `has_tool_choice` property to
return `False`.
```python
@property
def tool_choice_value(self) -> str | None:
return "any"
```
??? info "`has_tool_choice`"
Boolean property indicating whether the chat model supports forcing tool
calling via a `tool_choice` parameter.
By default, this is determined by whether the parameter is included in the
signature for the corresponding `bind_tools` method.
If `True`, the minimum requirement for this feature is that
`tool_choice="any"` will force a tool call, and `tool_choice=<tool name>`
will force a call to a specific tool.
```python "Example override"
@property
def has_tool_choice(self) -> bool:
return False
```
??? info "`has_structured_output`"
Boolean property indicating whether the chat model supports structured
output.
By default, this is determined by whether the chat model's
`with_structured_output` method is overridden. If the base implementation is
intended to be used, this method should be overridden.
See: https://docs.langchain.com/oss/python/langchain/structured-output
```python
@property
def has_structured_output(self) -> bool:
return True
```
??? info "`structured_output_kwargs`"
Dict property that can be used to specify additional kwargs for
`with_structured_output`.
Useful for testing different models.
```python
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
```
??? info "`supports_json_mode`"
Boolean property indicating whether the chat model supports JSON mode in
`with_structured_output`.
See: https://docs.langchain.com/oss/python/langchain/structured-output
```python
@property
def supports_json_mode(self) -> bool:
return True
```
??? info "`supports_image_inputs`"
Boolean property indicating whether the chat model supports image inputs.
Defaults to `False`.
If set to `True`, the chat model will be tested by inputting an
`ImageContentBlock` with the shape:
```python
{
"type": "image",
"base64": "<base64 image data>",
"mime_type": "image/jpeg", # or appropriate MIME type
}
```
In addition to OpenAI-style content blocks:
```python
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
}
```
See https://docs.langchain.com/oss/python/langchain/models#multimodal
```python
@property
def supports_image_inputs(self) -> bool:
return True
```
??? info "`supports_image_urls`"
Boolean property indicating whether the chat model supports image inputs from
URLs.
Defaults to `False`.
If set to `True`, the chat model will be tested using content blocks of the
form
```python
{
"type": "image",
"url": "https://...",
}
```
See https://docs.langchain.com/oss/python/langchain/models#multimodal
```python
@property
def supports_image_urls(self) -> bool:
return True
```
??? info "`supports_pdf_inputs`"
Boolean property indicating whether the chat model supports PDF inputs.
Defaults to `False`.
If set to `True`, the chat model will be tested by inputting a
`FileContentBlock` with the shape:
```python
{
"type": "file",
"base64": "<base64 file data>",
"mime_type": "application/pdf",
}
```
See https://docs.langchain.com/oss/python/langchain/models#multimodal
```python
@property
def supports_pdf_inputs(self) -> bool:
return True
```
??? info "`supports_audio_inputs`"
Boolean property indicating whether the chat model supports audio inputs.
Defaults to `False`.
If set to `True`, the chat model will be tested by inputting an
`AudioContentBlock` with the shape:
```python
{
"type": "audio",
"base64": "<base64 audio data>",
"mime_type": "audio/wav", # or appropriate MIME type
}
```
See https://docs.langchain.com/oss/python/langchain/models#multimodal
```python
@property
def supports_audio_inputs(self) -> bool:
return True
```
!!! warning
This test downloads audio data from wikimedia.org. You may need to set the
`LANGCHAIN_TESTS_USER_AGENT` environment variable to identify these tests,
e.g.,
```bash
export LANGCHAIN_TESTS_USER_AGENT="CoolBot/0.0 (https://example.org/coolbot/; coolbot@example.org) generic-library/0.0"
```
Refer to the [Wikimedia Foundation User-Agent Policy](https://foundation.wikimedia.org/wiki/Policy:Wikimedia_Foundation_User-Agent_Policy).
??? info "`supports_video_inputs`"
Boolean property indicating whether the chat model supports image inputs.
Defaults to `False`.
No current tests are written for this feature.
??? info "`returns_usage_metadata`"
Boolean property indicating whether the chat model returns usage metadata
on invoke and streaming responses.
Defaults to `True`.
`usage_metadata` is an optional dict attribute on `AIMessage` objects that track
input and output tokens.
[See more](https://reference.langchain.com/python/langchain_core/language_models/#langchain_core.messages.ai.UsageMetadata).
```python
@property
def returns_usage_metadata(self) -> bool:
return False
```
Models supporting `usage_metadata` should also return the name of the underlying
model in the `response_metadata` of the `AIMessage`.
??? info "`supports_anthropic_inputs`"
Boolean property indicating whether the chat model supports Anthropic-style
inputs.
These inputs might feature "tool use" and "tool result" content blocks, e.g.,
```python
[
{"type": "text", "text": "Hmm let me think about that"},
{
"type": "tool_use",
"input": {"fav_color": "green"},
"id": "foo",
"name": "color_picker",
},
]
```
If set to `True`, the chat model will be tested using content blocks of this
form.
```python
@property
def supports_anthropic_inputs(self) -> bool:
return False
```
??? info "`supports_image_tool_message`"
Boolean property indicating whether the chat model supports a `ToolMessage`
that includes image content, e.g. in the OpenAI Chat Completions format:
```python
ToolMessage(
content=[
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
},
],
tool_call_id="1",
name="random_image",
)
```
...as well as the LangChain `ImageContentBlock` format:
```python
ToolMessage(
content=[
{
"type": "image",
"base64": image_data,
"mime_type": "image/jpeg",
},
],
tool_call_id="1",
name="random_image",
)
```
If set to `True`, the chat model will be tested with message sequences that
include `ToolMessage` objects of this form.
```python
@property
def supports_image_tool_message(self) -> bool:
return False
```
??? info "`supports_pdf_tool_message`"
Boolean property indicating whether the chat model supports a `ToolMessage
that include PDF content using the LangChain `FileContentBlock` format:
```python
ToolMessage(
content=[
{
"type": "file",
"base64": pdf_data,
"mime_type": "application/pdf",
},
],
tool_call_id="1",
name="random_pdf",
)
```
If set to `True`, the chat model will be tested with message sequences that
include `ToolMessage` objects of this form.
```python
@property
def supports_pdf_tool_message(self) -> bool:
return False
```
??? info "`supported_usage_metadata_details`"
Property controlling what usage metadata details are emitted in both invoke
and stream.
`usage_metadata` is an optional dict attribute on `AIMessage` objects that track
input and output tokens.
[See more](https://reference.langchain.com/python/langchain_core/language_models/#langchain_core.messages.ai.UsageMetadata).
It includes optional keys `input_token_details` and `output_token_details`
that can track usage details associated with special types of tokens, such as
cached, audio, or reasoning.
Only needs to be overridden if these details are supplied.
??? info "`enable_vcr_tests`"
Property controlling whether to enable select tests that rely on
[VCR](https://vcrpy.readthedocs.io/en/latest/) caching of HTTP calls, such
as benchmarking tests.
To enable these tests, follow these steps:
1. Override the `enable_vcr_tests` property to return `True`:
```python
@property
def enable_vcr_tests(self) -> bool:
return True
```
2. Configure VCR to exclude sensitive headers and other information from
cassettes.
!!! warning
VCR will by default record authentication headers and other sensitive
information in cassettes. Read below for how to configure what
information is recorded in cassettes.
To add configuration to VCR, add a `conftest.py` file to the `tests/`
directory and implement the `vcr_config` fixture there.
`langchain-tests` excludes the headers `'authorization'`,
`'x-api-key'`, and `'api-key'` from VCR cassettes. To pick up this
configuration, you will need to add `conftest.py` as shown below. You can
also exclude additional headers, override the default exclusions, or apply
other customizations to the VCR configuration. See example below:
```python title="tests/conftest.py"
import pytest
from langchain_tests.conftest import (
_base_vcr_config as _base_vcr_config,
)
_EXTRA_HEADERS = [
# Specify additional headers to redact
("user-agent", "PLACEHOLDER"),
]
def remove_response_headers(response: dict) -> dict:
# If desired, remove or modify headers in the response.
response["headers"] = {}
return response
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811
"""Extend the default configuration from langchain_tests."""
config = _base_vcr_config.copy()
config.setdefault("filter_headers", []).extend(_EXTRA_HEADERS)
config["before_record_response"] = remove_response_headers
return config
```
??? note "Compressing cassettes"
`langchain-tests` includes a custom VCR serializer that compresses
cassettes using gzip. To use it, register the `yaml.gz` serializer
to your VCR fixture and enable this serializer in the config. See
example below:
```python title="tests/conftest.py"
import pytest
from langchain_tests.conftest import (
CustomPersister,
CustomSerializer,
)
from langchain_tests.conftest import (
_base_vcr_config as _base_vcr_config,
)
from vcr import VCR
_EXTRA_HEADERS = [
# Specify additional headers to redact
("user-agent", "PLACEHOLDER"),
]
def remove_response_headers(response: dict) -> dict:
# If desired, remove or modify headers in the response.
response["headers"] = {}
return response
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811
"""Extend the default configuration from langchain_tests."""
config = _base_vcr_config.copy()
config.setdefault("filter_headers", []).extend(_EXTRA_HEADERS)
config["before_record_response"] = remove_response_headers
# New: enable serializer and set file extension
config["serializer"] = "yaml.gz"
config["path_transformer"] = VCR.ensure_suffix(".yaml.gz")
return config
def pytest_recording_configure(config: dict, vcr: VCR) -> None:
vcr.register_persister(CustomPersister())
vcr.register_serializer("yaml.gz", CustomSerializer())
```
You can inspect the contents of the compressed cassettes (e.g., to
ensure no sensitive information is recorded) using
```bash
gunzip -k /path/to/tests/cassettes/TestClass_test.yaml.gz
```
...or by using the serializer:
```python
from langchain_tests.conftest import (
CustomPersister,
CustomSerializer,
)
cassette_path = "/path/to/tests/cassettes/TestClass_test.yaml.gz"
requests, responses = CustomPersister().load_cassette(
path, CustomSerializer()
)
```
3. Run tests to generate VCR cassettes.
Example:
```bash
uv run python -m pytest tests/integration_tests/test_chat_models.py::TestMyModel::test_stream_time
```
This will generate a VCR cassette for the test in
`tests/integration_tests/cassettes/`.
!!! warning
You should inspect the generated cassette to ensure that it does not
contain sensitive information. If it does, you can modify the
`vcr_config` fixture to exclude headers or modify the response
before it is recorded.
You can then commit the cassette to your repository. Subsequent test runs
will use the cassette instead of making HTTP calls.
''' # noqa: E501,D214
@property
def standard_chat_model_params(self) -> dict:
"""Standard parameters for chat model."""
return {}
def test_invoke(self, model: BaseChatModel) -> None:
"""Test to verify that `model.invoke(simple_message)` works.
This should pass for all integrations.
??? question "Troubleshooting"
If this test fails, you should make sure your `_generate` method
does not raise any exceptions, and that it returns a valid
`langchain_core.outputs.chat_result.ChatResult` like so:
```python
return ChatResult(
generations=[ChatGeneration(message=AIMessage(content="Output text"))]
)
```
"""
result = model.invoke("Hello")
assert result is not None
assert isinstance(result, AIMessage)
assert isinstance(result.text, str)
assert len(result.content) > 0
async def test_ainvoke(self, model: BaseChatModel) -> None:
"""Test to verify that `await model.ainvoke(simple_message)` works.
This should pass for all integrations. Passing this test does not indicate
a "natively async" implementation, but rather that the model can be used
in an async context.
??? question "Troubleshooting"
First, debug
`langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`.
because `ainvoke` has a default implementation that calls `invoke` in an
async context.
If that test passes but not this one, you should make sure your `_agenerate`
method does not raise any exceptions, and that it returns a valid
`langchain_core.outputs.chat_result.ChatResult` like so:
```python
return ChatResult(
generations=[ChatGeneration(message=AIMessage(content="Output text"))]
)
```
"""
result = await model.ainvoke("Hello")
assert result is not None
assert isinstance(result, AIMessage)
assert isinstance(result.text, str)
assert len(result.content) > 0
@pytest.mark.parametrize("model", [{}, {"output_version": "v1"}], indirect=True)
def test_stream(self, model: BaseChatModel) -> None:
"""Test to verify that `model.stream(simple_message)` works.
This should pass for all integrations. Passing this test does not indicate
a "streaming" implementation, but rather that the model can be used in a
streaming context.
??? question "Troubleshooting"
First, debug
`langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`.
because `stream` has a default implementation that calls `invoke` and
yields the result as a single chunk.
If that test passes but not this one, you should make sure your `_stream`
method does not raise any exceptions, and that it yields valid
`langchain_core.outputs.chat_generation.ChatGenerationChunk`
objects like so:
```python
yield ChatGenerationChunk(message=AIMessageChunk(content="chunk text"))
```
"""
num_chunks = 0
full: AIMessageChunk | None = None
for chunk in model.stream("Hello"):
assert chunk is not None
assert isinstance(chunk, AIMessageChunk)
assert isinstance(chunk.content, str | list)
num_chunks += 1
full = chunk if full is None else full + chunk
assert num_chunks > 0
assert isinstance(full, AIMessageChunk)
assert full.content
assert len(full.content_blocks) == 1
assert full.content_blocks[0]["type"] == "text"
@pytest.mark.parametrize("model", [{}, {"output_version": "v1"}], indirect=True)
async def test_astream(self, model: BaseChatModel) -> None:
"""Test to verify that `await model.astream(simple_message)` works.
This should pass for all integrations. Passing this test does not indicate
a "natively async" or "streaming" implementation, but rather that the model can
be used in an async streaming context.
??? question "Troubleshooting"
First, debug
`langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_stream`.
and
`langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_ainvoke`.
because `astream` has a default implementation that calls `_stream` in
an async context if it is implemented, or `ainvoke` and yields the result
as a single chunk if not.
If those tests pass but not this one, you should make sure your `_astream`
method does not raise any exceptions, and that it yields valid
`langchain_core.outputs.chat_generation.ChatGenerationChunk`
objects like so:
```python
yield ChatGenerationChunk(message=AIMessageChunk(content="chunk text"))
```
"""
num_chunks = 0
full: AIMessageChunk | None = None
async for chunk in model.astream("Hello"):
assert chunk is not None
assert isinstance(chunk, AIMessageChunk)
assert isinstance(chunk.content, str | list)
num_chunks += 1
full = chunk if full is None else full + chunk
assert num_chunks > 0
assert isinstance(full, AIMessageChunk)
assert full.content
assert len(full.content_blocks) == 1
assert full.content_blocks[0]["type"] == "text"
def test_batch(self, model: BaseChatModel) -> None:
"""Test to verify that `model.batch([messages])` works.
This should pass for all integrations. Tests the model's ability to process
multiple prompts in a single batch.
??? question "Troubleshooting"
First, debug
`langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`
because `batch` has a default implementation that calls `invoke` for
each message in the batch.
If that test passes but not this one, you should make sure your `batch`
method does not raise any exceptions, and that it returns a list of valid
`AIMessage` objects.
"""
batch_results = model.batch(["Hello", "Hey"])
assert batch_results is not None
assert isinstance(batch_results, list)
assert len(batch_results) == 2
for result in batch_results:
assert result is not None
assert isinstance(result, AIMessage)
assert isinstance(result.text, str)
assert len(result.content) > 0
async def test_abatch(self, model: BaseChatModel) -> None:
"""Test to verify that `await model.abatch([messages])` works.
This should pass for all integrations. Tests the model's ability to process
multiple prompts in a single batch asynchronously.
??? question "Troubleshooting"
First, debug
`langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_batch`
and
`langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_ainvoke`
because `abatch` has a default implementation that calls `ainvoke` for
each message in the batch.
If those tests pass but not this one, you should make sure your `abatch`
method does not raise any exceptions, and that it returns a list of valid
`AIMessage` objects.
"""
batch_results = await model.abatch(["Hello", "Hey"])
assert batch_results is not None
assert isinstance(batch_results, list)
assert len(batch_results) == 2
for result in batch_results:
assert result is not None
assert isinstance(result, AIMessage)
assert isinstance(result.text, str)
assert len(result.content) > 0
def test_conversation(self, model: BaseChatModel) -> None:
"""Test to verify that the model can handle multi-turn conversations.
This should pass for all integrations. Tests the model's ability to process
a sequence of alternating `HumanMessage` and `AIMessage` objects as context for
generating the next response.
??? question "Troubleshooting"
First, debug
`langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`
because this test also uses `model.invoke`.
If that test passes but not this one, you should verify that:
1. Your model correctly processes the message history
2. The model maintains appropriate context from previous messages
3. The response is a valid `langchain_core.messages.AIMessage`
"""
messages = [
HumanMessage("hello"),
AIMessage("hello"),
HumanMessage("how are you"),
]
result = model.invoke(messages)
assert result is not None
assert isinstance(result, AIMessage)
assert isinstance(result.text, str)
assert len(result.content) > 0
def test_double_messages_conversation(self, model: BaseChatModel) -> None:
"""Test to verify that the model can handle double-message conversations.
This should pass for all integrations. Tests the model's ability to process
a sequence of double-system, double-human, and double-ai messages as context
for generating the next response.
??? question "Troubleshooting"
First, debug
`langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_invoke`
because this test also uses `model.invoke`.
Second, debug
`langchain_tests.integration_tests.chat_models.ChatModelIntegrationTests.test_conversation`
because this test is the "basic case" without double messages.
If that test passes those but not this one, you should verify that:
1. Your model API can handle double messages, or the integration should
merge messages before sending them to the API.
2. The response is a valid `langchain_core.messages.AIMessage`
"""
messages = [
SystemMessage("hello"),
SystemMessage("hello"),
HumanMessage("hello"),
HumanMessage("hello"),
AIMessage("hello"),
AIMessage("hello"),
HumanMessage("how are you"),
]
result = model.invoke(messages)
assert result is not None
assert isinstance(result, AIMessage)
assert isinstance(result.text, str)
assert len(result.content) > 0
def test_usage_metadata(self, model: BaseChatModel) -> None:
"""Test to verify that the model returns correct usage metadata.
This test is optional and should be skipped if the model does not return
usage metadata (see configuration below).
!!! warning "Behavior changed in `langchain-tests` 0.3.17"
Additionally check for the presence of `model_name` in the response
metadata, which is needed for usage tracking in callback handlers.
??? note "Configuration"
By default, this test is run.
To disable this feature, set `returns_usage_metadata` to `False` in your
test class:
```python
class TestMyChatModelIntegration(ChatModelIntegrationTests):
@property
def returns_usage_metadata(self) -> bool:
return False
```
This test can also check the format of specific kinds of usage metadata
based on the `supported_usage_metadata_details` property.
This property should be configured as follows with the types of tokens that
the model supports tracking:
```python
class TestMyChatModelIntegration(ChatModelIntegrationTests):
@property
def supported_usage_metadata_details(self) -> dict:
return {
"invoke": [
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
],
"stream": [
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
],
}
```
??? question "Troubleshooting"
If this test fails, first verify that your model returns
`langchain_core.messages.ai.UsageMetadata` dicts
attached to the returned `AIMessage` object in `_generate`:
```python
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content="Output text",
usage_metadata={
"input_tokens": 350,
"output_tokens": 240,
"total_tokens": 590,
"input_token_details": {
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
},
"output_token_details": {
"audio": 10,
"reasoning": 200,
},
},
)
)
]
)
```
Check also that the response includes a `model_name` key in its
`usage_metadata`.
"""
if not self.returns_usage_metadata:
pytest.skip("Not implemented.")
result = model.invoke("Hello")
assert result is not None
assert isinstance(result, AIMessage)
assert result.usage_metadata is not None
assert isinstance(result.usage_metadata["input_tokens"], int)
assert isinstance(result.usage_metadata["output_tokens"], int)
assert isinstance(result.usage_metadata["total_tokens"], int)
# Check model_name is in response_metadata
# Needed for langchain_core.callbacks.usage
model_name = result.response_metadata.get("model_name")
assert isinstance(model_name, str)
assert model_name, "model_name is empty"
# `input_tokens` is the total, possibly including other unclassified or
# system-level tokens.
if "audio_input" in self.supported_usage_metadata_details["invoke"]:
# Checks if the specific chat model integration being tested has declared
# that it supports reporting token counts specifically for `audio_input`
msg = self.invoke_with_audio_input() # To be implemented in test subclass
assert (usage_metadata := msg.usage_metadata) is not None
assert (
input_token_details := usage_metadata.get("input_token_details")
) is not None
assert isinstance(input_token_details.get("audio"), int)
# Asserts that total input tokens are at least the sum of the token counts
assert usage_metadata.get("input_tokens", 0) >= sum(
v for v in input_token_details.values() if isinstance(v, int)
)
if "audio_output" in self.supported_usage_metadata_details["invoke"]:
msg = self.invoke_with_audio_output()
assert (usage_metadata := msg.usage_metadata) is not None
assert (
output_token_details := usage_metadata.get("output_token_details")
) is not None
assert isinstance(output_token_details.get("audio"), int)
# Asserts that total output tokens are at least the sum of the token counts
assert usage_metadata.get("output_tokens", 0) >= sum(
v for v in output_token_details.values() if isinstance(v, int)
)
if "reasoning_output" in self.supported_usage_metadata_details["invoke"]:
msg = self.invoke_with_reasoning_output()
assert (usage_metadata := msg.usage_metadata) is not None
assert (
output_token_details := usage_metadata.get("output_token_details")
) is not None
assert isinstance(output_token_details.get("reasoning"), int)
# Asserts that total output tokens are at least the sum of the token counts
assert usage_metadata.get("output_tokens", 0) >= sum(
v for v in output_token_details.values() if isinstance(v, int)
)
if "cache_read_input" in self.supported_usage_metadata_details["invoke"]:
msg = self.invoke_with_cache_read_input()
usage_metadata = msg.usage_metadata
assert usage_metadata is not None
input_token_details = usage_metadata.get("input_token_details")
assert input_token_details is not None
cache_read_tokens = input_token_details.get("cache_read")
assert isinstance(cache_read_tokens, int)
assert cache_read_tokens >= 0
# Asserts that total input tokens are at least the sum of the token counts
total_detailed_tokens = sum(
v for v in input_token_details.values() if isinstance(v, int) and v >= 0
)
input_tokens = usage_metadata.get("input_tokens", 0)
assert isinstance(input_tokens, int)
assert input_tokens >= total_detailed_tokens
if "cache_creation_input" in self.supported_usage_metadata_details["invoke"]:
msg = self.invoke_with_cache_creation_input()
usage_metadata = msg.usage_metadata
assert usage_metadata is not None
input_token_details = usage_metadata.get("input_token_details")
assert input_token_details is not None
cache_creation_tokens = input_token_details.get("cache_creation")
assert isinstance(cache_creation_tokens, int)
assert cache_creation_tokens >= 0
# Asserts that total input tokens are at least the sum of the token counts
total_detailed_tokens = sum(
v for v in input_token_details.values() if isinstance(v, int) and v >= 0
)
input_tokens = usage_metadata.get("input_tokens", 0)
assert isinstance(input_tokens, int)
assert input_tokens >= total_detailed_tokens
    def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
        """Test usage metadata in streaming mode.

        Test to verify that the model returns correct usage metadata in streaming mode.

        !!! warning "Behavior changed in `langchain-tests` 0.3.17"

            Additionally check for the presence of `model_name` in the response
            metadata, which is needed for usage tracking in callback handlers.

        ??? note "Configuration"

            By default, this test is run.

            To disable this feature, set `returns_usage_metadata` to `False` in your
            test class:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def returns_usage_metadata(self) -> bool:
                    return False
            ```

            This test can also check the format of specific kinds of usage metadata
            based on the `supported_usage_metadata_details` property.

            This property should be configured as follows with the types of tokens that
            the model supports tracking:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def supported_usage_metadata_details(self) -> dict:
                    return {
                        "invoke": [
                            "audio_input",
                            "audio_output",
                            "reasoning_output",
                            "cache_read_input",
                            "cache_creation_input",
                        ],
                        "stream": [
                            "audio_input",
                            "audio_output",
                            "reasoning_output",
                            "cache_read_input",
                            "cache_creation_input",
                        ],
                    }
            ```

        ??? question "Troubleshooting"

            If this test fails, first verify that your model yields
            `langchain_core.messages.ai.UsageMetadata` dicts
            attached to the returned `AIMessage` object in `_stream`
            that sum up to the total usage metadata.

            Note that `input_tokens` should only be included on one of the chunks
            (typically the first or the last chunk), and the rest should have `0` or
            `None` to avoid counting input tokens multiple times.

            `output_tokens` typically count the number of tokens in each chunk, not
            the sum. This test will pass as long as the sum of `output_tokens` across
            all chunks is not `0`.

            ```python
            yield ChatResult(
                generations=[
                    ChatGeneration(
                        message=AIMessage(
                            content="Output text",
                            usage_metadata={
                                "input_tokens": (
                                    num_input_tokens if is_first_chunk else 0
                                ),
                                "output_tokens": 11,
                                "total_tokens": (
                                    11 + num_input_tokens if is_first_chunk else 11
                                ),
                                "input_token_details": {
                                    "audio": 10,
                                    "cache_creation": 200,
                                    "cache_read": 100,
                                },
                                "output_token_details": {
                                    "audio": 10,
                                    "reasoning": 200,
                                },
                            },
                        )
                    )
                ]
            )
            ```

            Check also that the aggregated response includes a `model_name` key
            in its `usage_metadata`.
        """
        if not self.returns_usage_metadata:
            pytest.skip("Not implemented.")
        # Aggregate the whole stream into a single chunk via `+` so the combined
        # usage metadata can be validated at the end.
        full: AIMessageChunk | None = None
        for chunk in model.stream("Write me 2 haikus. Only include the haikus."):
            assert isinstance(chunk, AIMessageChunk)
            # only one chunk is allowed to set usage_metadata.input_tokens
            # if multiple do, it's likely a bug that will result in overcounting
            # input tokens (since the total number of input tokens applies to the full
            # generation, not individual chunks)
            if full and full.usage_metadata and full.usage_metadata["input_tokens"]:
                assert (
                    not chunk.usage_metadata or not chunk.usage_metadata["input_tokens"]
                ), (
                    "Only one chunk should set input_tokens,"
                    " the rest should be 0 or None"
                )
            full = chunk if full is None else full + chunk
        assert isinstance(full, AIMessageChunk)
        assert full.usage_metadata is not None
        assert isinstance(full.usage_metadata["input_tokens"], int)
        assert isinstance(full.usage_metadata["output_tokens"], int)
        assert isinstance(full.usage_metadata["total_tokens"], int)
        # Check model_name is in response_metadata
        # Needed for langchain_core.callbacks.usage
        model_name = full.response_metadata.get("model_name")
        assert isinstance(model_name, str)
        assert model_name, "model_name is empty"
        # Provider-specific token-detail buckets, checked only when the test class
        # declares them in `supported_usage_metadata_details["stream"]`.
        if "audio_input" in self.supported_usage_metadata_details["stream"]:
            msg = self.invoke_with_audio_input(stream=True)
            assert msg.usage_metadata is not None
            assert isinstance(
                msg.usage_metadata.get("input_token_details", {}).get("audio"), int
            )
        if "audio_output" in self.supported_usage_metadata_details["stream"]:
            msg = self.invoke_with_audio_output(stream=True)
            assert msg.usage_metadata is not None
            assert isinstance(
                msg.usage_metadata.get("output_token_details", {}).get("audio"), int
            )
        if "reasoning_output" in self.supported_usage_metadata_details["stream"]:
            msg = self.invoke_with_reasoning_output(stream=True)
            assert msg.usage_metadata is not None
            assert isinstance(
                msg.usage_metadata.get("output_token_details", {}).get("reasoning"), int
            )
        if "cache_read_input" in self.supported_usage_metadata_details["stream"]:
            msg = self.invoke_with_cache_read_input(stream=True)
            assert msg.usage_metadata is not None
            assert isinstance(
                msg.usage_metadata.get("input_token_details", {}).get("cache_read"), int
            )
        if "cache_creation_input" in self.supported_usage_metadata_details["stream"]:
            msg = self.invoke_with_cache_creation_input(stream=True)
            assert msg.usage_metadata is not None
            assert isinstance(
                msg.usage_metadata.get("input_token_details", {}).get("cache_creation"),
                int,
            )
def test_stop_sequence(self, model: BaseChatModel) -> None:
"""Test that model does not fail when invoked with the `stop` parameter.
The `stop` parameter is a standard parameter for stopping generation at a
certain token.
[More on standard parameters](https://python.langchain.com/docs/concepts/chat_models/#standard-parameters).
This should pass for all integrations.
??? question "Troubleshooting"
If this test fails, check that the function signature for `_generate`
(as well as `_stream` and async variants) accepts the `stop` parameter:
```python
def _generate(
self,
messages: List[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
```
"""
result = model.invoke("hi", stop=["you"])
assert isinstance(result, AIMessage)
custom_model = self.chat_model_class(
**{
**self.chat_model_params,
"stop": ["you"],
}
)
result = custom_model.invoke("hi")
assert isinstance(result, AIMessage)
    @pytest.mark.parametrize("model", [{}, {"output_version": "v1"}], indirect=True)
    def test_tool_calling(self, model: BaseChatModel) -> None:
        """Test that the model generates tool calls.

        This test is skipped if the `has_tool_calling` property on the test class is
        set to `False`.

        This test is optional and should be skipped if the model does not support
        tool calling (see configuration below).

        ??? note "Configuration"

            To disable tool calling tests, set `has_tool_calling` to `False` in your
            test class:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def has_tool_calling(self) -> bool:
                    return False
            ```

        ??? question "Troubleshooting"

            If this test fails, check that `bind_tools` is implemented to correctly
            translate LangChain tool objects into the appropriate schema for your
            chat model.

            This test may fail if the chat model does not support a `tool_choice`
            parameter. This parameter can be used to force a tool call. If
            `tool_choice` is not supported and the model consistently fails this
            test, you can `xfail` the test:

            ```python
            @pytest.mark.xfail(reason=("Does not support tool_choice."))
            def test_tool_calling(self, model: BaseChatModel) -> None:
                super().test_tool_calling(model)
            ```

            Otherwise, in the case that only one tool is bound, ensure that
            `tool_choice` supports the string `'any'` to force calling that tool.
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
        # Force a tool call when the provider supports `tool_choice`; otherwise
        # rely on the prompt alone to elicit one.
        tool_choice_value = None if not self.has_tool_choice else "any"
        model_with_tools = model.bind_tools(
            [magic_function], tool_choice=tool_choice_value
        )
        # Test invoke
        query = "What is the value of magic_function(3)? Use the tool."
        result = model_with_tools.invoke(query)
        _validate_tool_call_message(result)
        # Test stream: accumulate chunks into one message, then validate the
        # combined tool call.
        full: BaseMessage | None = None
        for chunk in model_with_tools.stream(query):
            full = chunk if full is None else full + chunk  # type: ignore[assignment]
        assert isinstance(full, AIMessage)
        _validate_tool_call_message(full)
    async def test_tool_calling_async(self, model: BaseChatModel) -> None:
        """Test that the model generates tool calls.

        Async counterpart of `test_tool_calling` (uses `ainvoke`/`astream`).

        This test is skipped if the `has_tool_calling` property on the test class is
        set to `False`.

        This test is optional and should be skipped if the model does not support
        tool calling (see configuration below).

        ??? note "Configuration"

            To disable tool calling tests, set `has_tool_calling` to `False` in your
            test class:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def has_tool_calling(self) -> bool:
                    return False
            ```

        ??? question "Troubleshooting"

            If this test fails, check that `bind_tools` is implemented to correctly
            translate LangChain tool objects into the appropriate schema for your
            chat model.

            This test may fail if the chat model does not support a `tool_choice`
            parameter. This parameter can be used to force a tool call. If
            `tool_choice` is not supported and the model consistently fails this
            test, you can `xfail` the test:

            ```python
            @pytest.mark.xfail(reason=("Does not support tool_choice."))
            async def test_tool_calling_async(self, model: BaseChatModel) -> None:
                await super().test_tool_calling_async(model)
            ```

            Otherwise, in the case that only one tool is bound, ensure that
            `tool_choice` supports the string `'any'` to force calling that tool.
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
        # Force a tool call when the provider supports `tool_choice`; otherwise
        # rely on the prompt alone to elicit one.
        tool_choice_value = None if not self.has_tool_choice else "any"
        model_with_tools = model.bind_tools(
            [magic_function], tool_choice=tool_choice_value
        )
        # Test ainvoke
        query = "What is the value of magic_function(3)? Use the tool."
        result = await model_with_tools.ainvoke(query)
        _validate_tool_call_message(result)
        # Test astream: accumulate chunks into one message, then validate the
        # combined tool call.
        full: BaseMessage | None = None
        async for chunk in model_with_tools.astream(query):
            full = chunk if full is None else full + chunk  # type: ignore[assignment]
        assert isinstance(full, AIMessage)
        _validate_tool_call_message(full)
    def test_bind_runnables_as_tools(self, model: BaseChatModel) -> None:
        """Test bind runnables as tools.

        Test that the model generates tool calls for tools that are derived from
        LangChain runnables. This test is skipped if the `has_tool_calling` property
        on the test class is set to `False`.

        This test is optional and should be skipped if the model does not support
        tool calling (see configuration below).

        ??? note "Configuration"

            To disable tool calling tests, set `has_tool_calling` to `False` in your
            test class:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def has_tool_calling(self) -> bool:
                    return False
            ```

        ??? question "Troubleshooting"

            If this test fails, check that `bind_tools` is implemented to correctly
            translate LangChain tool objects into the appropriate schema for your
            chat model.

            This test may fail if the chat model does not support a `tool_choice`
            parameter. This parameter can be used to force a tool call. If
            `tool_choice` is not supported and the model consistently fails this
            test, you can `xfail` the test:

            ```python
            @pytest.mark.xfail(reason=("Does not support tool_choice."))
            def test_bind_runnables_as_tools(self, model: BaseChatModel) -> None:
                super().test_bind_runnables_as_tools(model)
            ```

            Otherwise, ensure that the `tool_choice_value` property is correctly
            specified on the test class.
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
        prompt = ChatPromptTemplate.from_messages(
            [("human", "Hello. Please respond in the style of {answer_style}.")]
        )
        # Stub chat model with a canned reply; only the generation of a tool call
        # is validated below — the chain itself is never executed.
        llm = GenericFakeChatModel(messages=iter(["hello matey"]))
        chain = prompt | llm | StrOutputParser()
        # Expose the runnable chain as a tool; its input variable (`answer_style`)
        # becomes the tool's argument schema.
        tool_ = chain.as_tool(
            name="greeting_generator",
            description="Generate a greeting in a particular style of speaking.",
        )
        if self.has_tool_choice:
            tool_choice: str | None = "any"
        else:
            tool_choice = None
        model_with_tools = model.bind_tools([tool_], tool_choice=tool_choice)
        query = "Using the tool, generate a Pirate greeting."
        result = model_with_tools.invoke(query)
        assert isinstance(result, AIMessage)
        assert result.tool_calls
        tool_call = result.tool_calls[0]
        # The model must supply the runnable's input variable as a tool argument.
        assert tool_call["args"].get("answer_style")
        assert tool_call.get("type") == "tool_call"
    def test_tool_message_histories_string_content(
        self, model: BaseChatModel, my_adder_tool: BaseTool
    ) -> None:
        """Test that message histories are compatible with string tool contents.

        For instance with OpenAI format contents.

        If a model passes this test, it should be compatible
        with messages generated from providers following OpenAI format.

        This test should be skipped if the model does not support tool calling
        (see configuration below).

        ??? note "Configuration"

            To disable tool calling tests, set `has_tool_calling` to `False` in your
            test class:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def has_tool_calling(self) -> bool:
                    return False
            ```

        ??? question "Troubleshooting"

            If this test fails, check that:

            1. The model can correctly handle message histories that include
               `AIMessage` objects with `""` content.
            2. The `tool_calls` attribute on `AIMessage` objects is correctly
               handled and passed to the model in an appropriate format.
            3. The model can correctly handle `ToolMessage` objects with string
               content and arbitrary string values for `tool_call_id`.

            You can `xfail` the test if tool calling is implemented but this format
            is not supported.

            ```python
            @pytest.mark.xfail(reason=("Not implemented."))
            def test_tool_message_histories_string_content(self, *args: Any) -> None:
                super().test_tool_message_histories_string_content(*args)
            ```
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
        model_with_tools = model.bind_tools([my_adder_tool])
        function_name = "my_adder_tool"
        function_args = {"a": 1, "b": 2}
        # Replay a prior exchange (question -> tool call -> tool result); the model
        # must accept this history and produce a new AIMessage.
        messages_string_content = [
            HumanMessage("What is 1 + 2"),
            # string content (e.g. OpenAI)
            AIMessage(
                "",
                tool_calls=[
                    {
                        "name": function_name,
                        "args": function_args,
                        "id": "abc123",
                        "type": "tool_call",
                    },
                ],
            ),
            ToolMessage(
                json.dumps({"result": 3}),
                name=function_name,
                tool_call_id="abc123",
            ),
        ]
        result_string_content = model_with_tools.invoke(messages_string_content)
        assert isinstance(result_string_content, AIMessage)
    def test_tool_message_histories_list_content(
        self,
        model: BaseChatModel,
        my_adder_tool: BaseTool,
    ) -> None:
        """Test that message histories are compatible with list tool contents.

        For instance with Anthropic format contents.

        These message histories will include `AIMessage` objects with "tool use" and
        content blocks, e.g.,

        ```python
        [
            {"type": "text", "text": "Hmm let me think about that"},
            {
                "type": "tool_use",
                "input": {"fav_color": "green"},
                "id": "foo",
                "name": "color_picker",
            },
        ]
        ```

        This test should be skipped if the model does not support tool calling
        (see configuration below).

        ??? note "Configuration"

            To disable tool calling tests, set `has_tool_calling` to `False` in your
            test class:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def has_tool_calling(self) -> bool:
                    return False
            ```

        ??? question "Troubleshooting"

            If this test fails, check that:

            1. The model can correctly handle message histories that include
               `AIMessage` objects with list content.
            2. The `tool_calls` attribute on `AIMessage` objects is correctly
               handled and passed to the model in an appropriate format.
            3. The model can correctly handle ToolMessage objects with string content
               and arbitrary string values for `tool_call_id`.

            You can `xfail` the test if tool calling is implemented but this format
            is not supported.

            ```python
            @pytest.mark.xfail(reason=("Not implemented."))
            def test_tool_message_histories_list_content(self, *args: Any) -> None:
                super().test_tool_message_histories_list_content(*args)
            ```
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
        model_with_tools = model.bind_tools([my_adder_tool])
        function_name = "my_adder_tool"
        function_args = {"a": 1, "b": 2}
        # Replay a prior exchange where the AIMessage carries both a list of
        # content blocks (text + tool_use) and the parsed `tool_calls` attribute.
        messages_list_content = [
            HumanMessage("What is 1 + 2"),
            # List content (e.g., Anthropic)
            AIMessage(
                [
                    {"type": "text", "text": "some text"},
                    {
                        "type": "tool_use",
                        "id": "abc123",
                        "name": function_name,
                        "input": function_args,
                    },
                ],
                tool_calls=[
                    {
                        "name": function_name,
                        "args": function_args,
                        "id": "abc123",
                        "type": "tool_call",
                    },
                ],
            ),
            ToolMessage(
                json.dumps({"result": 3}),
                name=function_name,
                tool_call_id="abc123",
            ),
        ]
        result_list_content = model_with_tools.invoke(messages_list_content)
        assert isinstance(result_list_content, AIMessage)
def test_tool_choice(self, model: BaseChatModel) -> None:
"""Test `tool_choice` parameter.
Test that the model can force tool calling via the `tool_choice`
parameter. This test is skipped if the `has_tool_choice` property on the
test class is set to `False`.
This test is optional and should be skipped if the model does not support
tool calling (see configuration below).
??? note "Configuration"
To disable tool calling tests, set `has_tool_choice` to `False` in your
test class:
```python
class TestMyChatModelIntegration(ChatModelIntegrationTests):
@property
def has_tool_choice(self) -> bool:
return False
```
??? question "Troubleshooting"
If this test fails, check whether the `test_tool_calling` test is passing.
If it is not, refer to the troubleshooting steps in that test first.
If `test_tool_calling` is passing, check that the underlying model
supports forced tool calling. If it does, `bind_tools` should accept a
`tool_choice` parameter that can be used to force a tool call.
It should accept (1) the string `'any'` to force calling the bound tool,
and (2) the string name of the tool to force calling that tool.
"""
if not self.has_tool_choice or not self.has_tool_calling:
pytest.skip("Test requires tool choice.")
@tool
def get_weather(location: str) -> str: # noqa: ARG001
"""Get weather at a location."""
return "It's sunny."
for tool_choice in ["any", "magic_function"]:
model_with_tools = model.bind_tools(
[magic_function, get_weather], tool_choice=tool_choice
)
result = model_with_tools.invoke("Hello!")
assert isinstance(result, AIMessage)
assert result.tool_calls
if tool_choice == "magic_function":
assert result.tool_calls[0]["name"] == "magic_function"
    def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
        """Test that the model generates tool calls for tools with no arguments.

        This test is skipped if the `has_tool_calling` property on the test class
        is set to `False`.

        This test is optional and should be skipped if the model does not support
        tool calling (see configuration below).

        ??? note "Configuration"

            To disable tool calling tests, set `has_tool_calling` to `False` in your
            test class:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def has_tool_calling(self) -> bool:
                    return False
            ```

        ??? question "Troubleshooting"

            If this test fails, check that `bind_tools` is implemented to correctly
            translate LangChain tool objects into the appropriate schema for your
            chat model. It should correctly handle the case where a tool has no
            arguments.

            This test may fail if the chat model does not support a `tool_choice`
            parameter. This parameter can be used to force a tool call. It may also
            fail if a provider does not support this form of tool. In these cases,
            you can `xfail` the test:

            ```python
            @pytest.mark.xfail(reason=("Does not support tool_choice."))
            def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
                super().test_tool_calling_with_no_arguments(model)
            ```

            Otherwise, in the case that only one tool is bound, ensure that
            `tool_choice` supports the string `'any'` to force calling that tool.
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
        # Force a tool call when the provider supports `tool_choice`; otherwise
        # rely on the prompt alone to elicit one.
        tool_choice_value = None if not self.has_tool_choice else "any"
        model_with_tools = model.bind_tools(
            [magic_function_no_args], tool_choice=tool_choice_value
        )
        query = "What is the value of magic_function_no_args()? Use the tool."
        result = model_with_tools.invoke(query)
        _validate_tool_call_message_no_args(result)
        # Streaming path: accumulate chunks into one message before validating.
        full: BaseMessage | None = None
        for chunk in model_with_tools.stream(query):
            full = chunk if full is None else full + chunk  # type: ignore[assignment]
        assert isinstance(full, AIMessage)
        _validate_tool_call_message_no_args(full)
    def test_tool_message_error_status(
        self, model: BaseChatModel, my_adder_tool: BaseTool
    ) -> None:
        """Test that `ToolMessage` with `status="error"` can be handled.

        These messages may take the form:

        ```python
        ToolMessage(
            "Error: Missing required argument 'b'.",
            name="my_adder_tool",
            tool_call_id="abc123",
            status="error",
        )
        ```

        If possible, the `status` field should be parsed and passed appropriately
        to the model.

        This test is optional and should be skipped if the model does not support
        tool calling (see configuration below).

        ??? note "Configuration"

            To disable tool calling tests, set `has_tool_calling` to `False` in your
            test class:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def has_tool_calling(self) -> bool:
                    return False
            ```

        ??? question "Troubleshooting"

            If this test fails, check that the `status` field on `ToolMessage`
            objects is either ignored or passed to the model appropriately.
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
        model_with_tools = model.bind_tools([my_adder_tool])
        # History ends with a failed tool invocation (missing argument) surfaced
        # via status="error"; the model must still produce a response.
        messages = [
            HumanMessage("What is 1 + 2"),
            AIMessage(
                "",
                tool_calls=[
                    {
                        "name": "my_adder_tool",
                        "args": {"a": 1},
                        "id": "abc123",
                        "type": "tool_call",
                    },
                ],
            ),
            ToolMessage(
                "Error: Missing required argument 'b'.",
                name="my_adder_tool",
                tool_call_id="abc123",
                status="error",
            ),
        ]
        result = model_with_tools.invoke(messages)
        assert isinstance(result, AIMessage)
    def test_structured_few_shot_examples(
        self, model: BaseChatModel, my_adder_tool: BaseTool
    ) -> None:
        """Test that the model can process few-shot examples with tool calls.

        These are represented as a sequence of messages of the following form:

        - `HumanMessage` with string content;
        - `AIMessage` with the `tool_calls` attribute populated;
        - `ToolMessage` with string content;
        - `AIMessage` with string content (an answer);
        - `HumanMessage` with string content (a follow-up question).

        This test should be skipped if the model does not support tool calling
        (see configuration below).

        ??? note "Configuration"

            To disable tool calling tests, set `has_tool_calling` to `False` in your
            test class:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def has_tool_calling(self) -> bool:
                    return False
            ```

        ??? question "Troubleshooting"

            This test uses [a utility function](https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.tool_example_to_messages.html).
            in `langchain_core` to generate a sequence of messages representing
            "few-shot" examples.

            If this test fails, check that the model can correctly handle this
            sequence of messages.

            You can `xfail` the test if tool calling is implemented but this format
            is not supported.

            ```python
            @pytest.mark.xfail(reason=("Not implemented."))
            def test_structured_few_shot_examples(self, *args: Any) -> None:
                super().test_structured_few_shot_examples(*args)
            ```
        """
        if not self.has_tool_calling:
            pytest.skip("Test requires tool calling.")
        model_with_tools = model.bind_tools([my_adder_tool], tool_choice="any")
        function_result = json.dumps({"result": 3})
        tool_schema = my_adder_tool.args_schema
        assert isinstance(tool_schema, type)
        assert issubclass(tool_schema, BaseModel)
        # Expand one worked example into the Human -> AI(tool_calls) -> Tool -> AI
        # message sequence via the langchain_core utility.
        few_shot_messages = tool_example_to_messages(
            "What is 1 + 2",
            [tool_schema(a=1, b=2)],
            tool_outputs=[function_result],
            ai_response=function_result,
        )
        messages = [*few_shot_messages, HumanMessage("What is 3 + 4")]
        result = model_with_tools.invoke(messages)
        assert isinstance(result, AIMessage)
@pytest.mark.parametrize("schema_type", ["pydantic", "typeddict", "json_schema"])
def test_structured_output(
self,
model: BaseChatModel,
schema_type: Literal["pydantic", "typeddict", "json_schema"],
) -> None:
"""Test to verify structured output is generated both on invoke and stream.
This test is optional and should be skipped if the model does not support
structured output (see configuration below).
??? note "Configuration"
To disable structured output tests, set `has_structured_output` to False
in your test class:
```python
class TestMyChatModelIntegration(ChatModelIntegrationTests):
@property
def has_structured_output(self) -> bool:
return False
```
By default, `has_structured_output` is True if a model overrides the
`with_structured_output` or `bind_tools` methods.
??? question "Troubleshooting"
If this test fails, ensure that the model's `bind_tools` method
properly handles both JSON Schema and Pydantic V2 models.
`langchain_core` implements a [utility function](https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html).
that will accommodate most formats.
See [example implementation](https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output).
of `with_structured_output`.
"""
if not self.has_structured_output:
pytest.skip("Test requires structured output.")
schema, validation_function = _get_joke_class(schema_type)
chat = model.with_structured_output(schema, **self.structured_output_kwargs)
mock_callback = MagicMock()
mock_callback.on_chat_model_start = MagicMock()
invoke_callback = _TestCallbackHandler()
result = chat.invoke(
"Tell me a joke about cats.", config={"callbacks": [invoke_callback]}
)
validation_function(result)
assert len(invoke_callback.options) == 1, (
"Expected on_chat_model_start to be called once"
)
assert isinstance(invoke_callback.options[0], dict)
assert isinstance(
invoke_callback.options[0]["ls_structured_output_format"]["schema"], dict
)
assert invoke_callback.options[0]["ls_structured_output_format"][
"schema"
] == convert_to_json_schema(schema)
stream_callback = _TestCallbackHandler()
for chunk in chat.stream(
"Tell me a joke about cats.", config={"callbacks": [stream_callback]}
):
validation_function(chunk)
assert chunk
assert len(stream_callback.options) == 1, (
"Expected on_chat_model_start to be called once"
)
assert isinstance(stream_callback.options[0], dict)
assert isinstance(
stream_callback.options[0]["ls_structured_output_format"]["schema"], dict
)
assert stream_callback.options[0]["ls_structured_output_format"][
"schema"
] == convert_to_json_schema(schema)
    @pytest.mark.parametrize("schema_type", ["pydantic", "typeddict", "json_schema"])
    async def test_structured_output_async(
        self,
        model: BaseChatModel,
        schema_type: Literal["pydantic", "typeddict", "json_schema"],
    ) -> None:
        """Test to verify structured output is generated both on invoke and stream.

        Async counterpart of `test_structured_output` (uses `ainvoke`/`astream`).

        This test is optional and should be skipped if the model does not support
        structured output (see configuration below).

        ??? note "Configuration"

            To disable structured output tests, set `has_structured_output` to False
            in your test class:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def has_structured_output(self) -> bool:
                    return False
            ```

            By default, `has_structured_output` is True if a model overrides the
            `with_structured_output` or `bind_tools` methods.

        ??? question "Troubleshooting"

            If this test fails, ensure that the model's `bind_tools` method
            properly handles both JSON Schema and Pydantic V2 models.

            `langchain_core` implements a [utility function](https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html).
            that will accommodate most formats.

            See [example implementation](https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output).
            of `with_structured_output`.
        """
        if not self.has_structured_output:
            pytest.skip("Test requires structured output.")
        schema, validation_function = _get_joke_class(schema_type)
        chat = model.with_structured_output(schema, **self.structured_output_kwargs)
        # ainvoke: the callback handler must observe exactly one on_chat_model_start
        # whose options advertise the structured output schema.
        ainvoke_callback = _TestCallbackHandler()
        result = await chat.ainvoke(
            "Tell me a joke about cats.", config={"callbacks": [ainvoke_callback]}
        )
        validation_function(result)
        assert len(ainvoke_callback.options) == 1, (
            "Expected on_chat_model_start to be called once"
        )
        assert isinstance(ainvoke_callback.options[0], dict)
        assert isinstance(
            ainvoke_callback.options[0]["ls_structured_output_format"]["schema"], dict
        )
        assert ainvoke_callback.options[0]["ls_structured_output_format"][
            "schema"
        ] == convert_to_json_schema(schema)
        # astream: every chunk must validate, and the schema must again be
        # advertised exactly once.
        astream_callback = _TestCallbackHandler()
        async for chunk in chat.astream(
            "Tell me a joke about cats.", config={"callbacks": [astream_callback]}
        ):
            validation_function(chunk)
            assert chunk
        assert len(astream_callback.options) == 1, (
            "Expected on_chat_model_start to be called once"
        )
        assert isinstance(astream_callback.options[0], dict)
        assert isinstance(
            astream_callback.options[0]["ls_structured_output_format"]["schema"], dict
        )
        assert astream_callback.options[0]["ls_structured_output_format"][
            "schema"
        ] == convert_to_json_schema(schema)
@pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="Test requires pydantic 2.")
def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
"""Test structured output using pydantic.v1.BaseModel.
Verify we can generate structured output using `pydantic.v1.BaseModel`.
`pydantic.v1.BaseModel` is available in the Pydantic 2 package.
This test is optional and should be skipped if the model does not support
structured output (see configuration below).
??? note "Configuration"
To disable structured output tests, set `has_structured_output` to False
in your test class:
```python
class TestMyChatModelIntegration(ChatModelIntegrationTests):
@property
def has_structured_output(self) -> bool:
return False
```
By default, `has_structured_output` is True if a model overrides the
`with_structured_output` or `bind_tools` methods.
??? question "Troubleshooting"
If this test fails, ensure that the model's `bind_tools` method
properly handles both JSON Schema and Pydantic V1 models.
`langchain_core` implements [a utility function](https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html).
that will accommodate most formats.
See [example implementation](https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output).
of `with_structured_output`.
"""
if not self.has_structured_output:
pytest.skip("Test requires structured output.")
class Joke(BaseModelV1): # Uses langchain_core.pydantic_v1.BaseModel
"""Joke to tell user."""
setup: str = FieldV1(description="question to set up a joke")
punchline: str = FieldV1(description="answer to resolve the joke")
# Pydantic class
chat = model.with_structured_output(Joke, **self.structured_output_kwargs)
result = chat.invoke("Tell me a joke about cats.")
assert isinstance(result, Joke)
for chunk in chat.stream("Tell me a joke about cats."):
assert isinstance(chunk, Joke)
# Schema
chat = model.with_structured_output(
Joke.schema(), **self.structured_output_kwargs
)
result = chat.invoke("Tell me a joke about cats.")
assert isinstance(result, dict)
assert set(result.keys()) == {"setup", "punchline"}
for chunk in chat.stream("Tell me a joke about cats."):
assert isinstance(chunk, dict)
assert isinstance(chunk, dict) # for mypy
assert set(chunk.keys()) == {"setup", "punchline"}
    def test_structured_output_optional_param(self, model: BaseChatModel) -> None:
        """Test structured output with optional parameters.

        Test to verify we can generate structured output that includes optional
        parameters.

        This test is optional and should be skipped if the model does not support
        structured output (see configuration below).

        ??? note "Configuration"

            To disable structured output tests, set `has_structured_output` to False
            in your test class:

            ```python
            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def has_structured_output(self) -> bool:
                    return False
            ```

            By default, `has_structured_output` is True if a model overrides the
            `with_structured_output` or `bind_tools` methods.

        ??? question "Troubleshooting"

            If this test fails, ensure that the model's `bind_tools` method
            properly handles Pydantic V2 models with optional parameters.

            `langchain_core` implements [a utility function](https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html).
            that will accommodate most formats.

            See [example implementation](https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output).
            of `with_structured_output`.
        """
        if not self.has_structured_output:
            pytest.skip("Test requires structured output.")

        # Pydantic
        class Joke(BaseModel):
            """Joke to tell user."""

            setup: str = Field(description="question to set up a joke")
            # Optional field: the model may legitimately omit the punchline.
            punchline: str | None = Field(
                default=None, description="answer to resolve the joke"
            )

        chat = model.with_structured_output(Joke, **self.structured_output_kwargs)
        # Both with and without the optional field populated must parse.
        setup_result = chat.invoke(
            "Give me the setup to a joke about cats, no punchline."
        )
        assert isinstance(setup_result, Joke)
        joke_result = chat.invoke("Give me a joke about cats, include the punchline.")
        assert isinstance(joke_result, Joke)

        # Schema
        chat = model.with_structured_output(
            Joke.model_json_schema(), **self.structured_output_kwargs
        )
        result = chat.invoke("Tell me a joke about cats.")
        assert isinstance(result, dict)

        # TypedDict
        class JokeDict(TypedDict):
            """Joke to tell user."""

            setup: Annotated[str, ..., "question to set up a joke"]
            punchline: Annotated[str | None, None, "answer to resolve the joke"]

        chat = model.with_structured_output(JokeDict, **self.structured_output_kwargs)
        result = chat.invoke("Tell me a joke about cats.")
        assert isinstance(result, dict)
def test_json_mode(self, model: BaseChatModel) -> None:
    """Test structured output via [JSON mode.](https://python.langchain.com/docs/concepts/structured_outputs/#json-mode).

    This test is optional and should be skipped if the model does not support
    the JSON mode feature (see configuration below).

    ??? note "Configuration"

        To disable this test, set `supports_json_mode` to `False` in your
        test class:

        ```python
        class TestMyChatModelIntegration(ChatModelIntegrationTests):
            @property
            def supports_json_mode(self) -> bool:
                return False
        ```

    ??? question "Troubleshooting"

        See example implementation of `with_structured_output` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output
    """
    if not self.supports_json_mode:
        pytest.skip("Test requires json mode support.")

    from pydantic import BaseModel as BaseModelProper  # noqa: PLC0415
    from pydantic import Field as FieldProper  # noqa: PLC0415

    class Joke(BaseModelProper):
        """Joke to tell user."""

        setup: str = FieldProper(description="question to set up a joke")
        punchline: str = FieldProper(description="answer to resolve the joke")

    # Pydantic class: both invoke and stream must yield parsed Joke objects.
    chat = model.with_structured_output(Joke, method="json_mode")
    msg = (
        "Tell me a joke about cats. Return the result as a JSON with 'setup' and "
        "'punchline' keys. Return nothing other than JSON."
    )
    result = chat.invoke(msg)
    assert isinstance(result, Joke)

    for chunk in chat.stream(msg):
        assert isinstance(chunk, Joke)

    # Schema: raw JSON schema yields dicts instead of model instances.
    chat = model.with_structured_output(
        Joke.model_json_schema(), method="json_mode"
    )
    result = chat.invoke(msg)
    assert isinstance(result, dict)
    assert set(result.keys()) == {"setup", "punchline"}

    for chunk in chat.stream(msg):
        assert isinstance(chunk, dict)
    # Key check is done only on the final (fully aggregated) chunk after the loop.
    assert isinstance(chunk, dict)  # for mypy
    assert set(chunk.keys()) == {"setup", "punchline"}
def test_pdf_inputs(self, model: BaseChatModel) -> None:
    """Test that the model can process PDF inputs.

    This test should be skipped (see configuration below) if the model does not
    support PDF inputs. These will take the shape of the LangChain
    `FileContentBlock`:

    ```python
    {
        "type": "file",
        "base64": "<base64 PDF data>",
        "mime_type": "application/pdf",
    }
    ```

    Furthermore, for backward-compatibility, we must also support OpenAI chat
    completions file content blocks:

    ```python
    (
        {
            "type": "file",
            "file": {
                "filename": "test_file.pdf",
                "file_data": f"data:application/pdf;base64,{pdf_data}",
            },
        },
    )
    ```

    ??? note "Configuration"

        To disable this test, set `supports_pdf_inputs` to `False` in your
        test class:

        ```python
        class TestMyChatModelIntegration(ChatModelIntegrationTests):
            @property
            def supports_pdf_inputs(self) -> bool:
                return False
        ```

    ??? question "Troubleshooting"

        If this test fails, check that the model can correctly handle messages
        with pdf content blocks, including base64-encoded files. Otherwise, set
        the `supports_pdf_inputs` property to `False`.
    """
    if not self.supports_pdf_inputs:
        pytest.skip("Model does not support PDF inputs.")
    url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
    pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8")

    # Standard LangChain FileContentBlock format
    message = HumanMessage(
        [
            {
                "type": "text",
                "text": "Summarize this document:",
            },
            {
                "type": "file",
                "base64": pdf_data,
                "mime_type": "application/pdf",
            },
        ]
    )
    _ = model.invoke([message])

    # Test OpenAI Chat Completions format
    message = HumanMessage(
        [
            {
                "type": "text",
                "text": "Summarize this document:",
            },
            {
                "type": "file",
                "file": {
                    "filename": "test_file.pdf",
                    "file_data": f"data:application/pdf;base64,{pdf_data}",
                },
            },
        ]
    )
    _ = model.invoke([message])
def test_audio_inputs(self, model: BaseChatModel) -> None:
    """Test that the model can process audio inputs.

    This test should be skipped (see configuration below) if the model does not
    support audio inputs. These will take the shape of the LangChain
    `AudioContentBlock`:

    ```python
    {
        "type": "audio",
        "base64": "<base64 audio data>",
        "mime_type": "audio/wav",  # or appropriate MIME type
    }
    ```

    Furthermore, for backward-compatibility, we must also support OpenAI chat
    completions audio content blocks:

    ```python
    {
        "type": "input_audio",
        "input_audio": {
            "data": "<base64 audio data>",
            "format": "wav",  # or appropriate format
        },
    }
    ```

    Note: this test downloads audio data from wikimedia.org. You may need to set
    the `LANGCHAIN_TESTS_USER_AGENT` environment variable to identify these
    requests, e.g.,

    ```bash
    export LANGCHAIN_TESTS_USER_AGENT="CoolBot/0.0 (https://example.org/coolbot/; coolbot@example.org) generic-library/0.0"
    ```

    Refer to the [Wikimedia Foundation User-Agent Policy](https://foundation.wikimedia.org/wiki/Policy:Wikimedia_Foundation_User-Agent_Policy).

    ??? note "Configuration"

        To disable this test, set `supports_audio_inputs` to `False` in your
        test class:

        ```python
        class TestMyChatModelIntegration(ChatModelIntegrationTests):
            @property
            def supports_audio_inputs(self) -> bool:
                return False
        ```

    ??? question "Troubleshooting"

        If this test fails, check that the model can correctly handle messages
        with audio content blocks, specifically base64-encoded files. Otherwise,
        set the `supports_audio_inputs` property to `False`.
    """  # noqa: E501
    if not self.supports_audio_inputs:
        pytest.skip("Model does not support audio inputs.")
    # https://commons.wikimedia.org/wiki/File:Northern_Flicker_202280456.wav
    # License: CC0 1.0 Universal
    url = "https://upload.wikimedia.org/wikipedia/commons/6/6a/Northern_Flicker_202280456.wav"
    # _get_base64_from_url presumably caches/sets the user agent — see helper.
    audio_data = _get_base64_from_url(url)

    # Standard LangChain AudioContentBlock format
    message = HumanMessage(
        [
            {
                "type": "text",
                "text": "Describe this audio:",
            },
            {
                "type": "audio",
                "mime_type": "audio/wav",
                "base64": audio_data,
            },
        ]
    )
    _ = model.invoke([message])

    # Test OpenAI Chat Completions format
    message = HumanMessage(
        [
            {
                "type": "text",
                "text": "Describe this audio:",
            },
            {
                "type": "input_audio",
                "input_audio": {"data": audio_data, "format": "wav"},
            },
        ]
    )
    _ = model.invoke([message])
def test_image_inputs(self, model: BaseChatModel) -> None:
    """Test that the model can process image inputs.

    This test should be skipped (see configuration below) if the model does not
    support image inputs. These will take the shape of the LangChain
    `ImageContentBlock`:

    ```python
    {
        "type": "image",
        "base64": "<base64 image data>",
        "mime_type": "image/jpeg",  # or appropriate MIME type
    }
    ```

    For backward-compatibility, we must also support OpenAI chat completions
    image content blocks containing base64-encoded images:

    ```python
    [
        {"type": "text", "text": "describe the weather in this image"},
        {
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
        },
    ]
    ```

    See https://python.langchain.com/docs/concepts/multimodality/

    If the property `supports_image_urls` is set to `True`, the test will also
    check that we can process content blocks of the form:

    ```python
    {
        "type": "image",
        "url": "<url>",
    }
    ```

    ??? note "Configuration"

        To disable this test, set `supports_image_inputs` to `False` in your
        test class:

        ```python
        class TestMyChatModelIntegration(ChatModelIntegrationTests):
            @property
            def supports_image_inputs(self) -> bool:
                return False

            # Can also explicitly disable testing image URLs:
            @property
            def supports_image_urls(self) -> bool:
                return False
        ```

    ??? question "Troubleshooting"

        If this test fails, check that the model can correctly handle messages
        with image content blocks, including base64-encoded images. Otherwise, set
        the `supports_image_inputs` property to `False`.
    """
    if not self.supports_image_inputs:
        pytest.skip("Model does not support image message.")
    image_url = "https://raw.githubusercontent.com/langchain-ai/docs/4d11d08b6b0e210bd456943f7a22febbd168b543/src/images/agentic-rag-output.png"
    image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")

    # OpenAI CC format, base64 data
    message = HumanMessage(
        content=[
            {"type": "text", "text": "Give a concise description of this image."},
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/png;base64,{image_data}"},
            },
        ],
    )
    _ = model.invoke([message])

    # Standard LangChain format, base64 data
    message = HumanMessage(
        content=[
            {"type": "text", "text": "Give a concise description of this image."},
            {
                "type": "image",
                "base64": image_data,
                "mime_type": "image/png",
            },
        ],
    )
    _ = model.invoke([message])

    # Standard format, URL (only exercised when the provider accepts raw URLs)
    if self.supports_image_urls:
        message = HumanMessage(
            content=[
                {
                    "type": "text",
                    "text": "Give a concise description of this image.",
                },
                {
                    "type": "image",
                    "url": image_url,
                },
            ],
        )
        _ = model.invoke([message])
def test_image_tool_message(self, model: BaseChatModel) -> None:
    """Test that the model can process `ToolMessage` objects with image inputs.

    This test should be skipped if the model does not support messages of the
    Chat Completions `image_url` format:

    ```python
    ToolMessage(
        content=[
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
            },
        ],
        tool_call_id="1",
        name="random_image",
    )
    ```

    In addition, models should support the standard LangChain `ImageContentBlock`
    format:

    ```python
    ToolMessage(
        content=[
            {
                "type": "image",
                "base64": image_data,
                "mime_type": "image/jpeg",
            },
        ],
        tool_call_id="1",
        name="random_image",
    )
    ```

    This test can be skipped by setting the `supports_image_tool_message` property
    to False (see configuration below).

    ??? note "Configuration"

        To disable this test, set `supports_image_tool_message` to `False` in your
        test class:

        ```python
        class TestMyChatModelIntegration(ChatModelIntegrationTests):
            @property
            def supports_image_tool_message(self) -> bool:
                return False
        ```

    ??? question "Troubleshooting"

        If this test fails, check that the model can correctly handle messages
        with image content blocks in `ToolMessage` objects, including base64-encoded
        images. Otherwise, set the `supports_image_tool_message` property to
        False.
    """
    if not self.supports_image_tool_message:
        pytest.skip("Model does not support image tool message.")
    image_url = "https://raw.githubusercontent.com/langchain-ai/docs/4d11d08b6b0e210bd456943f7a22febbd168b543/src/images/agentic-rag-output.png"
    image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")

    # OpenAI CC format, base64 data
    oai_format_message = ToolMessage(
        content=[
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/png;base64,{image_data}"},
            },
        ],
        tool_call_id="1",
        name="random_image",
    )

    # Standard LangChain format, base64 data
    standard_format_message = ToolMessage(
        content=[
            {
                "type": "image",
                "base64": image_data,
                "mime_type": "image/png",
            },
        ],
        tool_call_id="1",
        name="random_image",
    )

    # Both formats are replayed through the same simulated agent turn: the
    # AIMessage's tool_call id ("1") must match each ToolMessage's tool_call_id.
    for tool_message in [oai_format_message, standard_format_message]:
        messages = [
            HumanMessage(
                "get a random diagram using the tool and give it a concise "
                "description"
            ),
            AIMessage(
                [],
                tool_calls=[
                    {
                        "type": "tool_call",
                        "id": "1",
                        "name": "random_image",
                        "args": {},
                    }
                ],
            ),
            tool_message,
        ]

        def random_image() -> str:
            """Return a random image."""
            return ""

        _ = model.bind_tools([random_image]).invoke(messages)
def test_pdf_tool_message(self, model: BaseChatModel) -> None:
    """Test that the model can process `ToolMessage` objects with PDF inputs.

    The tool result is supplied in the LangChain `FileContentBlock` format:

    ```python
    ToolMessage(
        content=[
            {
                "type": "file",
                "base64": pdf_data,
                "mime_type": "application/pdf",
            },
        ],
        tool_call_id="1",
        name="random_pdf",
    )
    ```

    ??? note "Configuration"

        This test can be skipped by setting `supports_pdf_tool_message` to
        `False` in your test class:

        ```python
        class TestMyChatModelIntegration(ChatModelIntegrationTests):
            @property
            def supports_pdf_tool_message(self) -> bool:
                return False
        ```

    ??? question "Troubleshooting"

        If this test fails, check that the model can correctly handle messages
        with PDF content blocks in `ToolMessage` objects, specifically
        base64-encoded PDFs. Otherwise, set the `supports_pdf_tool_message`
        property to False.
    """
    if not self.supports_pdf_tool_message:
        pytest.skip("Model does not support PDF tool message.")

    def random_pdf() -> str:
        """Return a random PDF."""
        return ""

    # Fetch a small reference PDF and base64-encode it for the content block.
    url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
    encoded_pdf = base64.b64encode(httpx.get(url).content).decode("utf-8")

    pdf_result = ToolMessage(
        content_blocks=[
            {
                "type": "file",
                "base64": encoded_pdf,
                "mime_type": "application/pdf",
            },
        ],
        tool_call_id="1",
        name="random_pdf",
    )

    # Simulated agent turn: user request, assistant tool call, tool result.
    conversation = [
        HumanMessage(
            "Get a random PDF using the tool and relay the title verbatim."
        ),
        AIMessage(
            [],
            tool_calls=[
                {
                    "type": "tool_call",
                    "id": "1",
                    "name": "random_pdf",
                    "args": {},
                }
            ],
        ),
        pdf_result,
    ]
    _ = model.bind_tools([random_pdf]).invoke(conversation)
def test_anthropic_inputs(self, model: BaseChatModel) -> None:
    """Test that model can process Anthropic-style message histories.

    These message histories will include `AIMessage` objects with `tool_use`
    content blocks, e.g.,

    ```python
    AIMessage(
        [
            {"type": "text", "text": "Hmm let me think about that"},
            {
                "type": "tool_use",
                "input": {"fav_color": "green"},
                "id": "foo",
                "name": "color_picker",
            },
        ]
    )
    ```

    ...as well as `HumanMessage` objects containing `tool_result` content blocks:

    ```python
    HumanMessage(
        [
            {
                "type": "tool_result",
                "tool_use_id": "foo",
                "content": [
                    {
                        "type": "text",
                        "text": "green is a great pick! "
                        "that's my sister's favorite color",
                    }
                ],
                "is_error": False,
            },
            {"type": "text", "text": "what's my sister's favorite color"},
        ]
    )
    ```

    This test should be skipped if the model does not support messages of this
    form (or doesn't support tool calling generally). See Configuration below.

    ??? note "Configuration"

        To disable this test, set `supports_anthropic_inputs` to `False` in your
        test class:

        ```python
        class TestMyChatModelIntegration(ChatModelIntegrationTests):
            @property
            def supports_anthropic_inputs(self) -> bool:
                return False
        ```

    ??? question "Troubleshooting"

        If this test fails, check that:

        1. The model can correctly handle message histories that include message
        objects with list content.
        2. The `tool_calls` attribute on AIMessage objects is correctly handled
        and passed to the model in an appropriate format.
        3. `HumanMessage`s with "tool_result" content blocks are correctly
        handled.

        Otherwise, if Anthropic tool call and result formats are not supported,
        set the `supports_anthropic_inputs` property to `False`.
    """
    if not self.supports_anthropic_inputs:
        pytest.skip("Model does not explicitly support Anthropic inputs.")

    # Anthropic-format tool (raw dict schema, not a LangChain tool object)
    color_picker = {
        "name": "color_picker",
        "input_schema": {
            "type": "object",
            "properties": {
                "fav_color": {"type": "string"},
            },
            "required": ["fav_color"],
        },
        "description": "Input your fav color and get a random fact about it.",
        "cache_control": {"type": "ephemeral"},
    }
    human_content: list[dict] = [
        {
            "type": "text",
            "text": "what's your favorite color in this image",
            "cache_control": {"type": "ephemeral"},
        },
    ]
    # Optionally append an Anthropic-style base64 image source block.
    if self.supports_image_inputs:
        image_url = "https://raw.githubusercontent.com/langchain-ai/docs/4d11d08b6b0e210bd456943f7a22febbd168b543/src/images/agentic-rag-output.png"
        image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
        human_content.append(
            {
                "type": "image",
                "source": {
                    "type": "base64",
                    "media_type": "image/png",
                    "data": image_data,
                },
            }
        )
    # The AIMessage carries BOTH a raw `tool_use` content block and the parsed
    # `tool_calls` attribute; both must round-trip through the provider payload.
    messages = [
        SystemMessage("you're a good assistant"),
        HumanMessage(human_content),  # type: ignore[arg-type]
        AIMessage(
            [
                {"type": "text", "text": "Hmm let me think about that"},
                {
                    "type": "tool_use",
                    "input": {"fav_color": "purple"},
                    "id": "foo",
                    "name": "color_picker",
                },
            ],
            tool_calls=[
                {
                    "name": "color_picker",
                    "args": {"fav_color": "purple"},
                    "id": "foo",
                    "type": "tool_call",
                }
            ],
        ),
        ToolMessage("That's a great pick!", tool_call_id="foo"),
    ]
    response = model.bind_tools([color_picker]).invoke(messages)
    assert isinstance(response, AIMessage)

    # Test thinking blocks (the signature string must be passed back verbatim)
    messages = [
        HumanMessage(
            [
                {
                    "type": "text",
                    "text": "Hello",
                },
            ]
        ),
        AIMessage(
            [
                {
                    "type": "thinking",
                    "thinking": (
                        "This is a simple greeting. I should respond warmly and "
                        "professionally, and perhaps ask how I can help the person "
                        "today."
                    ),
                    "signature": (
                        "ErUBCkYICBgCIkDCTQUXPc3O7nHXd302Zercaz8WrrpddpOqHITxBih5ze"
                        "FPoJkwKBvkvZ8ID1aAfJftji6+ZI5gBYDo7XmNBIkzEgzVDHKopedAn/sc"
                        "G80aDFDXVZrDOWgla7lEBiIwLq5kfFjQjvF/CyuL8J5V7dRwsJN5gQIXaM"
                        "B6xXTs6T+2Zp0VdiyiMb/hcdrHt+7aKh0z2E1UnjiOCoTlofNFHzOnKk0q"
                        "PIoPmfGgpPgGNRgC"
                    ),
                },
                {
                    "type": "text",
                    "text": "Hello, how are you?",
                },
            ]
        ),
        HumanMessage(
            [
                {
                    "type": "text",
                    "text": "Well, thanks.",
                },
            ]
        ),
    ]
    response = model.invoke(messages)
    assert isinstance(response, AIMessage)
def test_message_with_name(self, model: BaseChatModel) -> None:
    """Test that `HumanMessage` with values for the `name` field can be handled.

    Such messages look like:

    ```python
    HumanMessage("hello", name="example_user")
    ```

    Where supported, the `name` field should be forwarded to the model;
    otherwise it should simply be ignored — either way the call must succeed.

    ??? question "Troubleshooting"

        If this test fails, check that the `name` field on `HumanMessage`
        objects is either ignored or passed to the model appropriately.
    """
    named_message = HumanMessage("hello", name="example_user")
    response = model.invoke([named_message])
    assert response is not None
    assert isinstance(response, AIMessage)
    assert isinstance(response.text, str)
    assert len(response.content) > 0
@pytest.mark.parametrize("model", [{}, {"output_version": "v1"}], indirect=True)
def test_agent_loop(self, model: BaseChatModel) -> None:
    """Test that the model supports a simple ReAct agent loop.

    This test is skipped if the `has_tool_calling` property on the test class is
    set to `False`.

    This test is optional and should be skipped if the model does not support
    tool calling (see configuration below).

    ??? note "Configuration"

        To disable tool calling tests, set `has_tool_calling` to `False` in your
        test class:

        ```python
        class TestMyChatModelIntegration(ChatModelIntegrationTests):
            @property
            def has_tool_calling(self) -> bool:
                return False
        ```

    ??? question "Troubleshooting"

        If this test fails, check that `bind_tools` is implemented to correctly
        translate LangChain tool objects into the appropriate schema for your
        chat model.

        Check also that all required information (e.g., tool calling identifiers)
        from `AIMessage` objects is propagated correctly to model payloads.

        This test may fail if the chat model does not consistently generate tool
        calls in response to an appropriate query. In these cases you can `xfail`
        the test:

        ```python
        @pytest.mark.xfail(reason=("Does not support tool_choice."))
        def test_agent_loop(self, model: BaseChatModel) -> None:
            super().test_agent_loop(model)
        ```
    """
    if not self.has_tool_calling:
        pytest.skip("Test requires tool calling.")

    @tool
    def get_weather(location: str) -> str:  # noqa: ARG001
        """Get the weather at a location."""
        return "It's sunny."

    # Step 1: model should emit a tool call for the weather query.
    llm_with_tools = model.bind_tools([get_weather])
    input_message = HumanMessage("What is the weather in San Francisco, CA?")
    tool_call_message = llm_with_tools.invoke([input_message])
    assert isinstance(tool_call_message, AIMessage)

    content_blocks = tool_call_message.content_blocks
    assert any(block["type"] == "tool_call" for block in content_blocks)

    tool_calls = tool_call_message.tool_calls
    assert len(tool_calls) == 1
    tool_call = tool_calls[0]
    # Step 2: execute the tool; invoking a @tool with a tool_call dict
    # produces a ToolMessage carrying the matching tool_call_id.
    tool_message = get_weather.invoke(tool_call)
    assert isinstance(tool_message, ToolMessage)
    # Step 3: feed the full turn back; the model should produce a final answer.
    response = llm_with_tools.invoke(
        [
            input_message,
            tool_call_message,
            tool_message,
        ]
    )
    assert isinstance(response, AIMessage)
@pytest.mark.benchmark
@pytest.mark.vcr
def test_stream_time(
    self, model: BaseChatModel, benchmark: BenchmarkFixture, vcr: Cassette
) -> None:
    """Test that streaming does not introduce undue overhead.

    See `enable_vcr_tests` dropdown `above <ChatModelIntegrationTests>`
    for more information.

    ??? note "Configuration"

        This test can be enabled or disabled using the `enable_vcr_tests`
        property. For example, to disable the test, set this property to `False`:

        ```python
        @property
        def enable_vcr_tests(self) -> bool:
            return False
        ```

        !!! warning

            VCR will by default record authentication headers and other sensitive
            information in cassettes. See `enable_vcr_tests` dropdown
            `above <ChatModelIntegrationTests>` for how to configure what
            information is recorded in cassettes.
    """
    if not self.enable_vcr_tests:
        pytest.skip("VCR not set up.")

    def _consume_stream() -> None:
        # Drain the stream; only the elapsed time matters, not the chunks.
        for _ in model.stream("Write a story about a cat."):
            pass

    # Benchmark only replayed (recorded) cassettes; a first live run just
    # records the cassette without timing it.
    if vcr.responses:
        benchmark(_consume_stream)
    else:
        _consume_stream()
def invoke_with_audio_input(self, *, stream: bool = False) -> AIMessage:
    """Invoke with audio input.

    Intentionally unimplemented here; presumably overridden by
    provider-specific test suites — TODO confirm against callers.
    """
    raise NotImplementedError
def invoke_with_audio_output(self, *, stream: bool = False) -> AIMessage:
    """Invoke with audio output.

    Intentionally unimplemented here; presumably overridden by
    provider-specific test suites — TODO confirm against callers.
    """
    raise NotImplementedError
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
    """Invoke with reasoning output.

    Intentionally unimplemented here; presumably overridden by
    provider-specific test suites — TODO confirm against callers.
    """
    raise NotImplementedError
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
    """Invoke with cache read input.

    Intentionally unimplemented here; presumably overridden by
    provider-specific test suites — TODO confirm against callers.
    """
    raise NotImplementedError
def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage:
    """Invoke with cache creation input.

    Intentionally unimplemented here; presumably overridden by
    provider-specific test suites — TODO confirm against callers.
    """
    raise NotImplementedError
def test_unicode_tool_call_integration(
    self,
    model: BaseChatModel,
    *,
    tool_choice: str | None = None,
    force_tool_call: bool = True,
) -> None:
    r"""Generic integration test for Unicode characters in tool calls.

    Args:
        model: The chat model to test
        tool_choice: Tool choice parameter to pass to `bind_tools()`
            (provider-specific)
        force_tool_call: Whether to force a tool call
            (when `True` and `tool_choice` is not given, `tool_choice`
            defaults to `"any"`)

    Tests that Unicode characters in tool call arguments are preserved correctly,
    not escaped as `\\uXXXX` sequences.
    """
    if not self.has_tool_calling:
        pytest.skip("Test requires tool calling support.")

    # Configure tool choice based on provider capabilities
    if tool_choice is None and force_tool_call:
        tool_choice = "any"

    if tool_choice is not None:
        llm_with_tool = model.bind_tools(
            [unicode_customer], tool_choice=tool_choice
        )
    else:
        llm_with_tool = model.bind_tools([unicode_customer])

    # Test with Chinese characters
    msgs = [
        HumanMessage(
            "Create a customer named '你好啊集团' (Hello Group) - a Chinese "
            "technology company"
        )
    ]
    ai_msg = llm_with_tool.invoke(msgs)

    assert isinstance(ai_msg, AIMessage)
    assert isinstance(ai_msg.tool_calls, list)
    if force_tool_call:
        assert len(ai_msg.tool_calls) >= 1, (
            f"Expected at least 1 tool call, got {len(ai_msg.tool_calls)}"
        )
    if ai_msg.tool_calls:
        tool_call = ai_msg.tool_calls[0]
        assert tool_call["name"] == "unicode_customer"
        assert "args" in tool_call

        # Verify Unicode characters are properly handled
        args = tool_call["args"]
        assert "customer_name" in args
        customer_name = args["customer_name"]

        # The model should include the Unicode characters, not escaped sequences.
        # Partial matches are accepted since the model may paraphrase the name.
        assert (
            "你好" in customer_name
            or "你" in customer_name
            or "好" in customer_name
        ), f"Unicode characters not found in: {customer_name}"

    # Test with additional Unicode examples - Japanese
    msgs_jp = [
        HumanMessage(
            "Create a customer named 'こんにちは株式会社' (Hello Corporation) - a "
            "Japanese company"
        )
    ]
    ai_msg_jp = llm_with_tool.invoke(msgs_jp)

    assert isinstance(ai_msg_jp, AIMessage)
    if force_tool_call:
        assert len(ai_msg_jp.tool_calls) >= 1
    if ai_msg_jp.tool_calls:
        tool_call_jp = ai_msg_jp.tool_calls[0]
        args_jp = tool_call_jp["args"]
        customer_name_jp = args_jp["customer_name"]

        # Verify Japanese Unicode characters are preserved
        assert (
            "こんにちは" in customer_name_jp
            or "株式会社" in customer_name_jp
            or "こ" in customer_name_jp
            or "ん" in customer_name_jp
        ), f"Japanese Unicode characters not found in: {customer_name_jp}"
| ChatModelIntegrationTests |
python | geekcomputers__Python | Snake Game Using Turtle/wall.py | {
"start": 138,
"end": 1410
} | class ____:
""" This class creates a wall around the game screen that adjusts to its dimensions. """
def __init__(self):
self.screen = Screen()
self.create_wall()
def create_wall(self):
"""Draws a responsive game border and a header area for the scoreboard and controls."""
width = self.screen.window_width()
height = self.screen.window_height()
# Calculate coordinates for the border based on screen size
top = height / 2
bottom = -height / 2
left = -width / 2
right = width / 2
wall = Turtle()
wall.hideturtle()
wall.speed("fastest")
wall.color(colors.WALL_COLOR)
wall.penup()
# Draw the main rectangular border
wall.goto(left + 10, top - 10)
wall.pendown()
wall.pensize(10)
wall.goto(right - 10, top - 10)
wall.goto(right - 10, bottom + 10)
wall.goto(left + 10, bottom + 10)
wall.goto(left + 10, top - 10)
# Draw a line to create a separate header section for the score and buttons
wall.penup()
wall.goto(left + 10, top - 70)
wall.pendown()
wall.pensize(5)
wall.goto(right - 10, top - 70)
self.screen.update()
| Wall |
python | django__django | tests/timezones/models.py | {
"start": 602,
"end": 664
} | class ____(models.Model):
time = models.TimeField()
| DailyEvent |
python | mlflow__mlflow | dev/clint/src/clint/rules/missing_notebook_h1_header.py | {
"start": 36,
"end": 180
} | class ____(Rule):
def _message(self) -> str:
return "Notebook should have at least one H1 header for the title."
| MissingNotebookH1Header |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/prompt_helper.py | {
"start": 1600,
"end": 11263
} | class ____(BaseComponent):
"""
Prompt helper.
General prompt helper that can help deal with LLM context window token limitations.
At its core, it calculates available context size by starting with the context
window size of an LLM and reserve token space for the prompt template, and the
output.
It provides utility for "repacking" text chunks (retrieved from index) to maximally
make use of the available context window (and thereby reducing the number of LLM
calls needed), or truncating them so that they fit in a single LLM call.
Args:
context_window (int): Context window for the LLM.
num_output (int): Number of outputs for the LLM.
chunk_overlap_ratio (float): Chunk overlap as a ratio of chunk size
chunk_size_limit (Optional[int]): Maximum chunk size to use.
tokenizer (Optional[Callable[[str], List]]): Tokenizer to use.
separator (str): Separator for text splitter
"""
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum context size that will get sent to the LLM.",
)
num_output: int = Field(
default=DEFAULT_NUM_OUTPUTS,
description="The amount of token-space to leave in input for generation.",
)
chunk_overlap_ratio: float = Field(
default=DEFAULT_CHUNK_OVERLAP_RATIO,
description="The percentage token amount that each chunk should overlap.",
)
chunk_size_limit: Optional[int] = Field(description="The maximum size of a chunk.")
separator: str = Field(
default=" ", description="The separator when chunking tokens."
)
_token_counter: TokenCounter = PrivateAttr()
def __init__(
self,
context_window: int = DEFAULT_CONTEXT_WINDOW,
num_output: int = DEFAULT_NUM_OUTPUTS,
chunk_overlap_ratio: float = DEFAULT_CHUNK_OVERLAP_RATIO,
chunk_size_limit: Optional[int] = None,
tokenizer: Optional[Callable[[str], List]] = None,
separator: str = " ",
) -> None:
"""Init params."""
if chunk_overlap_ratio > 1.0 or chunk_overlap_ratio < 0.0:
raise ValueError("chunk_overlap_ratio must be a float between 0. and 1.")
super().__init__(
context_window=context_window,
num_output=num_output,
chunk_overlap_ratio=chunk_overlap_ratio,
chunk_size_limit=chunk_size_limit,
separator=separator,
)
# TODO: make configurable
self._token_counter = TokenCounter(tokenizer=tokenizer)
@classmethod
def from_llm_metadata(
cls,
llm_metadata: LLMMetadata,
chunk_overlap_ratio: float = DEFAULT_CHUNK_OVERLAP_RATIO,
chunk_size_limit: Optional[int] = None,
tokenizer: Optional[Callable[[str], List]] = None,
separator: str = " ",
) -> "PromptHelper":
"""
Create from llm predictor.
This will autofill values like context_window and num_output.
"""
context_window = llm_metadata.context_window
if llm_metadata.num_output == -1:
num_output = DEFAULT_NUM_OUTPUTS
else:
num_output = llm_metadata.num_output
return cls(
context_window=context_window,
num_output=num_output,
chunk_overlap_ratio=chunk_overlap_ratio,
chunk_size_limit=chunk_size_limit,
tokenizer=tokenizer,
separator=separator,
)
@classmethod
def class_name(cls) -> str:
return "PromptHelper"
def _get_available_context_size(self, num_prompt_tokens: int) -> int:
"""
Get available context size.
This is calculated as:
available context window = total context window
- input (partially filled prompt)
- output (room reserved for response)
Notes:
- Available context size is further clamped to be non-negative.
"""
context_size_tokens = self.context_window - num_prompt_tokens - self.num_output
if context_size_tokens < 0:
raise ValueError(
f"Calculated available context size {context_size_tokens} was"
" not non-negative."
)
return context_size_tokens
def _get_tools_from_llm(
self, llm: Optional[LLM] = None, tools: Optional[List["BaseTool"]] = None
) -> List["BaseTool"]:
from llama_index.core.program.function_program import get_function_tool
tools = tools or []
if isinstance(llm, StructuredLLM):
tools.append(get_function_tool(llm.output_cls))
return tools
def _get_available_chunk_size(
self,
prompt: BasePromptTemplate,
num_chunks: int = 1,
padding: int = 5,
llm: Optional[LLM] = None,
tools: Optional[List["BaseTool"]] = None,
) -> int:
"""
Get available chunk size.
This is calculated as:
available chunk size = available context window // number_chunks
- padding
Notes:
- By default, we use padding of 5 (to save space for formatting needs).
- Available chunk size is further clamped to chunk_size_limit if specified.
"""
tools = self._get_tools_from_llm(llm=llm, tools=tools)
if isinstance(prompt, SelectorPromptTemplate):
prompt = prompt.select(llm=llm)
if isinstance(prompt, ChatPromptTemplate):
messages: List[ChatMessage] = prompt.message_templates
# account for partial formatting
partial_messages = []
for message in messages:
partial_message = deepcopy(message)
# TODO: This does not count tokens in non-text blocks
prompt_kwargs = prompt.kwargs or {}
partial_message.blocks = format_content_blocks(
partial_message.blocks, **prompt_kwargs
)
# add to list of partial messages
partial_messages.append(partial_message)
num_prompt_tokens = self._token_counter.estimate_tokens_in_messages(
partial_messages
)
else:
prompt_str = get_empty_prompt_txt(prompt)
num_prompt_tokens = self._token_counter.get_string_tokens(prompt_str)
num_prompt_tokens += self._token_counter.estimate_tokens_in_tools(
[x.metadata.to_openai_tool() for x in tools]
)
# structured llms cannot have system prompts currently -- check the underlying llm
if isinstance(llm, StructuredLLM):
num_prompt_tokens += self._token_counter.get_string_tokens(
llm.llm.system_prompt or ""
)
elif llm is not None:
num_prompt_tokens += self._token_counter.get_string_tokens(
llm.system_prompt or ""
)
available_context_size = self._get_available_context_size(num_prompt_tokens)
result = available_context_size // num_chunks - padding
if self.chunk_size_limit is not None:
result = min(result, self.chunk_size_limit)
return result
def get_text_splitter_given_prompt(
self,
prompt: BasePromptTemplate,
num_chunks: int = 1,
padding: int = DEFAULT_PADDING,
llm: Optional[LLM] = None,
tools: Optional[List["BaseTool"]] = None,
) -> TokenTextSplitter:
"""
Get text splitter configured to maximally pack available context window,
taking into account of given prompt, and desired number of chunks.
"""
chunk_size = self._get_available_chunk_size(
prompt, num_chunks, padding=padding, llm=llm, tools=tools
)
if chunk_size <= 0:
raise ValueError(f"Chunk size {chunk_size} is not positive.")
chunk_overlap = int(self.chunk_overlap_ratio * chunk_size)
return TokenTextSplitter(
separator=self.separator,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
tokenizer=self._token_counter.tokenizer,
)
def truncate(
self,
prompt: BasePromptTemplate,
text_chunks: Sequence[str],
padding: int = DEFAULT_PADDING,
llm: Optional[LLM] = None,
tools: Optional[List["BaseTool"]] = None,
) -> List[str]:
"""Truncate text chunks to fit available context window."""
text_splitter = self.get_text_splitter_given_prompt(
prompt,
num_chunks=len(text_chunks),
padding=padding,
llm=llm,
tools=tools,
)
return [truncate_text(chunk, text_splitter) for chunk in text_chunks]
def repack(
self,
prompt: BasePromptTemplate,
text_chunks: Sequence[str],
padding: int = DEFAULT_PADDING,
llm: Optional[LLM] = None,
tools: Optional[List["BaseTool"]] = None,
) -> List[str]:
"""
Repack text chunks to fit available context window.
This will combine text chunks into consolidated chunks
that more fully "pack" the prompt template given the context_window.
"""
text_splitter = self.get_text_splitter_given_prompt(
prompt, padding=padding, llm=llm, tools=tools
)
combined_str = "\n\n".join([c.strip() for c in text_chunks if c.strip()])
return text_splitter.split_text(combined_str)
| PromptHelper |
python | pytest-dev__pluggy | src/pluggy/_tracing.py | {
"start": 260,
"end": 1656
} | class ____:
def __init__(self) -> None:
self._tags2proc: dict[tuple[str, ...], _Processor] = {}
self._writer: _Writer | None = None
self.indent = 0
def get(self, name: str) -> TagTracerSub:
return TagTracerSub(self, (name,))
def _format_message(self, tags: Sequence[str], args: Sequence[object]) -> str:
if isinstance(args[-1], dict):
extra = args[-1]
args = args[:-1]
else:
extra = {}
content = " ".join(map(str, args))
indent = " " * self.indent
lines = [f"{indent}{content} [{':'.join(tags)}]\n"]
for name, value in extra.items():
lines.append(f"{indent} {name}: {value}\n")
return "".join(lines)
def _processmessage(self, tags: tuple[str, ...], args: tuple[object, ...]) -> None:
if self._writer is not None and args:
self._writer(self._format_message(tags, args))
if processor := self._tags2proc.get(tags):
processor(tags, args)
def setwriter(self, writer: _Writer | None) -> None:
self._writer = writer
def setprocessor(self, tags: str | tuple[str, ...], processor: _Processor) -> None:
if isinstance(tags, str):
tags = tuple(tags.split(":"))
else:
assert isinstance(tags, tuple)
self._tags2proc[tags] = processor
| TagTracer |
python | plotly__plotly.py | tests/test_core/test_figure_widget_backend/test_validate_initialization.py | {
"start": 182,
"end": 582
} | class ____(TestCase):
if figure_widget_available:
def test_widget_layout_present_on_init(self):
fig = go.FigureWidget(data=go.Scatter(x=[1, 2], y=[1, 2]))
assert fig._widget_layout != {}
def test_widget_data_present_on_init(self):
fig = go.FigureWidget(data=go.Bar(x=[1, 2], y=[1, 2]))
assert fig._widget_data != []
| TestInitialization |
python | django__django | tests/admin_views/test_adminsite.py | {
"start": 782,
"end": 3618
} | class ____(TestCase):
"""
Check each_context contains the documented variables and that
available_apps context variable structure is the expected one.
"""
request_factory = RequestFactory()
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
def setUp(self):
request = self.request_factory.get(reverse("test_adminsite:index"))
request.user = self.u1
self.ctx = site.each_context(request)
def test_each_context(self):
ctx = self.ctx
self.assertEqual(ctx["site_header"], "Django administration")
self.assertEqual(ctx["site_title"], "Django site admin")
self.assertEqual(ctx["site_url"], "/")
self.assertIs(ctx["has_permission"], True)
def test_custom_admin_titles(self):
request = self.request_factory.get(reverse("test_custom_adminsite:index"))
request.user = self.u1
ctx = custom_site.each_context(request)
self.assertEqual(ctx["site_title"], "Custom title")
self.assertEqual(ctx["site_header"], "Custom site")
def test_each_context_site_url_with_script_name(self):
request = self.request_factory.get(
reverse("test_adminsite:index"), SCRIPT_NAME="/my-script-name/"
)
request.user = self.u1
self.assertEqual(site.each_context(request)["site_url"], "/my-script-name/")
def test_available_apps(self):
ctx = self.ctx
apps = ctx["available_apps"]
# we have registered two models from two different apps
self.assertEqual(len(apps), 2)
# admin_views.Article
admin_views = apps[0]
self.assertEqual(admin_views["app_label"], "admin_views")
self.assertEqual(len(admin_views["models"]), 1)
article = admin_views["models"][0]
self.assertEqual(article["object_name"], "Article")
self.assertEqual(article["model"], Article)
# auth.User
auth = apps[1]
self.assertEqual(auth["app_label"], "auth")
self.assertEqual(len(auth["models"]), 1)
user = auth["models"][0]
self.assertEqual(user["object_name"], "User")
self.assertEqual(user["model"], User)
self.assertEqual(auth["app_url"], "/test_admin/admin/auth/")
self.assertIs(auth["has_module_perms"], True)
self.assertIn("perms", user)
self.assertIs(user["perms"]["add"], True)
self.assertIs(user["perms"]["change"], True)
self.assertIs(user["perms"]["delete"], True)
self.assertEqual(user["admin_url"], "/test_admin/admin/auth/user/")
self.assertEqual(user["add_url"], "/test_admin/admin/auth/user/add/")
self.assertEqual(user["name"], "Users")
| SiteEachContextTest |
python | spack__spack | lib/spack/spack/oci/image.py | {
"start": 2121,
"end": 2978
} | class ____:
"""Represents a digest in the format <algorithm>:<digest>.
Currently only supports sha256 digests."""
__slots__ = ["algorithm", "digest"]
def __init__(self, *, algorithm: str, digest: str) -> None:
self.algorithm = algorithm
self.digest = digest
def __eq__(self, __value: object) -> bool:
if not isinstance(__value, Digest):
return NotImplemented
return self.algorithm == __value.algorithm and self.digest == __value.digest
@classmethod
def from_string(cls, string: str) -> "Digest":
return cls(algorithm="sha256", digest=ensure_sha256_checksum(string))
@classmethod
def from_sha256(cls, digest: str) -> "Digest":
return cls(algorithm="sha256", digest=digest)
def __str__(self) -> str:
return f"{self.algorithm}:{self.digest}"
| Digest |
python | getsentry__sentry | src/sentry/sentry_apps/services/app/model.py | {
"start": 5978,
"end": 6104
} | class ____(TypedDict, total=False):
events: list[str]
name: str
# TODO add whatever else as needed
| SentryAppUpdateArgs |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 28300,
"end": 29260
} | class ____(PrefectFilterBaseModel):
"""Filter by `TaskRun.name`."""
any_: Optional[list[str]] = Field(
default=None,
description="A list of task run names to include",
examples=[["my-task-run-1", "my-task-run-2"]],
)
like_: Optional[str] = Field(
default=None,
description=(
"A case-insensitive partial match. For example, "
" passing 'marvin' will match "
"'marvin', 'sad-Marvin', and 'marvin-robot'."
),
examples=["marvin"],
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.TaskRun.name.in_(self.any_))
if self.like_ is not None:
filters.append(db.TaskRun.name.ilike(f"%{self.like_}%"))
return filters
| TaskRunFilterName |
python | dagster-io__dagster | python_modules/libraries/dagster-azure/dagster_azure/pipes/context_injectors.py | {
"start": 360,
"end": 1854
} | class ____(PipesContextInjector):
"""A context injector that injects context by writing to a temporary AzureBlobStorage location.
Args:
container (str): The AzureBlobStorage container to write to.
client (azure.storage.blob.BlobServiceClient): An Azure Blob Storage client.
key_prefix (Optional[str]): An optional prefix to use for the Azure Blob Storage key. Defaults to a random
string.
"""
def __init__(self, *, container: str, client: BlobServiceClient):
super().__init__()
self.bucket = check.str_param(container, "container")
self.client = client
@contextmanager
def inject_context(self, context: PipesContextData) -> Iterator[PipesParams]: # pyright: ignore[reportIncompatibleMethodOverride]
key_prefix = "".join(random.choices(string.ascii_letters, k=30))
key = os.path.join(key_prefix, _CONTEXT_FILENAME)
with self.client.get_blob_client(self.bucket, key) as blob_client:
blob_client.upload_blob(json.dumps(context).encode("utf-8"))
yield {"bucket": self.bucket, "key": key}
blob_client.delete_blob()
def no_messages_debug_text(self) -> str:
return (
"Attempted to inject context via a temporary file in AzureBlobStorage. Expected"
" PipesAzureBlobStorageContextLoader to be explicitly passed to open_dagster_pipes in the external"
" process."
)
| PipesAzureBlobStorageContextInjector |
python | pytorch__pytorch | torch/distributed/elastic/timer/api.py | {
"start": 555,
"end": 1569
} | class ____:
"""
Data object representing a countdown timer acquisition and release
that is used between the ``TimerClient`` and ``TimerServer``.
A negative ``expiration_time`` should be interpreted as a "release"
request.
.. note:: the type of ``worker_id`` is implementation specific.
It is whatever the TimerServer and TimerClient implementations
have on to uniquely identify a worker.
"""
__slots__ = ["worker_id", "scope_id", "expiration_time"]
def __init__(self, worker_id: Any, scope_id: str, expiration_time: float):
self.worker_id = worker_id
self.scope_id = scope_id
self.expiration_time = expiration_time
def __eq__(self, other):
if isinstance(other, TimerRequest):
return (
self.worker_id == other.worker_id
and self.scope_id == other.scope_id
and self.expiration_time == other.expiration_time
)
return False
| TimerRequest |
python | doocs__leetcode | solution/0800-0899/0838.Push Dominoes/Solution.py | {
"start": 0,
"end": 855
} | class ____:
def pushDominoes(self, dominoes: str) -> str:
n = len(dominoes)
q = deque()
time = [-1] * n
force = defaultdict(list)
for i, f in enumerate(dominoes):
if f != '.':
q.append(i)
time[i] = 0
force[i].append(f)
ans = ['.'] * n
while q:
i = q.popleft()
if len(force[i]) == 1:
ans[i] = f = force[i][0]
j = i - 1 if f == 'L' else i + 1
if 0 <= j < n:
t = time[i]
if time[j] == -1:
q.append(j)
time[j] = t + 1
force[j].append(f)
elif time[j] == t + 1:
force[j].append(f)
return ''.join(ans)
| Solution |
python | getsentry__sentry | src/sentry/integrations/api/bases/external_actor.py | {
"start": 5317,
"end": 6641
} | class ____(ExternalActorSerializerBase):
_actor_key = "user_id"
user_id = serializers.IntegerField(required=True, help_text="The user ID in Sentry.")
id = serializers.IntegerField(
required=False, read_only=True, help_text="The external actor ID."
)
def validate_user_id(self, user_id: int) -> RpcUser:
"""Ensure that this user exists and that they belong to the organization."""
if (
organization_service.check_membership_by_id(
user_id=user_id, organization_id=self.organization.id
)
is None
or (user := user_service.get_user(user_id=user_id)) is None
):
raise serializers.ValidationError("This member does not exist.")
return user
def serialize(self, instance: ExternalActor) -> ExternalActorResponse:
return {
"id": instance.id,
"external_id": instance.external_id,
"external_name": instance.external_name,
"provider": instance.provider,
"integration_id": instance.integration_id,
}
class Meta:
model = ExternalActor
fields = ["user_id", "external_id", "external_name", "provider", "integration_id", "id"]
@extend_schema_serializer(exclude_fields=["team_id"])
| ExternalUserSerializer |
python | PyCQA__pylint | tests/functional/p/protocol_classes_abstract.py | {
"start": 797,
"end": 908
} | class ____(FooProtocol): # [abstract-method]
"""Doesn't subclass typing.Protocol directly"""
| IndirectProtocol |
python | scipy__scipy | scipy/io/matlab/_miobase.py | {
"start": 10376,
"end": 13102
} | class ____:
""" Base object for reading mat files
To make this class functional, you will need to override the
following methods:
matrix_getter_factory - gives object to fetch next matrix from stream
guess_byte_order - guesses file byte order from file
"""
@docfiller
def __init__(self, mat_stream,
byte_order=None,
mat_dtype=False,
squeeze_me=False,
chars_as_strings=True,
matlab_compatible=False,
struct_as_record=True,
verify_compressed_data_integrity=True,
simplify_cells=False):
'''
Initializer for mat file reader
mat_stream : file-like
object with file API, open for reading
%(load_args)s
'''
# Initialize stream
self.mat_stream = mat_stream
self.dtypes = {}
if not byte_order:
byte_order = self.guess_byte_order()
else:
byte_order = boc.to_numpy_code(byte_order)
self.byte_order = byte_order
self.struct_as_record = struct_as_record
if matlab_compatible:
self.set_matlab_compatible()
else:
self.squeeze_me = squeeze_me
self.chars_as_strings = chars_as_strings
self.mat_dtype = mat_dtype
self.verify_compressed_data_integrity = verify_compressed_data_integrity
self.simplify_cells = simplify_cells
if simplify_cells:
self.squeeze_me = True
self.struct_as_record = False
def set_matlab_compatible(self):
''' Sets options to return arrays as MATLAB loads them '''
self.mat_dtype = True
self.squeeze_me = False
self.chars_as_strings = False
def guess_byte_order(self):
''' As we do not know what file type we have, assume native '''
return boc.native_code
def end_of_stream(self):
b = self.mat_stream.read(1)
curpos = self.mat_stream.tell()
self.mat_stream.seek(curpos-1)
return len(b) == 0
def arr_dtype_number(arr, num):
''' Return dtype for given number of items per element'''
return np.dtype(arr.dtype.str[:2] + str(num))
def arr_to_chars(arr):
''' Convert string array to char array '''
dims = list(arr.shape)
if not dims:
dims = [1]
dims.append(int(arr.dtype.str[2:]))
arr = np.ndarray(shape=dims,
dtype=arr_dtype_number(arr, 1),
buffer=arr)
empties = [arr == np.array('', dtype=arr.dtype)]
if not np.any(empties):
return arr
arr = arr.copy()
arr[tuple(empties)] = ' '
return arr
| MatFileReader |
python | ethereum__web3.py | web3/contract/base_contract.py | {
"start": 50631,
"end": 53629
} | class ____:
"""
Class for contract constructor API.
"""
def __init__(
self,
w3: Union["Web3", "AsyncWeb3[Any]"],
abi: ABI,
bytecode: HexStr,
*args: Any,
**kwargs: Any,
) -> None:
self.w3 = w3
self.abi = abi
self.bytecode = bytecode
self.data_in_transaction = self._encode_data_in_transaction(*args, **kwargs)
@combomethod
def _encode_data_in_transaction(self, *args: Any, **kwargs: Any) -> HexStr:
constructor_abi = find_constructor_abi_element_by_type(self.abi)
if constructor_abi:
if not args:
args = tuple()
if not kwargs:
kwargs = {}
arguments = get_normalized_abi_inputs(constructor_abi, *args, **kwargs)
data = add_0x_prefix(
encode_abi(self.w3, constructor_abi, arguments, data=self.bytecode)
)
else:
data = to_hex(self.bytecode)
return data
@combomethod
def _estimate_gas(self, transaction: TxParams | None = None) -> TxParams:
if transaction is None:
estimate_gas_transaction: TxParams = {}
else:
estimate_gas_transaction = cast(TxParams, dict(**transaction))
self.check_forbidden_keys_in_transaction(
estimate_gas_transaction, ["data", "to"]
)
if self.w3.eth.default_account is not empty:
estimate_gas_transaction.setdefault(
"from", cast(ChecksumAddress, self.w3.eth.default_account)
)
estimate_gas_transaction["data"] = self.data_in_transaction
return estimate_gas_transaction
def _get_transaction(self, transaction: TxParams | None = None) -> TxParams:
if transaction is None:
transact_transaction: TxParams = {}
else:
transact_transaction = cast(TxParams, dict(**transaction))
self.check_forbidden_keys_in_transaction(
transact_transaction, ["data", "to"]
)
if self.w3.eth.default_account is not empty:
transact_transaction.setdefault(
"from", cast(ChecksumAddress, self.w3.eth.default_account)
)
transact_transaction["data"] = self.data_in_transaction
return transact_transaction
@combomethod
def _build_transaction(self, transaction: TxParams | None = None) -> TxParams:
built_transaction = self._get_transaction(transaction)
built_transaction["to"] = Address(b"")
return built_transaction
@staticmethod
def check_forbidden_keys_in_transaction(
transaction: TxParams, forbidden_keys: Collection[str] | None = None
) -> None:
keys_found = transaction.keys() & forbidden_keys
if keys_found:
raise Web3ValueError(
f"Cannot set '{', '.join(keys_found)}' field(s) in transaction"
)
| BaseContractConstructor |
python | pikepdf__pikepdf | src/pikepdf/models/metadata.py | {
"start": 7747,
"end": 8153
} | class ____(ABC):
"""XMP <-> DocumentInfo converter."""
@staticmethod
@abstractmethod
def xmp_from_docinfo(docinfo_val: str | None) -> Any: # type: ignore
"""Derive XMP metadata from a DocumentInfo string."""
@staticmethod
@abstractmethod
def docinfo_from_xmp(xmp_val: Any) -> str | None:
"""Derive a DocumentInfo value from equivalent XMP metadata."""
| Converter |
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 35021,
"end": 35113
} | class ____(StringEnum):
sequential = "sequential"
random = "random"
| IterationOrderEnum |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/settings.py | {
"start": 8626,
"end": 11467
} | class ____(abc.ABC):
seed: int = parser.get_default("seed")
def __str__(self) -> str:
"""
Helper method to output sampler stats to console.
"""
raise TrainerConfigError(f"__str__ not implemented for type {self.__class__}.")
@staticmethod
def structure(
d: Union[Mapping, float], t: type
) -> "ParameterRandomizationSettings":
"""
Helper method to a ParameterRandomizationSettings class. Meant to be registered with
cattr.register_structure_hook() and called with cattr.structure(). This is needed to handle
the special Enum selection of ParameterRandomizationSettings classes.
"""
if isinstance(d, (float, int)):
return ConstantSettings(value=d)
if not isinstance(d, Mapping):
raise TrainerConfigError(
f"Unsupported parameter randomization configuration {d}."
)
if "sampler_type" not in d:
raise TrainerConfigError(
f"Sampler configuration does not contain sampler_type : {d}."
)
if "sampler_parameters" not in d:
raise TrainerConfigError(
f"Sampler configuration does not contain sampler_parameters : {d}."
)
enum_key = ParameterRandomizationType(d["sampler_type"])
t = enum_key.to_settings()
return strict_to_cls(d["sampler_parameters"], t)
@staticmethod
def unstructure(d: "ParameterRandomizationSettings") -> Mapping:
"""
Helper method to a ParameterRandomizationSettings class. Meant to be registered with
cattr.register_unstructure_hook() and called with cattr.unstructure().
"""
_reversed_mapping = {
UniformSettings: ParameterRandomizationType.UNIFORM,
GaussianSettings: ParameterRandomizationType.GAUSSIAN,
MultiRangeUniformSettings: ParameterRandomizationType.MULTIRANGEUNIFORM,
ConstantSettings: ParameterRandomizationType.CONSTANT,
}
sampler_type: Optional[str] = None
for t, name in _reversed_mapping.items():
if isinstance(d, t):
sampler_type = name.value
sampler_parameters = attr.asdict(d)
return {"sampler_type": sampler_type, "sampler_parameters": sampler_parameters}
@abc.abstractmethod
def apply(self, key: str, env_channel: EnvironmentParametersChannel) -> None:
"""
Helper method to send sampler settings over EnvironmentParametersChannel
Calls the appropriate sampler type set method.
:param key: environment parameter to be sampled
:param env_channel: The EnvironmentParametersChannel to communicate sampler settings to environment
"""
pass
@attr.s(auto_attribs=True)
| ParameterRandomizationSettings |
python | pandas-dev__pandas | pandas/tests/reshape/concat/test_sort.py | {
"start": 114,
"end": 4350
} | class ____:
def test_concat_sorts_columns(self, sort):
# GH-4588
df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = DataFrame({"a": [3, 4], "c": [5, 6]})
# for sort=True/None
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
# default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], ignore_index=True, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_sorts_index(self, sort):
df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
df2 = DataFrame({"b": [1, 2]}, index=["a", "b"])
# For True/None
expected = DataFrame(
{"a": [2, 3, 1], "b": [1, 2, None]},
index=["a", "b", "c"],
columns=["a", "b"],
)
if sort is False:
expected = expected.loc[["c", "a", "b"]]
# Warn and sort by default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], axis=1, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_inner_sort(self, sort):
# https://github.com/pandas-dev/pandas/pull/20613
df1 = DataFrame(
{"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"]
)
df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4])
with tm.assert_produces_warning(None):
# unset sort should *not* warn for inner join
# since that never sorted
result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True)
expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"])
if sort is True:
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort(self):
# GH-4588
df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"])
result = pd.concat([df, df], sort=True, ignore_index=True)
expected = DataFrame(
{"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]},
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
result = pd.concat(
[df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True
)
expected = expected[["b", "c"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort_does_not_raise(self):
# GH-4588
# We catch TypeErrors from sorting internally and do not re-raise.
df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"])
expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"])
result = pd.concat([df, df], ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
def test_concat_frame_with_sort_false(self):
# GH 43375
result = pd.concat(
[DataFrame({i: i}, index=[i]) for i in range(2, 0, -1)], sort=False
)
expected = DataFrame([[2, np.nan], [np.nan, 1]], index=[2, 1], columns=[2, 1])
tm.assert_frame_equal(result, expected)
# GH 37937
df1 = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[1, 2, 3])
df2 = DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]}, index=[3, 1, 6])
result = pd.concat([df2, df1], axis=1, sort=False)
expected = DataFrame(
[
[7.0, 10.0, 3.0, 6.0],
[8.0, 11.0, 1.0, 4.0],
[9.0, 12.0, np.nan, np.nan],
[np.nan, np.nan, 2.0, 5.0],
],
index=[3, 1, 6, 2],
columns=["c", "d", "a", "b"],
)
tm.assert_frame_equal(result, expected)
def test_concat_sort_none_raises(self):
# GH#41518
df = DataFrame({1: [1, 2], "a": [3, 4]})
msg = "The 'sort' keyword only accepts boolean values; None was passed."
with pytest.raises(ValueError, match=msg):
pd.concat([df, df], sort=None)
| TestConcatSort |
python | pandas-dev__pandas | pandas/tests/indexes/datetimes/test_date_range.py | {
"start": 49391,
"end": 64465
} | class ____:
# Tests revolving around less-common (non-Tick) `freq` keywords.
def test_date_range_custom_business_month_begin(self, unit):
hcal = USFederalHolidayCalendar()
freq = offsets.CBMonthBegin(calendar=hcal)
dti = date_range(start="20120101", end="20130101", freq=freq, unit=unit)
assert all(freq.is_on_offset(x) for x in dti)
expected = DatetimeIndex(
[
"2012-01-03",
"2012-02-01",
"2012-03-01",
"2012-04-02",
"2012-05-01",
"2012-06-01",
"2012-07-02",
"2012-08-01",
"2012-09-04",
"2012-10-01",
"2012-11-01",
"2012-12-03",
],
dtype=f"M8[{unit}]",
freq=freq,
)
tm.assert_index_equal(dti, expected)
def test_date_range_custom_business_month_end(self, unit):
hcal = USFederalHolidayCalendar()
freq = offsets.CBMonthEnd(calendar=hcal)
dti = date_range(start="20120101", end="20130101", freq=freq, unit=unit)
assert all(freq.is_on_offset(x) for x in dti)
expected = DatetimeIndex(
[
"2012-01-31",
"2012-02-29",
"2012-03-30",
"2012-04-30",
"2012-05-31",
"2012-06-29",
"2012-07-31",
"2012-08-31",
"2012-09-28",
"2012-10-31",
"2012-11-30",
"2012-12-31",
],
dtype=f"M8[{unit}]",
freq=freq,
)
tm.assert_index_equal(dti, expected)
def test_date_range_with_custom_holidays(self, unit):
# GH#30593
freq = offsets.CustomBusinessHour(start="15:00", holidays=["2020-11-26"])
result = date_range(start="2020-11-25 15:00", periods=4, freq=freq, unit=unit)
expected = DatetimeIndex(
[
"2020-11-25 15:00:00",
"2020-11-25 16:00:00",
"2020-11-27 15:00:00",
"2020-11-27 16:00:00",
],
dtype=f"M8[{unit}]",
freq=freq,
)
tm.assert_index_equal(result, expected)
def test_date_range_businesshour(self, unit):
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
],
dtype=f"M8[{unit}]",
freq="bh",
)
rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="bh", unit=unit)
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(
["2014-07-04 16:00", "2014-07-07 09:00"], dtype=f"M8[{unit}]", freq="bh"
)
rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="bh", unit=unit)
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
"2014-07-08 11:00",
"2014-07-08 12:00",
"2014-07-08 13:00",
"2014-07-08 14:00",
"2014-07-08 15:00",
"2014-07-08 16:00",
],
dtype=f"M8[{unit}]",
freq="bh",
)
rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="bh", unit=unit)
tm.assert_index_equal(idx, rng)
def test_date_range_business_hour2(self, unit):
idx1 = date_range(
start="2014-07-04 15:00", end="2014-07-08 10:00", freq="bh", unit=unit
)
idx2 = date_range(start="2014-07-04 15:00", periods=12, freq="bh", unit=unit)
idx3 = date_range(end="2014-07-08 10:00", periods=12, freq="bh", unit=unit)
expected = DatetimeIndex(
[
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
],
dtype=f"M8[{unit}]",
freq="bh",
)
tm.assert_index_equal(idx1, expected)
tm.assert_index_equal(idx2, expected)
tm.assert_index_equal(idx3, expected)
idx4 = date_range(
start="2014-07-04 15:45", end="2014-07-08 10:45", freq="bh", unit=unit
)
idx5 = date_range(start="2014-07-04 15:45", periods=12, freq="bh", unit=unit)
idx6 = date_range(end="2014-07-08 10:45", periods=12, freq="bh", unit=unit)
expected2 = expected + Timedelta(minutes=45).as_unit(unit)
expected2.freq = "bh"
tm.assert_index_equal(idx4, expected2)
tm.assert_index_equal(idx5, expected2)
tm.assert_index_equal(idx6, expected2)
def test_date_range_business_hour_short(self, unit):
# GH#49835
idx4 = date_range(start="2014-07-01 10:00", freq="bh", periods=1, unit=unit)
expected4 = DatetimeIndex(["2014-07-01 10:00"], dtype=f"M8[{unit}]", freq="bh")
tm.assert_index_equal(idx4, expected4)
def test_date_range_year_start(self, unit):
# see GH#9313
rng = date_range("1/1/2013", "7/1/2017", freq="YS", unit=unit)
exp = DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
dtype=f"M8[{unit}]",
freq="YS",
)
tm.assert_index_equal(rng, exp)
def test_date_range_year_end(self, unit):
# see GH#9313
rng = date_range("1/1/2013", "7/1/2017", freq="YE", unit=unit)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"],
dtype=f"M8[{unit}]",
freq="YE",
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq_year_end(self, unit):
# GH#11018
rng = date_range("2011-12-31", freq="-2YE", periods=3, unit=unit)
exp = DatetimeIndex(
["2011-12-31", "2009-12-31", "2007-12-31"], dtype=f"M8[{unit}]", freq="-2YE"
)
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2YE"
def test_date_range_business_year_end_year(self, unit):
# see GH#9313
rng = date_range("1/1/2013", "7/1/2017", freq="BYE", unit=unit)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"],
dtype=f"M8[{unit}]",
freq="BYE",
)
tm.assert_index_equal(rng, exp)
def test_date_range_bms(self, unit):
# GH#1645
result = date_range("1/1/2000", periods=10, freq="BMS", unit=unit)
expected = DatetimeIndex(
[
"2000-01-03",
"2000-02-01",
"2000-03-01",
"2000-04-03",
"2000-05-01",
"2000-06-01",
"2000-07-03",
"2000-08-01",
"2000-09-01",
"2000-10-02",
],
dtype=f"M8[{unit}]",
freq="BMS",
)
tm.assert_index_equal(result, expected)
def test_date_range_semi_month_begin(self, unit):
dates = [
datetime(2007, 12, 15),
datetime(2008, 1, 1),
datetime(2008, 1, 15),
datetime(2008, 2, 1),
datetime(2008, 2, 15),
datetime(2008, 3, 1),
datetime(2008, 3, 15),
datetime(2008, 4, 1),
datetime(2008, 4, 15),
datetime(2008, 5, 1),
datetime(2008, 5, 15),
datetime(2008, 6, 1),
datetime(2008, 6, 15),
datetime(2008, 7, 1),
datetime(2008, 7, 15),
datetime(2008, 8, 1),
datetime(2008, 8, 15),
datetime(2008, 9, 1),
datetime(2008, 9, 15),
datetime(2008, 10, 1),
datetime(2008, 10, 15),
datetime(2008, 11, 1),
datetime(2008, 11, 15),
datetime(2008, 12, 1),
datetime(2008, 12, 15),
]
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SMS", unit=unit)
exp = DatetimeIndex(dates, dtype=f"M8[{unit}]", freq="SMS")
tm.assert_index_equal(result, exp)
def test_date_range_semi_month_end(self, unit):
dates = [
datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31),
]
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SME", unit=unit)
exp = DatetimeIndex(dates, dtype=f"M8[{unit}]", freq="SME")
tm.assert_index_equal(result, exp)
def test_date_range_week_of_month(self, unit):
# GH#20517
# Note the start here is not on_offset for this freq
result = date_range(start="20110101", periods=1, freq="WOM-1MON", unit=unit)
expected = DatetimeIndex(["2011-01-03"], dtype=f"M8[{unit}]", freq="WOM-1MON")
tm.assert_index_equal(result, expected)
result2 = date_range(start="20110101", periods=2, freq="WOM-1MON", unit=unit)
expected2 = DatetimeIndex(
["2011-01-03", "2011-02-07"], dtype=f"M8[{unit}]", freq="WOM-1MON"
)
tm.assert_index_equal(result2, expected2)
def test_date_range_week_of_month2(self, unit):
# GH#5115, GH#5348
result = date_range("2013-1-1", periods=4, freq="WOM-1SAT", unit=unit)
expected = DatetimeIndex(
["2013-01-05", "2013-02-02", "2013-03-02", "2013-04-06"],
dtype=f"M8[{unit}]",
freq="WOM-1SAT",
)
tm.assert_index_equal(result, expected)
def test_date_range_negative_freq_month_end(self, unit):
# GH#11018
rng = date_range("2011-01-31", freq="-2ME", periods=3, unit=unit)
exp = DatetimeIndex(
["2011-01-31", "2010-11-30", "2010-09-30"], dtype=f"M8[{unit}]", freq="-2ME"
)
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2ME"
def test_date_range_fy5253(self, unit):
freq = offsets.FY5253(startingMonth=1, weekday=3, variation="nearest")
dti = date_range(
start="2013-01-01",
periods=2,
freq=freq,
unit=unit,
)
expected = DatetimeIndex(
["2013-01-31", "2014-01-30"], dtype=f"M8[{unit}]", freq=freq
)
tm.assert_index_equal(dti, expected)
@pytest.mark.parametrize(
"freqstr,offset",
[
("QS", offsets.QuarterBegin(startingMonth=1)),
("BQE", offsets.BQuarterEnd(startingMonth=12)),
("W-SUN", offsets.Week(weekday=6)),
],
)
def test_date_range_freqstr_matches_offset(self, freqstr, offset):
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx1 = date_range(start=sdate, end=edate, freq=freqstr)
idx2 = date_range(start=sdate, end=edate, freq=offset)
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
def test_date_range_partial_day_year_end(self, unit):
# GH#56134
rng = date_range(
start="2021-12-31 00:00:01",
end="2023-10-31 00:00:00",
freq="YE",
unit=unit,
)
exp = DatetimeIndex(
["2021-12-31 00:00:01", "2022-12-31 00:00:01"],
dtype=f"M8[{unit}]",
freq="YE",
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq_year_end_inbounds(self, unit):
# GH#56147
rng = date_range(
start="2023-10-31 00:00:00",
end="2021-10-31 00:00:00",
freq="-1YE",
unit=unit,
)
exp = DatetimeIndex(
["2022-12-31 00:00:00", "2021-12-31 00:00:00"],
dtype=f"M8[{unit}]",
freq="-1YE",
)
tm.assert_index_equal(rng, exp)
def test_date_range_tzaware_endpoints_accept_ambiguous(self):
# https://github.com/pandas-dev/pandas/issues/52908
start = Timestamp("1916-08-01", tz="Europe/Oslo")
end = Timestamp("1916-12-01", tz="Europe/Oslo")
res = date_range(start, end, freq="MS", ambiguous=True)
exp = date_range(
"1916-08-01", "1916-12-01", freq="MS", tz="Europe/Oslo", ambiguous=True
)
tm.assert_index_equal(res, exp)
def test_date_range_tzaware_endpoints_accept_nonexistent(self):
# Europe/London spring-forward: 2015-03-29 01:30 does not exist.
tz = "Europe/London"
start = Timestamp("2015-03-28 01:30", tz=tz)
end = Timestamp("2015-03-30 01:30", tz=tz)
result = date_range(start, end, freq="D", nonexistent="shift_forward")
# Build expected by generating naive daily times, then tz_localize so
# the nonexistent handling is applied during localization.
expected = date_range(
"2015-03-28 01:30", "2015-03-30 01:30", freq="D"
).tz_localize(tz, nonexistent="shift_forward")
tm.assert_index_equal(result, expected)
| TestDateRangeNonTickFreq |
python | PrefectHQ__prefect | tests/test_flows.py | {
"start": 3322,
"end": 7018
} | class ____:
def test_initializes(self):
f = Flow(
name="test",
fn=lambda **kwargs: 42,
version="A",
description="B",
flow_run_name="hi",
)
assert f.name == "test"
assert f.fn() == 42
assert f.version == "A"
assert f.description == "B"
assert f.flow_run_name == "hi"
def test_initializes_with_callable_flow_run_name(self):
f = Flow(name="test", fn=lambda **kwargs: 42, flow_run_name=lambda: "hi")
assert f.name == "test"
assert f.fn() == 42
assert f.flow_run_name() == "hi"
def test_initializes_with_default_version(self):
f = Flow(name="test", fn=lambda **kwargs: 42)
assert isinstance(f.version, str)
@pytest.mark.parametrize(
"sourcefile", [None, "<stdin>", "<ipython-input-1-d31e8a6792d4>"]
)
def test_version_none_if_source_file_cannot_be_determined(
self, monkeypatch, sourcefile
):
"""
`getsourcefile` will return `None` when functions are defined interactively,
or other values on Windows.
"""
monkeypatch.setattr(
"prefect.flows.inspect.getsourcefile", MagicMock(return_value=sourcefile)
)
f = Flow(name="test", fn=lambda **kwargs: 42)
assert f.version is None
def test_raises_on_bad_funcs(self):
with pytest.raises(TypeError):
Flow(name="test", fn={})
def test_default_description_is_from_docstring(self):
def my_fn():
"""
Hello
"""
f = Flow(
name="test",
fn=my_fn,
)
assert f.description == "Hello"
def test_default_name_is_from_function(self):
def my_fn():
pass
f = Flow(
fn=my_fn,
)
assert f.name == "my-fn"
def test_raises_clear_error_when_not_compatible_with_validator(self):
def my_fn(v__args):
pass
with pytest.raises(
ValueError,
match="Flow function is not compatible with `validate_parameters`",
):
Flow(fn=my_fn)
@pytest.mark.parametrize(
"name",
[
"my/flow",
r"my%flow",
"my<flow",
"my>flow",
"my&flow",
],
)
def test_invalid_name(self, name):
with pytest.raises(InvalidNameError, match="contains an invalid character"):
Flow(fn=lambda: 1, name=name)
def test_lambda_name_coerced_to_legal_characters(self):
f = Flow(fn=lambda: 42)
assert f.name == "unknown-lambda"
def test_invalid_run_name(self):
class InvalidFlowRunNameArg:
def format(*args, **kwargs):
pass
with pytest.raises(
TypeError,
match=(
"Expected string or callable for 'flow_run_name'; got"
" InvalidFlowRunNameArg instead."
),
):
Flow(fn=lambda: 1, name="hello", flow_run_name=InvalidFlowRunNameArg())
def test_using_return_state_in_flow_definition_raises_reserved(self):
with pytest.raises(
ReservedArgumentError, match="'return_state' is a reserved argument name"
):
Flow(name="test", fn=lambda return_state: 42, version="A", description="B")
def test_param_description_from_docstring(self):
def my_fn(x):
"""
Hello
Args:
x: description
"""
f = Flow(fn=my_fn)
assert parameter_schema(f).properties["x"]["description"] == "description"
| TestFlow |
python | google__pytype | pytype/directors/parser.py | {
"start": 1711,
"end": 2594
} | class ____:
"""Tracks return statements in with/try blocks."""
def __init__(self):
self._block_ranges = []
self._returns = []
self._block_returns = {}
self._final = False
def add_block(self, node):
line_range = LineRange.from_node(node)
self._block_ranges.append(line_range)
def add_return(self, node):
self._returns.append(node.lineno)
def finalize(self):
for br in self._block_ranges:
self._block_returns[br.start_line] = sorted(
r for r in self._returns if r in br
)
self._final = True
def all_returns(self):
return set(self._returns)
def __iter__(self):
assert self._final
return iter(self._block_returns.items())
def __repr__(self):
return f"""
Blocks: {self._block_ranges}
Returns: {self._returns}
{self._block_returns}
"""
@dataclasses.dataclass
| _BlockReturns |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 592179,
"end": 597506
} | class ____(
ValueChannelMixin, core.ValueDefWithConditionMarkPropFieldOrDatumDefnumber
):
"""
SizeValue schema wrapper.
Parameters
----------
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalMarkPropFieldOrDatumDef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, :class:`ConditionalParameterMarkPropFieldOrDatumDef`, :class:`ConditionalPredicateMarkPropFieldOrDatumDef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
A field definition or one or more value definition(s) with a parameter predicate.
value : dict, float, :class:`ExprRef`
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "size"
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> SizeValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> SizeValue: ...
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
empty: Optional[bool] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> SizeValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
empty: Optional[bool] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> SizeValue: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> SizeValue: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> SizeValue: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefnumberExprRef], /
) -> SizeValue: ...
def __init__(
self,
value,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
**kwds,
):
super().__init__(value=value, condition=condition, **kwds)
@with_property_setters
| SizeValue |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_pair_cramers_phi_value_to_be_less_than.py | {
"start": 1070,
"end": 6168
} | class ____(BatchExpectation):
def __init__(self, *args, **kwargs):
raise NotImplementedError
library_metadata = {
"maturity": "production",
"tags": [
"core expectation",
"multi-column expectation",
"needs migration to modular expectations api",
],
"contributors": ["@great_expectations"],
"requirements": [],
}
metric_dependencies = tuple()
success_keys = (
"column_A",
"column_B",
"threshold",
)
# default_kwarg_values = {
# "column_A": None,
# "column_B": None,
# "bins_A": None,
# "bins_B": None,
# "n_bins_A": None,
# "n_bins_B": None,
# "threshold": 0.1,
# "result_format": "BASIC",
# "catch_exceptions": False,
# }
args_keys = (
"column_A",
"column_B",
)
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column_A", RendererValueType.STRING),
("column_B", RendererValueType.STRING),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if not params.column_A or not params.column_B:
renderer_configuration.template_str = " unrecognized kwargs for expect_column_pair_cramers_phi_value_to_be_less_than: missing column." # noqa: E501 # FIXME CoP
else:
renderer_configuration.template_str = (
"Values in $column_A and $column_B must be independent."
)
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
_ = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(configuration.kwargs, ["column_A", "column_B"])
if (params["column_A"] is None) or (params["column_B"] is None):
template_str = " unrecognized kwargs for expect_column_pair_cramers_phi_value_to_be_less_than: missing column." # noqa: E501 # FIXME CoP
else:
template_str = "Values in $column_A and $column_B must be independent."
rendered_string_template_content = RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
return [rendered_string_template_content]
@classmethod
@renderer(renderer_type=LegacyDiagnosticRendererType.OBSERVED_VALUE)
def _diagnostic_observed_value_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
observed_value = result.result.get("observed_value")
column_A = result.expectation_config.kwargs["column_A"]
column_B = result.expectation_config.kwargs["column_B"]
crosstab = result.result.get("details", {}).get("crosstab")
if observed_value is not None:
observed_value = num_to_str(observed_value, precision=3, use_locale=True)
if crosstab is not None:
table = [[""] + list(crosstab.columns)]
for col in range(len(crosstab)):
table.append([crosstab.index[col]] + list(crosstab.iloc[col, :]))
return RenderedTableContent(
**{
"content_block_type": "table",
"header": f"Observed cramers phi of {observed_value}. \n"
f"Crosstab between {column_A} (rows) and {column_B} (columns):",
"table": table,
"styling": {
"body": {
"classes": [
"table",
"table-sm",
"table-unbordered",
"col-4",
"mt-2",
],
}
},
}
)
else:
return observed_value
else:
return "--"
| ExpectColumnPairCramersPhiValueToBeLessThan |
python | aio-libs__aiohttp | tests/test_multipart.py | {
"start": 1455,
"end": 2224
} | class ____(StreamReader):
def __init__(self, content: bytes) -> None:
self.content = io.BytesIO(content)
async def read(self, size: int | None = None) -> bytes:
return self.content.read(size)
def at_eof(self) -> bool:
return self.content.tell() == len(self.content.getbuffer())
async def readline(self) -> bytes:
return self.content.readline()
def unread_data(self, data: bytes) -> None:
self.content = io.BytesIO(data + self.content.read())
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.content.close()
| Stream |
python | pdm-project__pdm | src/pdm/project/lockfile/pylock.py | {
"start": 362,
"end": 2133
} | class ____(Lockfile):
SUPPORTED_FLAGS = frozenset([FLAG_DIRECT_MINIMAL_VERSIONS, FLAG_INHERIT_METADATA, FLAG_STATIC_URLS])
@property
def hash(self) -> tuple[str, str]:
return next(iter(self._data.get("tool", {}).get("pdm", {}).get("hashes", {}).items()), ("", ""))
def update_hash(self, hash_value: str, algo: str = "sha256") -> None:
self._data.setdefault("tool", {}).setdefault("pdm", {}).setdefault("hashes", {})[algo] = hash_value
@property
def groups(self) -> list[str] | None:
return [*self._data.get("dependency-groups", []), *self._data.get("extras", [])]
@cached_property
def default_strategies(self) -> set[str]:
return {FLAG_INHERIT_METADATA, FLAG_STATIC_URLS}
@property
def strategy(self) -> set[str]:
return set(self._data.get("tool", {}).get("pdm", {}).get("strategy", self.default_strategies))
def apply_strategy_change(self, changes: Iterable[str]) -> set[str]:
for change in changes:
change = change.replace("-", "_").lower()
if change.startswith("no_") and change[3:] != FLAG_DIRECT_MINIMAL_VERSIONS:
raise PdmUsageError(f"Unsupported strategy change for pylock: {change}")
return super().apply_strategy_change(changes)
def format_lockfile(self, repository: LockedRepository, groups: Iterable[str] | None, strategy: set[str]) -> None:
from pdm.formats.pylock import PyLockConverter
converter = PyLockConverter(repository.environment.project, repository)
data = converter.convert(groups)
data["tool"]["pdm"]["strategy"] = sorted(strategy)
self.set_data(data)
def compatibility(self) -> Compatibility: # pragma: no cover
return Compatibility.SAME
| PyLock |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-make-the-array-k-increasing.py | {
"start": 78,
"end": 686
} | class ____(object):
def kIncreasing(self, arr, k):
"""
:type arr: List[int]
:type k: int
:rtype: int
"""
def longest_non_decreasing_subsequence(arr):
result = []
for x in arr:
right = bisect.bisect_right(result, x)
if right == len(result):
result.append(x)
else:
result[right] = x
return len(result)
return len(arr) - sum(longest_non_decreasing_subsequence((arr[j] for j in xrange(i, len(arr), k))) for i in xrange(k))
| Solution |
python | walkccc__LeetCode | solutions/3042. Count Prefix and Suffix Pairs I/3042.py | {
"start": 115,
"end": 449
} | class ____:
def __init__(self):
self.root = TrieNode()
def insert(self, word: str) -> int:
node = self.root
count = 0
for i, prefix in enumerate(word):
suffix = word[-1 - i]
node = node.children.setdefault((prefix, suffix), TrieNode())
count += node.count
node.count += 1
return count
| Trie |
python | pytest-dev__pytest | testing/test_cacheprovider.py | {
"start": 43884,
"end": 46599
} | class ____(Enum):
"""Action to perform on the cache directory."""
MKDIR = auto()
SET = auto()
@pytest.mark.parametrize("action", list(Action))
def test_gitignore(
pytester: Pytester,
action: Action,
) -> None:
"""Ensure we automatically create .gitignore file in the pytest_cache directory (#3286)."""
from _pytest.cacheprovider import Cache
config = pytester.parseconfig()
cache = Cache.for_config(config, _ispytest=True)
if action == Action.MKDIR:
cache.mkdir("foo")
elif action == Action.SET:
cache.set("foo", "bar")
else:
assert_never(action)
msg = "# Created by pytest automatically.\n*\n"
gitignore_path = cache._cachedir.joinpath(".gitignore")
assert gitignore_path.read_text(encoding="UTF-8") == msg
# Does not overwrite existing/custom one.
gitignore_path.write_text("custom", encoding="utf-8")
if action == Action.MKDIR:
cache.mkdir("something")
elif action == Action.SET:
cache.set("something", "else")
else:
assert_never(action)
assert gitignore_path.read_text(encoding="UTF-8") == "custom"
def test_preserve_keys_order(pytester: Pytester) -> None:
"""Ensure keys order is preserved when saving dicts (#9205)."""
from _pytest.cacheprovider import Cache
config = pytester.parseconfig()
cache = Cache.for_config(config, _ispytest=True)
cache.set("foo", {"z": 1, "b": 2, "a": 3, "d": 10})
read_back = cache.get("foo", None)
assert list(read_back.items()) == [("z", 1), ("b", 2), ("a", 3), ("d", 10)]
def test_does_not_create_boilerplate_in_existing_dirs(pytester: Pytester) -> None:
from _pytest.cacheprovider import Cache
pytester.makeini(
"""
[pytest]
cache_dir = .
"""
)
config = pytester.parseconfig()
cache = Cache.for_config(config, _ispytest=True)
cache.set("foo", "bar")
assert os.path.isdir("v") # cache contents
assert not os.path.exists(".gitignore")
assert not os.path.exists("README.md")
def test_cachedir_tag(pytester: Pytester) -> None:
"""Ensure we automatically create CACHEDIR.TAG file in the pytest_cache directory (#4278)."""
from _pytest.cacheprovider import Cache
from _pytest.cacheprovider import CACHEDIR_TAG_CONTENT
config = pytester.parseconfig()
cache = Cache.for_config(config, _ispytest=True)
cache.set("foo", "bar")
cachedir_tag_path = cache._cachedir.joinpath("CACHEDIR.TAG")
assert cachedir_tag_path.read_bytes() == CACHEDIR_TAG_CONTENT
def test_clioption_with_cacheshow_and_help(pytester: Pytester) -> None:
result = pytester.runpytest("--cache-show", "--help")
assert result.ret == 0
| Action |
python | mlflow__mlflow | examples/pytorch/torchscript/IrisClassification/iris_classification.py | {
"start": 285,
"end": 3339
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(4, 10)
self.fc2 = nn.Linear(10, 10)
self.fc3 = nn.Linear(10, 3)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.dropout(x, 0.2)
x = self.fc3(x)
return x
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def prepare_data():
iris = load_iris()
data = iris.data
labels = iris.target
target_names = iris.target_names
X_train, X_test, y_train, y_test = train_test_split(
data, labels, test_size=0.2, random_state=42, shuffle=True, stratify=labels
)
X_train = torch.FloatTensor(X_train).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_train = torch.LongTensor(y_train).to(device)
y_test = torch.LongTensor(y_test).to(device)
return X_train, X_test, y_train, y_test, target_names
def train_model(model, epochs, X_train, y_train):
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
for epoch in range(epochs):
out = model(X_train)
loss = criterion(out, y_train).to(device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % 10 == 0:
print("number of epoch", epoch, "loss", float(loss))
return model
def test_model(model, X_test, y_test):
model.eval()
with torch.no_grad():
predict_out = model(X_test)
_, predict_y = torch.max(predict_out, 1)
print("\nprediction accuracy", float(accuracy_score(y_test.cpu(), predict_y.cpu())))
return infer_signature(X_test.numpy(), predict_out.numpy())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Iris Classification Torchscripted model")
parser.add_argument(
"--epochs", type=int, default=100, help="number of epochs to run (default: 100)"
)
args = parser.parse_args()
model = IrisClassifier()
model = model.to(device)
X_train, X_test, y_train, y_test, target_names = prepare_data()
scripted_model = torch.jit.script(model) # scripting the model
scripted_model = train_model(scripted_model, args.epochs, X_train, y_train)
signature = test_model(scripted_model, X_test, y_test)
with mlflow.start_run() as run:
mlflow.pytorch.log_model(
scripted_model, name="model", signature=signature
) # logging scripted model
model_path = mlflow.get_artifact_uri("model")
loaded_pytorch_model = mlflow.pytorch.load_model(model_path) # loading scripted model
model.eval()
with torch.no_grad():
test_datapoint = torch.Tensor([4.4000, 3.0000, 1.3000, 0.2000]).to(device)
prediction = loaded_pytorch_model(test_datapoint)
actual = "setosa"
predicted = target_names[torch.argmax(prediction)]
print(f"\nPREDICTION RESULT: ACTUAL: {actual}, PREDICTED: {predicted}")
| IrisClassifier |
python | joke2k__faker | faker/providers/automotive/vi_VN/__init__.py | {
"start": 59,
"end": 642
} | class ____(AutomotiveProvider):
"""Implement automotive provider for ``vi_VN`` locale.
Sources:
- https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Vietnam
"""
license_formats = ("##?-#####",)
ascii_uppercase_vietnamese = "ABCDĐEFGHKLMNPSTUVXYZ"
def license_plate(self) -> str:
"""Generate a license plate."""
temp = re.sub(
r"\?",
lambda x: self.random_element(self.ascii_uppercase_vietnamese),
self.random_element(self.license_formats),
)
return self.numerify(temp)
| Provider |
python | kennethreitz__tablib | src/tablib/formats/_latex.py | {
"start": 117,
"end": 4154
} | class ____:
title = 'latex'
extensions = ('tex',)
TABLE_TEMPLATE = """\
%% Note: add \\usepackage{booktabs} to your preamble
%%
\\begin{table}[!htbp]
\\centering
%(CAPTION)s
\\begin{tabular}{%(COLSPEC)s}
\\toprule
%(HEADER)s
%(MIDRULE)s
%(BODY)s
\\bottomrule
\\end{tabular}
\\end{table}
"""
TEX_RESERVED_SYMBOLS_MAP = dict([
('\\', '\\textbackslash{}'),
('{', '\\{'),
('}', '\\}'),
('$', '\\$'),
('&', '\\&'),
('#', '\\#'),
('^', '\\textasciicircum{}'),
('_', '\\_'),
('~', '\\textasciitilde{}'),
('%', '\\%'),
])
TEX_RESERVED_SYMBOLS_RE = re.compile(
'(%s)' % '|'.join(map(re.escape, TEX_RESERVED_SYMBOLS_MAP.keys())))
@classmethod
def export_set(cls, dataset):
"""Returns LaTeX representation of dataset
:param dataset: dataset to serialize
:type dataset: tablib.core.Dataset
"""
caption = '\\caption{%s}' % dataset.title if dataset.title else '%'
colspec = cls._colspec(dataset.width)
header = cls._serialize_row(dataset.headers) if dataset.headers else ''
midrule = cls._midrule(dataset.width)
body = '\n'.join([cls._serialize_row(row) for row in dataset])
return cls.TABLE_TEMPLATE % dict(CAPTION=caption, COLSPEC=colspec,
HEADER=header, MIDRULE=midrule, BODY=body)
@classmethod
def _colspec(cls, dataset_width):
"""Generates the column specification for the LaTeX `tabular` environment
based on the dataset width.
The first column is justified to the left, all further columns are aligned
to the right.
.. note:: This is only a heuristic and most probably has to be fine-tuned
post export. Column alignment should depend on the data type, e.g., textual
content should usually be aligned to the left while numeric content almost
always should be aligned to the right.
:param dataset_width: width of the dataset
"""
spec = 'l'
for _ in range(1, dataset_width):
spec += 'r'
return spec
@classmethod
def _midrule(cls, dataset_width):
"""Generates the table `midrule`, which may be composed of several
`cmidrules`.
:param dataset_width: width of the dataset to serialize
"""
if not dataset_width or dataset_width == 1:
return '\\midrule'
return ' '.join([cls._cmidrule(colindex, dataset_width) for colindex in
range(1, dataset_width + 1)])
@classmethod
def _cmidrule(cls, colindex, dataset_width):
"""Generates the `cmidrule` for a single column with appropriate trimming
based on the column position.
:param colindex: Column index
:param dataset_width: width of the dataset
"""
rule = '\\cmidrule(%s){%d-%d}'
if colindex == 1:
# Rule of first column is trimmed on the right
return rule % ('r', colindex, colindex)
if colindex == dataset_width:
# Rule of last column is trimmed on the left
return rule % ('l', colindex, colindex)
# Inner columns are trimmed on the left and right
return rule % ('lr', colindex, colindex)
@classmethod
def _serialize_row(cls, row):
"""Returns string representation of a single row.
:param row: single dataset row
"""
new_row = [cls._escape_tex_reserved_symbols(str(item)) if item else ''
for item in row]
return 6 * ' ' + ' & '.join(new_row) + ' \\\\'
@classmethod
def _escape_tex_reserved_symbols(cls, input):
"""Escapes all TeX reserved symbols ('_', '~', etc.) in a string.
:param input: String to escape
"""
def replace(match):
return cls.TEX_RESERVED_SYMBOLS_MAP[match.group()]
return cls.TEX_RESERVED_SYMBOLS_RE.sub(replace, input)
| LATEXFormat |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/run_coordinator/base.py | {
"start": 323,
"end": 1038
} | class ____(NamedTuple):
"""Context available within a run coordinator's submit_run method."""
dagster_run: DagsterRun
workspace: "BaseWorkspaceRequestContext"
def get_request_header(self, key: str) -> Optional[str]:
from dagster._core.workspace.context import WorkspaceRequestContext
# if there is a source
if isinstance(self.workspace, WorkspaceRequestContext) and self.workspace.source:
headers = getattr(self.workspace.source, "headers", None)
# and it has a headers property
if headers:
# do a get against it
return headers.get(key)
# otherwise return None
return None
| SubmitRunContext |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 121411,
"end": 124597
} | class ____(Request):
"""
Signal a task has completed
:param force: If not true, call fails if the task status is not
in_progress/stopped
:type force: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "completed"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "If not true, call fails if the task status is not in_progress/stopped",
"type": ["boolean", "null"],
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
force: Optional[bool] = False,
status_reason: Optional[str] = None,
status_message: Optional[str] = None,
**kwargs: Any
) -> None:
super(CompletedRequest, self).__init__(**kwargs)
self.force = force
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| CompletedRequest |
python | wandb__wandb | wandb/sdk/artifacts/_generated/update_artifact_sequence.py | {
"start": 353,
"end": 574
} | class ____(GQLResult):
artifact_collection: ArtifactCollectionFragment = Field(alias="artifactCollection")
UpdateArtifactSequence.model_rebuild()
UpdateArtifactSequenceResult.model_rebuild()
| UpdateArtifactSequenceResult |
python | getsentry__sentry | src/sentry/notifications/types.py | {
"start": 8490,
"end": 9058
} | class ____:
organization: Organization
resource_id: int
key: str
referrer: str | None = None
"""
This is a special identifier that is used to indicate that the notification is a test notification.
It is used to set the ID of models that are required in order to send a test notification.
Note: This should eventually be deleted the test notification logic should instead utilize a notification platform
which should provide an API for sending test notifications without "hacking" the notification system.
"""
TEST_NOTIFICATION_ID = -1
| UnsubscribeContext |
python | ansible__ansible | test/integration/targets/ansible-test-sanity-pylint/ansible_collections/ns/col/plugins/lookup/deprecated.py | {
"start": 4140,
"end": 4246
} | class ____:
def __init__(self, thing: AnsibleModule) -> None:
self.module = thing
| MyOtherWrapper |
python | arrow-py__arrow | arrow/locales.py | {
"start": 23033,
"end": 24088
} | class ____(Locale):
names = ["zh-tw"]
past = "{0}前"
future = "{0}後"
and_word = "和"
timeframes = {
"now": "剛才",
"second": "1秒",
"seconds": "{0}秒",
"minute": "1分鐘",
"minutes": "{0}分鐘",
"hour": "1小時",
"hours": "{0}小時",
"day": "1天",
"days": "{0}天",
"week": "1週",
"weeks": "{0}週",
"month": "1個月",
"months": "{0}個月",
"year": "1年",
"years": "{0}年",
}
month_names = [
"",
"1月",
"2月",
"3月",
"4月",
"5月",
"6月",
"7月",
"8月",
"9月",
"10月",
"11月",
"12月",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "週一", "週二", "週三", "週四", "週五", "週六", "週日"]
day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"]
| ChineseTWLocale |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_ec2.py | {
"start": 1045,
"end": 5492
} | class ____:
def test_init(self):
ec2_operator = EC2InstanceStateSensor(
task_id="task_test",
target_state="stopped",
instance_id="i-123abc",
aws_conn_id="aws_conn_test",
region_name="region-test",
)
assert ec2_operator.task_id == "task_test"
assert ec2_operator.target_state == "stopped"
assert ec2_operator.instance_id == "i-123abc"
assert ec2_operator.aws_conn_id == "aws_conn_test"
assert ec2_operator.region_name == "region-test"
def test_init_invalid_target_state(self):
invalid_target_state = "target_state_test"
with pytest.raises(ValueError, match=f"Invalid target_state: {invalid_target_state}") as ctx:
EC2InstanceStateSensor(
task_id="task_test",
target_state=invalid_target_state,
instance_id="i-123abc",
)
msg = f"Invalid target_state: {invalid_target_state}"
assert str(ctx.value) == msg
@classmethod
def _create_instance(cls, hook: EC2Hook):
"""Create Instance and return instance id."""
conn = hook.get_conn()
try:
ec2_client = conn.meta.client
except AttributeError:
ec2_client = conn
# We need existed AMI Image ID otherwise `moto` will raise DeprecationWarning.
images = ec2_client.describe_images()["Images"]
response = ec2_client.run_instances(MaxCount=1, MinCount=1, ImageId=images[0]["ImageId"])
return response["Instances"][0]["InstanceId"]
@mock_aws
def test_running(self):
# create instance
ec2_hook = EC2Hook()
instance_id = self._create_instance(ec2_hook)
# stop instance
ec2_hook.get_instance(instance_id=instance_id).stop()
# start sensor, waits until ec2 instance state became running
start_sensor = EC2InstanceStateSensor(
task_id="start_sensor",
target_state="running",
instance_id=instance_id,
)
# assert instance state is not running
assert not start_sensor.poke(None)
# start instance
ec2_hook.get_instance(instance_id=instance_id).start()
# assert instance state is running
assert start_sensor.poke(None)
@mock_aws
def test_stopped(self):
# create instance
ec2_hook = EC2Hook()
instance_id = self._create_instance(ec2_hook)
# start instance
ec2_hook.get_instance(instance_id=instance_id).start()
# stop sensor, waits until ec2 instance state became stopped
stop_sensor = EC2InstanceStateSensor(
task_id="stop_sensor",
target_state="stopped",
instance_id=instance_id,
)
# assert instance state is not stopped
assert not stop_sensor.poke(None)
# stop instance
ec2_hook.get_instance(instance_id=instance_id).stop()
# assert instance state is stopped
assert stop_sensor.poke(None)
@mock_aws
def test_terminated(self):
# create instance
ec2_hook = EC2Hook()
instance_id = self._create_instance(ec2_hook)
# start instance
ec2_hook.get_instance(instance_id=instance_id).start()
# stop sensor, waits until ec2 instance state became terminated
stop_sensor = EC2InstanceStateSensor(
task_id="stop_sensor",
target_state="terminated",
instance_id=instance_id,
)
# assert instance state is not terminated
assert not stop_sensor.poke(None)
# stop instance
ec2_hook.get_instance(instance_id=instance_id).terminate()
# assert instance state is terminated
assert stop_sensor.poke(None)
@mock_aws
def test_deferrable(self):
# create instance
ec2_hook = EC2Hook()
instance_id = self._create_instance(ec2_hook)
# start instance
ec2_hook.get_instance(instance_id=instance_id).start()
# stop sensor, waits until ec2 instance state became terminated
deferrable_sensor = EC2InstanceStateSensor(
task_id="deferrable_sensor",
target_state="terminated",
instance_id=instance_id,
deferrable=True,
)
with pytest.raises(TaskDeferred):
deferrable_sensor.execute(context=None)
| TestEC2InstanceStateSensor |
python | mlflow__mlflow | mlflow/types/llm.py | {
"start": 8169,
"end": 9723
} | class ____(_BaseDataclass):
"""
A streaming message delta in a chat response.
Args:
role (str): The role of the entity that sent the message (e.g. ``"user"``,
``"system"``, ``"assistant"``, ``"tool"``).
**Optional** defaults to ``"assistant"``
This is optional because OpenAI clients can explicitly return None for
the role
content (str): The content of the new token being streamed
**Optional** Can be ``None`` on the last delta chunk or if refusal or
tool_calls are provided
refusal (str): The refusal message content.
**Optional** Supplied if a refusal response is provided.
name (str): The name of the entity that sent the message. **Optional**.
tool_calls (List[:py:class:`ToolCall`]): A list of tool calls made by the model.
**Optional** defaults to ``None``
"""
role: str | None = "assistant"
content: str | None = None
refusal: str | None = None
name: str | None = None
tool_calls: list[ToolCall] | None = None
def __post_init__(self):
self._validate_field("role", str, False)
if self.refusal:
self._validate_field("refusal", str, True)
if self.content:
raise ValueError("Both `content` and `refusal` cannot be set")
self._validate_field("content", str, False)
self._validate_field("name", str, False)
self._convert_dataclass_list("tool_calls", ToolCall, False)
@dataclass
| ChatChoiceDelta |
python | numpy__numpy | numpy/ma/tests/test_subclassing.py | {
"start": 625,
"end": 1352
} | class ____(np.ndarray):
# Defines a generic np.ndarray subclass, that stores some metadata
# in the dictionary `info`.
def __new__(cls, arr, info={}):
x = np.asanyarray(arr).view(cls)
x.info = info.copy()
return x
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self.info = getattr(obj, 'info', {}).copy()
def __add__(self, other):
result = super().__add__(other)
result.info['added'] = result.info.get('added', 0) + 1
return result
def __iadd__(self, other):
result = super().__iadd__(other)
result.info['iadded'] = result.info.get('iadded', 0) + 1
return result
subarray = SubArray
| SubArray |
python | lepture__authlib | authlib/oauth2/rfc9101/errors.py | {
"start": 387,
"end": 572
} | class ____(OAuth2Error):
error = "invalid_request_object"
description = "The request parameter contains an invalid Request Object."
status_code = 400
| InvalidRequestObjectError |
python | jazzband__django-model-utils | tests/models.py | {
"start": 9140,
"end": 9219
} | class ____(Tracked):
name2 = models.CharField(max_length=20)
| InheritedTracked |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_set_output.py | {
"start": 6519,
"end": 7907
} | class ____(_SetOutputMixin):
def fit(self, X, y=None):
self.n_features_in_ = X.shape[1]
return self
def transform(self, X, y=None):
return X
def get_feature_names_out(self, input_features=None):
return np.asarray([f"X{i}" for i in range(self.n_features_in_)], dtype=object)
def test__safe_set_output():
"""Check _safe_set_output works as expected."""
# Estimator without transform will not raise when setting set_output for transform.
est = EstimatorWithoutSetOutputAndWithoutTransform()
_safe_set_output(est, transform="pandas")
# Estimator with transform but without set_output will raise
est = EstimatorNoSetOutputWithTransform()
with pytest.raises(ValueError, match="Unable to configure output"):
_safe_set_output(est, transform="pandas")
est = EstimatorWithSetOutput().fit(np.asarray([[1, 2, 3]]))
_safe_set_output(est, transform="pandas")
config = _get_output_config("transform", est)
assert config["dense"] == "pandas"
_safe_set_output(est, transform="default")
config = _get_output_config("transform", est)
assert config["dense"] == "default"
# transform is None is a no-op, so the config remains "default"
_safe_set_output(est, transform=None)
config = _get_output_config("transform", est)
assert config["dense"] == "default"
| EstimatorWithSetOutput |
python | falconry__falcon | tests/test_recipes.py | {
"start": 5029,
"end": 7040
} | class ____:
@pytest.fixture(params=['middleware', 'structlog'])
def app(self, request, util, register_module):
class RequestIDResource:
def on_get(self, req, resp):
# NOTE(vytas): Reference either ContextVar or req.context
# depending on the recipe being tested.
context = getattr(recipe, 'ctx', req.context)
resp.media = {'request_id': context.request_id}
context = util.load_module(
'examples/recipes/request_id_context.py', module_name='my_app.context'
)
# NOTE(vytas): Inject `context` into the importable system modules
# as it is referenced from other recipes.
register_module('my_app.context', context)
# NOTE(vytas): Inject a fake structlog module because we do not want to
# introduce a new test dependency for a single recipe.
fake_structlog = types.ModuleType('structlog')
fake_structlog.get_logger = unittest.mock.MagicMock()
register_module('structlog', fake_structlog)
recipe = util.load_module(f'examples/recipes/request_id_{request.param}.py')
app = falcon.App(middleware=[recipe.RequestIDMiddleware()])
app.add_route('/test', RequestIDResource())
return app
def test_request_id_persistence(self, app):
client = falcon.testing.TestClient(app)
resp1 = client.simulate_get('/test')
request_id1 = resp1.json['request_id']
resp2 = client.simulate_get('/test')
request_id2 = resp2.json['request_id']
assert request_id1 != request_id2
def test_request_id_header(self, app):
client = falcon.testing.TestClient(app)
response = client.simulate_get('/test')
assert 'X-Request-ID' in response.headers
assert response.headers['X-Request-ID'] == response.json['request_id']
@pytest.mark.skipif(
sys.version_info < (3, 9), reason='this recipe requires Python 3.9+'
)
| TestRequestIDContext |
python | huggingface__transformers | tests/tensor_parallel/test_tensor_parallel.py | {
"start": 14520,
"end": 18897
} | class ____(TestCasePlus):
"""Base class for tensor parallel tests. Subclasses must set nproc_per_node."""
nproc_per_node = None
@require_torch_multi_accelerator
def test_model_dense_forward_eval(self):
"""Test that TP and non-TP models produce the same outputs in eval mode."""
if self.nproc_per_node is None:
self.skipTest("nproc_per_node not set")
if backend_device_count(torch_device) < self.nproc_per_node:
self.skipTest(f"Need at least {self.nproc_per_node} devices, have {backend_device_count(torch_device)}")
init_distributed(tp=self.nproc_per_node)(_test_model_dense_forward_impl)("eval")
@require_torch_multi_accelerator
def test_model_dense_forward_train(self):
"""Test that TP and non-TP models produce the same outputs in train mode."""
if self.nproc_per_node is None:
self.skipTest("nproc_per_node not set")
if backend_device_count(torch_device) < self.nproc_per_node:
self.skipTest(f"Need at least {self.nproc_per_node} devices, have {backend_device_count(torch_device)}")
init_distributed(tp=self.nproc_per_node)(_test_model_dense_forward_impl)("train")
@require_torch_multi_accelerator
def test_model_dense_backward_pass(self):
if self.nproc_per_node is None:
self.skipTest("nproc_per_node not set")
if backend_device_count(torch_device) < self.nproc_per_node:
self.skipTest(f"Need at least {self.nproc_per_node} devices, have {backend_device_count(torch_device)}")
init_distributed(tp=self.nproc_per_node)(_test_model_dense_backward_pass_impl)()
@require_torch_multi_accelerator
def test_model_dense_forward_compile_eval(self):
"""Test that TP and non-TP models produce the same outputs with torch.compile in eval mode."""
if self.nproc_per_node is None:
self.skipTest("nproc_per_node not set")
if backend_device_count(torch_device) < self.nproc_per_node:
self.skipTest(f"Need at least {self.nproc_per_node} devices, have {backend_device_count(torch_device)}")
init_distributed(tp=self.nproc_per_node)(_test_model_dense_forward_compile_impl)("eval")
@require_torch_multi_accelerator
def test_model_dense_forward_compile_train(self):
"""Test that TP and non-TP models produce the same outputs with torch.compile in train mode."""
if self.nproc_per_node is None:
self.skipTest("nproc_per_node not set")
if backend_device_count(torch_device) < self.nproc_per_node:
self.skipTest(f"Need at least {self.nproc_per_node} devices, have {backend_device_count(torch_device)}")
init_distributed(tp=self.nproc_per_node)(_test_model_dense_forward_compile_impl)("train")
@require_huggingface_hub_greater_or_equal("0.31.4")
@require_torch_multi_accelerator
def test_model_dense_save(self):
if self.nproc_per_node is None:
self.skipTest("nproc_per_node not set")
if backend_device_count(torch_device) < self.nproc_per_node:
self.skipTest(f"Need at least {self.nproc_per_node} devices, have {backend_device_count(torch_device)}")
with tempfile.TemporaryDirectory() as tmp_dir:
# First run with TP (distributed)
init_distributed(tp=self.nproc_per_node)(_test_model_dense_save_impl)(tmp_dir)
# Then run without TP (non-distributed)
_test_model_dense_save_impl(0, tmp_dir)
non_tp_model_path = os.path.join(tmp_dir, "nontp")
tp_model_path = os.path.join(tmp_dir, "tp")
for filename in os.listdir(non_tp_model_path):
if not filename.endswith(".safetensors"):
continue
non_tp_model = safe_open(os.path.join(non_tp_model_path, filename), device="cpu", framework="pt")
tp_model = safe_open(os.path.join(tp_model_path, filename), device="cpu", framework="pt")
for non_tp_key in non_tp_model.keys():
non_tp_tensor = non_tp_model.get_tensor(non_tp_key)
tp_tensor = tp_model.get_tensor(non_tp_key)
assert torch.allclose(non_tp_tensor, tp_tensor), f"Tensor with key: {non_tp_key} does not match"
del non_tp_tensor, tp_tensor
| TestTensorParallelBase |
python | realpython__materials | python-textual/static_and_label_tcss.py | {
"start": 72,
"end": 630
} | class ____(App):
CSS_PATH = "static_and_label.tcss"
def compose(self):
yield Static(
"I am a [bold red]Static[/bold red] widget!",
)
yield Label(
"I am a [yellow italic]Label[/yellow italic] widget with an id!",
id="label_id",
)
yield Label(
"I am a [yellow italic]Label[/yellow italic] widget with a CSS class!",
classes="label_class",
)
if __name__ == "__main__":
app = StaticAndLabelAppWithTCSS()
app.run()
| StaticAndLabelAppWithTCSS |
python | wandb__wandb | tests/unit_tests/test_lib/test_service_client.py | {
"start": 320,
"end": 6253
} | class ____:
"""A fake server to help test the client."""
def __init__(self, asyncer: asyncio_manager.AsyncioManager) -> None:
self.port = 0
"""The localhost port for the server, assigned by the fixture."""
self._asyncer = asyncer
# Initialized when first used. Buffer of 1.
self._responses: (
asyncio.Queue[spb.ServerResponse | bytes | Literal["stop"]] | None
) = None
self._requests: list[spb.ServerRequest] = []
self._done = threading.Event()
async def respond(self, response: spb.ServerResponse | bytes) -> None:
"""Set the response for the current request."""
if not self._responses:
self._responses = asyncio.Queue(1)
self._responses.put_nowait(response)
async def close_connection(self) -> None:
"""Close the connection instead of responding to the current request."""
if not self._responses:
self._responses = asyncio.Queue(1)
self._responses.put_nowait("stop")
def requests(self) -> list[spb.ServerRequest]:
"""Block until all requests are received, then return them."""
self._done.wait()
return self._requests
async def do_connection(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
"""Collect requests and send responses for one connection."""
try:
await self._impl_connection(reader, writer)
finally:
self._done.set()
async def _impl_connection(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
while True:
try:
header = await reader.readexactly(5)
except asyncio.IncompleteReadError:
break
_, length = struct.unpack("<BI", header)
data = await reader.readexactly(length)
request = spb.ServerRequest()
request.ParseFromString(data)
self._requests.append(request)
if request.request_id:
if not self._responses:
self._responses = asyncio.Queue(1)
response = await self._responses.get()
if response == "stop":
break
else:
await self._respond(request, response, writer)
writer.close()
await writer.wait_closed()
async def _respond(
self,
request: spb.ServerRequest,
response: spb.ServerResponse | bytes,
writer: asyncio.StreamWriter,
) -> None:
"""Respond to a request with a given response."""
if isinstance(response, spb.ServerResponse):
response.request_id = request.request_id
writer.write(struct.pack("<BI", ord("W"), response.ByteSize()))
writer.write(response.SerializeToString())
else:
writer.write(response)
await writer.drain()
@pytest.fixture
def asyncer():
asyncer = asyncio_manager.AsyncioManager()
asyncer.start()
try:
yield asyncer
finally:
asyncer.join()
@pytest.fixture
def fake_server(asyncer: asyncio_manager.AsyncioManager):
spy = _FakeServer(asyncer)
server = asyncer.run(lambda: asyncio.start_server(spy.do_connection, "localhost"))
spy.port = server.sockets[0].getsockname()[1]
asyncer.run_soon(server.serve_forever)
try:
yield spy
finally:
async def close_server():
server.close()
await server.wait_closed()
asyncer.run(close_server)
@pytest.fixture
def client(
asyncer: asyncio_manager.AsyncioManager,
fake_server: _FakeServer,
) -> ServiceClient:
"""An initialized ServiceClient connected to the fake_server.
Not automatically closed.
"""
reader, writer = asyncer.run(
lambda: asyncio.open_connection("localhost", fake_server.port),
)
return ServiceClient(asyncer, reader, writer)
def test_publish_sends_request(
asyncer: asyncio_manager.AsyncioManager,
client: ServiceClient,
fake_server: _FakeServer,
):
try:
request = spb.ServerRequest()
request.record_publish.exit.exit_code = 123
asyncer.run(lambda: client.publish(request))
finally:
asyncer.run(lambda: client.close())
assert fake_server.requests() == [request]
def test_deliver_reads_response(
asyncer: asyncio_manager.AsyncioManager,
client: ServiceClient,
fake_server: _FakeServer,
):
expected_response = spb.ServerResponse()
expected_response.result_communicate.run_result.error.message = "test"
try:
request = spb.ServerRequest()
handle = asyncer.run(lambda: client.deliver(request))
asyncer.run(lambda: fake_server.respond(expected_response))
response = handle.wait_or(timeout=5)
finally:
asyncer.run(client.close)
expected_response.request_id = request.request_id
assert response == expected_response
def test_closes_mailbox_on_read_error(
asyncer: asyncio_manager.AsyncioManager,
client: ServiceClient,
fake_server: _FakeServer,
):
try:
handle = asyncer.run(lambda: client.deliver(spb.ServerRequest()))
asyncer.run(lambda: fake_server.respond(b"invalid response"))
with pytest.raises(mailbox.HandleAbandonedError):
handle.wait_or(timeout=5)
finally:
asyncer.run(client.close)
def test_closes_mailbox_on_eof(
asyncer: asyncio_manager.AsyncioManager,
client: ServiceClient,
fake_server: _FakeServer,
):
try:
handle = asyncer.run(lambda: client.deliver(spb.ServerRequest()))
asyncer.run(fake_server.close_connection)
with pytest.raises(mailbox.HandleAbandonedError):
handle.wait_or(timeout=5)
finally:
asyncer.run(client.close)
| _FakeServer |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/entities/snippets.py | {
"start": 7258,
"end": 7479
} | class ____(ndb.Model):
user_id = ndb.StringProperty()
color = ndb.StringProperty()
@classmethod
def get_by_user(cls, user):
return cls.query().filter(cls.user_id == user.user_id()).get()
| ModelWithUser |
python | tensorflow__tensorflow | tensorflow/lite/python/lite_test.py | {
"start": 68679,
"end": 78863
} | class ____(LiteTest):
def testFloat(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFloatWithShapesArray(self):
"""Test a shape overriding case."""
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(
graph_def_file, ['Placeholder'], ['add'],
input_shapes={'Placeholder': [2, 16, 16, 3]})
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertAllEqual([2, 16, 16, 3], input_details[0]['shape'])
def testInvalidShapesArray(self):
"""Test an invalid shape overriding case, which has a wrong input name."""
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
with self.assertRaises(ValueError):
lite.TFLiteConverter.from_frozen_graph(
graph_def_file, ['Placeholder'], ['add'],
input_shapes={'wrong_input': [2, 16, 16, 3]})
def testPartialShapesArray(self):
"""Test a shape overriding case, with the only one input among two."""
with ops.Graph().as_default():
a = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32, name='a')
b = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32, name='b')
_ = math_ops.add(a, b, name='add')
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(
graph_def_file, ['a', 'b'], ['add'], input_shapes={'a': [2, 16, 16, 3]})
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertAllEqual([2, 16, 16, 3], input_details[0]['shape'])
self.assertAllEqual([1, 16, 16, 3], input_details[1]['shape'])
def testFreezeGraph(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + var
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Ensure the graph with variables cannot be converted.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual('Please freeze the graph using freeze_graph.py.',
str(error.exception))
def testPbtxt(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
write_graph(sess.graph_def, '', graph_def_file, True)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInvalidFileNotFound(self):
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph('invalid_file', ['Placeholder'],
['add'])
self.assertEqual('File \'invalid_file\' does not exist.',
str(error.exception))
def testInvalidFileBadData(self):
graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
with gfile.Open(graph_def_file, 'wb') as temp_file:
temp_file.write('bad data')
temp_file.flush()
# Attempts to convert the invalid model.
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual(
'Unable to parse input file \'{}\'.'.format(graph_def_file),
str(error.exception))
def testFloatTocoConverter(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
def testGraphDebugInfo(self):
"""Test a frozen graph doesn't have debug info captured."""
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
converter.convert()
# GraphDebugInfo should be none for frozen graph.
self.assertFalse(converter._debug_info)
def testExcludeConversionMetadata(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
converter.exclude_conversion_metadata = True
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check the conversion metadata.
metadata = get_conversion_metadata(tflite_model)
self.assertIsNone(metadata)
| FromFrozenGraphFile |
python | Textualize__textual | src/textual/widgets/_log.py | {
"start": 651,
"end": 11502
} | class ____(ScrollView, can_focus=True):
"""A widget to log text."""
ALLOW_SELECT = True
DEFAULT_CSS = """
Log {
background: $surface;
color: $text;
overflow: scroll;
&:focus {
background-tint: $foreground 5%;
}
}
"""
max_lines: var[int | None] = var[Optional[int]](None)
"""Maximum number of lines to show"""
auto_scroll: var[bool] = var(True)
"""Automatically scroll to new lines."""
def __init__(
self,
highlight: bool = False,
max_lines: int | None = None,
auto_scroll: bool = True,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
) -> None:
"""Create a Log widget.
Args:
highlight: Enable highlighting.
max_lines: Maximum number of lines to display.
auto_scroll: Scroll to end on new lines.
name: The name of the text log.
id: The ID of the text log in the DOM.
classes: The CSS classes of the text log.
disabled: Whether the text log is disabled or not.
"""
super().__init__(name=name, id=id, classes=classes, disabled=disabled)
self.highlight = highlight
"""Enable highlighting."""
self.max_lines = max_lines
self.auto_scroll = auto_scroll
self._lines: list[str] = []
self._width = 0
self._updates = 0
self._render_line_cache: LRUCache[int, Strip] = LRUCache(1024)
self.highlighter: Highlighter = ReprHighlighter()
"""The Rich Highlighter object to use, if `highlight=True`"""
self._clear_y = 0
@property
def allow_select(self) -> bool:
return True
@property
def lines(self) -> Sequence[str]:
"""The raw lines in the Log.
Note that this attribute is read only.
Changing the lines will not update the Log's contents.
"""
return self._lines
def notify_style_update(self) -> None:
"""Called by Textual when styles update."""
super().notify_style_update()
self._render_line_cache.clear()
def _update_maximum_width(self, updates: int, size: int) -> None:
"""Update the virtual size width.
Args:
updates: A counter of updates.
size: Maximum size of new lines.
"""
if updates == self._updates:
self._width = max(size, self._width)
self.virtual_size = Size(self._width, self.line_count)
@property
def line_count(self) -> int:
"""Number of lines of content."""
if self._lines:
return len(self._lines) - (self._lines[-1] == "")
return 0
@classmethod
def _process_line(cls, line: str) -> str:
"""Process a line before it is rendered to remove control codes.
Args:
line: A string.
Returns:
New string with no control codes.
"""
return _sub_escape("�", line.expandtabs())
@work(thread=True)
def _update_size(self, updates: int, lines: list[str]) -> None:
"""A thread worker to update the width in the background.
Args:
updates: The update index at the time of invocation.
lines: Lines that were added.
"""
if lines:
_process_line = self._process_line
max_length = max(cell_len(_process_line(line)) for line in lines)
self.app.call_from_thread(self._update_maximum_width, updates, max_length)
def _prune_max_lines(self) -> None:
"""Prune lines if there are more than the maximum."""
if self.max_lines is None:
return
remove_lines = len(self._lines) - self.max_lines
if remove_lines > 0:
_cache = self._render_line_cache
# We've removed some lines, which means the y values in the cache are out of sync
# Calculated a new dict of cache values
updated_cache = {
y - remove_lines: _cache[y] for y in _cache.keys() if y > remove_lines
}
# Clear the cache
_cache.clear()
# Update the cache with previously calculated values
for y, line in updated_cache.items():
_cache[y] = line
del self._lines[:remove_lines]
def write(
self,
data: str,
scroll_end: bool | None = None,
) -> Self:
"""Write to the log.
Args:
data: Data to write.
scroll_end: Scroll to the end after writing, or `None` to use `self.auto_scroll`.
Returns:
The `Log` instance.
"""
is_vertical_scroll_end = self.is_vertical_scroll_end
if data:
if not self._lines:
self._lines.append("")
for line, ending in line_split(data):
self._lines[-1] += line
self._width = max(
self._width, cell_len(self._process_line(self._lines[-1]))
)
self.refresh_lines(len(self._lines) - 1)
if ending:
self._lines.append("")
self.virtual_size = Size(self._width, self.line_count)
if self.max_lines is not None and len(self._lines) > self.max_lines:
self._prune_max_lines()
auto_scroll = self.auto_scroll if scroll_end is None else scroll_end
if auto_scroll:
self.scroll_end(animate=False, immediate=True, x_axis=False)
return self
def write_line(
self,
line: str,
scroll_end: bool | None = None,
) -> Self:
"""Write content on a new line.
Args:
line: String to write to the log.
scroll_end: Scroll to the end after writing, or `None` to use `self.auto_scroll`.
Returns:
The `Log` instance.
"""
self.write_lines([line], scroll_end)
return self
def write_lines(
self,
lines: Iterable[str],
scroll_end: bool | None = None,
) -> Self:
"""Write an iterable of lines.
Args:
lines: An iterable of strings to write.
scroll_end: Scroll to the end after writing, or `None` to use `self.auto_scroll`.
Returns:
The `Log` instance.
"""
is_vertical_scroll_end = self.is_vertical_scroll_end
auto_scroll = self.auto_scroll if scroll_end is None else scroll_end
new_lines = []
for line in lines:
new_lines.extend(line.splitlines())
start_line = len(self._lines)
self._lines.extend(new_lines)
if self.max_lines is not None and len(self._lines) > self.max_lines:
self._prune_max_lines()
self.virtual_size = Size(self._width, len(self._lines))
self._update_size(self._updates, new_lines)
self.refresh_lines(start_line, len(new_lines))
if (
auto_scroll
and not self.is_vertical_scrollbar_grabbed
and is_vertical_scroll_end
):
self.scroll_end(animate=False, immediate=True, x_axis=False)
else:
self.refresh()
return self
def clear(self) -> Self:
"""Clear the Log.
Returns:
The `Log` instance.
"""
self._lines.clear()
self._width = 0
self._render_line_cache.clear()
self._updates += 1
self.virtual_size = Size(0, 0)
self._clear_y = 0
return self
def get_selection(self, selection: Selection) -> tuple[str, str] | None:
"""Get the text under the selection.
Args:
selection: Selection information.
Returns:
Tuple of extracted text and ending (typically "\n" or " "), or `None` if no text could be extracted.
"""
text = "\n".join(self._lines)
return selection.extract(text), "\n"
def selection_updated(self, selection: Selection | None) -> None:
self._render_line_cache.clear()
self.refresh()
def render_line(self, y: int) -> Strip:
"""Render a line of content.
Args:
y: Y Coordinate of line.
Returns:
A rendered line.
"""
scroll_x, scroll_y = self.scroll_offset
strip = self._render_line(scroll_y + y, scroll_x, self.size.width)
return strip
def _render_line(self, y: int, scroll_x: int, width: int) -> Strip:
"""Render a line into a cropped strip.
Args:
y: Y offset of line.
scroll_x: Current horizontal scroll.
width: Width of the widget.
Returns:
A Strip suitable for rendering.
"""
rich_style = self.rich_style
if y >= len(self._lines):
return Strip.blank(width, rich_style)
line = self._render_line_strip(y, rich_style)
assert line._cell_length is not None
line = line.crop_extend(scroll_x, scroll_x + width, rich_style)
line = line.apply_offsets(scroll_x, y)
return line
def _render_line_strip(self, y: int, rich_style: Style) -> Strip:
"""Render a line into a Strip.
Args:
y: Y offset of line.
rich_style: Rich style of line.
Returns:
An uncropped Strip.
"""
selection = self.text_selection
if y in self._render_line_cache and selection is None:
return self._render_line_cache[y]
_line = self._process_line(self._lines[y])
line_text = Text(_line, no_wrap=True)
line_text.stylize(rich_style)
if self.highlight:
line_text = self.highlighter(line_text)
if selection is not None:
if (select_span := selection.get_span(y - self._clear_y)) is not None:
start, end = select_span
if end == -1:
end = len(line_text)
selection_style = self.screen.get_component_rich_style(
"screen--selection"
)
line_text.stylize(selection_style, start, end)
line = Strip(line_text.render(self.app.console), cell_len(_line))
if selection is not None:
self._render_line_cache[y] = line
return line
def refresh_lines(self, y_start: int, line_count: int = 1) -> None:
"""Refresh one or more lines.
Args:
y_start: First line to refresh.
line_count: Total number of lines to refresh.
"""
for y in range(y_start, y_start + line_count):
self._render_line_cache.discard(y)
super().refresh_lines(y_start, line_count=line_count)
| Log |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-tasks-you-can-assign.py | {
"start": 1396,
"end": 2694
} | class ____(object):
def maxTaskAssign(self, tasks, workers, pills, strength):
"""
:type tasks: List[int]
:type workers: List[int]
:type pills: int
:type strength: int
:rtype: int
"""
def check(tasks, workers, pills, strength, x):
w = SortedList(workers[-x:])
for task in tasks[-x:]: # enumerate from the hardest task to the easiest task, greedily assign it to the weakest worker whom it can be done by
i = w.bisect_left(task)
if i != len(w):
w.pop(i)
continue
if pills:
i = w.bisect_left(task-strength)
if i != len(w):
w.pop(i)
pills -= 1
continue
return False
return True
tasks.sort(reverse=True)
workers.sort()
left, right = 1, min(len(workers), len(tasks))
while left <= right:
mid = left + (right-left)//2
if not check(tasks, workers, pills, strength, mid):
right = mid-1
else:
left = mid+1
return right
# Time: O(n^2 * logn)
# Space: O(n)
import bisect
| Solution2 |
python | falconry__falcon | examples/recipes/msgspec_main.py | {
"start": 1606,
"end": 2717
} | class ____:
def process_resource(
self, req: Request, resp: Response, resource: object, params: dict[str, Any]
) -> None:
if schema := getattr(resource, f'{req.method}_SCHEMA', None):
param = schema.__name__.lower()
params[param] = msgspec.convert(req.get_media(), schema)
def _handle_validation_error(
req: Request, resp: Response, ex: msgspec.ValidationError, params: dict[str, Any]
) -> None:
raise falcon.HTTPUnprocessableEntity(description=str(ex))
def create_app() -> falcon.App:
app = falcon.App(middleware=[MsgspecMiddleware()])
app.add_error_handler(msgspec.ValidationError, _handle_validation_error)
json_handler = JSONHandler(
dumps=msgspec.json.encode,
loads=msgspec.json.decode,
)
app.req_options.media_handlers[falcon.MEDIA_JSON] = json_handler
app.resp_options.media_handlers[falcon.MEDIA_JSON] = json_handler
notes = NoteResource()
app.add_route('/notes', notes)
app.add_route('/notes/{noteid:uuid}', notes, suffix='note')
return app
application = create_app()
| MsgspecMiddleware |
python | django__django | django/db/migrations/serializer.py | {
"start": 1960,
"end": 2070
} | class ____(BaseSerializer):
def serialize(self):
return repr(self.value), set()
| BaseSimpleSerializer |
python | django__django | tests/i18n/test_extraction.py | {
"start": 34470,
"end": 35372
} | class ____(ExtractorTests):
def setUp(self):
super().setUp()
self.symlinked_dir = os.path.join(self.test_dir, "templates_symlinked")
def test_symlink(self):
if symlinks_supported():
os.symlink(os.path.join(self.test_dir, "templates"), self.symlinked_dir)
else:
self.skipTest(
"os.symlink() not available on this OS + Python version combination."
)
management.call_command(
"makemessages", locale=[LOCALE], verbosity=0, symlinks=True
)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE) as fp:
po_contents = fp.read()
self.assertMsgId("This literal should be included.", po_contents)
self.assertLocationCommentPresent(
self.PO_FILE, None, "templates_symlinked", "test.html"
)
| SymlinkExtractorTests |
python | realpython__materials | django-migrations/bitcoin_tracker/historical_data/migrations/0001_initial.py | {
"start": 92,
"end": 810
} | class ____(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="PriceHistory",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("date", models.DateTimeField(auto_now_add=True)),
("price", models.DecimalField(decimal_places=2, max_digits=7)),
("volume", models.PositiveIntegerField()),
],
)
]
| Migration |
python | realpython__materials | arcade-platformer/arcade_platformer/12_instructions_view.py | {
"start": 3289,
"end": 4683
} | class ____(arcade.View):
"""Show instructions to the player"""
def __init__(self) -> None:
"""Create instructions screen"""
super().__init__()
# Find the instructions image in the image folder
instructions_image_path = (
ASSETS_PATH / "images" / "instructions_image.png"
)
# Load our title image
self.instructions_image = arcade.load_texture(instructions_image_path)
def on_draw(self) -> None:
# Start the rendering loop
arcade.start_render()
# Draw a rectangle filled with the instructions image
arcade.draw_texture_rectangle(
center_x=SCREEN_WIDTH / 2,
center_y=SCREEN_HEIGHT / 2,
width=SCREEN_WIDTH,
height=SCREEN_HEIGHT,
texture=self.instructions_image,
)
def on_key_press(self, key: int, modifiers: int) -> None:
"""Start the game when the user presses Enter
Arguments:
key -- Which key was pressed
modifiers -- What modifiers were active
"""
if key == arcade.key.RETURN:
game_view = PlatformerView()
game_view.setup()
self.window.show_view(game_view)
elif key == arcade.key.ESCAPE:
title_view = TitleView()
self.window.show_view(title_view)
# PlatformerView
| InstructionsView |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_hourly_reports.py | {
"start": 26697,
"end": 34649
} | class ____(HourlyReportsTestWithStateChangesAfterMigration):
stream_name = "ad_group_impression_performance_report_hourly"
report_file = "ad_group_impression_performance_report_hourly"
records_number = 24
state_file = "hourly_reports_state"
incremental_report_file = "ad_group_impression_performance_report_hourly_incremental"
report_file_with_records_further_start_date = "ad_group_impression_performance_report_hourly_with_records_further_config_start_date"
state_file_legacy = "hourly_reports_state_legacy"
state_file_after_migration = "hourly_reports_state_after_migration"
state_file_after_migration_with_cursor_further_config_start_date = (
"hourly_reports_state_after_migration_with_cursor_further_config_start_date"
)
incremental_report_file_with_records_further_cursor = (
"ad_group_impression_performance_report_hourly_incremental_with_records_further_cursor"
)
def mock_report_apis(self):
self.mock_user_query_api(response_template="user_query")
self.mock_accounts_search_api(
response_template="accounts_search_for_report",
body=b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AdGroupPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "AdGroupPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "Status", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "Conversions", "ConversionRate", "CostPerConversion", "DeviceType", "Language", "QualityScore", "ExpectedCtr", "AdRelevance", "LandingPageExperience", "PhoneImpressions", "PhoneCalls", "Ptr", "Network", "Assists", "Revenue", "ReturnOnAdSpend", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "TrackingTemplate", "CustomParameters", "AccountStatus", "CampaignStatus", "AdGroupLabels", "FinalUrlSuffix", "CampaignType", "TopImpressionSharePercent", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "BaseCampaignId", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "AdGroupType", "AverageCpm", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue", "VideoViews", "ViewThroughRate", "AverageCPV", "VideoViewsAt25Percent", "VideoViewsAt50Percent", "VideoViewsAt75Percent", "CompletedVideoViews", "VideoCompletionRate", "TotalWatchTimeInMS", "AverageWatchTimePerVideoView", "AverageWatchTimePerImpression", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for second read
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AdGroupPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "AdGroupPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "Status", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "Conversions", "ConversionRate", "CostPerConversion", "DeviceType", "Language", "QualityScore", "ExpectedCtr", "AdRelevance", "LandingPageExperience", "PhoneImpressions", "PhoneCalls", "Ptr", "Network", "Assists", "Revenue", "ReturnOnAdSpend", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "TrackingTemplate", "CustomParameters", "AccountStatus", "CampaignStatus", "AdGroupLabels", "FinalUrlSuffix", "CampaignType", "TopImpressionSharePercent", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "BaseCampaignId", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "AdGroupType", "AverageCpm", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue", "VideoViews", "ViewThroughRate", "AverageCPV", "VideoViewsAt25Percent", "VideoViewsAt50Percent", "VideoViewsAt75Percent", "CompletedVideoViews", "VideoCompletionRate", "TotalWatchTimeInMS", "AverageWatchTimePerVideoView", "AverageWatchTimePerImpression", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for no config start date test
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AdGroupPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "AdGroupPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "Status", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "Conversions", "ConversionRate", "CostPerConversion", "DeviceType", "Language", "QualityScore", "ExpectedCtr", "AdRelevance", "LandingPageExperience", "PhoneImpressions", "PhoneCalls", "Ptr", "Network", "Assists", "Revenue", "ReturnOnAdSpend", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "TrackingTemplate", "CustomParameters", "AccountStatus", "CampaignStatus", "AdGroupLabels", "FinalUrlSuffix", "CampaignType", "TopImpressionSharePercent", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "BaseCampaignId", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "AdGroupType", "AverageCpm", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue", "VideoViews", "ViewThroughRate", "AverageCPV", "VideoViewsAt25Percent", "VideoViewsAt50Percent", "VideoViewsAt75Percent", "CompletedVideoViews", "VideoCompletionRate", "TotalWatchTimeInMS", "AverageWatchTimePerVideoView", "AverageWatchTimePerImpression", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Poll", response_template="generate_report_poll", body=b'{"ReportRequestId": "thisisthereport_requestid"}'
)
| TestAdGroupImpressionPerformanceReportHourlyStream |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/test/test_utils.py | {
"start": 2821,
"end": 3089
} | class ____(Component, Resolvable):
key: ResolvedAssetKey
def build_defs(self, context: ComponentLoadContext) -> Definitions:
@asset(key=self.key)
def asset_def():
pass
return Definitions(assets=[asset_def])
| BasicAssetComponent |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 41953,
"end": 43807
} | class ____(Box):
"""A list of nodes (either horizontal or vertical)."""
def __init__(self, elements: T.Sequence[Node]):
super().__init__(0., 0., 0.)
self.shift_amount = 0. # An arbitrary offset
self.children = [*elements] # The child nodes of this list
# The following parameters are set in the vpack and hpack functions
self.glue_set = 0. # The glue setting of this list
self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
self.glue_order = 0 # The order of infinity (0 - 3) for the glue
def __repr__(self):
return "{}<w={:.02f} h={:.02f} d={:.02f} s={:.02f}>[{}]".format(
super().__repr__(),
self.width, self.height,
self.depth, self.shift_amount,
"\n" + textwrap.indent(
"\n".join(map("{!r},".format, self.children)),
" ") + "\n"
if self.children else ""
)
def _set_glue(self, x: float, sign: int, totals: list[float],
error_type: str) -> None:
self.glue_order = o = next(
# Highest order of glue used by the members of this list.
(i for i in range(len(totals))[::-1] if totals[i] != 0), 0)
self.glue_sign = sign
if totals[o] != 0.:
self.glue_set = x / totals[o]
else:
self.glue_sign = 0
self.glue_ratio = 0.
if o == 0:
if len(self.children):
_log.warning("%s %s: %r",
error_type, type(self).__name__, self)
def shrink(self) -> None:
for child in self.children:
child.shrink()
super().shrink()
if self.size < NUM_SIZE_LEVELS:
self.shift_amount *= SHRINK_FACTOR
self.glue_set *= SHRINK_FACTOR
| List |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 85339,
"end": 85445
} | class ____:
def build_absolute_uri(self, value):
return 'http://example.com' + value
| MockRequest |
python | getsentry__sentry | tests/sentry/integrations/data_forwarding/segment/test_forwarder.py | {
"start": 403,
"end": 2789
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.data_forwarder = DataForwarder.objects.create(
organization=self.organization,
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "secret-api-key"},
is_enabled=True,
)
self.data_forwarder_project = DataForwarderProject.objects.create(
data_forwarder=self.data_forwarder,
project=self.project,
is_enabled=True,
)
self.forwarder = SegmentForwarder()
@responses.activate
def test_simple_notification(self):
responses.add(responses.POST, "https://api.segment.io/v1/track")
event = self.store_event(
data={
"exception": {"type": "ValueError", "value": "foo bar"},
"user": {"id": "1", "email": "foo@example.com"},
"type": "error",
"metadata": {"type": "ValueError", "value": "foo bar"},
"level": "warning",
},
project_id=self.project.id,
)
self.forwarder.post_process(event, self.data_forwarder_project)
assert len(responses.calls) == 1
request = responses.calls[0].request
payload = orjson.loads(request.body)
assert {
"userId": "1",
"event": "Error Captured",
"context": {"library": {"name": "sentry", "version": VERSION}},
"properties": {
"environment": "",
"eventId": event.event_id,
"exceptionType": "ValueError",
"level": "warning",
"release": "",
"transaction": "",
},
"integration": {"name": "sentry", "version": VERSION},
"timestamp": event.datetime.isoformat() + "Z",
} == payload
@responses.activate
def test_forward_event_http_error(self):
responses.add(responses.POST, "https://api.segment.io/v1/track", status=500)
event = self.store_event(
data={
"exception": {"type": "ValueError", "value": "foo bar"},
"user": {"id": "1"},
"type": "error",
},
project_id=self.project.id,
)
self.forwarder.post_process(event, self.data_forwarder_project)
| SegmentDataForwarderTest |
python | ray-project__ray | python/ray/exceptions.py | {
"start": 20300,
"end": 20743
} | class ____(RayError):
"""Indicates that the node is running out of memory and is close to full.
This is raised if the node is low on memory and tasks or actors are being
evicted to free up memory.
"""
# TODO: (clarng) expose the error message string here and format it with proto
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
@PublicAPI
| OutOfMemoryError |
python | numpy__numpy | numpy/ma/core.py | {
"start": 5356,
"end": 5453
} | class ____(Exception):
"""
Class for masked array related errors.
"""
pass
| MAError |
python | Pylons__pyramid | tests/test_scripting.py | {
"start": 8299,
"end": 8714
} | class ____:
matchdict = None
matched_route = None
def __init__(self, environ):
self.environ = environ
self.finished_callbacks = deque()
def add_finished_callback(self, cb):
self.finished_callbacks.append(cb)
def _process_finished_callbacks(self):
while self.finished_callbacks:
cb = self.finished_callbacks.popleft()
cb(self)
| DummyRequest |
python | huggingface__transformers | src/transformers/models/granite/modular_granite.py | {
"start": 1207,
"end": 1497
} | class ____(LlamaAttention):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: GraniteConfig, layer_idx: Optional[int] = None):
super().__init__(config, layer_idx)
self.scaling = config.attention_multiplier
| GraniteAttention |
python | tensorflow__tensorflow | tensorflow/python/util/protobuf/compare.py | {
"start": 12493,
"end": 12953
} | class ____(object):
"""Mix this into a googletest.TestCase class to get proto2 assertions.
Usage:
class SomeTestCase(compare.ProtoAssertions, googletest.TestCase):
...
def testSomething(self):
...
self.assertProtoEqual(a, b)
See module-level definitions for method documentation.
"""
# pylint: disable=invalid-name
def assertProtoEqual(self, *args, **kwargs):
return assertProtoEqual(self, *args, **kwargs)
| ProtoAssertions |
python | getsentry__sentry | src/sentry/incidents/metric_issue_detector.py | {
"start": 4889,
"end": 6908
} | class ____(BaseDataConditionGroupValidator):
conditions = serializers.ListField(required=True)
def validate_conditions(self, value):
MetricIssueComparisonConditionValidator(data=value, many=True).is_valid(
raise_exception=True
)
if not any(
condition["condition_result"] == DetectorPriorityLevel.OK for condition in value
) and not any(condition["type"] == Condition.ANOMALY_DETECTION for condition in value):
raise serializers.ValidationError(
"Resolution condition required for metric issue detector."
)
return value
def is_invalid_extrapolation_mode(old_extrapolation_mode, new_extrapolation_mode) -> bool:
if type(new_extrapolation_mode) is int:
new_extrapolation_mode = ExtrapolationMode(new_extrapolation_mode).name.lower()
if type(new_extrapolation_mode) is ExtrapolationMode:
new_extrapolation_mode = new_extrapolation_mode.name.lower()
if type(old_extrapolation_mode) is int:
old_extrapolation_mode = ExtrapolationMode(old_extrapolation_mode).name.lower()
if type(old_extrapolation_mode) is ExtrapolationMode:
old_extrapolation_mode = old_extrapolation_mode.name.lower()
if (
new_extrapolation_mode is not None
and ExtrapolationMode.from_str(new_extrapolation_mode) is None
):
return True
if (
new_extrapolation_mode == ExtrapolationMode.SERVER_WEIGHTED.name.lower()
and old_extrapolation_mode != ExtrapolationMode.SERVER_WEIGHTED.name.lower()
):
return True
return False
def format_extrapolation_mode(extrapolation_mode) -> ExtrapolationMode | None:
if extrapolation_mode is None:
return None
if type(extrapolation_mode) is int:
return ExtrapolationMode(extrapolation_mode)
if type(extrapolation_mode) is ExtrapolationMode:
return extrapolation_mode
return ExtrapolationMode.from_str(extrapolation_mode)
| MetricIssueConditionGroupValidator |
python | cython__cython | Cython/CodeWriter.py | {
"start": 23415,
"end": 24172
} | class ____(DeclarationWriter, ExpressionWriter):
"""
A Cython code writer for everything supported in pxd files.
(currently unused)
"""
def __call__(self, node):
print('\n'.join(self.write(node).lines))
return node
def visit_CFuncDefNode(self, node):
if node.overridable:
self.startline('cpdef ')
else:
self.startline('cdef ')
if node.modifiers:
self.put(' '.join(node.modifiers))
self.put(' ')
if node.visibility != 'private':
self.put(node.visibility)
self.put(' ')
if node.api:
self.put('api ')
self.visit(node.declarator)
def visit_StatNode(self, node):
pass
| PxdWriter |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1406291,
"end": 1406585
} | class ____(
sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData, RepositoryAuditEntryData
):
"""Audit log entry for a repo.config.disable_collaborators_only
event.
"""
__schema__ = github_schema
__field_names__ = ()
| RepoConfigDisableCollaboratorsOnlyAuditEntry |
python | kubernetes-client__python | kubernetes/client/models/v1_container_resize_policy.py | {
"start": 383,
"end": 5178
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'resource_name': 'str',
'restart_policy': 'str'
}
attribute_map = {
'resource_name': 'resourceName',
'restart_policy': 'restartPolicy'
}
def __init__(self, resource_name=None, restart_policy=None, local_vars_configuration=None): # noqa: E501
"""V1ContainerResizePolicy - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._resource_name = None
self._restart_policy = None
self.discriminator = None
self.resource_name = resource_name
self.restart_policy = restart_policy
@property
def resource_name(self):
"""Gets the resource_name of this V1ContainerResizePolicy. # noqa: E501
Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. # noqa: E501
:return: The resource_name of this V1ContainerResizePolicy. # noqa: E501
:rtype: str
"""
return self._resource_name
@resource_name.setter
def resource_name(self, resource_name):
"""Sets the resource_name of this V1ContainerResizePolicy.
Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. # noqa: E501
:param resource_name: The resource_name of this V1ContainerResizePolicy. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and resource_name is None: # noqa: E501
raise ValueError("Invalid value for `resource_name`, must not be `None`") # noqa: E501
self._resource_name = resource_name
@property
def restart_policy(self):
"""Gets the restart_policy of this V1ContainerResizePolicy. # noqa: E501
Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. # noqa: E501
:return: The restart_policy of this V1ContainerResizePolicy. # noqa: E501
:rtype: str
"""
return self._restart_policy
@restart_policy.setter
def restart_policy(self, restart_policy):
"""Sets the restart_policy of this V1ContainerResizePolicy.
Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. # noqa: E501
:param restart_policy: The restart_policy of this V1ContainerResizePolicy. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and restart_policy is None: # noqa: E501
raise ValueError("Invalid value for `restart_policy`, must not be `None`") # noqa: E501
self._restart_policy = restart_policy
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ContainerResizePolicy):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ContainerResizePolicy):
return True
return self.to_dict() != other.to_dict()
| V1ContainerResizePolicy |
python | TheAlgorithms__Python | graphs/graph_adjacency_list.py | {
"start": 6886,
"end": 21684
} | class ____(unittest.TestCase):
def __assert_graph_edge_exists_check(
self,
undirected_graph: GraphAdjacencyList,
directed_graph: GraphAdjacencyList,
edge: list[int],
) -> None:
assert undirected_graph.contains_edge(edge[0], edge[1])
assert undirected_graph.contains_edge(edge[1], edge[0])
assert directed_graph.contains_edge(edge[0], edge[1])
def __assert_graph_edge_does_not_exist_check(
self,
undirected_graph: GraphAdjacencyList,
directed_graph: GraphAdjacencyList,
edge: list[int],
) -> None:
assert not undirected_graph.contains_edge(edge[0], edge[1])
assert not undirected_graph.contains_edge(edge[1], edge[0])
assert not directed_graph.contains_edge(edge[0], edge[1])
def __assert_graph_vertex_exists_check(
self,
undirected_graph: GraphAdjacencyList,
directed_graph: GraphAdjacencyList,
vertex: int,
) -> None:
assert undirected_graph.contains_vertex(vertex)
assert directed_graph.contains_vertex(vertex)
def __assert_graph_vertex_does_not_exist_check(
self,
undirected_graph: GraphAdjacencyList,
directed_graph: GraphAdjacencyList,
vertex: int,
) -> None:
assert not undirected_graph.contains_vertex(vertex)
assert not directed_graph.contains_vertex(vertex)
def __generate_random_edges(
self, vertices: list[int], edge_pick_count: int
) -> list[list[int]]:
assert edge_pick_count <= len(vertices)
random_source_vertices: list[int] = random.sample(
vertices[0 : int(len(vertices) / 2)], edge_pick_count
)
random_destination_vertices: list[int] = random.sample(
vertices[int(len(vertices) / 2) :], edge_pick_count
)
random_edges: list[list[int]] = []
for source in random_source_vertices:
for dest in random_destination_vertices:
random_edges.append([source, dest])
return random_edges
def __generate_graphs(
self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int
) -> tuple[GraphAdjacencyList, GraphAdjacencyList, list[int], list[list[int]]]:
if max_val - min_val + 1 < vertex_count:
raise ValueError(
"Will result in duplicate vertices. Either increase range "
"between min_val and max_val or decrease vertex count."
)
# generate graph input
random_vertices: list[int] = random.sample(
range(min_val, max_val + 1), vertex_count
)
random_edges: list[list[int]] = self.__generate_random_edges(
random_vertices, edge_pick_count
)
# build graphs
undirected_graph = GraphAdjacencyList(
vertices=random_vertices, edges=random_edges, directed=False
)
directed_graph = GraphAdjacencyList(
vertices=random_vertices, edges=random_edges, directed=True
)
return undirected_graph, directed_graph, random_vertices, random_edges
def test_init_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
# test graph initialization with vertices and edges
for num in random_vertices:
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, num
)
for edge in random_edges:
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
assert not undirected_graph.directed
assert directed_graph.directed
def test_contains_vertex(self) -> None:
random_vertices: list[int] = random.sample(range(101), 20)
# Build graphs WITHOUT edges
undirected_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=False
)
directed_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=True
)
# Test contains_vertex
for num in range(101):
assert (num in random_vertices) == undirected_graph.contains_vertex(num)
assert (num in random_vertices) == directed_graph.contains_vertex(num)
def test_add_vertices(self) -> None:
random_vertices: list[int] = random.sample(range(101), 20)
# build empty graphs
undirected_graph: GraphAdjacencyList = GraphAdjacencyList(
vertices=[], edges=[], directed=False
)
directed_graph: GraphAdjacencyList = GraphAdjacencyList(
vertices=[], edges=[], directed=True
)
# run add_vertex
for num in random_vertices:
undirected_graph.add_vertex(num)
for num in random_vertices:
directed_graph.add_vertex(num)
# test add_vertex worked
for num in random_vertices:
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, num
)
def test_remove_vertices(self) -> None:
random_vertices: list[int] = random.sample(range(101), 20)
# build graphs WITHOUT edges
undirected_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=False
)
directed_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=True
)
# test remove_vertex worked
for num in random_vertices:
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, num
)
undirected_graph.remove_vertex(num)
directed_graph.remove_vertex(num)
self.__assert_graph_vertex_does_not_exist_check(
undirected_graph, directed_graph, num
)
def test_add_and_remove_vertices_repeatedly(self) -> None:
random_vertices1: list[int] = random.sample(range(51), 20)
random_vertices2: list[int] = random.sample(range(51, 101), 20)
# build graphs WITHOUT edges
undirected_graph = GraphAdjacencyList(
vertices=random_vertices1, edges=[], directed=False
)
directed_graph = GraphAdjacencyList(
vertices=random_vertices1, edges=[], directed=True
)
# test adding and removing vertices
for i, _ in enumerate(random_vertices1):
undirected_graph.add_vertex(random_vertices2[i])
directed_graph.add_vertex(random_vertices2[i])
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, random_vertices2[i]
)
undirected_graph.remove_vertex(random_vertices1[i])
directed_graph.remove_vertex(random_vertices1[i])
self.__assert_graph_vertex_does_not_exist_check(
undirected_graph, directed_graph, random_vertices1[i]
)
# remove all vertices
for i, _ in enumerate(random_vertices1):
undirected_graph.remove_vertex(random_vertices2[i])
directed_graph.remove_vertex(random_vertices2[i])
self.__assert_graph_vertex_does_not_exist_check(
undirected_graph, directed_graph, random_vertices2[i]
)
def test_contains_edge(self) -> None:
# generate graphs and graph input
vertex_count = 20
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(vertex_count, 0, 100, 4)
# generate all possible edges for testing
all_possible_edges: list[list[int]] = []
for i in range(vertex_count - 1):
for j in range(i + 1, vertex_count):
all_possible_edges.append([random_vertices[i], random_vertices[j]])
all_possible_edges.append([random_vertices[j], random_vertices[i]])
# test contains_edge function
for edge in all_possible_edges:
if edge in random_edges:
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
elif [edge[1], edge[0]] in random_edges:
# since this edge exists for undirected but the reverse
# may not exist for directed
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, [edge[1], edge[0]]
)
else:
self.__assert_graph_edge_does_not_exist_check(
undirected_graph, directed_graph, edge
)
def test_add_edge(self) -> None:
# generate graph input
random_vertices: list[int] = random.sample(range(101), 15)
random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
# build graphs WITHOUT edges
undirected_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=False
)
directed_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=True
)
# run and test add_edge
for edge in random_edges:
undirected_graph.add_edge(edge[0], edge[1])
directed_graph.add_edge(edge[0], edge[1])
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
def test_remove_edge(self) -> None:
# generate graph input and graphs
(
undirected_graph,
directed_graph,
_random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
# run and test remove_edge
for edge in random_edges:
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
undirected_graph.remove_edge(edge[0], edge[1])
directed_graph.remove_edge(edge[0], edge[1])
self.__assert_graph_edge_does_not_exist_check(
undirected_graph, directed_graph, edge
)
def test_add_and_remove_edges_repeatedly(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
# make some more edge options!
more_random_edges: list[list[int]] = []
while len(more_random_edges) != len(random_edges):
edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
for edge in edges:
if len(more_random_edges) == len(random_edges):
break
elif edge not in more_random_edges and edge not in random_edges:
more_random_edges.append(edge)
for i, _ in enumerate(random_edges):
undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, more_random_edges[i]
)
undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1])
directed_graph.remove_edge(random_edges[i][0], random_edges[i][1])
self.__assert_graph_edge_does_not_exist_check(
undirected_graph, directed_graph, random_edges[i]
)
def test_add_vertex_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
_random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for vertex in random_vertices:
with pytest.raises(ValueError):
undirected_graph.add_vertex(vertex)
with pytest.raises(ValueError):
directed_graph.add_vertex(vertex)
def test_remove_vertex_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
_random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for i in range(101):
if i not in random_vertices:
with pytest.raises(ValueError):
undirected_graph.remove_vertex(i)
with pytest.raises(ValueError):
directed_graph.remove_vertex(i)
def test_add_edge_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
_random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for edge in random_edges:
with pytest.raises(ValueError):
undirected_graph.add_edge(edge[0], edge[1])
with pytest.raises(ValueError):
directed_graph.add_edge(edge[0], edge[1])
def test_remove_edge_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
more_random_edges: list[list[int]] = []
while len(more_random_edges) != len(random_edges):
edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
for edge in edges:
if len(more_random_edges) == len(random_edges):
break
elif edge not in more_random_edges and edge not in random_edges:
more_random_edges.append(edge)
for edge in more_random_edges:
with pytest.raises(ValueError):
undirected_graph.remove_edge(edge[0], edge[1])
with pytest.raises(ValueError):
directed_graph.remove_edge(edge[0], edge[1])
def test_contains_edge_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
_random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for vertex in random_vertices:
with pytest.raises(ValueError):
undirected_graph.contains_edge(vertex, 102)
with pytest.raises(ValueError):
directed_graph.contains_edge(vertex, 102)
with pytest.raises(ValueError):
undirected_graph.contains_edge(103, 102)
with pytest.raises(ValueError):
directed_graph.contains_edge(103, 102)
if __name__ == "__main__":
unittest.main()
| TestGraphAdjacencyList |
python | scikit-learn__scikit-learn | sklearn/utils/parallel.py | {
"start": 4532,
"end": 8102
} | class ____:
"""Load the global configuration before calling the function."""
def __init__(self, function):
self.function = function
update_wrapper(self, self.function)
def with_config_and_warning_filters(self, config, warning_filters):
self.config = config
self.warning_filters = warning_filters
return self
def __call__(self, *args, **kwargs):
config = getattr(self, "config", {})
warning_filters = getattr(self, "warning_filters", [])
if not config or not warning_filters:
warnings.warn(
(
"`sklearn.utils.parallel.delayed` should be used with"
" `sklearn.utils.parallel.Parallel` to make it possible to"
" propagate the scikit-learn configuration of the current thread to"
" the joblib workers."
),
UserWarning,
)
with config_context(**config), warnings.catch_warnings():
# TODO is there a simpler way that resetwarnings+ filterwarnings?
warnings.resetwarnings()
warning_filter_keys = ["action", "message", "category", "module", "lineno"]
for filter_args in warning_filters:
this_warning_filter_dict = {
k: v
for k, v in zip(warning_filter_keys, filter_args)
if v is not None
}
# Some small discrepancy between warnings filters and what
# filterwarnings expect. simplefilter is more lenient, e.g.
# accepts a tuple as category. We try simplefilter first and
# use filterwarnings in more complicated cases
if (
"message" not in this_warning_filter_dict
and "module" not in this_warning_filter_dict
):
warnings.simplefilter(**this_warning_filter_dict, append=True)
else:
# 'message' and 'module' are most of the time regex.Pattern but
# can be str as well and filterwarnings wants a str
for special_key in ["message", "module"]:
this_value = this_warning_filter_dict.get(special_key)
if this_value is not None and not isinstance(this_value, str):
this_warning_filter_dict[special_key] = this_value.pattern
warnings.filterwarnings(**this_warning_filter_dict, append=True)
return self.function(*args, **kwargs)
def _get_threadpool_controller():
"""Return the global threadpool controller instance."""
global _threadpool_controller
if _threadpool_controller is None:
_threadpool_controller = ThreadpoolController()
return _threadpool_controller
def _threadpool_controller_decorator(limits=1, user_api="blas"):
"""Decorator to limit the number of threads used at the function level.
It should be preferred over `threadpoolctl.ThreadpoolController.wrap` because this
one only loads the shared libraries when the function is called while the latter
loads them at import time.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
controller = _get_threadpool_controller()
with controller.limit(limits=limits, user_api=user_api):
return func(*args, **kwargs)
return wrapper
return decorator
| _FuncWrapper |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 63557,
"end": 65051
} | class ____(PrefectOperatorFilterBaseModel):
"""Filter BlockDocuments. Only BlockDocuments matching all criteria will be returned"""
id: Optional[BlockDocumentFilterId] = Field(
default=None, description="Filter criteria for `BlockDocument.id`"
)
is_anonymous: Optional[BlockDocumentFilterIsAnonymous] = Field(
# default is to exclude anonymous blocks
BlockDocumentFilterIsAnonymous(eq_=False),
description=(
"Filter criteria for `BlockDocument.is_anonymous`. "
"Defaults to excluding anonymous blocks."
),
)
block_type_id: Optional[BlockDocumentFilterBlockTypeId] = Field(
default=None, description="Filter criteria for `BlockDocument.block_type_id`"
)
name: Optional[BlockDocumentFilterName] = Field(
default=None, description="Filter criteria for `BlockDocument.name`"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.id is not None:
filters.append(self.id.as_sql_filter())
if self.is_anonymous is not None:
filters.append(self.is_anonymous.as_sql_filter())
if self.block_type_id is not None:
filters.append(self.block_type_id.as_sql_filter())
if self.name is not None:
filters.append(self.name.as_sql_filter())
return filters
| BlockDocumentFilter |
python | huggingface__transformers | src/transformers/models/ibert/quant_modules.py | {
"start": 11293,
"end": 13255
} | class ____(nn.Module):
"""
Quantized version of `torch.nn.GELU`. Adds quantization-specific arguments on top of `torch.nn.GELU`.
Args:
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "gelu" or "nonlinear" is given.
"""
def __init__(self, quant_mode=True, force_dequant="none"):
super().__init__()
self.quant_mode = quant_mode
if force_dequant in ["nonlinear", "gelu"]:
logger.info("Force dequantize gelu")
self.quant_mode = False
if not self.quant_mode:
self.activation_fn = nn.GELU()
self.k = 1.4142
self.const = 14 # dummy integer constant
self.coeff = [-0.2888, -1.769, 1] # a(x+b)**2 + c
self.coeff[2] /= self.coeff[0]
def int_erf(self, x_int, scaling_factor):
b_int = torch.floor(self.coeff[1] / scaling_factor)
c_int = torch.floor(self.coeff[2] / scaling_factor**2)
sign = torch.sign(x_int)
abs_int = torch.min(torch.abs(x_int), -b_int)
y_int = sign * ((abs_int + b_int) ** 2 + c_int)
scaling_factor = scaling_factor**2 * self.coeff[0]
# avoid overflow
y_int = floor_ste.apply(y_int / 2**self.const)
scaling_factor = scaling_factor * 2**self.const
return y_int, scaling_factor
def forward(self, x, scaling_factor=None):
if not self.quant_mode:
return self.activation_fn(x), None
x_int = x / scaling_factor
sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)
shift_int = 1.0 // sigmoid_scaling_factor
x_int = x_int * (sigmoid_int + shift_int)
scaling_factor = scaling_factor * sigmoid_scaling_factor / 2
return x_int * scaling_factor, scaling_factor
| IntGELU |
python | wandb__wandb | tests/unit_tests/test_step_upload.py | {
"start": 3935,
"end": 7192
} | class ____:
def test_finishes_when_no_commands(self):
run_step_upload([])
def test_finishes_after_simple_upload(self):
api = make_api()
run_step_upload([make_request_upload(make_tmp_file(Path("/tmp")))], api=api)
api.upload_file_retry.assert_called()
def test_finishes_after_nonexistent_upload_failure(self, tmp_path: Path):
api = make_api()
run_step_upload(
[make_request_upload(tmp_path / "nonexistent-file.txt")], api=api
)
api.upload_file_retry.assert_not_called()
def test_finishes_after_multiple_uploads(self, tmp_path: Path):
api = make_api()
run_step_upload(
[
make_request_upload(make_tmp_file(tmp_path)),
make_request_upload(make_tmp_file(tmp_path)),
make_request_upload(make_tmp_file(tmp_path)),
],
api=api,
)
api.upload_file_retry.assert_called()
def test_finishes_after_upload_urls_err(self, tmp_path: Path):
api = make_api(upload_urls=Mock(side_effect=Exception("upload_urls failed")))
run_step_upload([make_request_upload(make_tmp_file(tmp_path))], api=api)
api.upload_urls.assert_called()
def test_finishes_after_upload_err(self, tmp_path: Path):
api = make_api(upload_file_retry=Mock(side_effect=Exception("upload failed")))
run_step_upload([make_request_upload(make_tmp_file(tmp_path))], api=api)
api.upload_file_retry.assert_called()
def test_finishes_after_artifact_upload_err(self, tmp_path: Path):
api = make_api(upload_file_retry=Mock(side_effect=Exception("upload failed")))
run_step_upload(
[
make_request_upload(make_tmp_file(tmp_path), artifact_id="my-artifact"),
make_request_commit("my-artifact"),
],
api=api,
)
api.upload_file_retry.assert_called()
def test_finishes_after_artifact_commit(self, tmp_path: Path):
api = make_api()
run_step_upload(
[
make_request_upload(make_tmp_file(tmp_path), artifact_id="my-artifact"),
make_request_commit("my-artifact"),
],
api=api,
)
api.commit_artifact.assert_called()
def test_finishes_after_artifact_commit_err(self, tmp_path: Path):
api = make_api(commit_artifact=Mock(side_effect=Exception("commit failed")))
run_step_upload(
[
make_request_upload(make_tmp_file(tmp_path), artifact_id="my-artifact"),
make_request_commit("my-artifact"),
],
api=api,
)
api.commit_artifact.assert_called()
def test_no_finish_until_jobs_done(
self,
tmp_path: Path,
):
api = UploadBlockingMockApi()
done = threading.Event()
q = queue.Queue()
q.put(make_request_upload(make_tmp_file(tmp_path)))
q.put(RequestFinish(callback=done.set))
step_upload = make_step_upload(api=api, event_queue=q)
step_upload.start()
unblock = api.wait_for_upload(2)
assert not done.wait(0.1)
unblock()
assert done.wait(2)
| TestFinish |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 65870,
"end": 66668
} | class ____(themeable):
"""
x-axis tick direction
Parameters
----------
theme_element : Literal["in", "out"]
`in` for ticks inside the panel.
`out` for ticks outside the panel.
"""
def __init__(self, theme_element):
msg = (
f"Themeable '{self.__class__.__name__}' is deprecated and"
"will be removed in a future version. "
"Use +ve or -ve values of the axis_ticks_length"
"to affect the direction of the ticks."
)
warn(msg, FutureWarning, stacklevel=1)
super().__init__(theme_element)
def apply_ax(self, ax: Axes):
super().apply_ax(ax)
ax.xaxis.set_tick_params(
which="major", tickdir=self.properties["value"]
)
| axis_ticks_direction_x |
python | gevent__gevent | src/greentest/3.12/test_weakref.py | {
"start": 773,
"end": 1323
} | class ____:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
def some_method(self):
return 4
def other_method(self):
return 5
| Object |
python | pandas-dev__pandas | pandas/tests/libs/test_hashtable.py | {
"start": 13849,
"end": 19316
} | class ____:
def test_nan_float(self):
nan1 = float("nan")
nan2 = float("nan")
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
def test_nan_complex_both(self):
nan1 = complex(float("nan"), float("nan"))
nan2 = complex(float("nan"), float("nan"))
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
def test_nan_complex_real(self):
nan1 = complex(float("nan"), 1)
nan2 = complex(float("nan"), 1)
other = complex(float("nan"), 2)
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
with pytest.raises(KeyError, match=re.escape(repr(other))):
table.get_item(other)
def test_nan_complex_imag(self):
nan1 = complex(1, float("nan"))
nan2 = complex(1, float("nan"))
other = complex(2, float("nan"))
assert nan1 is not nan2
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
with pytest.raises(KeyError, match=re.escape(repr(other))):
table.get_item(other)
def test_nan_in_tuple(self):
nan1 = (float("nan"),)
nan2 = (float("nan"),)
assert nan1[0] is not nan2[0]
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
def test_nan_in_nested_tuple(self):
nan1 = (1, (2, (float("nan"),)))
nan2 = (1, (2, (float("nan"),)))
other = (1, 2)
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
with pytest.raises(KeyError, match=re.escape(repr(other))):
table.get_item(other)
def test_nan_in_namedtuple(self):
T = namedtuple("T", ["x"])
nan1 = T(float("nan"))
nan2 = T(float("nan"))
assert nan1.x is not nan2.x
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
def test_nan_in_nested_namedtuple(self):
T = namedtuple("T", ["x", "y"])
nan1 = T(1, (2, (float("nan"),)))
nan2 = T(1, (2, (float("nan"),)))
other = T(1, 2)
table = ht.PyObjectHashTable()
table.set_item(nan1, 42)
assert table.get_item(nan2) == 42
with pytest.raises(KeyError, match=re.escape(repr(other))):
table.get_item(other)
def test_hash_equal_tuple_with_nans():
a = (float("nan"), (float("nan"), float("nan")))
b = (float("nan"), (float("nan"), float("nan")))
assert ht.object_hash(a) == ht.object_hash(b)
assert ht.objects_are_equal(a, b)
def test_hash_equal_namedtuple_with_nans():
T = namedtuple("T", ["x", "y"])
a = T(float("nan"), (float("nan"), float("nan")))
b = T(float("nan"), (float("nan"), float("nan")))
assert ht.object_hash(a) == ht.object_hash(b)
assert ht.objects_are_equal(a, b)
def test_hash_equal_namedtuple_and_tuple():
T = namedtuple("T", ["x", "y"])
a = T(1, (2, 3))
b = (1, (2, 3))
assert ht.object_hash(a) == ht.object_hash(b)
assert ht.objects_are_equal(a, b)
def test_get_labels_groupby_for_Int64(writable):
table = ht.Int64HashTable()
vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)
vals.flags.writeable = writable
arr, unique = table.get_labels_groupby(vals)
expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.intp)
expected_unique = np.array([1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(arr, expected_arr)
tm.assert_numpy_array_equal(unique, expected_unique)
def test_tracemalloc_works_for_StringHashTable():
N = 1000
keys = np.arange(N).astype(np.str_).astype(np.object_)
with activated_tracemalloc():
table = ht.StringHashTable()
table.map_locations(keys)
used = get_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert get_allocated_khash_memory() == 0
def test_tracemalloc_for_empty_StringHashTable():
with activated_tracemalloc():
table = ht.StringHashTable()
used = get_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert get_allocated_khash_memory() == 0
@pytest.mark.parametrize("N", range(1, 110, 4))
def test_no_reallocation_StringHashTable(N):
keys = np.arange(N).astype(np.str_).astype(np.object_)
preallocated_table = ht.StringHashTable(N)
n_buckets_start = preallocated_table.get_state()["n_buckets"]
preallocated_table.map_locations(keys)
n_buckets_end = preallocated_table.get_state()["n_buckets"]
# original number of buckets was enough:
assert n_buckets_start == n_buckets_end
# check with clean table (not too much preallocated)
clean_table = ht.StringHashTable()
clean_table.map_locations(keys)
assert n_buckets_start == clean_table.get_state()["n_buckets"]
@pytest.mark.parametrize(
"table_type, dtype",
[
(ht.Float64HashTable, np.float64),
(ht.Float32HashTable, np.float32),
(ht.Complex128HashTable, np.complex128),
(ht.Complex64HashTable, np.complex64),
],
)
| TestPyObjectHashTableWithNans |
python | wandb__wandb | wandb/sdk/artifacts/_generated/project_artifacts.py | {
"start": 316,
"end": 400
} | class ____(GQLResult):
project: Optional[ProjectArtifactsProject]
| ProjectArtifacts |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/msgpack/fallback.py | {
"start": 3407,
"end": 20372
} | class ____:
"""Streaming unpacker.
Arguments:
:param file_like:
File-like object having `.read(n)` method.
If specified, unpacker reads serialized data from it and `.feed()` is not usable.
:param int read_size:
Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`)
:param bool use_list:
If true, unpack msgpack array to Python list.
Otherwise, unpack to Python tuple. (default: True)
:param bool raw:
If true, unpack msgpack raw to Python bytes.
Otherwise, unpack to Python str by decoding with UTF-8 encoding (default).
:param int timestamp:
Control how timestamp type is unpacked:
0 - Timestamp
1 - float (Seconds from the EPOCH)
2 - int (Nanoseconds from the EPOCH)
3 - datetime.datetime (UTC).
:param bool strict_map_key:
If true (default), only str or bytes are accepted for map (dict) keys.
:param object_hook:
When specified, it should be callable.
Unpacker calls it with a dict argument after unpacking msgpack map.
(See also simplejson)
:param object_pairs_hook:
When specified, it should be callable.
Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
(See also simplejson)
:param str unicode_errors:
The error handler for decoding unicode. (default: 'strict')
This option should be used only when you have msgpack data which
contains invalid UTF-8 string.
:param int max_buffer_size:
Limits size of data waiting unpacked. 0 means 2**32-1.
The default value is 100*1024*1024 (100MiB).
Raises `BufferFull` exception when it is insufficient.
You should set this parameter when unpacking data from untrusted source.
:param int max_str_len:
Deprecated, use *max_buffer_size* instead.
Limits max length of str. (default: max_buffer_size)
:param int max_bin_len:
Deprecated, use *max_buffer_size* instead.
Limits max length of bin. (default: max_buffer_size)
:param int max_array_len:
Limits max length of array.
(default: max_buffer_size)
:param int max_map_len:
Limits max length of map.
(default: max_buffer_size//2)
:param int max_ext_len:
Deprecated, use *max_buffer_size* instead.
Limits max size of ext type. (default: max_buffer_size)
Example of streaming deserialize from file-like object::
unpacker = Unpacker(file_like)
for o in unpacker:
process(o)
Example of streaming deserialize from socket::
unpacker = Unpacker()
while True:
buf = sock.recv(1024**2)
if not buf:
break
unpacker.feed(buf)
for o in unpacker:
process(o)
Raises ``ExtraData`` when *packed* contains extra bytes.
Raises ``OutOfData`` when *packed* is incomplete.
Raises ``FormatError`` when *packed* is not valid msgpack.
Raises ``StackError`` when *packed* contains too nested.
Other exceptions can be raised during unpacking.
"""
def __init__(
self,
file_like=None,
read_size=0,
use_list=True,
raw=False,
timestamp=0,
strict_map_key=True,
object_hook=None,
object_pairs_hook=None,
list_hook=None,
unicode_errors=None,
max_buffer_size=100 * 1024 * 1024,
ext_hook=ExtType,
max_str_len=-1,
max_bin_len=-1,
max_array_len=-1,
max_map_len=-1,
max_ext_len=-1,
):
if unicode_errors is None:
unicode_errors = "strict"
if file_like is None:
self._feeding = True
else:
if not callable(file_like.read):
raise TypeError("`file_like.read` must be callable")
self.file_like = file_like
self._feeding = False
#: array of bytes fed.
self._buffer = bytearray()
#: Which position we currently reads
self._buff_i = 0
# When Unpacker is used as an iterable, between the calls to next(),
# the buffer is not "consumed" completely, for efficiency sake.
# Instead, it is done sloppily. To make sure we raise BufferFull at
# the correct moments, we have to keep track of how sloppy we were.
# Furthermore, when the buffer is incomplete (that is: in the case
# we raise an OutOfData) we need to rollback the buffer to the correct
# state, which _buf_checkpoint records.
self._buf_checkpoint = 0
if not max_buffer_size:
max_buffer_size = 2**31 - 1
if max_str_len == -1:
max_str_len = max_buffer_size
if max_bin_len == -1:
max_bin_len = max_buffer_size
if max_array_len == -1:
max_array_len = max_buffer_size
if max_map_len == -1:
max_map_len = max_buffer_size // 2
if max_ext_len == -1:
max_ext_len = max_buffer_size
self._max_buffer_size = max_buffer_size
if read_size > self._max_buffer_size:
raise ValueError("read_size must be smaller than max_buffer_size")
self._read_size = read_size or min(self._max_buffer_size, 16 * 1024)
self._raw = bool(raw)
self._strict_map_key = bool(strict_map_key)
self._unicode_errors = unicode_errors
self._use_list = use_list
if not (0 <= timestamp <= 3):
raise ValueError("timestamp must be 0..3")
self._timestamp = timestamp
self._list_hook = list_hook
self._object_hook = object_hook
self._object_pairs_hook = object_pairs_hook
self._ext_hook = ext_hook
self._max_str_len = max_str_len
self._max_bin_len = max_bin_len
self._max_array_len = max_array_len
self._max_map_len = max_map_len
self._max_ext_len = max_ext_len
self._stream_offset = 0
if list_hook is not None and not callable(list_hook):
raise TypeError("`list_hook` is not callable")
if object_hook is not None and not callable(object_hook):
raise TypeError("`object_hook` is not callable")
if object_pairs_hook is not None and not callable(object_pairs_hook):
raise TypeError("`object_pairs_hook` is not callable")
if object_hook is not None and object_pairs_hook is not None:
raise TypeError("object_pairs_hook and object_hook are mutually exclusive")
if not callable(ext_hook):
raise TypeError("`ext_hook` is not callable")
def feed(self, next_bytes):
assert self._feeding
view = _get_data_from_buffer(next_bytes)
if len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size:
raise BufferFull
# Strip buffer before checkpoint before reading file.
if self._buf_checkpoint > 0:
del self._buffer[: self._buf_checkpoint]
self._buff_i -= self._buf_checkpoint
self._buf_checkpoint = 0
# Use extend here: INPLACE_ADD += doesn't reliably typecast memoryview in jython
self._buffer.extend(view)
def _consume(self):
"""Gets rid of the used parts of the buffer."""
self._stream_offset += self._buff_i - self._buf_checkpoint
self._buf_checkpoint = self._buff_i
def _got_extradata(self):
return self._buff_i < len(self._buffer)
def _get_extradata(self):
return self._buffer[self._buff_i :]
def read_bytes(self, n):
ret = self._read(n, raise_outofdata=False)
self._consume()
return ret
def _read(self, n, raise_outofdata=True):
# (int) -> bytearray
self._reserve(n, raise_outofdata=raise_outofdata)
i = self._buff_i
ret = self._buffer[i : i + n]
self._buff_i = i + len(ret)
return ret
def _reserve(self, n, raise_outofdata=True):
remain_bytes = len(self._buffer) - self._buff_i - n
# Fast path: buffer has n bytes already
if remain_bytes >= 0:
return
if self._feeding:
self._buff_i = self._buf_checkpoint
raise OutOfData
# Strip buffer before checkpoint before reading file.
if self._buf_checkpoint > 0:
del self._buffer[: self._buf_checkpoint]
self._buff_i -= self._buf_checkpoint
self._buf_checkpoint = 0
# Read from file
remain_bytes = -remain_bytes
if remain_bytes + len(self._buffer) > self._max_buffer_size:
raise BufferFull
while remain_bytes > 0:
to_read_bytes = max(self._read_size, remain_bytes)
read_data = self.file_like.read(to_read_bytes)
if not read_data:
break
assert isinstance(read_data, bytes)
self._buffer += read_data
remain_bytes -= len(read_data)
if len(self._buffer) < n + self._buff_i and raise_outofdata:
self._buff_i = 0 # rollback
raise OutOfData
    def _read_header(self):
        """Decode one msgpack header byte plus its length/payload fields.

        Returns a ``(typ, n, obj)`` triple: ``typ`` is one of the ``TYPE_*``
        constants, ``n`` is a length or ext type code where applicable, and
        ``obj`` holds the decoded immediate value or raw payload (``None``
        otherwise).  Raises FormatError for an unknown header byte and
        ValueError when a declared length exceeds the configured limits.
        """
        typ = TYPE_IMMEDIATE
        n = 0
        obj = None
        self._reserve(1)
        b = self._buffer[self._buff_i]
        self._buff_i += 1
        if b & 0b10000000 == 0:
            # positive fixint (0x00 - 0x7f): value is the byte itself
            obj = b
        elif b & 0b11100000 == 0b11100000:
            # negative fixint (0xe0 - 0xff)
            obj = -1 - (b ^ 0xFF)
        elif b & 0b11100000 == 0b10100000:
            # fixstr (0xa0 - 0xbf): length in the low 5 bits
            n = b & 0b00011111
            typ = TYPE_RAW
            if n > self._max_str_len:
                raise ValueError(f"{n} exceeds max_str_len({self._max_str_len})")
            obj = self._read(n)
        elif b & 0b11110000 == 0b10010000:
            # fixarray (0x90 - 0x9f): element count in the low 4 bits
            n = b & 0b00001111
            typ = TYPE_ARRAY
            if n > self._max_array_len:
                raise ValueError(f"{n} exceeds max_array_len({self._max_array_len})")
        elif b & 0b11110000 == 0b10000000:
            # fixmap (0x80 - 0x8f): pair count in the low 4 bits
            n = b & 0b00001111
            typ = TYPE_MAP
            if n > self._max_map_len:
                raise ValueError(f"{n} exceeds max_map_len({self._max_map_len})")
        elif b == 0xC0:
            # nil
            obj = None
        elif b == 0xC2:
            # false
            obj = False
        elif b == 0xC3:
            # true
            obj = True
        elif 0xC4 <= b <= 0xC6:
            # bin 8/16/32: unpack the length field, then read the payload
            size, fmt, typ = _MSGPACK_HEADERS[b]
            self._reserve(size)
            if len(fmt) > 0:
                n = struct.unpack_from(fmt, self._buffer, self._buff_i)[0]
            else:
                n = self._buffer[self._buff_i]
            self._buff_i += size
            if n > self._max_bin_len:
                raise ValueError(f"{n} exceeds max_bin_len({self._max_bin_len})")
            obj = self._read(n)
        elif 0xC7 <= b <= 0xC9:
            # ext 8/16/32: L is the payload length, n is the ext type code
            size, fmt, typ = _MSGPACK_HEADERS[b]
            self._reserve(size)
            L, n = struct.unpack_from(fmt, self._buffer, self._buff_i)
            self._buff_i += size
            if L > self._max_ext_len:
                raise ValueError(f"{L} exceeds max_ext_len({self._max_ext_len})")
            obj = self._read(L)
        elif 0xCA <= b <= 0xD3:
            # float 32/64 and (u)int 8/16/32/64: fixed-size numeric value
            size, fmt = _MSGPACK_HEADERS[b]
            self._reserve(size)
            if len(fmt) > 0:
                obj = struct.unpack_from(fmt, self._buffer, self._buff_i)[0]
            else:
                obj = self._buffer[self._buff_i]
            self._buff_i += size
        elif 0xD4 <= b <= 0xD8:
            # fixext 1/2/4/8/16: one ext-type byte followed by a fixed payload
            size, fmt, typ = _MSGPACK_HEADERS[b]
            if self._max_ext_len < size:
                raise ValueError(f"{size} exceeds max_ext_len({self._max_ext_len})")
            self._reserve(size + 1)
            n, obj = struct.unpack_from(fmt, self._buffer, self._buff_i)
            self._buff_i += size + 1
        elif 0xD9 <= b <= 0xDB:
            # str 8/16/32
            size, fmt, typ = _MSGPACK_HEADERS[b]
            self._reserve(size)
            if len(fmt) > 0:
                (n,) = struct.unpack_from(fmt, self._buffer, self._buff_i)
            else:
                n = self._buffer[self._buff_i]
            self._buff_i += size
            if n > self._max_str_len:
                raise ValueError(f"{n} exceeds max_str_len({self._max_str_len})")
            obj = self._read(n)
        elif 0xDC <= b <= 0xDD:
            # array 16/32
            size, fmt, typ = _MSGPACK_HEADERS[b]
            self._reserve(size)
            (n,) = struct.unpack_from(fmt, self._buffer, self._buff_i)
            self._buff_i += size
            if n > self._max_array_len:
                raise ValueError(f"{n} exceeds max_array_len({self._max_array_len})")
        elif 0xDE <= b <= 0xDF:
            # map 16/32
            size, fmt, typ = _MSGPACK_HEADERS[b]
            self._reserve(size)
            (n,) = struct.unpack_from(fmt, self._buffer, self._buff_i)
            self._buff_i += size
            if n > self._max_map_len:
                raise ValueError(f"{n} exceeds max_map_len({self._max_map_len})")
        else:
            raise FormatError("Unknown header: 0x%x" % b)
        return typ, n, obj
    def _unpack(self, execute=EX_CONSTRUCT):
        """Recursively decode one msgpack object from the buffer.

        *execute* selects the mode: EX_CONSTRUCT builds Python objects,
        EX_SKIP decodes without constructing, and EX_READ_ARRAY_HEADER /
        EX_READ_MAP_HEADER validate the type and return just the container
        length.
        """
        typ, n, obj = self._read_header()
        if execute == EX_READ_ARRAY_HEADER:
            if typ != TYPE_ARRAY:
                raise ValueError("Expected array")
            return n
        if execute == EX_READ_MAP_HEADER:
            if typ != TYPE_MAP:
                raise ValueError("Expected map")
            return n
        # TODO should we eliminate the recursion?
        if typ == TYPE_ARRAY:
            if execute == EX_SKIP:
                for i in range(n):
                    # TODO check whether we need to call `list_hook`
                    self._unpack(EX_SKIP)
                return
            ret = newlist_hint(n)
            for i in range(n):
                ret.append(self._unpack(EX_CONSTRUCT))
            if self._list_hook is not None:
                ret = self._list_hook(ret)
            # TODO is the interaction between `list_hook` and `use_list` ok?
            return ret if self._use_list else tuple(ret)
        if typ == TYPE_MAP:
            if execute == EX_SKIP:
                # Skip n key/value pairs (two objects per entry).
                for i in range(n):
                    # TODO check whether we need to call hooks
                    self._unpack(EX_SKIP)
                    self._unpack(EX_SKIP)
                return
            if self._object_pairs_hook is not None:
                ret = self._object_pairs_hook(
                    (self._unpack(EX_CONSTRUCT), self._unpack(EX_CONSTRUCT)) for _ in range(n)
                )
            else:
                ret = {}
                for _ in range(n):
                    key = self._unpack(EX_CONSTRUCT)
                    if self._strict_map_key and type(key) not in (str, bytes):
                        raise ValueError("%s is not allowed for map key" % str(type(key)))
                    if isinstance(key, str):
                        # Interning de-duplicates key strings repeated across maps.
                        key = sys.intern(key)
                    ret[key] = self._unpack(EX_CONSTRUCT)
            # NOTE: applied to the object_pairs_hook result as well (outside the else).
            if self._object_hook is not None:
                ret = self._object_hook(ret)
            return ret
        if execute == EX_SKIP:
            return
        if typ == TYPE_RAW:
            # raw/str payload: keep as bytes when raw=True, else decode UTF-8
            if self._raw:
                obj = bytes(obj)
            else:
                obj = obj.decode("utf_8", self._unicode_errors)
            return obj
        if typ == TYPE_BIN:
            return bytes(obj)
        if typ == TYPE_EXT:
            if n == -1:  # timestamp
                ts = Timestamp.from_bytes(bytes(obj))
                # _timestamp selects the representation: 1=float unix seconds,
                # 2=int unix nanoseconds, 3=datetime, else the Timestamp itself.
                if self._timestamp == 1:
                    return ts.to_unix()
                elif self._timestamp == 2:
                    return ts.to_unix_nano()
                elif self._timestamp == 3:
                    return ts.to_datetime()
                else:
                    return ts
            else:
                return self._ext_hook(n, bytes(obj))
        assert typ == TYPE_IMMEDIATE
        return obj
    def __iter__(self):
        # The unpacker is its own iterator (see __next__).
        return self
    def __next__(self):
        """Return the next unpacked object, raising StopIteration at end of data."""
        try:
            ret = self._unpack(EX_CONSTRUCT)
            self._consume()
            return ret
        except OutOfData:
            # No complete object left: commit what was read and end iteration.
            self._consume()
            raise StopIteration
        except RecursionError:
            # Input nested too deeply; surface as msgpack's StackError.
            raise StackError
    next = __next__  # Python 2 iterator-protocol alias
def skip(self):
self._unpack(EX_SKIP)
self._consume()
def unpack(self):
try:
ret = self._unpack(EX_CONSTRUCT)
except RecursionError:
raise StackError
self._consume()
return ret
def read_array_header(self):
ret = self._unpack(EX_READ_ARRAY_HEADER)
self._consume()
return ret
def read_map_header(self):
ret = self._unpack(EX_READ_MAP_HEADER)
self._consume()
return ret
def tell(self):
return self._stream_offset
| Unpacker |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_bytes.py | {
"start": 960,
"end": 1067
} | class ____:
    def __bytes__(self):
        # NOTE(review): appears to be a lint-rule fixture — __bytes__
        # deliberately never returns bytes (prints, then raises), presumably
        # to exercise an invalid-return-type check; confirm before changing.
        print("raise some error")
        raise NotImplementedError
| Bytes6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.