language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/box.py | {
"start": 341,
"end": 10239
} | class ____:
"""Defines characters to render boxes.
โโโฌโ top
โ โโ head
โโโผโค head_row
โ โโ mid
โโโผโค row
โโโผโค foot_row
โ โโ foot
โโโดโ bottom
Args:
box (str): Characters making up box.
ascii (bool, optional): True if this box uses ascii characters only. Default is False.
"""
def __init__(self, box: str, *, ascii: bool = False) -> None:
self._box = box
self.ascii = ascii
line1, line2, line3, line4, line5, line6, line7, line8 = box.splitlines()
# top
self.top_left, self.top, self.top_divider, self.top_right = iter(line1)
# head
self.head_left, _, self.head_vertical, self.head_right = iter(line2)
# head_row
(
self.head_row_left,
self.head_row_horizontal,
self.head_row_cross,
self.head_row_right,
) = iter(line3)
# mid
self.mid_left, _, self.mid_vertical, self.mid_right = iter(line4)
# row
self.row_left, self.row_horizontal, self.row_cross, self.row_right = iter(line5)
# foot_row
(
self.foot_row_left,
self.foot_row_horizontal,
self.foot_row_cross,
self.foot_row_right,
) = iter(line6)
# foot
self.foot_left, _, self.foot_vertical, self.foot_right = iter(line7)
# bottom
self.bottom_left, self.bottom, self.bottom_divider, self.bottom_right = iter(
line8
)
def __repr__(self) -> str:
return "Box(...)"
def __str__(self) -> str:
return self._box
def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box":
"""Substitute this box for another if it won't render due to platform issues.
Args:
options (ConsoleOptions): Console options used in rendering.
safe (bool, optional): Substitute this for another Box if there are known problems
displaying on the platform (currently only relevant on Windows). Default is True.
Returns:
Box: A different Box or the same Box.
"""
box = self
if options.legacy_windows and safe:
box = LEGACY_WINDOWS_SUBSTITUTIONS.get(box, box)
if options.ascii_only and not box.ascii:
box = ASCII
return box
def get_plain_headed_box(self) -> "Box":
"""If this box uses special characters for the borders of the header, then
return the equivalent box that does not.
Returns:
Box: The most similar Box that doesn't use header-specific box characters.
If the current Box already satisfies this criterion, then it's returned.
"""
return PLAIN_HEADED_SUBSTITUTIONS.get(self, self)
def get_top(self, widths: Iterable[int]) -> str:
"""Get the top of a simple box.
Args:
widths (List[int]): Widths of columns.
Returns:
str: A string of box characters.
"""
parts: List[str] = []
append = parts.append
append(self.top_left)
for last, width in loop_last(widths):
append(self.top * width)
if not last:
append(self.top_divider)
append(self.top_right)
return "".join(parts)
def get_row(
self,
widths: Iterable[int],
level: Literal["head", "row", "foot", "mid"] = "row",
edge: bool = True,
) -> str:
"""Get the top of a simple box.
Args:
width (List[int]): Widths of columns.
Returns:
str: A string of box characters.
"""
if level == "head":
left = self.head_row_left
horizontal = self.head_row_horizontal
cross = self.head_row_cross
right = self.head_row_right
elif level == "row":
left = self.row_left
horizontal = self.row_horizontal
cross = self.row_cross
right = self.row_right
elif level == "mid":
left = self.mid_left
horizontal = " "
cross = self.mid_vertical
right = self.mid_right
elif level == "foot":
left = self.foot_row_left
horizontal = self.foot_row_horizontal
cross = self.foot_row_cross
right = self.foot_row_right
else:
raise ValueError("level must be 'head', 'row' or 'foot'")
parts: List[str] = []
append = parts.append
if edge:
append(left)
for last, width in loop_last(widths):
append(horizontal * width)
if not last:
append(cross)
if edge:
append(right)
return "".join(parts)
def get_bottom(self, widths: Iterable[int]) -> str:
"""Get the bottom of a simple box.
Args:
widths (List[int]): Widths of columns.
Returns:
str: A string of box characters.
"""
parts: List[str] = []
append = parts.append
append(self.bottom_left)
for last, width in loop_last(widths):
append(self.bottom * width)
if not last:
append(self.bottom_divider)
append(self.bottom_right)
return "".join(parts)
# fmt: off
ASCII: Box = Box(
"+--+\n"
"| ||\n"
"|-+|\n"
"| ||\n"
"|-+|\n"
"|-+|\n"
"| ||\n"
"+--+\n",
ascii=True,
)
ASCII2: Box = Box(
"+-++\n"
"| ||\n"
"+-++\n"
"| ||\n"
"+-++\n"
"+-++\n"
"| ||\n"
"+-++\n",
ascii=True,
)
ASCII_DOUBLE_HEAD: Box = Box(
"+-++\n"
"| ||\n"
"+=++\n"
"| ||\n"
"+-++\n"
"+-++\n"
"| ||\n"
"+-++\n",
ascii=True,
)
SQUARE: Box = Box(
"โโโฌโ\n"
"โ โโ\n"
"โโโผโค\n"
"โ โโ\n"
"โโโผโค\n"
"โโโผโค\n"
"โ โโ\n"
"โโโดโ\n"
)
SQUARE_DOUBLE_HEAD: Box = Box(
"โโโฌโ\n"
"โ โโ\n"
"โโโชโก\n"
"โ โโ\n"
"โโโผโค\n"
"โโโผโค\n"
"โ โโ\n"
"โโโดโ\n"
)
MINIMAL: Box = Box(
" โท \n"
" โ \n"
"โถโโผโด\n"
" โ \n"
"โถโโผโด\n"
"โถโโผโด\n"
" โ \n"
" โต \n"
)
MINIMAL_HEAVY_HEAD: Box = Box(
" โท \n"
" โ \n"
"โบโโฟโธ\n"
" โ \n"
"โถโโผโด\n"
"โถโโผโด\n"
" โ \n"
" โต \n"
)
MINIMAL_DOUBLE_HEAD: Box = Box(
" โท \n"
" โ \n"
" โโช \n"
" โ \n"
" โโผ \n"
" โโผ \n"
" โ \n"
" โต \n"
)
SIMPLE: Box = Box(
" \n"
" \n"
" โโ \n"
" \n"
" \n"
" โโ \n"
" \n"
" \n"
)
SIMPLE_HEAD: Box = Box(
" \n"
" \n"
" โโ \n"
" \n"
" \n"
" \n"
" \n"
" \n"
)
SIMPLE_HEAVY: Box = Box(
" \n"
" \n"
" โโ \n"
" \n"
" \n"
" โโ \n"
" \n"
" \n"
)
HORIZONTALS: Box = Box(
" โโ \n"
" \n"
" โโ \n"
" \n"
" โโ \n"
" โโ \n"
" \n"
" โโ \n"
)
ROUNDED: Box = Box(
"โญโโฌโฎ\n"
"โ โโ\n"
"โโโผโค\n"
"โ โโ\n"
"โโโผโค\n"
"โโโผโค\n"
"โ โโ\n"
"โฐโโดโฏ\n"
)
HEAVY: Box = Box(
"โโโณโ\n"
"โ โโ\n"
"โฃโโโซ\n"
"โ โโ\n"
"โฃโโโซ\n"
"โฃโโโซ\n"
"โ โโ\n"
"โโโปโ\n"
)
HEAVY_EDGE: Box = Box(
"โโโฏโ\n"
"โ โโ\n"
"โ โโผโจ\n"
"โ โโ\n"
"โ โโผโจ\n"
"โ โโผโจ\n"
"โ โโ\n"
"โโโทโ\n"
)
HEAVY_HEAD: Box = Box(
"โโโณโ\n"
"โ โโ\n"
"โกโโโฉ\n"
"โ โโ\n"
"โโโผโค\n"
"โโโผโค\n"
"โ โโ\n"
"โโโดโ\n"
)
DOUBLE: Box = Box(
"โโโฆโ\n"
"โ โโ\n"
"โ โโฌโฃ\n"
"โ โโ\n"
"โ โโฌโฃ\n"
"โ โโฌโฃ\n"
"โ โโ\n"
"โโโฉโ\n"
)
DOUBLE_EDGE: Box = Box(
"โโโคโ\n"
"โ โโ\n"
"โโโผโข\n"
"โ โโ\n"
"โโโผโข\n"
"โโโผโข\n"
"โ โโ\n"
"โโโงโ\n"
)
MARKDOWN: Box = Box(
" \n"
"| ||\n"
"|-||\n"
"| ||\n"
"|-||\n"
"|-||\n"
"| ||\n"
" \n",
ascii=True,
)
# fmt: on
# Map Boxes that don't render with raster fonts on to equivalent that do
LEGACY_WINDOWS_SUBSTITUTIONS = {
ROUNDED: SQUARE,
MINIMAL_HEAVY_HEAD: MINIMAL,
SIMPLE_HEAVY: SIMPLE,
HEAVY: SQUARE,
HEAVY_EDGE: SQUARE,
HEAVY_HEAD: SQUARE,
}
# Map headed boxes to their headerless equivalents
PLAIN_HEADED_SUBSTITUTIONS = {
HEAVY_HEAD: SQUARE,
SQUARE_DOUBLE_HEAD: SQUARE,
MINIMAL_DOUBLE_HEAD: MINIMAL,
MINIMAL_HEAVY_HEAD: MINIMAL,
ASCII_DOUBLE_HEAD: ASCII2,
}
if __name__ == "__main__": # pragma: no cover
from pipenv.patched.pip._vendor.rich.columns import Columns
from pipenv.patched.pip._vendor.rich.panel import Panel
from . import box as box
from .console import Console
from .table import Table
from .text import Text
console = Console(record=True)
BOXES = [
"ASCII",
"ASCII2",
"ASCII_DOUBLE_HEAD",
"SQUARE",
"SQUARE_DOUBLE_HEAD",
"MINIMAL",
"MINIMAL_HEAVY_HEAD",
"MINIMAL_DOUBLE_HEAD",
"SIMPLE",
"SIMPLE_HEAD",
"SIMPLE_HEAVY",
"HORIZONTALS",
"ROUNDED",
"HEAVY",
"HEAVY_EDGE",
"HEAVY_HEAD",
"DOUBLE",
"DOUBLE_EDGE",
"MARKDOWN",
]
console.print(Panel("[bold green]Box Constants", style="green"), justify="center")
console.print()
columns = Columns(expand=True, padding=2)
for box_name in sorted(BOXES):
table = Table(
show_footer=True, style="dim", border_style="not dim", expand=True
)
table.add_column("Header 1", "Footer 1")
table.add_column("Header 2", "Footer 2")
table.add_row("Cell", "Cell")
table.add_row("Cell", "Cell")
table.box = getattr(box, box_name)
table.title = Text(f"box.{box_name}", style="magenta")
columns.add_renderable(table)
console.print(columns)
# console.save_svg("box.svg")
| Box |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/advanced_activations.py | {
"start": 2810,
"end": 5826
} | class ____(Layer):
"""Parametric Rectified Linear Unit.
It follows:
```
f(x) = alpha * x for x < 0
f(x) = x for x >= 0
```
where `alpha` is a learned array with the same shape as x.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Args:
alpha_initializer: Initializer function for the weights.
alpha_regularizer: Regularizer for the weights.
alpha_constraint: Constraint for the weights.
shared_axes: The axes along which to share learnable
parameters for the activation function.
For example, if the incoming feature maps
are from a 2D convolution
with output shape `(batch, height, width, channels)`,
and you wish to share parameters across space
so that each filter only has one set of parameters,
set `shared_axes=[1, 2]`.
"""
def __init__(self,
alpha_initializer='zeros',
alpha_regularizer=None,
alpha_constraint=None,
shared_axes=None,
**kwargs):
super(PReLU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha_initializer = initializers.get(alpha_initializer)
self.alpha_regularizer = regularizers.get(alpha_regularizer)
self.alpha_constraint = constraints.get(alpha_constraint)
if shared_axes is None:
self.shared_axes = None
elif not isinstance(shared_axes, (list, tuple)):
self.shared_axes = [shared_axes]
else:
self.shared_axes = list(shared_axes)
@tf_utils.shape_type_conversion
def build(self, input_shape):
param_shape = list(input_shape[1:])
if self.shared_axes is not None:
for i in self.shared_axes:
param_shape[i - 1] = 1
self.alpha = self.add_weight(
shape=param_shape,
name='alpha',
initializer=self.alpha_initializer,
regularizer=self.alpha_regularizer,
constraint=self.alpha_constraint)
# Set input spec
axes = {}
if self.shared_axes:
for i in range(1, len(input_shape)):
if i not in self.shared_axes:
axes[i] = input_shape[i]
self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
self.built = True
def call(self, inputs):
pos = backend.relu(inputs)
neg = -self.alpha * backend.relu(-inputs)
return pos + neg
def get_config(self):
config = {
'alpha_initializer': initializers.serialize(self.alpha_initializer),
'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),
'alpha_constraint': constraints.serialize(self.alpha_constraint),
'shared_axes': self.shared_axes
}
base_config = super(PReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
| PReLU |
python | skorch-dev__skorch | skorch/tests/callbacks/test_logging.py | {
"start": 8513,
"end": 12866
} | class ____:
@pytest.fixture
def net_cls(self):
from skorch import NeuralNetClassifier
return NeuralNetClassifier
@pytest.fixture
def data(self, classifier_data):
X, y = classifier_data
# accelerate training since we don't care for the loss
X, y = X[:40], y[:40]
return X, y
@pytest.fixture
def sacred_logger_cls(self):
from skorch.callbacks import SacredLogger
return SacredLogger
@pytest.fixture
def sacred_experiment_cls(self):
from sacred import Experiment
return Experiment
@pytest.fixture
def mock_experiment(self, sacred_experiment_cls):
mock = Mock(spec=sacred_experiment_cls)
mock.log_scalar = Mock()
return mock
@pytest.fixture
def net_fitted(
self,
net_cls,
classifier_module,
data,
sacred_logger_cls,
mock_experiment,
):
return net_cls(
classifier_module,
callbacks=[sacred_logger_cls(mock_experiment)],
max_epochs=3,
).fit(*data)
def test_ignore_keys(
self,
net_cls,
classifier_module,
data,
sacred_logger_cls,
mock_experiment,
):
# ignore 'dur' and 'valid_loss', 'unknown' doesn't exist but
# this should not cause a problem
logger = sacred_logger_cls(
mock_experiment, keys_ignored=['dur', 'valid_loss', 'unknown'])
net_cls(
classifier_module,
callbacks=[logger],
max_epochs=3,
).fit(*data)
# 3 epochs x 2 epoch metrics = 6 calls
assert mock_experiment.log_scalar.call_count == 6
call_args = [args[0][0] for args in mock_experiment.log_scalar.call_args_list]
assert 'valid_loss' not in call_args
def test_keys_ignored_is_string(self, sacred_logger_cls, mock_experiment):
logger = sacred_logger_cls(
mock_experiment, keys_ignored='a-key').initialize()
expected = {'a-key', 'batches'}
assert logger.keys_ignored_ == expected
def test_fit_with_real_experiment(
self,
net_cls,
classifier_module,
data,
sacred_logger_cls,
sacred_experiment_cls,
):
experiment = sacred_experiment_cls()
@experiment.main
def experiment_main(_run):
net = net_cls(
classifier_module,
callbacks=[sacred_logger_cls(_run)],
max_epochs=5,
)
net.fit(*data)
experiment.run()
def test_log_on_batch_level_on(
self,
net_cls,
classifier_module,
data,
sacred_logger_cls,
mock_experiment,
):
net = net_cls(
classifier_module,
callbacks=[sacred_logger_cls(mock_experiment, log_on_batch_end=True)],
max_epochs=5,
batch_size=4,
train_split=False
)
net.fit(*data)
# 5 epochs x (40/4 batches x 2 batch metrics + 2 epoch metrics) = 110 calls
assert mock_experiment.log_scalar.call_count == 110
mock_experiment.log_scalar.assert_any_call('train_batch_size_batch', 4)
logged_keys = [
call_args.args[0] for call_args in mock_experiment.log_scalar.call_args_list
]
# This is a batch-only metric.
assert 'train_batch_size_epoch' not in logged_keys
def test_log_on_batch_level_off(
self,
net_cls,
classifier_module,
data,
sacred_logger_cls,
mock_experiment,
):
net = net_cls(
classifier_module,
callbacks=[sacred_logger_cls(mock_experiment, log_on_batch_end=False)],
max_epochs=5,
batch_size=4,
train_split=False
)
net.fit(*data)
# 5 epochs x 2 epoch metrics = 10 calls
assert mock_experiment.log_scalar.call_count == 10
call_args_list = mock_experiment.log_scalar.call_args_list
assert call('train_batch_size_batch', 4) not in call_args_list
@pytest.mark.skipif(
not wandb_installed, reason='wandb is not installed')
| TestSacred |
python | has2k1__plotnine | tests/test_layout.py | {
"start": 8282,
"end": 12804
} | class ____:
g = (
ggplot(data)
+ geom_point(aes("x", "y", color="x"))
+ annotate(
"point", x=5, y=5, color="green", shape="s", size=10, alpha=0.5
)
+ annotate("vline", xintercept=5, color="green", size=0.5, alpha=0.5)
+ annotate("hline", yintercept=5, color="green", size=0.5, alpha=0.5)
+ annotate(
"vline",
xintercept=[2.5, 7.5],
color="green",
size=0.5,
alpha=0.125,
)
+ annotate(
"hline",
yintercept=[2.5, 7.5],
color="green",
size=0.5,
alpha=0.125,
)
+ labs(
tag="ABC",
title="The Title of the Plot",
subtitle="The Subtitle of the Plot is Longer than the title",
)
+ coord_cartesian(expand=False)
+ theme_matplotlib()
+ theme(
plot_tag=element_text(color="red"),
plot_background=element_rect(color="black"),
)
)
def test_default_tag_location(self):
assert self.g == "default_tag_location"
def test_margin_bottom(self):
p = self.g + theme(
plot_tag_location="margin",
plot_tag_position="bottom",
)
assert p == "margin_bottom"
def test_plot_topright(self):
p = self.g + theme(
plot_tag_location="plot",
plot_tag_position="topright",
)
assert p == "plot_topright"
def test_plot_topright_margin_in_fig(self):
p = self.g + theme(
plot_tag=element_text(
margin={"t": 0.025, "r": 0.45, "unit": "fig"}
),
plot_tag_location="plot",
plot_tag_position="topright",
)
assert p == "plot_topright_margin_in_fig"
def test_plot_topright_margin_in_pt(self):
p = self.g + theme(
plot_tag=element_text(margin={"t": 10, "r": 200, "unit": "pt"}),
plot_tag_location="plot",
plot_tag_position="topright",
)
assert p == "plot_topright_margin_in_pt"
def test_panel_bottomleft(self):
p = self.g + theme(
plot_tag_location="panel",
plot_tag_position="bottomleft",
)
assert p == "panel_bottomleft"
def test_panel_topleft_margin_in_fig(self):
# In the center of the bottom-left quadrant
p = self.g + theme(
plot_tag=element_text(
margin={"t": 0.75, "l": 0.75, "unit": "fig"}
),
plot_tag_location="panel",
plot_tag_position="topleft",
)
assert p == "panel_topleft_margin_in_fig"
def test_panel_topleft_margin_in_pt(self):
# The topleft point fo the panel and that of the tag
# should define a square space. This is easier to confirm
# when the aspect_ratio = 1
p = self.g + theme(
plot_tag=element_text(margin={"t": 150, "l": 150, "unit": "pt"}),
plot_tag_location="panel",
plot_tag_position="topleft",
)
assert p == "panel_topleft_margin_in_pt"
def test_panel_topleft_margin_in_pt_ar1(self):
# The bottomleft point fo the panel and that of the tag
# should define a square space.
# When the margins are equal the tag should fall along the diagonal
p = self.g + theme(
aspect_ratio=1,
plot_tag=element_text(margin={"b": 150, "l": 150, "unit": "pt"}),
plot_tag_location="panel",
plot_tag_position="bottomleft",
)
assert p == "panel_topleft_margin_in_pt_ar1"
def test_plot_xycoords(self):
p = self.g + theme(
plot_tag_location="plot",
plot_tag_position=(0.15, 0.95),
)
assert p == "plot_xycoords"
def test_panel_xycoords(self):
p = self.g + theme(
plot_tag_location="panel",
plot_tag_position=(0.15, 0.95),
)
assert p == "panel_xycoords"
def test_axis_text_justification():
data = pd.DataFrame(
{
"x": ["A", "BB", "CCCC", "DDDDD\nDDDDD"],
"y": ["A", "BB", "CCCC", "DDDDDDDDDD"],
}
)
p = (
ggplot(data, aes(x="x", y="y"))
+ geom_point()
+ theme(
axis_text_y=element_text(ha="left"),
axis_text_x=element_text(va="bottom"),
)
)
assert p == "axis_text_justification"
| TestPlotTagLayout |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassDescriptors2.py | {
"start": 992,
"end": 1599
} | class ____:
x: Desc[int]
y: Desc[str]
z: Desc[str] = Desc()
reveal_type(B.x, expected_text="A[int]")
reveal_type(B.y, expected_text="A[str]")
reveal_type(B.z, expected_text="A[str]")
reveal_type(C.x, expected_text="A[int]")
reveal_type(C.y, expected_text="A[str]")
reveal_type(C.z, expected_text="A[str]")
b = B(Desc(), Desc(), Desc())
reveal_type(b.x, expected_text="int")
reveal_type(b.y, expected_text="str")
reveal_type(b.z, expected_text="str")
c = C(Desc(), Desc(), Desc())
reveal_type(c.x, expected_text="int")
reveal_type(c.y, expected_text="str")
reveal_type(c.z, expected_text="str")
| C |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 5993,
"end": 6809
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
missing = helper_functions.get_value("MissingValues")
num_missing = missing.sum(axis=1)
return float(np.sum([1 if num > 0 else 0 for num in num_missing]))
def _calculate_sparse(self, X, y, logger, feat_type):
missing = helper_functions.get_value("MissingValues")
new_missing = missing.tocsr()
num_missing = [
np.sum(new_missing.data[new_missing.indptr[i] : new_missing.indptr[i + 1]])
for i in range(new_missing.shape[0])
]
return float(np.sum([1 if num > 0 else 0 for num in num_missing]))
@metafeatures.define(
"PercentageOfInstancesWithMissingValues",
dependency="NumberOfInstancesWithMissingValues",
)
| NumberOfInstancesWithMissingValues |
python | pytorch__pytorch | test/inductor/test_smoke.py | {
"start": 314,
"end": 637
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.l1 = torch.nn.Linear(1, 6)
self.l2 = torch.nn.Linear(6, 1)
def forward(self, x=None):
x = torch.relu(self.l1(x))
x = torch.relu(self.l2(x))
return x
def _test_f(x):
return x * x
| MLP |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 14126,
"end": 16973
} | class ____(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| BaseModelOutputWithPastAndCrossAttentions |
python | aio-libs__aiohttp | tests/test_streams.py | {
"start": 1754,
"end": 34986
} | class ____:
DATA: bytes = b"line1\nline2\nline3\n"
def _make_one(self, limit: int = 2**16) -> streams.StreamReader:
loop = asyncio.get_event_loop()
return streams.StreamReader(mock.Mock(_reading_paused=False), limit, loop=loop)
async def test_create_waiter(self) -> None:
loop = asyncio.get_event_loop()
stream = self._make_one()
stream._waiter = loop.create_future # type: ignore[assignment]
with pytest.raises(RuntimeError):
await stream._wait("test")
async def test_at_eof(self) -> None:
stream = self._make_one()
assert not stream.at_eof()
stream.feed_data(b"some data\n")
assert not stream.at_eof()
await stream.readline()
assert not stream.at_eof()
stream.feed_data(b"some data\n")
stream.feed_eof()
await stream.readline()
assert stream.at_eof()
async def test_wait_eof(self) -> None:
loop = asyncio.get_event_loop()
stream = self._make_one()
wait_task = loop.create_task(stream.wait_eof())
async def cb() -> None:
await asyncio.sleep(0.1)
stream.feed_eof()
t = loop.create_task(cb())
await wait_task
assert stream.is_eof()
assert stream._eof_waiter is None
await t
async def test_wait_eof_eof(self) -> None:
loop = asyncio.get_event_loop()
stream = self._make_one()
stream.feed_eof()
wait_task = loop.create_task(stream.wait_eof())
await wait_task
assert stream.is_eof()
async def test_feed_empty_data(self) -> None:
stream = self._make_one()
stream.feed_data(b"")
stream.feed_eof()
data = await stream.read()
assert b"" == data
async def test_feed_nonempty_data(self) -> None:
stream = self._make_one()
stream.feed_data(self.DATA)
stream.feed_eof()
data = await stream.read()
assert self.DATA == data
async def test_read_zero(self) -> None:
# Read zero bytes.
stream = self._make_one()
stream.feed_data(self.DATA)
data = await stream.read(0)
assert b"" == data
stream.feed_eof()
data = await stream.read()
assert self.DATA == data
async def test_read(self) -> None:
loop = asyncio.get_event_loop()
# Read bytes.
stream = self._make_one()
read_task = loop.create_task(stream.read(30))
def cb() -> None:
stream.feed_data(self.DATA)
loop.call_soon(cb)
data = await read_task
assert self.DATA == data
stream.feed_eof()
data = await stream.read()
assert b"" == data
async def test_read_line_breaks(self) -> None:
# Read bytes without line breaks.
stream = self._make_one()
stream.feed_data(b"line1")
stream.feed_data(b"line2")
data = await stream.read(5)
assert b"line1" == data
data = await stream.read(5)
assert b"line2" == data
async def test_read_all(self) -> None:
# Read all available buffered bytes
stream = self._make_one()
stream.feed_data(b"line1")
stream.feed_data(b"line2")
stream.feed_eof()
data = await stream.read()
assert b"line1line2" == data
async def test_read_up_to(self) -> None:
# Read available buffered bytes up to requested amount
stream = self._make_one()
stream.feed_data(b"line1")
stream.feed_data(b"line2")
data = await stream.read(8)
assert b"line1lin" == data
data = await stream.read(8)
assert b"e2" == data
async def test_read_eof(self) -> None:
loop = asyncio.get_event_loop()
# Read bytes, stop at eof.
stream = self._make_one()
read_task = loop.create_task(stream.read(1024))
def cb() -> None:
stream.feed_eof()
loop.call_soon(cb)
data = await read_task
assert b"" == data
data = await stream.read()
assert data == b""
async def test_read_eof_unread_data_no_warning(self) -> None:
# Read bytes.
stream = self._make_one()
stream.feed_eof()
with mock.patch("aiohttp.streams.internal_logger") as internal_logger:
await stream.read()
await stream.read()
await stream.read()
await stream.read()
await stream.read()
with pytest.deprecated_call(
match=r"^unread_data\(\) is deprecated and will be "
r"removed in future releases \(#3260\)$",
):
stream.unread_data(b"data")
await stream.read()
await stream.read()
assert not internal_logger.warning.called
async def test_read_until_eof(self) -> None:
loop = asyncio.get_event_loop()
# Read all bytes until eof.
stream = self._make_one()
read_task = loop.create_task(stream.read(-1))
def cb() -> None:
stream.feed_data(b"chunk1\n")
stream.feed_data(b"chunk2")
stream.feed_eof()
loop.call_soon(cb)
data = await read_task
assert b"chunk1\nchunk2" == data
data = await stream.read()
assert b"" == data
async def test_read_exception(self) -> None:
stream = self._make_one()
stream.feed_data(b"line\n")
data = await stream.read(2)
assert b"li" == data
stream.set_exception(ValueError())
with pytest.raises(ValueError):
await stream.read(2)
async def test_readline(self) -> None:
loop = asyncio.get_event_loop()
# Read one line. 'readline' will need to wait for the data
# to come from 'cb'
stream = self._make_one()
stream.feed_data(b"chunk1 ")
read_task = loop.create_task(stream.readline())
def cb() -> None:
stream.feed_data(b"chunk2 ")
stream.feed_data(b"chunk3 ")
stream.feed_data(b"\n chunk4")
loop.call_soon(cb)
line = await read_task
assert b"chunk1 chunk2 chunk3 \n" == line
stream.feed_eof()
data = await stream.read()
assert b" chunk4" == data
async def test_readline_limit_with_existing_data(self) -> None:
# Read one line. The data is in StreamReader's buffer
# before the event loop is run.
stream = self._make_one(limit=2)
stream.feed_data(b"li")
stream.feed_data(b"ne1\nline2\n")
with pytest.raises(ValueError):
await stream.readline()
# The buffer should contain the remaining data after exception
stream.feed_eof()
data = await stream.read()
assert b"line2\n" == data
async def test_readline_limit(self) -> None:
loop = asyncio.get_event_loop()
# Read one line. StreamReaders are fed with data after
# their 'readline' methods are called.
stream = self._make_one(limit=4)
def cb() -> None:
stream.feed_data(b"chunk1")
stream.feed_data(b"chunk2\n")
stream.feed_data(b"chunk3\n")
stream.feed_eof()
loop.call_soon(cb)
with pytest.raises(ValueError):
await stream.readline()
data = await stream.read()
assert b"chunk3\n" == data
async def test_readline_nolimit_nowait(self) -> None:
# All needed data for the first 'readline' call will be
# in the buffer.
stream = self._make_one()
stream.feed_data(self.DATA[:6])
stream.feed_data(self.DATA[6:])
line = await stream.readline()
assert b"line1\n" == line
stream.feed_eof()
data = await stream.read()
assert b"line2\nline3\n" == data
async def test_readline_eof(self) -> None:
stream = self._make_one()
stream.feed_data(b"some data")
stream.feed_eof()
line = await stream.readline()
assert b"some data" == line
async def test_readline_empty_eof(self) -> None:
stream = self._make_one()
stream.feed_eof()
line = await stream.readline()
assert b"" == line
async def test_readline_read_byte_count(self) -> None:
stream = self._make_one()
stream.feed_data(self.DATA)
await stream.readline()
data = await stream.read(7)
assert b"line2\nl" == data
stream.feed_eof()
data = await stream.read()
assert b"ine3\n" == data
async def test_readline_exception(self) -> None:
stream = self._make_one()
stream.feed_data(b"line\n")
data = await stream.readline()
assert b"line\n" == data
stream.set_exception(ValueError())
with pytest.raises(ValueError):
await stream.readline()
@pytest.mark.parametrize("separator", [b"*", b"**"])
async def test_readuntil(self, separator: bytes) -> None:
loop = asyncio.get_event_loop()
# Read one chunk. 'readuntil' will need to wait for the data
# to come from 'cb'
stream = self._make_one()
stream.feed_data(b"chunk1 ")
read_task = loop.create_task(stream.readuntil(separator))
def cb() -> None:
stream.feed_data(b"chunk2 ")
stream.feed_data(b"chunk3 ")
stream.feed_data(separator + b" chunk4")
loop.call_soon(cb)
line = await read_task
assert b"chunk1 chunk2 chunk3 " + separator == line
stream.feed_eof()
data = await stream.read()
assert b" chunk4" == data
@pytest.mark.parametrize("separator", [b"&", b"&&"])
async def test_readuntil_limit_with_existing_data(self, separator: bytes) -> None:
# Read one chunk. The data is in StreamReader's buffer
# before the event loop is run.
stream = self._make_one(limit=2)
stream.feed_data(b"li")
stream.feed_data(b"ne1" + separator + b"line2" + separator)
with pytest.raises(ValueError):
await stream.readuntil(separator)
# The buffer should contain the remaining data after exception
stream.feed_eof()
data = await stream.read()
assert b"line2" + separator == data
@pytest.mark.parametrize("separator", [b"$", b"$$"])
async def test_readuntil_limit(self, separator: bytes) -> None:
loop = asyncio.get_event_loop()
# Read one chunk. StreamReaders are fed with data after
# their 'readuntil' methods are called.
stream = self._make_one(limit=4)
def cb() -> None:
stream.feed_data(b"chunk1")
stream.feed_data(b"chunk2" + separator)
stream.feed_data(b"chunk3#")
stream.feed_eof()
loop.call_soon(cb)
with pytest.raises(ValueError, match="Chunk too big"):
await stream.readuntil(separator)
data = await stream.read()
assert b"chunk3#" == data
@pytest.mark.parametrize("separator", [b"!", b"!!"])
async def test_readuntil_nolimit_nowait(self, separator: bytes) -> None:
# All needed data for the first 'readuntil' call will be
# in the buffer.
seplen = len(separator)
stream = self._make_one()
data = b"line1" + separator + b"line2" + separator + b"line3" + separator
stream.feed_data(data[: 5 + seplen])
stream.feed_data(data[5 + seplen :])
line = await stream.readuntil(separator)
assert b"line1" + separator == line
stream.feed_eof()
data = await stream.read()
assert b"line2" + separator + b"line3" + separator == data
@pytest.mark.parametrize("separator", [b"@", b"@@"])
async def test_readuntil_eof(self, separator: bytes) -> None:
stream = self._make_one()
stream.feed_data(b"some data")
stream.feed_eof()
line = await stream.readuntil(separator)
assert b"some data" == line
@pytest.mark.parametrize("separator", [b"@", b"@@"])
async def test_readuntil_empty_eof(self, separator: bytes) -> None:
stream = self._make_one()
stream.feed_eof()
line = await stream.readuntil(separator)
assert b"" == line
@pytest.mark.parametrize("separator", [b"!", b"!!"])
async def test_readuntil_read_byte_count(self, separator: bytes) -> None:
seplen = len(separator)
stream = self._make_one()
stream.feed_data(
b"line1" + separator + b"line2" + separator + b"line3" + separator
)
await stream.readuntil(separator)
data = await stream.read(6 + seplen)
assert b"line2" + separator + b"l" == data
stream.feed_eof()
data = await stream.read()
assert b"ine3" + separator == data
@pytest.mark.parametrize("separator", [b"#", b"##"])
async def test_readuntil_exception(self, separator: bytes) -> None:
stream = self._make_one()
stream.feed_data(b"line" + separator)
data = await stream.readuntil(separator)
assert b"line" + separator == data
stream.set_exception(ValueError("Another exception"))
with pytest.raises(ValueError, match="Another exception"):
await stream.readuntil(separator)
async def test_readexactly_zero_or_less(self) -> None:
# Read exact number of bytes (zero or less).
stream = self._make_one()
stream.feed_data(self.DATA)
data = await stream.readexactly(0)
assert b"" == data
stream.feed_eof()
data = await stream.read()
assert self.DATA == data
stream = self._make_one()
stream.feed_data(self.DATA)
data = await stream.readexactly(-1)
assert b"" == data
stream.feed_eof()
data = await stream.read()
assert self.DATA == data
async def test_readexactly(self) -> None:
loop = asyncio.get_event_loop()
# Read exact number of bytes.
stream = self._make_one()
n = 2 * len(self.DATA)
read_task = loop.create_task(stream.readexactly(n))
def cb() -> None:
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
loop.call_soon(cb)
data = await read_task
assert self.DATA + self.DATA == data
stream.feed_eof()
data = await stream.read()
assert self.DATA == data
async def test_readexactly_eof(self) -> None:
loop = asyncio.get_event_loop()
# Read exact number of bytes (eof).
stream = self._make_one()
n = 2 * len(self.DATA)
read_task = loop.create_task(stream.readexactly(n))
def cb() -> None:
stream.feed_data(self.DATA)
stream.feed_eof()
loop.call_soon(cb)
with pytest.raises(asyncio.IncompleteReadError) as cm:
await read_task
assert cm.value.partial == self.DATA
assert cm.value.expected == n
assert str(cm.value) == "18 bytes read on a total of 36 expected bytes"
data = await stream.read()
assert b"" == data
async def test_readexactly_exception(self) -> None:
stream = self._make_one()
stream.feed_data(b"line\n")
data = await stream.readexactly(2)
assert b"li" == data
stream.set_exception(ValueError())
with pytest.raises(ValueError):
await stream.readexactly(2)
async def test_unread_data(self) -> None:
stream = self._make_one()
stream.feed_data(b"line1")
stream.feed_data(b"line2")
stream.feed_data(b"onemoreline")
data = await stream.read(5)
assert b"line1" == data
with pytest.deprecated_call(
match=r"^unread_data\(\) is deprecated and will be "
r"removed in future releases \(#3260\)$",
):
stream.unread_data(data)
data = await stream.read(5)
assert b"line1" == data
data = await stream.read(4)
assert b"line" == data
with pytest.deprecated_call(
match=r"^unread_data\(\) is deprecated and will be "
r"removed in future releases \(#3260\)$",
):
stream.unread_data(b"line1line")
data = b""
while len(data) < 10:
data += await stream.read(10)
assert b"line1line2" == data
data = await stream.read(7)
assert b"onemore" == data
with pytest.deprecated_call(
match=r"^unread_data\(\) is deprecated and will be "
r"removed in future releases \(#3260\)$",
):
stream.unread_data(data)
data = b""
while len(data) < 11:
data += await stream.read(11)
assert b"onemoreline" == data
with pytest.deprecated_call(
match=r"^unread_data\(\) is deprecated and will be "
r"removed in future releases \(#3260\)$",
):
stream.unread_data(b"line")
data = await stream.read(4)
assert b"line" == data
stream.feed_eof()
with pytest.deprecated_call(
match=r"^unread_data\(\) is deprecated and will be "
r"removed in future releases \(#3260\)$",
):
stream.unread_data(b"at_eof")
data = await stream.read(6)
assert b"at_eof" == data
async def test_exception(self) -> None:
stream = self._make_one()
assert stream.exception() is None
exc = ValueError()
stream.set_exception(exc)
assert stream.exception() is exc
async def test_exception_waiter(self) -> None:
loop = asyncio.get_event_loop()
stream = self._make_one()
async def set_err() -> None:
stream.set_exception(ValueError())
t1 = loop.create_task(stream.readline())
t2 = loop.create_task(set_err())
await asyncio.wait((t1, t2))
with pytest.raises(ValueError):
t1.result()
async def test_exception_cancel(self) -> None:
loop = asyncio.get_event_loop()
stream = self._make_one()
async def read_a_line() -> None:
await stream.readline()
t = loop.create_task(read_a_line())
await asyncio.sleep(0)
t.cancel()
await asyncio.sleep(0)
# The following line fails if set_exception() isn't careful.
stream.set_exception(RuntimeError("message"))
await asyncio.sleep(0)
assert stream._waiter is None
async def test_readany_eof(self) -> None:
loop = asyncio.get_event_loop()
stream = self._make_one()
read_task = loop.create_task(stream.readany())
loop.call_soon(stream.feed_data, b"chunk1\n")
data = await read_task
assert b"chunk1\n" == data
stream.feed_eof()
data = await stream.read()
assert b"" == data
async def test_readany_empty_eof(self) -> None:
loop = asyncio.get_event_loop()
stream = self._make_one()
stream.feed_eof()
read_task = loop.create_task(stream.readany())
data = await read_task
assert b"" == data
async def test_readany_exception(self) -> None:
stream = self._make_one()
stream.feed_data(b"line\n")
data = await stream.readany()
assert b"line\n" == data
stream.set_exception(ValueError())
with pytest.raises(ValueError):
await stream.readany()
async def test_read_nowait(self) -> None:
stream = self._make_one()
stream.feed_data(b"line1\nline2\n")
assert stream.read_nowait() == b"line1\nline2\n"
assert stream.read_nowait() == b""
stream.feed_eof()
data = await stream.read()
assert b"" == data
async def test_read_nowait_n(self) -> None:
stream = self._make_one()
stream.feed_data(b"line1\nline2\n")
assert stream.read_nowait(4) == b"line"
assert stream.read_nowait() == b"1\nline2\n"
assert stream.read_nowait() == b""
stream.feed_eof()
data = await stream.read()
assert b"" == data
async def test_read_nowait_exception(self) -> None:
stream = self._make_one()
stream.feed_data(b"line\n")
stream.set_exception(ValueError())
with pytest.raises(ValueError):
stream.read_nowait()
async def test_read_nowait_waiter(self) -> None:
loop = asyncio.get_event_loop()
stream = self._make_one()
stream.feed_data(b"line\n")
stream._waiter = loop.create_future()
with pytest.raises(RuntimeError):
stream.read_nowait()
async def test_readchunk(self) -> None:
loop = asyncio.get_event_loop()
stream = self._make_one()
def cb() -> None:
stream.feed_data(b"chunk1")
stream.feed_data(b"chunk2")
stream.feed_eof()
loop.call_soon(cb)
data, end_of_chunk = await stream.readchunk()
assert b"chunk1" == data
assert not end_of_chunk
data, end_of_chunk = await stream.readchunk()
assert b"chunk2" == data
assert not end_of_chunk
data, end_of_chunk = await stream.readchunk()
assert b"" == data
assert not end_of_chunk
async def test_readchunk_wait_eof(self) -> None:
loop = asyncio.get_event_loop()
stream = self._make_one()
async def cb() -> None:
await asyncio.sleep(0.1)
stream.feed_eof()
t = loop.create_task(cb())
data, end_of_chunk = await stream.readchunk()
assert b"" == data
assert not end_of_chunk
assert stream.is_eof()
await t
async def test_begin_and_end_chunk_receiving(self) -> None:
stream = self._make_one()
stream.begin_http_chunk_receiving()
stream.feed_data(b"part1")
stream.feed_data(b"part2")
stream.end_http_chunk_receiving()
data, end_of_chunk = await stream.readchunk()
assert b"part1part2" == data
assert end_of_chunk
stream.begin_http_chunk_receiving()
stream.feed_data(b"part3")
data, end_of_chunk = await stream.readchunk()
assert b"part3" == data
assert not end_of_chunk
stream.end_http_chunk_receiving()
data, end_of_chunk = await stream.readchunk()
assert b"" == data
assert end_of_chunk
stream.feed_eof()
data, end_of_chunk = await stream.readchunk()
assert b"" == data
assert not end_of_chunk
async def test_readany_chunk_end_race(self) -> None:
stream = self._make_one()
stream.begin_http_chunk_receiving()
stream.feed_data(b"part1")
data = await stream.readany()
assert data == b"part1"
loop = asyncio.get_event_loop()
task = loop.create_task(stream.readany())
# Give a chance for task to create waiter and start waiting for it.
await asyncio.sleep(0.1)
assert stream._waiter is not None
assert not task.done() # Just for sure.
# This will trigger waiter, but without feeding any data.
# The stream should re-create waiter again.
stream.end_http_chunk_receiving()
# Give a chance for task to resolve.
# If everything is OK, previous action SHOULD NOT resolve the task.
await asyncio.sleep(0.1)
assert not task.done() # The actual test.
stream.begin_http_chunk_receiving()
# This SHOULD unblock the task actually.
stream.feed_data(b"part2")
stream.end_http_chunk_receiving()
data = await task
assert data == b"part2"
async def test_end_chunk_receiving_without_begin(self) -> None:
stream = self._make_one()
with pytest.raises(RuntimeError):
stream.end_http_chunk_receiving()
async def test_readchunk_with_unread(self) -> None:
# Test that stream.unread does not break controlled chunk receiving.
stream = self._make_one()
# Send 2 chunks
stream.begin_http_chunk_receiving()
stream.feed_data(b"part1")
stream.end_http_chunk_receiving()
stream.begin_http_chunk_receiving()
stream.feed_data(b"part2")
stream.end_http_chunk_receiving()
# Read only one chunk
data, end_of_chunk = await stream.readchunk()
# Try to unread a part of the first chunk
with pytest.deprecated_call(
match=r"^unread_data\(\) is deprecated and will be "
r"removed in future releases \(#3260\)$",
):
stream.unread_data(b"rt1")
# The end_of_chunk signal was already received for the first chunk,
# so we receive up to the second one
data, end_of_chunk = await stream.readchunk()
assert b"rt1part2" == data
assert end_of_chunk
# Unread a part of the second chunk
with pytest.deprecated_call(
match=r"^unread_data\(\) is deprecated and will be "
r"removed in future releases \(#3260\)$",
):
stream.unread_data(b"rt2")
data, end_of_chunk = await stream.readchunk()
assert b"rt2" == data
# end_of_chunk was already received for this chunk
assert not end_of_chunk
stream.feed_eof()
data, end_of_chunk = await stream.readchunk()
assert b"" == data
assert not end_of_chunk
async def test_readchunk_with_other_read_calls(self) -> None:
# Test that stream.readchunk works when other read calls are made on
# the stream.
stream = self._make_one()
stream.begin_http_chunk_receiving()
stream.feed_data(b"part1")
stream.end_http_chunk_receiving()
stream.begin_http_chunk_receiving()
stream.feed_data(b"part2")
stream.end_http_chunk_receiving()
stream.begin_http_chunk_receiving()
stream.feed_data(b"part3")
stream.end_http_chunk_receiving()
data = await stream.read(7)
assert b"part1pa" == data
data, end_of_chunk = await stream.readchunk()
assert b"rt2" == data
assert end_of_chunk
# Corner case between read/readchunk
data = await stream.read(5)
assert b"part3" == data
data, end_of_chunk = await stream.readchunk()
assert b"" == data
assert end_of_chunk
stream.feed_eof()
data, end_of_chunk = await stream.readchunk()
assert b"" == data
assert not end_of_chunk
async def test_chunksplits_memory_leak(self) -> None:
# Test for memory leak on chunksplits
stream = self._make_one()
N = 500
# Warm-up variables
stream.begin_http_chunk_receiving()
stream.feed_data(b"Y" * N)
stream.end_http_chunk_receiving()
await stream.read(N)
N = 300
before = get_memory_usage(stream)
for _ in range(N):
stream.begin_http_chunk_receiving()
stream.feed_data(b"X")
stream.end_http_chunk_receiving()
await stream.read(N)
after = get_memory_usage(stream)
assert abs(after - before) == 0
async def test_read_empty_chunks(self) -> None:
# Test that feeding empty chunks does not break stream
stream = self._make_one()
# Simulate empty first chunk. This is significant special case
stream.begin_http_chunk_receiving()
stream.end_http_chunk_receiving()
stream.begin_http_chunk_receiving()
stream.feed_data(b"ungzipped")
stream.end_http_chunk_receiving()
# Possible when compression is enabled.
stream.begin_http_chunk_receiving()
stream.end_http_chunk_receiving()
# is also possible
stream.begin_http_chunk_receiving()
stream.end_http_chunk_receiving()
stream.begin_http_chunk_receiving()
stream.feed_data(b" data")
stream.end_http_chunk_receiving()
stream.feed_eof()
data = await stream.read()
assert data == b"ungzipped data"
async def test_readchunk_separate_http_chunk_tail(self) -> None:
# Test that stream.readchunk returns (b'', True) when end of
# http chunk received after body
loop = asyncio.get_event_loop()
stream = self._make_one()
stream.begin_http_chunk_receiving()
stream.feed_data(b"part1")
data, end_of_chunk = await stream.readchunk()
assert b"part1" == data
assert not end_of_chunk
async def cb() -> None:
await asyncio.sleep(0.1)
stream.end_http_chunk_receiving()
t = loop.create_task(cb())
data, end_of_chunk = await stream.readchunk()
assert b"" == data
assert end_of_chunk
stream.begin_http_chunk_receiving()
stream.feed_data(b"part2")
data, end_of_chunk = await stream.readchunk()
assert b"part2" == data
assert not end_of_chunk
stream.end_http_chunk_receiving()
stream.begin_http_chunk_receiving()
stream.feed_data(b"part3")
stream.end_http_chunk_receiving()
data, end_of_chunk = await stream.readchunk()
assert b"" == data
assert end_of_chunk
data, end_of_chunk = await stream.readchunk()
assert b"part3" == data
assert end_of_chunk
stream.begin_http_chunk_receiving()
stream.feed_data(b"part4")
data, end_of_chunk = await stream.readchunk()
assert b"part4" == data
assert not end_of_chunk
await t
async def cb2() -> None:
await asyncio.sleep(0.1)
stream.end_http_chunk_receiving()
stream.feed_eof()
t = loop.create_task(cb2())
data, end_of_chunk = await stream.readchunk()
assert b"" == data
assert end_of_chunk
data, end_of_chunk = await stream.readchunk()
assert b"" == data
assert not end_of_chunk
await t
async def test___repr__(self) -> None:
stream = self._make_one()
assert "<StreamReader>" == repr(stream)
async def test___repr__nondefault_limit(self) -> None:
stream = self._make_one(limit=123)
assert "<StreamReader low=123 high=246>" == repr(stream)
async def test___repr__eof(self) -> None:
stream = self._make_one()
stream.feed_eof()
assert "<StreamReader eof>" == repr(stream)
async def test___repr__data(self) -> None:
stream = self._make_one()
stream.feed_data(b"data")
assert "<StreamReader 4 bytes>" == repr(stream)
async def test___repr__exception(self) -> None:
stream = self._make_one()
exc = RuntimeError()
stream.set_exception(exc)
assert "<StreamReader e=RuntimeError()>" == repr(stream)
async def test___repr__waiter(self) -> None:
loop = asyncio.get_event_loop()
stream = self._make_one()
stream._waiter = loop.create_future()
assert repr(stream).startswith("<StreamReader w=<Future pending")
stream._waiter.set_result(None)
await stream._waiter
stream._waiter = None
assert "<StreamReader>" == repr(stream)
async def test_unread_empty(self) -> None:
stream = self._make_one()
stream.feed_data(b"line1")
stream.feed_eof()
with pytest.deprecated_call(
match=r"^unread_data\(\) is deprecated and will be "
r"removed in future releases \(#3260\)$",
):
stream.unread_data(b"")
data = await stream.read(5)
assert b"line1" == data
assert stream.at_eof()
async def test_empty_stream_reader() -> None:
s = streams.EmptyStreamReader()
assert str(s) is not None
assert repr(s) == "<EmptyStreamReader>"
assert s.set_exception(ValueError()) is None # type: ignore[func-returns-value]
assert s.exception() is None
assert s.feed_eof() is None # type: ignore[func-returns-value]
assert s.feed_data(b"data") is None # type: ignore[func-returns-value]
assert s.at_eof()
await s.wait_eof()
assert await s.read() == b""
assert await s.readline() == b""
assert await s.readany() == b""
assert await s.readchunk() == (b"", False)
assert await s.readchunk() == (b"", True)
with pytest.raises(asyncio.IncompleteReadError):
await s.readexactly(10)
assert s.read_nowait() == b""
assert s.total_bytes == 0
async def test_empty_stream_reader_iter_chunks() -> None:
s = streams.EmptyStreamReader()
# check that iter_chunks() does not cause infinite loop
iter_chunks = s.iter_chunks()
with pytest.raises(StopAsyncIteration):
await iter_chunks.__anext__()
@pytest.fixture
async def buffer(loop: asyncio.AbstractEventLoop) -> streams.DataQueue[bytes]:
return streams.DataQueue(loop)
| TestStreamReader |
python | doocs__leetcode | lcof2/ๅๆ Offer II 016. ไธๅซ้ๅคๅญ็ฌฆ็ๆ้ฟๅญๅญ็ฌฆไธฒ/Solution.py | {
"start": 0,
"end": 305
} | class ____:
def lengthOfLongestSubstring(self, s: str) -> int:
ss = set()
ans = j = 0
for i, c in enumerate(s):
while c in ss:
ss.remove(s[j])
j += 1
ans = max(ans, i - j + 1)
ss.add(c)
return ans
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels27.py | {
"start": 315,
"end": 1573
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels27.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [48514944, 48516480]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
[10, 20, 30, 40, 50],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {"value": True, "custom": [{"value": "=Sheet1!$D$1"}]},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py | {
"start": 830,
"end": 2350
} | class ____:
def __init__(self, batch_update_size):
self.model = nn.Linear(in_features, out_features)
self.lock = threading.Lock()
self.future_model = torch.futures.Future()
self.batch_update_size = batch_update_size
self.curr_update_size = 0
self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9)
for p in self.model.parameters():
p.grad = torch.zeros_like(p)
def get_model(self):
return self.model
@staticmethod
@rpc.functions.async_execution
def update_and_fetch_model(ps_rref, grads):
self = ps_rref.local_value()
for p, g in zip(self.model.parameters(), grads, strict=True):
if p.grad is None:
p.grad = g
else:
p.grad += g
with self.lock:
timed_log(
f"PS got {self.curr_update_size}/{self.batch_update_size} updates"
)
self.curr_update_size += 1
fut = self.future_model
if self.curr_update_size >= self.batch_update_size:
for p in self.model.parameters():
p.grad /= self.batch_update_size
self.curr_update_size = 0
self.optimizer.step()
self.optimizer.zero_grad()
fut.set_result(self.model)
timed_log("PS updated model")
self.future_model = torch.futures.Future()
return fut
| BatchUpdateParameterServer |
python | spack__spack | lib/spack/spack/test/repo.py | {
"start": 18053,
"end": 18269
} | class ____(PackageBase):
pass
"""
)
(repo_dir / "packages" / "UPPERCASE").mkdir()
(repo_dir / "packages" / "UPPERCASE" / "package.py").write_text(
"""
from spack.package import PackageBase
| ZlibNg |
python | sympy__sympy | sympy/stats/stochastic_process_types.py | {
"start": 57316,
"end": 61470
} | class ____(ContinuousTimeStochasticProcess, MarkovProcess):
"""
Represents continuous time Markov chain.
Parameters
==========
sym : Symbol/str
state_space : Set
Optional, by default, S.Reals
gen_mat : Matrix/ImmutableMatrix/MatrixSymbol
Optional, by default, None
Examples
========
>>> from sympy.stats import ContinuousMarkovChain, P
>>> from sympy import Matrix, S, Eq, Gt
>>> G = Matrix([[-S(1), S(1)], [S(1), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1], gen_mat=G)
>>> C.limiting_distribution()
Matrix([[1/2, 1/2]])
>>> C.state_space
{0, 1}
>>> C.generator_matrix
Matrix([
[-1, 1],
[ 1, -1]])
Probability queries are supported
>>> P(Eq(C(1.96), 0), Eq(C(0.78), 1)).round(5)
0.45279
>>> P(Gt(C(1.7), 0), Eq(C(0.82), 1)).round(5)
0.58602
Probability of expressions with multiple RandomIndexedSymbols
can also be calculated provided there is only 1 RandomIndexedSymbol
in the given condition. It is always better to use Rational instead
of floating point numbers for the probabilities in the
generator matrix to avoid errors.
>>> from sympy import Gt, Le, Rational
>>> G = Matrix([[-S(1), Rational(1, 10), Rational(9, 10)], [Rational(2, 5), -S(1), Rational(3, 5)], [Rational(1, 2), Rational(1, 2), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1, 2], gen_mat=G)
>>> P(Eq(C(3.92), C(1.75)), Eq(C(0.46), 0)).round(5)
0.37933
>>> P(Gt(C(3.92), C(1.75)), Eq(C(0.46), 0)).round(5)
0.34211
>>> P(Le(C(1.57), C(3.14)), Eq(C(1.22), 1)).round(4)
0.7143
Symbolic probability queries are also supported
>>> from sympy import symbols
>>> a,b,c,d = symbols('a b c d')
>>> G = Matrix([[-S(1), Rational(1, 10), Rational(9, 10)], [Rational(2, 5), -S(1), Rational(3, 5)], [Rational(1, 2), Rational(1, 2), -S(1)]])
>>> C = ContinuousMarkovChain('C', state_space=[0, 1, 2], gen_mat=G)
>>> query = P(Eq(C(a), b), Eq(C(c), d))
>>> query.subs({a:3.65, b:2, c:1.78, d:1}).evalf().round(10)
0.4002723175
>>> P(Eq(C(3.65), 2), Eq(C(1.78), 1)).round(10)
0.4002723175
>>> query_gt = P(Gt(C(a), b), Eq(C(c), d))
>>> query_gt.subs({a:43.2, b:0, c:3.29, d:2}).evalf().round(10)
0.6832579186
>>> P(Gt(C(43.2), 0), Eq(C(3.29), 2)).round(10)
0.6832579186
References
==========
.. [1] https://en.wikipedia.org/wiki/Markov_chain#Continuous-time_Markov_chain
.. [2] https://u.math.biu.ac.il/~amirgi/CTMCnotes.pdf
"""
index_set = S.Reals
def __new__(cls, sym, state_space=None, gen_mat=None):
sym = _symbol_converter(sym)
state_space, gen_mat = MarkovProcess._sanity_checks(state_space, gen_mat)
obj = Basic.__new__(cls, sym, state_space, gen_mat)
indices = {}
if isinstance(obj.number_of_states, Integer):
for index, state in enumerate(obj.state_space):
indices[state] = index
obj.index_of = indices
return obj
@property
def generator_matrix(self):
return self.args[2]
@cacheit
def transition_probabilities(self, gen_mat=None):
t = Dummy('t')
if isinstance(gen_mat, (Matrix, ImmutableMatrix)) and \
gen_mat.is_diagonalizable():
# for faster computation use diagonalized generator matrix
Q, D = gen_mat.diagonalize()
return Lambda(t, Q*exp(t*D)*Q.inv())
if gen_mat != None:
return Lambda(t, exp(t*gen_mat))
def limiting_distribution(self):
gen_mat = self.generator_matrix
if gen_mat is None:
return None
if isinstance(gen_mat, MatrixSymbol):
wm = MatrixSymbol('wm', 1, gen_mat.shape[0])
return Lambda((wm, gen_mat), Eq(wm*gen_mat, wm))
w = IndexedBase('w')
wi = [w[i] for i in range(gen_mat.shape[0])]
wm = Matrix([wi])
eqs = (wm*gen_mat).tolist()[0]
eqs.append(sum(wi) - 1)
soln = list(linsolve(eqs, wi))[0]
return ImmutableMatrix([soln])
| ContinuousMarkovChain |
python | conda__conda | conda/plugins/types.py | {
"start": 15090,
"end": 15616
} | class ____(CondaPlugin):
"""
Define new loaders to expose non-conda packages in a given prefix
as ``PrefixRecord`` objects.
:param name: name of the loader
:param loader: a function that takes a prefix and a dictionary that maps
package names to ``PrefixRecord`` objects. The newly loaded packages
must be inserted in the passed dictionary accordingly, and also
returned as a separate dictionary.
"""
name: str
loader: CondaPrefixDataLoaderCallable
| CondaPrefixDataLoader |
python | numpy__numpy | numpy/_core/tests/test_scalarinherit.py | {
"start": 281,
"end": 368
} | class ____:
def __new__(cls, *args, **kwargs):
return cls, args, kwargs
| HasNew |
python | run-llama__llama_index | llama-index-core/llama_index/core/base/base_query_engine.py | {
"start": 707,
"end": 3319
} | class ____(PromptMixin, DispatcherSpanMixin):
"""Base query engine."""
def __init__(
self,
callback_manager: Optional[CallbackManager],
) -> None:
self.callback_manager = callback_manager or CallbackManager([])
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
@dispatcher.span
def query(self, str_or_query_bundle: QueryType) -> RESPONSE_TYPE:
dispatcher.event(QueryStartEvent(query=str_or_query_bundle))
with self.callback_manager.as_trace("query"):
if isinstance(str_or_query_bundle, str):
str_or_query_bundle = QueryBundle(str_or_query_bundle)
query_result = self._query(str_or_query_bundle)
dispatcher.event(
QueryEndEvent(query=str_or_query_bundle, response=query_result)
)
return query_result
@dispatcher.span
async def aquery(self, str_or_query_bundle: QueryType) -> RESPONSE_TYPE:
dispatcher.event(QueryStartEvent(query=str_or_query_bundle))
with self.callback_manager.as_trace("query"):
if isinstance(str_or_query_bundle, str):
str_or_query_bundle = QueryBundle(str_or_query_bundle)
query_result = await self._aquery(str_or_query_bundle)
dispatcher.event(
QueryEndEvent(query=str_or_query_bundle, response=query_result)
)
return query_result
def retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
raise NotImplementedError(
"This query engine does not support retrieve, use query directly"
)
def synthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
raise NotImplementedError(
"This query engine does not support synthesize, use query directly"
)
async def asynthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
raise NotImplementedError(
"This query engine does not support asynthesize, use aquery directly"
)
@abstractmethod
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
pass
@abstractmethod
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
pass
| BaseQueryEngine |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/freshness_tests/test_freshness_evaluation.py | {
"start": 2073,
"end": 14289
} | class ____:
@pytest.mark.asyncio
async def test_freshness_pass(self, instance: DagsterInstance):
"""Test that an asset with a time window freshness policy is evaluated as fresh if its last materialization was within the fail window."""
def create_defs() -> dg.Definitions:
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(hours=24),
)
)
def asset_with_policy():
return 1
return dg.Definitions(assets=[asset_with_policy])
evaluator = TimeWindowFreshnessPolicyEvaluator()
with setup_remote_repo(instance=instance, fn=create_defs) as workspace_context:
asset_graph = workspace_context.create_request_context().asset_graph
asset_key = dg.AssetKey("asset_with_policy")
start_time = datetime.datetime.now(datetime.timezone.utc)
frozen_time = start_time
# Asset should be fresh immediately after materialization
with freeze_time(frozen_time):
store_mat(instance, asset_key, frozen_time)
asset_node = asset_graph.remote_asset_nodes_by_key[asset_key]
ctx = cast("LoadingContext", workspace_context.create_request_context())
freshness_state = await evaluator.evaluate_freshness(context=ctx, node=asset_node)
assert freshness_state == FreshnessState.PASS
# Asset should still be fresh up until the very end of the fail window
frozen_time += datetime.timedelta(hours=23, minutes=59, seconds=59)
with freeze_time(frozen_time):
ctx = cast("LoadingContext", workspace_context.create_request_context())
freshness_state = await evaluator.evaluate_freshness(context=ctx, node=asset_node)
assert freshness_state == FreshnessState.PASS
@pytest.mark.asyncio
async def test_freshness_pass_varying_fail_window(self, instance: DagsterInstance):
"""Same as test_freshness_pass, but with different fail windows ranging from minutes to months."""
def create_defs() -> dg.Definitions:
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(minutes=10),
)
)
def asset_10min():
return 1
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(days=10),
)
)
def asset_10days():
return 2
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(days=30),
)
)
def asset_1month():
return 3
return dg.Definitions(assets=[asset_10min, asset_10days, asset_1month])
evaluator = TimeWindowFreshnessPolicyEvaluator()
with setup_remote_repo(instance=instance, fn=create_defs) as workspace_context:
asset_graph = workspace_context.create_request_context().asset_graph
asset_10min_key = dg.AssetKey("asset_10min")
asset_10days_key = dg.AssetKey("asset_10days")
asset_1month_key = dg.AssetKey("asset_1month")
start_time = datetime.datetime.now(datetime.timezone.utc)
frozen_time = start_time
# All assets should be fresh immediately after materialization
with freeze_time(frozen_time):
store_mat(instance, asset_10min_key, frozen_time)
store_mat(instance, asset_10days_key, frozen_time)
store_mat(instance, asset_1month_key, frozen_time)
# Check 10-minute asset
asset_node = asset_graph.remote_asset_nodes_by_key[asset_10min_key]
ctx = cast("LoadingContext", workspace_context.create_request_context())
freshness_state = await evaluator.evaluate_freshness(context=ctx, node=asset_node)
assert freshness_state == FreshnessState.PASS
# Check 10-day asset
asset_node = asset_graph.remote_asset_nodes_by_key[asset_10days_key]
ctx = cast("LoadingContext", workspace_context.create_request_context())
freshness_state = await evaluator.evaluate_freshness(context=ctx, node=asset_node)
assert freshness_state == FreshnessState.PASS
# Check 1-month asset
asset_node = asset_graph.remote_asset_nodes_by_key[asset_1month_key]
ctx = cast("LoadingContext", workspace_context.create_request_context())
freshness_state = await evaluator.evaluate_freshness(context=ctx, node=asset_node)
assert freshness_state == FreshnessState.PASS
# Test 10-minute asset at 9 minutes and 59 seconds (still fresh)
frozen_time_10min = frozen_time + datetime.timedelta(minutes=9, seconds=59)
with freeze_time(frozen_time_10min):
asset_node = asset_graph.remote_asset_nodes_by_key[asset_10min_key]
ctx = cast("LoadingContext", workspace_context.create_request_context())
freshness_state = await evaluator.evaluate_freshness(context=ctx, node=asset_node)
assert freshness_state == FreshnessState.PASS
# Test 10-day asset at 9 days, 23 hours, 59 minutes, 59 seconds (still fresh)
frozen_time_10days = frozen_time + datetime.timedelta(
days=9, hours=23, minutes=59, seconds=59
)
with freeze_time(frozen_time_10days):
asset_node = asset_graph.remote_asset_nodes_by_key[asset_10days_key]
ctx = cast("LoadingContext", workspace_context.create_request_context())
freshness_state = await evaluator.evaluate_freshness(context=ctx, node=asset_node)
assert freshness_state == FreshnessState.PASS
# Test 1-month asset at 29 days, 23 hours, 59 minutes, 59 seconds (still fresh)
frozen_time_1month = frozen_time + datetime.timedelta(
days=29, hours=23, minutes=59, seconds=59
)
with freeze_time(frozen_time_1month):
asset_node = asset_graph.remote_asset_nodes_by_key[asset_1month_key]
ctx = cast("LoadingContext", workspace_context.create_request_context())
freshness_state = await evaluator.evaluate_freshness(context=ctx, node=asset_node)
assert freshness_state == FreshnessState.PASS
@pytest.mark.asyncio
async def test_freshness_fail(self, instance: DagsterInstance):
"""Test that an asset fails freshness if its last materialization is outside the fail window."""
def create_defs() -> dg.Definitions:
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(hours=24),
)
)
def asset_with_policy():
return 1
return dg.Definitions(assets=[asset_with_policy])
evaluator = TimeWindowFreshnessPolicyEvaluator()
with setup_remote_repo(instance=instance, fn=create_defs) as workspace_context:
asset_graph = workspace_context.create_request_context().asset_graph
asset_key = dg.AssetKey("asset_with_policy")
start_time = datetime.datetime.now(datetime.timezone.utc)
frozen_time = start_time
# Asset should fail freshness if last materialization is outside the fail window
store_mat(instance, asset_key, frozen_time)
frozen_time += datetime.timedelta(hours=24, seconds=1)
with freeze_time(frozen_time):
asset_node = asset_graph.remote_asset_nodes_by_key[asset_key]
ctx = cast("LoadingContext", workspace_context.create_request_context())
freshness_state = await evaluator.evaluate_freshness(context=ctx, node=asset_node)
assert freshness_state == FreshnessState.FAIL
@pytest.mark.asyncio
async def test_freshness_warn(self, instance: DagsterInstance):
"""Test that an asset enters freshness warning state if its last materialization is outside the warn window but within the fail window."""
def create_defs() -> dg.Definitions:
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(hours=24),
warn_window=datetime.timedelta(hours=12),
)
)
def asset_with_policy():
return 1
return dg.Definitions(assets=[asset_with_policy])
evaluator = TimeWindowFreshnessPolicyEvaluator()
with setup_remote_repo(instance=instance, fn=create_defs) as workspace_context:
asset_graph = workspace_context.create_request_context().asset_graph
asset_key = dg.AssetKey("asset_with_policy")
start_time = datetime.datetime.now(datetime.timezone.utc)
frozen_time = start_time
store_mat(instance, asset_key, frozen_time)
frozen_time += datetime.timedelta(hours=12, seconds=1)
with freeze_time(frozen_time):
asset_node = asset_graph.remote_asset_nodes_by_key[asset_key]
ctx = cast("LoadingContext", workspace_context.create_request_context())
freshness_state = await evaluator.evaluate_freshness(context=ctx, node=asset_node)
assert freshness_state == FreshnessState.WARN
@pytest.mark.asyncio
async def test_freshness_unknown(self, instance: DagsterInstance):
"""Test that assets with freshness policies but no materializations are evaluated as UNKNOWN."""
def create_defs() -> dg.Definitions:
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(hours=24),
)
)
def asset_with_policy():
return 1
@dg.asset(
freshness_policy=FreshnessPolicy.time_window(
fail_window=datetime.timedelta(hours=24),
warn_window=datetime.timedelta(hours=12),
)
)
def asset_with_policy_2():
return 1
return dg.Definitions(assets=[asset_with_policy, asset_with_policy_2])
evaluator = TimeWindowFreshnessPolicyEvaluator()
with setup_remote_repo(instance=instance, fn=create_defs) as workspace_context:
asset_graph = workspace_context.create_request_context().asset_graph
ctx = cast("LoadingContext", workspace_context.create_request_context())
for asset_node in asset_graph.remote_asset_nodes_by_key.values():
freshness_state = await evaluator.evaluate_freshness(context=ctx, node=asset_node)
assert freshness_state == FreshnessState.UNKNOWN
@pytest.mark.asyncio
async def test_freshness_no_policy(self, instance: DagsterInstance):
"""Raise CheckError if attempting to evaluate freshness for an asset without a freshness policy."""
def create_defs() -> dg.Definitions:
@dg.asset
def asset_without_policy():
return 1
return dg.Definitions(assets=[asset_without_policy])
evaluator = TimeWindowFreshnessPolicyEvaluator()
with setup_remote_repo(instance=instance, fn=create_defs) as workspace_context:
asset_graph = workspace_context.create_request_context().asset_graph
ctx = cast("LoadingContext", workspace_context.create_request_context())
for asset_node in asset_graph.remote_asset_nodes_by_key.values():
with pytest.raises(CheckError):
await evaluator.evaluate_freshness(context=ctx, node=asset_node)
| TestTimeWindowFreshnessPolicyEvaluator |
python | tensorflow__tensorflow | tensorflow/python/training/monitored_session_test.py | {
"start": 18929,
"end": 19255
} | class ____:
def __init__(self, between_graph, should_init, should_checkpoint,
should_save_summary):
self.experimental_between_graph = between_graph
self.experimental_should_init = should_init
self.should_checkpoint = should_checkpoint
self.should_save_summary = should_save_summary
| MockExtended |
python | ray-project__ray | rllib/evaluation/tests/test_postprocessing.py | {
"start": 227,
"end": 9137
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_n_step_3(self):
"""Tests, whether n-step adjustments of trajectories work."""
# n-step = 3
gamma = 0.9
obs = [1, 2, 3, 4, 5, 6, 7]
actions = ["ac1", "ac2", "ac1", "ac1", "ac1", "ac2", "ac1"]
rewards = [10.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0]
terminateds = [0, 0, 0, 0, 0, 0, 1]
truncateds = [0, 0, 0, 0, 0, 0, 0]
next_obs = [2, 3, 4, 5, 6, 7, 8]
batch = SampleBatch(
{
SampleBatch.OBS: obs,
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: rewards,
SampleBatch.TERMINATEDS: terminateds,
SampleBatch.TRUNCATEDS: truncateds,
SampleBatch.NEXT_OBS: next_obs,
}
)
adjust_nstep(3, gamma, batch)
check(batch[SampleBatch.OBS], [1, 2, 3, 4, 5, 6, 7])
check(
batch[SampleBatch.ACTIONS],
["ac1", "ac2", "ac1", "ac1", "ac1", "ac2", "ac1"],
)
check(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 8, 8, 8])
check(batch[SampleBatch.TERMINATEDS], [0, 0, 0, 0, 1, 1, 1])
check(batch[SampleBatch.TRUNCATEDS], [0, 0, 0, 0, 0, 0, 0])
check(
batch[SampleBatch.REWARDS], [91.0, 171.0, 271.0, 271.0, 271.0, 190.0, 100.0]
)
def test_n_step_4(self):
"""Tests, whether n-step adjustments of trajectories work."""
# n-step = 4
gamma = 0.99
obs = np.arange(0, 7)
actions = np.random.randint(-1, 3, size=(7,))
check_actions = actions.copy()
rewards = [10.0, 0.0, 100.0, 50.0, 60.0, 10.0, 100.0]
terminateds = [False, False, False, False, False, False, True]
truncateds = [False, False, False, False, False, False, False]
next_obs = np.arange(1, 8)
batch = SampleBatch(
{
SampleBatch.OBS: obs,
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: rewards,
SampleBatch.TERMINATEDS: terminateds,
SampleBatch.TRUNCATEDS: truncateds,
SampleBatch.NEXT_OBS: next_obs,
}
)
adjust_nstep(4, gamma, batch)
check(batch[SampleBatch.OBS], [0, 1, 2, 3, 4, 5, 6])
check(batch[SampleBatch.ACTIONS], check_actions)
check(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 7, 7, 7])
check(
batch[SampleBatch.TERMINATEDS],
[False, False, False, True, True, True, True],
)
check(
batch[SampleBatch.TRUNCATEDS],
[False, False, False, False, False, False, False],
)
check(
batch[SampleBatch.REWARDS],
[
discount_cumsum(np.array(rewards[0:4]), gamma)[0],
discount_cumsum(np.array(rewards[1:5]), gamma)[0],
discount_cumsum(np.array(rewards[2:6]), gamma)[0],
discount_cumsum(np.array(rewards[3:7]), gamma)[0],
discount_cumsum(np.array(rewards[4:]), gamma)[0],
discount_cumsum(np.array(rewards[5:]), gamma)[0],
discount_cumsum(np.array(rewards[6:]), gamma)[0],
],
)
def test_n_step_malformed_terminateds(self):
# Test bad input (trajectory has `terminateds` in middle).
# Re-use same batch, but change terminateds.
gamma = 1.0
obs = np.arange(0, 7)
actions = np.random.randint(-1, 3, size=(7,))
rewards = [10.0, 0.0, 100.0, 50.0, 60.0, 10.0, 100.0]
next_obs = np.arange(1, 8)
batch = SampleBatch(
{
SampleBatch.OBS: obs,
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: rewards,
SampleBatch.TERMINATEDS: [
False,
False,
True,
False,
False,
False,
True,
],
SampleBatch.TRUNCATEDS: [
False,
False,
False,
False,
False,
False,
False,
],
SampleBatch.NEXT_OBS: next_obs,
}
)
self.assertRaisesRegex(
AssertionError,
"Unexpected terminated\\|truncated in middle",
lambda: adjust_nstep(5, gamma, batch),
)
batch = SampleBatch(
{
SampleBatch.OBS: obs,
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: rewards,
SampleBatch.TERMINATEDS: [
False,
False,
False,
False,
False,
False,
True,
],
SampleBatch.TRUNCATEDS: [
False,
True,
False,
True,
False,
False,
False,
],
SampleBatch.NEXT_OBS: next_obs,
}
)
self.assertRaisesRegex(
AssertionError,
"Unexpected terminated\\|truncated in middle",
lambda: adjust_nstep(5, gamma, batch),
)
def test_n_step_very_short_trajectory(self):
"""Tests, whether n-step also works for very small trajectories."""
gamma = 1.0
obs = np.arange(0, 2)
actions = np.random.randint(-100, 300, size=(2,))
check_actions = actions.copy()
rewards = [10.0, 100.0]
next_obs = np.arange(1, 3)
batch = SampleBatch(
{
SampleBatch.OBS: obs,
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: rewards,
SampleBatch.TERMINATEDS: [False, False],
SampleBatch.TRUNCATEDS: [False, False],
SampleBatch.NEXT_OBS: next_obs,
}
)
adjust_nstep(3, gamma, batch)
check(batch[SampleBatch.OBS], [0, 1])
check(batch[SampleBatch.ACTIONS], check_actions)
check(batch[SampleBatch.TERMINATEDS], [False, False])
check(batch[SampleBatch.TRUNCATEDS], [False, False])
check(batch[SampleBatch.REWARDS], [10.0 + gamma * 100.0, 100.0])
check(batch[SampleBatch.NEXT_OBS], [2, 2])
def test_n_step_from_same_obs_source_array(self):
"""Tests, whether n-step also works on a shared obs/new-obs array."""
gamma = 0.99
# The underlying observation data. Both obs and next_obs will
# be references into that same np.array.
underlying_obs = np.arange(0, 8)
obs = underlying_obs[:7]
next_obs = underlying_obs[1:]
actions = np.random.randint(-1, 3, size=(7,))
check_actions = actions.copy()
rewards = [10.0, 0.0, 100.0, 50.0, 60.0, 10.0, 100.0]
terminateds = [False, False, False, False, False, False, False]
truncateds = [False, False, False, False, False, False, True]
batch = SampleBatch(
{
SampleBatch.OBS: obs,
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: rewards,
SampleBatch.TERMINATEDS: terminateds,
SampleBatch.TRUNCATEDS: truncateds,
SampleBatch.NEXT_OBS: next_obs,
}
)
adjust_nstep(4, gamma, batch)
check(batch[SampleBatch.OBS], [0, 1, 2, 3, 4, 5, 6])
check(batch[SampleBatch.ACTIONS], check_actions)
check(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 7, 7, 7])
check(
batch[SampleBatch.TERMINATEDS],
[False, False, False, False, False, False, False],
)
check(
batch[SampleBatch.TRUNCATEDS],
[False, False, False, True, True, True, True],
)
check(
batch[SampleBatch.REWARDS],
[
discount_cumsum(np.array(rewards[0:4]), gamma)[0],
discount_cumsum(np.array(rewards[1:5]), gamma)[0],
discount_cumsum(np.array(rewards[2:6]), gamma)[0],
discount_cumsum(np.array(rewards[3:7]), gamma)[0],
discount_cumsum(np.array(rewards[4:]), gamma)[0],
discount_cumsum(np.array(rewards[5:]), gamma)[0],
discount_cumsum(np.array(rewards[6:]), gamma)[0],
],
)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestPostprocessing |
python | miyuchina__mistletoe | test/test_contrib/test_xwiki20_renderer.py | {
"start": 235,
"end": 4766
} | class ____(BaseRendererTest):
def setUp(self):
super().setUp()
self.renderer = XWiki20Renderer()
self.renderer.__enter__()
self.addCleanup(self.renderer.__exit__, None, None, None)
self.sampleOutputExtension = 'xwiki20'
def genRandomString(self, n, hasWhitespace=False):
source = string.ascii_letters + string.digits
if hasWhitespace:
source = source + ' \t'
result = ''.join(random.SystemRandom().choice(source) for _ in range(n))
return result
def textFormatTest(self, inputTemplate, outputTemplate):
input = self.genRandomString(80, False)
token = next(iter(tokenize_inner(inputTemplate.format(input))))
output = self.renderer.render(token)
expected = outputTemplate.format(input)
self.assertEqual(output, expected)
def test_escaping(self):
self.textFormatTest('**code: `a = 1;// comment`, plain text URL: http://example.com**',
'**code: {{{{code}}}}a = 1;// comment{{{{/code}}}}, plain text URL: http:~//example.com**')
def test_render_strong(self):
self.textFormatTest('**a{}**', '**a{}**')
def test_render_emphasis(self):
self.textFormatTest('*a{}*', '//a{}//')
def test_render_inline_code(self):
self.textFormatTest('`a{}b`', '{{{{code}}}}a{}b{{{{/code}}}}')
def test_render_strikethrough(self):
self.textFormatTest('~~{}~~', '--{}--')
def test_render_image(self):
token = next(iter(tokenize_inner('')))
output = self.renderer.render(token)
expected = '[[image:foo.jpg]]'
self.assertEqual(output, expected)
def test_render_link(self):
url = 'http://{0}.{1}.{2}'.format(self.genRandomString(5), self.genRandomString(5), self.genRandomString(3))
body = self.genRandomString(80, True)
token = next(iter(tokenize_inner('[{body}]({url})'.format(url=url, body=body))))
output = self.renderer.render(token)
expected = '[[{body}>>{url}]]'.format(url=url, body=body)
self.assertEqual(output, expected)
def test_render_auto_link(self):
url = 'http://{0}.{1}.{2}'.format(self.genRandomString(5), self.genRandomString(5), self.genRandomString(3))
token = next(iter(tokenize_inner('<{url}>'.format(url=url))))
output = self.renderer.render(token)
expected = '[[{url}]]'.format(url=url)
self.assertEqual(output, expected)
def test_render_html_span(self):
markdown = 'text styles: <i>italic</i>, <b>bold</b>'
# See fixme at the `render_html_span` method...
# expected = 'text styles: {{html wiki="true"}}<i>italic</i>{{/html}}, {{html wiki="true"}}<b>bold</b>{{/html}}\n\n'
expected = 'text styles: <i>italic</i>, <b>bold</b>\n\n'
self.markdownResultTest(markdown, expected)
def test_render_html_block(self):
markdown = 'paragraph\n\n<pre>some <i>cool</i> code</pre>'
expected = 'paragraph\n\n{{html wiki="true"}}\n<pre>some <i>cool</i> code</pre>\n{{/html}}\n\n'
self.markdownResultTest(markdown, expected)
def test_render_xwiki_macros_simple(self):
markdown = """\
{{warning}}
Use this feature with *caution*. See {{Wikipedia article="SomeArticle"/}}. {{test}}Another inline macro{{/test}}.
{{/warning}}
"""
# Note: There is a trailing ' ' at the end of the second line. It will be a bit complicated to get rid of it.
expected = """\
{{warning}}
Use this feature with //caution//. See {{Wikipedia article="SomeArticle"/}}. {{test}}Another inline macro{{/test}}. \n\
{{/warning}}
"""
self.markdownResultTest(markdown, expected)
def test_render_xwiki_macros_in_list(self):
markdown = """\
* list item
{{warning}}
Use this feature with *caution*. See {{Wikipedia article="SomeArticle"/}}. {{test}}Another inline macro{{/test}}.
{{/warning}}
"""
# Note: There is a trailing ' ' at the end of the second line. It will be a bit complicated to get rid of it.
expected = """\
* list item(((
{{warning}}
Use this feature with //caution//. See {{Wikipedia article="SomeArticle"/}}. {{test}}Another inline macro{{/test}}. \n\
{{/warning}}
)))
"""
self.markdownResultTest(markdown, expected)
@filesBasedTest
def test_render__basic_blocks(self):
pass
@filesBasedTest
def test_render__lists(self):
pass
@filesBasedTest
def test_render__quotes(self):
pass
| TestXWiki20Renderer |
python | pytorch__pytorch | torch/nn/parallel/data_parallel.py | {
"start": 1752,
"end": 11878
} | class ____(Module, Generic[T]):
r"""Implements data parallelism at the module level.
This container parallelizes the application of the given :attr:`module` by
splitting the input across the specified devices by chunking in the batch
dimension (other objects will be copied once per device). In the forward
pass, the module is replicated on each device, and each replica handles a
portion of the input. During the backwards pass, gradients from each replica
are summed into the original module.
The batch size should be larger than the number of GPUs used.
.. warning::
It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
instead of this class, to do multi-GPU training, even if there is only a single
node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`.
Arbitrary positional and keyword inputs are allowed to be passed into
DataParallel but some types are specially handled. tensors will be
**scattered** on dim specified (default 0). tuple, list and dict types will
be shallow copied. The other types will be shared among different threads
and can be corrupted if written to in the model's forward pass.
The parallelized :attr:`module` must have its parameters and buffers on
``device_ids[0]`` before running this :class:`~torch.nn.DataParallel`
module.
.. warning::
In each forward, :attr:`module` is **replicated** on each device, so any
updates to the running module in ``forward`` will be lost. For example,
if :attr:`module` has a counter attribute that is incremented in each
``forward``, it will always stay at the initial value because the update
is done on the replicas which are destroyed after ``forward``. However,
:class:`~torch.nn.DataParallel` guarantees that the replica on
``device[0]`` will have its parameters and buffers sharing storage with
the base parallelized :attr:`module`. So **in-place** updates to the
parameters or buffers on ``device[0]`` will be recorded. E.g.,
:class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm`
rely on this behavior to update the buffers.
.. warning::
Forward and backward hooks defined on :attr:`module` and its submodules
will be invoked ``len(device_ids)`` times, each with inputs located on
a particular device. Particularly, the hooks are only guaranteed to be
executed in correct order with respect to operations on corresponding
devices. For example, it is not guaranteed that hooks set via
:meth:`~torch.nn.Module.register_forward_pre_hook` be executed before
`all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but
that each such hook be executed before the corresponding
:meth:`~torch.nn.Module.forward` call of that device.
.. warning::
When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in
:func:`forward`, this wrapper will return a vector of length equal to
number of devices used in data parallelism, containing the result from
each device.
.. note::
There is a subtlety in using the
``pack sequence -> recurrent network -> unpack sequence`` pattern in a
:class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for
details.
Args:
module (Module): module to be parallelized
device_ids (list of int or torch.device): CUDA devices (default: all devices)
output_device (int or torch.device): device location of output (default: device_ids[0])
Attributes:
module (Module): the module to be parallelized
Example::
>>> # xdoctest: +SKIP
>>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
>>> output = net(input_var) # input_var can be on any device, including CPU
"""
# TODO: update notes/cuda.rst when this class handles 8+ GPUs well
def __init__(
self,
module: T,
device_ids: Sequence[int | torch.device] | None = None,
output_device: int | torch.device | None = None,
dim: int = 0,
) -> None:
super().__init__()
torch._C._log_api_usage_once("torch.nn.parallel.DataParallel")
device_type = _get_available_device_type()
if device_type is None or device_type == "mps":
self.module = module
self.device_ids = []
return
if device_ids is None:
device_ids = _get_all_device_indices()
if device_ids is None:
raise RuntimeError("no available devices were found")
if output_device is None:
output_device = device_ids[0]
self.dim = dim
self.module = module
self.device_ids = [_get_device_index(x, True) for x in device_ids]
self.output_device = _get_device_index(output_device, True)
# pyrefly: ignore [read-only]
self.src_device_obj = torch.device(device_type, self.device_ids[0])
if device_type == "cuda":
_check_balance(self.device_ids)
if len(self.device_ids) == 1:
self.module.to(self.src_device_obj)
def forward(self, *inputs: Any, **kwargs: Any) -> Any:
with torch.autograd.profiler.record_function("DataParallel.forward"):
if not self.device_ids:
return self.module(*inputs, **kwargs)
# pyrefly: ignore [bad-argument-type]
for t in chain(self.module.parameters(), self.module.buffers()):
if t.device != self.src_device_obj:
raise RuntimeError(
"module must have its parameters and buffers "
f"on device {self.src_device_obj} (device_ids[0]) but found one of "
f"them on device: {t.device}"
)
inputs, module_kwargs = self.scatter(inputs, kwargs, self.device_ids)
# for forward function without any inputs, empty list and dict will be created
# so the module can be executed on one device which is the first one in device_ids
if not inputs and not module_kwargs:
inputs = ((),)
module_kwargs = ({},)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **module_kwargs[0])
replicas = self.replicate(self.module, self.device_ids[: len(inputs)])
outputs = self.parallel_apply(replicas, inputs, module_kwargs)
return self.gather(outputs, self.output_device)
def replicate(self, module: T, device_ids: Sequence[int | torch.device]) -> list[T]:
return replicate(module, device_ids, not torch.is_grad_enabled())
def scatter(
self,
inputs: tuple[Any, ...],
kwargs: dict[str, Any] | None,
device_ids: Sequence[int | torch.device],
) -> Any:
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def parallel_apply(
self, replicas: Sequence[T], inputs: Sequence[Any], kwargs: Any
) -> list[Any]:
return parallel_apply(
replicas, inputs, kwargs, self.device_ids[: len(replicas)]
)
def gather(self, outputs: Any, output_device: int | torch.device) -> Any:
return gather(outputs, output_device, dim=self.dim)
def data_parallel(
module: Module,
inputs: Any,
device_ids: Sequence[int | torch.device] | None = None,
output_device: int | torch.device | None = None,
dim: int = 0,
module_kwargs: Any | None = None,
) -> torch.Tensor:
r"""Evaluate module(input) in parallel across the GPUs given in device_ids.
This is the functional version of the DataParallel module.
Args:
module (Module): the module to evaluate in parallel
inputs (Tensor): inputs to the module
device_ids (list of int or torch.device): GPU ids on which to replicate module
output_device (list of int or torch.device): GPU location of the output Use -1 to indicate the CPU.
(default: device_ids[0])
Returns:
a Tensor containing the result of module(input) located on
output_device
"""
if not isinstance(inputs, tuple):
inputs = (inputs,) if inputs is not None else ()
device_type = _get_available_device_type()
if device_type is None:
raise RuntimeError("device type could not be determined")
if device_ids is None:
device_ids = _get_all_device_indices()
if device_ids is None:
raise RuntimeError("no available devices were found")
if output_device is None:
output_device = device_ids[0]
device_ids = [_get_device_index(x, True) for x in device_ids]
output_device = _get_device_index(output_device, True)
# pyrefly: ignore [no-matching-overload]
src_device_obj = torch.device(device_type, device_ids[0])
# pyrefly: ignore [bad-argument-type]
for t in chain(module.parameters(), module.buffers()):
if t.device != src_device_obj:
raise RuntimeError(
"module must have its parameters and buffers "
f"on device {src_device_obj} (device_ids[0]) but found one of "
f"them on device: {t.device}"
)
inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
# for module without any inputs, empty list and dict will be created
# so the module can be executed on one device which is the first one in device_ids
if not inputs and not module_kwargs:
inputs = ((),)
module_kwargs = ({},)
assert module_kwargs is not None
if len(device_ids) == 1:
return module(*inputs[0], **module_kwargs[0])
used_device_ids = device_ids[: len(inputs)]
replicas = replicate(module, used_device_ids)
outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
return gather(outputs, output_device, dim)
| DataParallel |
python | django-extensions__django-extensions | django_extensions/collision_resolvers.py | {
"start": 5949,
"end": 6262
} | class ____(AppNamePrefixCR, InstalledAppsOrderCR):
"""
Collision resolver which is mixin of AppNamePrefixCR and InstalledAppsOrderCR.
In case of collisions he sets aliases like AppNamePrefixCR, but sets default model using InstalledAppsOrderCR.
""" # noqa: E501
pass
| AppNamePrefixCustomOrderCR |
python | keras-team__keras | keras/src/layers/convolutional/conv1d.py | {
"start": 205,
"end": 7321
} | class ____(BaseConv):
"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved with the layer
input over a single spatial (or temporal) dimension to produce a tensor of
outputs. If `use_bias` is True, a bias vector is created and added to the
outputs. Finally, if `activation` is not `None`, it is applied to the
outputs as well.
Args:
filters: int, the dimension of the output space (the number of filters
in the convolution).
kernel_size: int or tuple/list of 1 integer, specifying the size of the
convolution window.
strides: int or tuple/list of 1 integer, specifying the stride length
of the convolution. `strides > 1` is incompatible with
`dilation_rate > 1`.
padding: string, `"valid"`, `"same"` or `"causal"`(case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input. When `padding="same"` and
`strides=1`, the output has the same size as the input.
`"causal"` results in causal(dilated) convolutions, e.g. `output[t]`
does not depend on`input[t+1:]`. Useful when modeling temporal data
where the model should not violate the temporal order.
See [WaveNet: A Generative Model for Raw Audio, section2.1](
https://arxiv.org/abs/1609.03499).
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
dilation_rate: int or tuple/list of 1 integers, specifying the dilation
rate to use for dilated convolution.
groups: A positive int specifying the number of groups in which the
input is split along the channel axis. Each group is convolved
separately with `filters // groups` filters. The output is the
concatenation of all the `groups` results along the channel axis.
Input channels and `filters` must both be divisible by `groups`.
activation: Activation function. If `None`, no activation is applied.
use_bias: bool, if `True`, bias will be added to the output.
kernel_initializer: Initializer for the convolution kernel. If `None`,
the default initializer (`"glorot_uniform"`) will be used.
bias_initializer: Initializer for the bias vector. If `None`, the
default initializer (`"zeros"`) will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The
function must take as input the unprojected variable and must return
the projected variable (which must have the same shape). Constraints
are not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
Input shape:
- If `data_format="channels_last"`:
A 3D tensor with shape: `(batch_shape, steps, channels)`
- If `data_format="channels_first"`:
A 3D tensor with shape: `(batch_shape, channels, steps)`
Output shape:
- If `data_format="channels_last"`:
A 3D tensor with shape: `(batch_shape, new_steps, filters)`
- If `data_format="channels_first"`:
A 3D tensor with shape: `(batch_shape, filters, new_steps)`
Returns:
A 3D tensor representing `activation(conv1d(inputs, kernel) + bias)`.
Raises:
ValueError: when both `strides > 1` and `dilation_rate > 1`.
Example:
>>> # The inputs are 128-length vectors with 10 timesteps, and the
>>> # batch size is 4.
>>> x = np.random.rand(4, 10, 128)
>>> y = keras.layers.Conv1D(32, 3, activation='relu')(x)
>>> print(y.shape)
(4, 8, 32)
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
groups=1,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
def _compute_causal_padding(self):
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if self.data_format == "channels_last":
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
return causal_padding
def call(self, inputs):
padding = self.padding
if self.padding == "causal":
# Apply causal padding to inputs.
inputs = ops.pad(inputs, self._compute_causal_padding())
padding = "valid"
outputs = ops.conv(
inputs,
self.kernel,
strides=list(self.strides),
padding=padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format,
)
if self.use_bias:
if self.data_format == "channels_last":
bias_shape = (1,) * (self.rank + 1) + (self.filters,)
else:
bias_shape = (1, self.filters) + (1,) * self.rank
bias = ops.reshape(self.bias, bias_shape)
outputs = ops.add(outputs, bias)
if self.activation is not None:
return self.activation(outputs)
return outputs
| Conv1D |
python | astropy__astropy | astropy/samp/web_profile.py | {
"start": 428,
"end": 4228
} | class ____(SAMPSimpleXMLRPCRequestHandler):
"""
Handler of XMLRPC requests performed through the Web Profile.
"""
def _send_CORS_header(self):
if self.headers.get("Origin") is not None:
method = self.headers.get("Access-Control-Request-Method")
if method and self.command == "OPTIONS":
# Preflight method
self.send_header("Content-Length", "0")
self.send_header(
"Access-Control-Allow-Origin", self.headers.get("Origin")
)
self.send_header("Access-Control-Allow-Methods", method)
self.send_header("Access-Control-Allow-Headers", "Content-Type")
self.send_header("Access-Control-Allow-Credentials", "true")
if self.headers.get("Access-Control-Request-Private-Network") == "true":
self.send_header("Access-Control-Allow-Private-Network", "true")
else:
# Simple method
self.send_header(
"Access-Control-Allow-Origin", self.headers.get("Origin")
)
self.send_header("Access-Control-Allow-Headers", "Content-Type")
self.send_header("Access-Control-Allow-Credentials", "true")
def end_headers(self):
self._send_CORS_header()
SAMPSimpleXMLRPCRequestHandler.end_headers(self)
def _serve_cross_domain_xml(self):
cross_domain = False
if self.path == "/crossdomain.xml":
# Adobe standard
response = CROSS_DOMAIN
self.send_response(200, "OK")
self.send_header("Content-Type", "text/x-cross-domain-policy")
self.send_header("Content-Length", f"{len(response)}")
self.end_headers()
self.wfile.write(response.encode("utf-8"))
self.wfile.flush()
cross_domain = True
elif self.path == "/clientaccesspolicy.xml":
# Microsoft standard
response = CLIENT_ACCESS_POLICY
self.send_response(200, "OK")
self.send_header("Content-Type", "text/xml")
self.send_header("Content-Length", f"{len(response)}")
self.end_headers()
self.wfile.write(response.encode("utf-8"))
self.wfile.flush()
cross_domain = True
return cross_domain
def do_POST(self):
if self._serve_cross_domain_xml():
return
return SAMPSimpleXMLRPCRequestHandler.do_POST(self)
def do_HEAD(self):
if not self.is_http_path_valid():
self.report_404()
return
if self._serve_cross_domain_xml():
return
def do_OPTIONS(self):
self.send_response(200, "OK")
self.end_headers()
def do_GET(self):
if not self.is_http_path_valid():
self.report_404()
return
split_path = self.path.split("?")
if split_path[0] in [f"/translator/{clid}" for clid in self.server.clients]:
# Request of a file proxying
urlpath = parse_qs(split_path[1])
try:
proxyfile = urlopen(urlpath["ref"][0])
self.send_response(200, "OK")
self.end_headers()
self.wfile.write(proxyfile.read())
proxyfile.close()
except OSError:
self.report_404()
return
if self._serve_cross_domain_xml():
return
def is_http_path_valid(self):
valid_paths = ["/clientaccesspolicy.xml", "/crossdomain.xml"] + [
f"/translator/{clid}" for clid in self.server.clients
]
return self.path.split("?")[0] in valid_paths
| WebProfileRequestHandler |
python | google__jax | tests/jet_test.py | {
"start": 1755,
"end": 17051
} | class ____(jtu.JaxTestCase):
def check_jet(self, fun, primals, series, atol=1e-5, rtol=1e-5,
check_dtypes=True):
# Convert to jax arrays to ensure dtype canonicalization.
primals = jax.tree.map(jnp.asarray, primals)
series = jax.tree.map(jnp.asarray, series)
y, terms = jet(fun, primals, series)
expected_y, expected_terms = jvp_taylor(fun, primals, series)
self.assertAllClose(y, expected_y, atol=atol, rtol=rtol,
check_dtypes=check_dtypes)
self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol,
check_dtypes=check_dtypes)
def check_jet_finite(self, fun, primals, series, atol=1e-5, rtol=1e-5,
check_dtypes=True):
# Convert to jax arrays to ensure dtype canonicalization.
primals = jax.tree.map(jnp.asarray, primals)
series = jax.tree.map(jnp.asarray, series)
y, terms = jet(fun, primals, series)
expected_y, expected_terms = jvp_taylor(fun, primals, series)
def _convert(x):
return jnp.where(jnp.isfinite(x), x, jnp.nan)
y = _convert(y)
expected_y = _convert(expected_y)
terms = _convert(jnp.asarray(terms))
expected_terms = _convert(jnp.asarray(expected_terms))
self.assertAllClose(y, expected_y, atol=atol, rtol=rtol,
check_dtypes=check_dtypes)
self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol,
check_dtypes=check_dtypes)
@jtu.skip_on_devices("tpu")
# Default tolerance too tight on A100 after openxla/xla@a58070090
@jax.default_matmul_precision("float32")
def test_dot(self):
M, K, N = 2, 3, 4
order = 3
rng = self.rng()
x1 = rng.randn(M, K)
x2 = rng.randn(K, N)
primals = (x1, x2)
terms_in1 = [rng.randn(*x1.shape) for _ in range(order)]
terms_in2 = [rng.randn(*x2.shape) for _ in range(order)]
series_in = (terms_in1, terms_in2)
self.check_jet(jnp.dot, primals, series_in)
@jtu.skip_on_devices("tpu")
@jax.legacy_prng_key('allow')
def test_conv(self):
order = 3
input_shape = (1, 5, 5, 1)
key = random.PRNGKey(0)
# TODO(duvenaud): Check all types of padding
init_fun, apply_fun = stax.Conv(3, (2, 2), padding='VALID')
_, (W, b) = init_fun(key, input_shape)
rng = self.rng()
x = rng.randn(*input_shape).astype(W.dtype)
primals = (W, b, x)
series_in1 = [rng.randn(*W.shape).astype(W.dtype) for _ in range(order)]
series_in2 = [rng.randn(*b.shape).astype(W.dtype) for _ in range(order)]
series_in3 = [rng.randn(*x.shape).astype(W.dtype) for _ in range(order)]
series_in = (series_in1, series_in2, series_in3)
def f(W, b, x):
return apply_fun((W, b), x)
self.check_jet(f, primals, series_in, check_dtypes=False)
def unary_check(self, fun, lims=(-2, 2), order=3, dtype=None, atol=1e-3,
rtol=1e-3):
dims = 2, 3
rng = self.rng()
if dtype is None:
primal_in = transform(lims, rng.rand(*dims))
terms_in = [rng.randn(*dims) for _ in range(order)]
else:
rng = jtu.rand_uniform(rng, *lims)
primal_in = rng(dims, dtype)
terms_in = [rng(dims, dtype) for _ in range(order)]
self.check_jet(fun, (primal_in,), (terms_in,), atol, rtol)
def binary_check(self, fun, lims=None, order=3, finite=True, dtype=None):
lims = lims or [-2, 2]
dims = 2, 3
rng = self.rng()
if isinstance(lims, tuple):
x_lims, y_lims = lims
else:
x_lims, y_lims = lims, lims
if dtype is None:
primal_in = (transform(x_lims, rng.rand(*dims)),
transform(y_lims, rng.rand(*dims)))
series_in = ([rng.randn(*dims) for _ in range(order)],
[rng.randn(*dims) for _ in range(order)])
else:
rng = jtu.rand_uniform(rng, *lims)
primal_in = (rng(dims, dtype),
rng(dims, dtype))
series_in = ([rng(dims, dtype) for _ in range(order)],
[rng(dims, dtype) for _ in range(order)])
if finite:
self.check_jet(fun, primal_in, series_in, atol=1e-4, rtol=1e-4)
else:
self.check_jet_finite(fun, primal_in, series_in, atol=1e-4, rtol=1e-4)
def unary_check_float0(self, fun, lims=(-2, 2), order=3, dtype=None):
# like unary_check but for functions that output integers (so their tangent
# type is float0 arrays)
raise unittest.SkipTest("jet tests must be adapted for integer-output functions")
def binary_check_float0(self, fun, lims=(-2, 2), order=3, finite=True, dtype=None):
# like binary_check but for functions that output integers (so their tangent
# type is float0 arrays)
raise unittest.SkipTest("jet tests must be adapted for integer-output functions")
def expit_check(self, lims=(-2, 2), order=3):
dims = 2, 3
rng = self.rng()
primal_in = transform(lims, rng.rand(*dims))
terms_in = [rng.randn(*dims) for _ in range(order)]
primals = (primal_in, )
series = (terms_in, )
y, terms = jax.experimental.jet._logistic_taylor(primals, series)
expected_y, expected_terms = jvp_taylor(jax.scipy.special.expit, primals, series)
atol = 1e-4
rtol = 1e-4
self.assertAllClose(y, expected_y, atol=atol, rtol=rtol)
self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol)
@jtu.skip_on_devices("tpu")
def test_int_pow(self):
for p in range(6):
self.unary_check(lambda x: x ** p, lims=[-2, 2])
self.unary_check(lambda x: x ** 10, lims=[0, 0])
@jtu.skip_on_devices("tpu")
def test_is_finite(self): self.unary_check_float0(lax.is_finite)
@jtu.skip_on_devices("tpu")
def test_and(self): self.binary_check_float0(lax.bitwise_and, dtype=np.bool_)
@jtu.skip_on_devices("tpu")
def test_or(self): self.binary_check_float0(lax.bitwise_or, dtype=np.bool_)
@jtu.skip_on_devices("tpu")
def test_xor(self): self.binary_check_float0(jnp.bitwise_xor, dtype=np.bool_)
@jtu.skip_on_devices("tpu")
def test_shift_left(self): self.binary_check_float0(lax.shift_left, dtype=np.int32)
@jtu.skip_on_devices("tpu")
def test_shift_right_a(self): self.binary_check_float0(lax.shift_right_arithmetic, dtype=np.int32)
@jtu.skip_on_devices("tpu")
def test_shift_right_l(self): self.binary_check_float0(lax.shift_right_logical, dtype=np.int32)
@jtu.skip_on_devices("tpu")
def test_le(self): self.binary_check_float0(lambda x, y: x <= y)
@jtu.skip_on_devices("tpu")
def test_gt(self): self.binary_check_float0(lambda x, y: x > y)
@jtu.skip_on_devices("tpu")
def test_lt(self): self.binary_check_float0(lambda x, y: x < y)
@jtu.skip_on_devices("tpu")
def test_ge(self): self.binary_check_float0(lambda x, y: x >= y)
@jtu.skip_on_devices("tpu")
def test_eq(self): self.binary_check_float0(lambda x, y: x == y)
@jtu.skip_on_devices("tpu")
def test_ne(self): self.binary_check_float0(lambda x, y: x != y)
@jtu.skip_on_devices("tpu")
def test_not(self): self.unary_check_float0(lax.bitwise_not, dtype=np.bool_)
@jtu.skip_on_devices("tpu")
def test_exp(self): self.unary_check(jnp.exp)
@jtu.skip_on_devices("tpu")
def test_neg(self): self.unary_check(jnp.negative)
@jtu.skip_on_devices("tpu")
def test_floor(self): self.unary_check(jnp.floor)
@jtu.skip_on_devices("tpu")
def test_ceil(self): self.unary_check(jnp.ceil)
@jtu.skip_on_devices("tpu")
def test_trunc(self): self.unary_check(jnp.trunc)
@jtu.skip_on_devices("tpu")
def test_round(self): self.unary_check(lax.round)
@jtu.skip_on_devices("tpu")
def test_sign(self): self.unary_check(lax.sign)
@jtu.skip_on_devices("tpu")
def test_real(self): self.unary_check(lax.real, dtype=np.complex64)
@jtu.skip_on_devices("tpu")
def test_conj(self): self.unary_check(lax.conj, dtype=np.complex64)
@jtu.skip_on_devices("tpu")
def test_imag(self): self.unary_check(lax.imag, dtype=np.complex64)
@jtu.skip_on_devices("tpu")
def test_log(self): self.unary_check(jnp.log, lims=[0.8, 4.0])
@jtu.skip_on_devices("tpu")
def test_gather(self): self.unary_check(lambda x: x[1:])
@jtu.skip_on_devices("tpu")
def test_reduce_max(self): self.unary_check(lambda x: x.max(axis=1))
@jtu.skip_on_devices("tpu")
def test_reduce_min(self): self.unary_check(lambda x: x.min(axis=1))
@jtu.skip_on_devices("tpu")
def test_all_max(self): self.unary_check(jnp.max)
@jtu.skip_on_devices("tpu")
def test_all_min(self): self.unary_check(jnp.min)
@jtu.skip_on_devices("tpu")
def test_stopgrad(self): self.unary_check(lax.stop_gradient)
@jtu.skip_on_devices("tpu")
def test_abs(self): self.unary_check(jnp.abs)
@jtu.skip_on_devices("tpu")
def test_fft(self): self.unary_check(jnp.fft.fft)
@jtu.skip_on_devices("tpu")
def test_log1p(self): self.unary_check(jnp.log1p, lims=[0, 4.])
@jtu.skip_on_devices("tpu")
def test_expm1(self): self.unary_check(jnp.expm1)
@jtu.skip_on_devices("tpu")
def test_sin(self): self.unary_check(jnp.sin)
@jtu.skip_on_devices("tpu")
def test_cos(self): self.unary_check(jnp.cos)
@jtu.skip_on_devices("tpu")
def test_sinh(self): self.unary_check(jnp.sinh)
@jtu.skip_on_devices("tpu")
def test_cosh(self): self.unary_check(jnp.cosh)
@jtu.skip_on_devices("tpu")
def test_tanh(self): self.unary_check(jnp.tanh, lims=[-500, 500], order=5,
atol=5e-3)
@jtu.skip_on_devices("tpu")
def test_logistic(self): self.unary_check(lax.logistic, lims=[-100, 100], order=5)
@jtu.skip_on_devices("tpu")
def test_expit2(self): self.expit_check(lims=[-500, 500], order=5)
@jtu.skip_on_devices("tpu")
def test_sqrt(self): self.unary_check(jnp.sqrt, lims=[0, 5.])
@jtu.skip_on_devices("tpu")
def test_rsqrt(self): self.unary_check(lax.rsqrt, lims=[0, 5000.])
@jtu.skip_on_devices("tpu")
def test_asinh(self): self.unary_check(lax.asinh, lims=[-100, 100])
@jtu.skip_on_devices("tpu")
def test_acosh(self): self.unary_check(lax.acosh, lims=[-100, 100])
@jtu.skip_on_devices("tpu")
def test_atanh(self): self.unary_check(lax.atanh, lims=[-1, 1])
@jtu.skip_on_devices("tpu")
def test_erf(self): self.unary_check(lax.erf)
@jtu.skip_on_devices("tpu")
def test_erfc(self): self.unary_check(lax.erfc)
@jtu.skip_on_devices("tpu")
def test_erf_inv(self): self.unary_check(lax.erf_inv, lims=[-1, 1])
@jtu.skip_on_devices("tpu")
def test_cumsum(self): self.unary_check(jnp.cumsum)
@jtu.skip_on_devices("tpu")
def test_cumprod(self): self.unary_check(jnp.cumprod)
@jtu.skip_on_devices("tpu")
def test_cummax(self): self.unary_check(partial(lax.cummax, axis=0))
@jtu.skip_on_devices("tpu")
def test_cummin(self): self.unary_check(partial(lax.cummin, axis=0))
@jtu.skip_on_devices("tpu")
def test_dynamic_slice(self): self.unary_check(partial(lax.dynamic_slice, start_indices=(1,2), slice_sizes=(1,1)))
@jtu.skip_on_devices("tpu")
def test_dynamic_update_slice(self): self.unary_check(partial(lax.dynamic_update_slice, start_indices=(1,2), update=np.arange(6.0).reshape(2, 3)))
@jtu.skip_on_devices("tpu")
def test_copy(self): self.unary_check(jnp.array)
@jtu.skip_on_devices("tpu")
def test_div(self): self.binary_check(lambda x, y: x / y, lims=[0.8, 4.0])
@jtu.skip_on_devices("tpu")
def test_rem(self): self.binary_check(lax.rem, lims=[0.8, 4.0])
@jtu.skip_on_devices("tpu")
def test_complex(self): self.binary_check(lax.complex)
@jtu.skip_on_devices("tpu")
def test_sub(self): self.binary_check(lambda x, y: x - y)
@jtu.skip_on_devices("tpu")
def test_add(self): self.binary_check(lambda x, y: x + y)
@jtu.skip_on_devices("tpu")
def test_mul(self): self.binary_check(lambda x, y: x * y)
@jtu.skip_on_devices("tpu")
def test_max(self): self.binary_check(lax.max)
@jtu.skip_on_devices("tpu")
def test_min(self): self.binary_check(lax.min)
@jtu.skip_on_devices("tpu")
@jtu.ignore_warning(message="overflow encountered in power")
def test_pow(self): self.binary_check(lambda x, y: x ** y, lims=([0.2, 500], [-500, 500]), finite=False)
@jtu.skip_on_devices("tpu")
def test_atan2(self): self.binary_check(lax.atan2, lims=[-40, 40])
@jtu.skip_on_devices("tpu")
def test_clamp(self):
lims = [-1, 1]
order = 3
dims = 2, 3
# TODO(jakevdp): This test is very sensitive to the inputs, so we use a known
# working seed. We should instead use self.rng(), and make sure that the primal
# points lie outside an epsilon ball of the two critical points in the function.
rng = np.random.RandomState(0)
primal_in = (transform(lims, rng.rand(*dims)),
transform(lims, rng.rand(*dims)),
transform(lims, rng.rand(*dims)))
series_in = ([rng.randn(*dims) for _ in range(order)],
[rng.randn(*dims) for _ in range(order)],
[rng.randn(*dims) for _ in range(order)])
self.check_jet(lax.clamp, primal_in, series_in, atol=1e-4, rtol=1e-4)
def test_process_call(self):
def f(x):
return jit(lambda x: x * x)(x)
self.unary_check(f, rtol=2e-4)
def test_post_process_call(self):
def f(x):
return jit(lambda y: x * y)(2.)
self.unary_check(f, rtol=5e-4)
def test_select(self):
M, K = 2, 3
order = 3
rng = self.rng()
b = rng.rand(M, K) < 0.5
x = rng.randn(M, K)
y = rng.randn(M, K)
primals = (b, x, y)
terms_b = [rng.randn(*b.shape) for _ in range(order)]
terms_x = [rng.randn(*x.shape) for _ in range(order)]
terms_y = [rng.randn(*y.shape) for _ in range(order)]
series_in = (terms_b, terms_x, terms_y)
# Since this nudges bool inputs, we need to allow promotion to float.
with jax.numpy_dtype_promotion('standard'):
self.check_jet(jnp.where, primals, series_in, rtol=5e-4)
def test_inst_zero(self):
def f(x):
return jnp.full_like(x, 2.)
def g(x):
return 2. + 0 * x
x = jnp.ones(1)
order = 3
f_out_primals, f_out_series = jet(f, (x, ), ([jnp.ones_like(x) for _ in range(order)], ))
assert f_out_series is not zero_series
g_out_primals, g_out_series = jet(g, (x, ), ([jnp.ones_like(x) for _ in range(order)], ))
self.assertArraysEqual(g_out_primals, f_out_primals)
self.assertArraysEqual(g_out_series, f_out_series)
def test_add_any(self):
# https://github.com/jax-ml/jax/issues/5217
f = lambda x, eps: x * eps + eps + x
def g(eps):
x = jnp.array(1.)
return jax.grad(f)(x, eps)
jet(g, (1.,), ([1.],)) # doesn't crash
def test_scatter_add(self):
# very basic test from https://github.com/jax-ml/jax/issues/5365
def f(x):
x0 = x[0]
x1 = x[1]
return (x0**5 + x1**5).sum()
def h(eps):
from jax import jacfwd, grad
x = jnp.array([1., 1.])
ฮผ = eps * x
def F(t):
return f(x + t * ฮผ)
return grad(jacfwd(F))(0.)
self.check_jet(h, (0.,), ([1., 2., 3.],), rtol=1e-3)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| JetTest |
python | lepture__authlib | authlib/integrations/requests_client/oauth1_session.py | {
"start": 722,
"end": 2324
} | class ____(OAuth1Client, Session):
auth_class = OAuth1Auth
def __init__(
self,
client_id,
client_secret=None,
token=None,
token_secret=None,
redirect_uri=None,
rsa_key=None,
verifier=None,
signature_method=SIGNATURE_HMAC_SHA1,
signature_type=SIGNATURE_TYPE_HEADER,
force_include_body=False,
**kwargs,
):
Session.__init__(self)
update_session_configure(self, kwargs)
OAuth1Client.__init__(
self,
session=self,
client_id=client_id,
client_secret=client_secret,
token=token,
token_secret=token_secret,
redirect_uri=redirect_uri,
rsa_key=rsa_key,
verifier=verifier,
signature_method=signature_method,
signature_type=signature_type,
force_include_body=force_include_body,
**kwargs,
)
def rebuild_auth(self, prepared_request, response):
"""When being redirected we should always strip Authorization
header, since nonce may not be reused as per OAuth spec.
"""
if "Authorization" in prepared_request.headers:
# If we get redirected to a new host, we should strip out
# any authentication headers.
prepared_request.headers.pop("Authorization", True)
prepared_request.prepare_auth(self.auth)
@staticmethod
def handle_error(error_type, error_description):
raise OAuthError(error_type, error_description)
| OAuth1Session |
python | mlflow__mlflow | tests/pyfunc/test_pyfunc_exceptions.py | {
"start": 77,
"end": 579
} | class ____(mlflow.pyfunc.PythonModel):
def __init__(self, path):
with open(path, "w+") as f:
pass
self.not_a_file = f
def test_pyfunc_unpicklable_exception(tmp_path):
model = UnpicklableModel(tmp_path / "model.pkl")
with pytest.raises(
MlflowException,
match="Please save the model into a python file and use code-based logging method instead",
):
mlflow.pyfunc.save_model(python_model=model, path=tmp_path / "model")
| UnpicklableModel |
python | pypa__warehouse | tests/unit/manage/test_views.py | {
"start": 235832,
"end": 239552
} | class ____:
def test_archive(self, db_request):
project = ProjectFactory.create(name="foo")
user = UserFactory.create(username="testuser")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.method = "POST"
db_request.user = user
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.archive_project_view(project, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert project.lifecycle_status == LifecycleStatus.ArchivedNoindex
assert db_request.route_path.calls == [
pretend.call("manage.project.settings", project_name=project.name)
]
def test_unarchive_project(self, db_request):
project = ProjectFactory.create(
name="foo", lifecycle_status=LifecycleStatus.Archived
)
user = UserFactory.create(username="testuser")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.method = "POST"
db_request.user = user
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.unarchive_project_view(project, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert db_request.route_path.calls == [
pretend.call("manage.project.settings", project_name=project.name)
]
assert project.lifecycle_status is None
def test_disallowed_archive(self, db_request):
project = ProjectFactory.create(name="foo", lifecycle_status="quarantine-enter")
user = UserFactory.create(username="testuser")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.method = "POST"
db_request.user = user
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.archive_project_view(project, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert db_request.session.flash.calls == [
pretend.call(
f"Cannot archive project with status {project.lifecycle_status}",
queue="error",
)
]
assert db_request.route_path.calls == [
pretend.call("manage.project.settings", project_name="foo")
]
assert project.lifecycle_status == "quarantine-enter"
def test_disallowed_unarchive(self, db_request):
project = ProjectFactory.create(name="foo", lifecycle_status="quarantine-enter")
user = UserFactory.create(username="testuser")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.method = "POST"
db_request.user = user
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.unarchive_project_view(project, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert db_request.session.flash.calls == [
pretend.call("Can only unarchive an archived project", queue="error")
]
assert db_request.route_path.calls == [
pretend.call("manage.project.settings", project_name="foo")
]
assert project.lifecycle_status == "quarantine-enter"
| TestArchiveProject |
python | ray-project__ray | python/ray/serve/_private/common.py | {
"start": 6090,
"end": 7982
} | class ____(str, Enum):
HEALTHY = "HEALTHY"
CONFIG_UPDATE = "CONFIG_UPDATE"
AUTOSCALE_UP = "AUTOSCALE_UP"
AUTOSCALE_DOWN = "AUTOSCALE_DOWN"
# MANUALLY_INCREASE_NUM_REPLICAS and MANUALLY_DECREASE_NUM_REPLICAS are used
# instead of CONFIG_UPDATE when the config update only scales
# the number of replicas.
MANUALLY_INCREASE_NUM_REPLICAS = "MANUALLY_INCREASE_NUM_REPLICAS"
MANUALLY_DECREASE_NUM_REPLICAS = "MANUALLY_DECREASE_NUM_REPLICAS"
REPLICA_STARTUP_FAILED = "REPLICA_STARTUP_FAILED"
HEALTH_CHECK_FAILED = "HEALTH_CHECK_FAILED"
INTERNAL_ERROR = "INTERNAL_ERROR"
DELETE = "DELETE"
# List of states in ranked order.
#
# Each ranked state has the format of a tuple with either 1 or 2 items.
# If 1 item: contains a single DeploymentStatus, representing states with
# that DeploymentStatus and any DeploymentStatusTrigger.
# If 2 items: tuple contains a DeploymentStatus and a DeploymentStatusTrigger,
# representing a state with that status and status trigger.
DEPLOYMENT_STATUS_RANKING_ORDER = {
# Status ranking order is defined in a following fashion:
# 0. (Highest) State signaling a deploy failure.
(DeploymentStatus.DEPLOY_FAILED,): 0,
# 1. State signaling any non-deploy failures in the system.
(DeploymentStatus.UNHEALTHY,): 1,
# 2. States signaling the user updated the configuration.
(DeploymentStatus.UPDATING,): 2,
(DeploymentStatus.UPSCALING, DeploymentStatusTrigger.CONFIG_UPDATE_STARTED): 2,
(
DeploymentStatus.DOWNSCALING,
DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
): 2,
# 3. Steady state or autoscaling.
(DeploymentStatus.UPSCALING, DeploymentStatusTrigger.AUTOSCALING): 3,
(DeploymentStatus.DOWNSCALING, DeploymentStatusTrigger.AUTOSCALING): 3,
(DeploymentStatus.HEALTHY,): 3,
}
@dataclass(eq=True)
| DeploymentStatusInternalTrigger |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 57988,
"end": 58109
} | class ____:
xlX = -4168 # from enum XlErrorBarDirection
xlY = 1 # from enum XlErrorBarDirection
| ErrorBarDirection |
python | kamyu104__LeetCode-Solutions | Python/count-of-range-sum.py | {
"start": 33,
"end": 1528
} | class ____(object):
def countRangeSum(self, nums, lower, upper):
"""
:type nums: List[int]
:type lower: int
:type upper: int
:rtype: int
"""
def countAndMergeSort(sums, start, end, lower, upper):
if end - start <= 1: # The size of range [start, end) less than 2 is always with count 0.
return 0
mid = start + (end - start) / 2
count = countAndMergeSort(sums, start, mid, lower, upper) + \
countAndMergeSort(sums, mid, end, lower, upper)
j, k, r = mid, mid, mid
tmp = []
for i in xrange(start, mid):
# Count the number of range sums that lie in [lower, upper].
while k < end and sums[k] - sums[i] < lower:
k += 1
while j < end and sums[j] - sums[i] <= upper:
j += 1
count += j - k
# Merge the two sorted arrays into tmp.
while r < end and sums[r] < sums[i]:
tmp.append(sums[r])
r += 1
tmp.append(sums[i])
# Copy tmp back to sums.
sums[start:start+len(tmp)] = tmp
return count
sums = [0] * (len(nums) + 1)
for i in xrange(len(nums)):
sums[i + 1] = sums[i] + nums[i]
return countAndMergeSort(sums, 0, len(sums), lower, upper)
# Divide and Conquer solution.
| Solution |
python | jackfrued__Python-100-Days | Day31-35/code/example17.py | {
"start": 563,
"end": 885
} | class ____(SetOnceMappingMixin, dict):
"""่ชๅฎไนๅญๅ
ธ"""
pass
def main():
print(D.mro())
# print(D.__mro__)
D().say_hello()
print(SetOnceDict.__mro__)
my_dict= SetOnceDict()
my_dict['username'] = 'jackfrued'
my_dict['username'] = 'hellokitty'
if __name__ == '__main__':
main()
| SetOnceDict |
python | kamyu104__LeetCode-Solutions | Python/house-robber.py | {
"start": 29,
"end": 330
} | class ____(object):
# @param num, a list of integer
# @return an integer
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
last, now = 0, 0
for i in nums:
last, now = now, max(last + i, now)
return now
| Solution |
python | has2k1__plotnine | plotnine/composition/_beside.py | {
"start": 217,
"end": 1475
} | class ____(Compose):
"""
Place plots or compositions side by side
**Usage**
plot | plot
plot | composition
composition | plot
composition | composition
Typically, you will use this class through the `|` operator.
See Also
--------
plotnine.composition.Stack : To arrange plots vertically
plotnine.composition.Wrap : To arrange plots in a grid
plotnine.composition.plot_spacer : To add a blank space between plots
plotnine.composition.Compose : For more on composing plots
"""
def __or__(self, rhs: ggplot | Compose) -> Compose:
"""
Add rhs as a column
"""
# This is adjacent or i.e. (OR | rhs) so we collapse the
# operands into a single operation
return Beside([*self, rhs]) + self.layout
def __truediv__(self, rhs: ggplot | Compose) -> Compose:
"""
Add rhs as a row
"""
from ._stack import Stack
return Stack([self, rhs])
def __add__(self, rhs):
"""
Add rhs into the besides composition
"""
from plotnine import ggplot
if not isinstance(rhs, (ggplot, Compose)):
return super().__add__(rhs)
return self | rhs
| Beside |
python | django__django | tests/force_insert_update/tests.py | {
"start": 2151,
"end": 2818
} | class ____(TestCase):
def test_force_update_on_inherited_model(self):
a = InheritedCounter(name="count", value=1, tag="spam")
a.save()
a.save(force_update=True)
def test_force_update_on_proxy_model(self):
a = ProxyCounter(name="count", value=1)
a.save()
a.save(force_update=True)
def test_force_update_on_inherited_model_without_fields(self):
"""
Issue 13864: force_update fails on subclassed models, if they don't
specify custom fields.
"""
a = SubCounter(name="count", value=1)
a.save()
a.value = 2
a.save(force_update=True)
| InheritanceTests |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_memusage.py | {
"start": 8434,
"end": 9060
} | class ____(fixtures.ORMTest):
def setup_test(self):
_sessions.clear()
clear_mappers()
# enable query caching, however make the cache small so that
# the tests don't take too long. issues w/ caching include making
# sure sessions don't get stuck inside of it. However it will
# make tests like test_mapper_reset take a long time because mappers
# are very much a part of what's in the cache.
self.engine = engines.testing_engine(
options={"use_reaper": False, "query_cache_size": 10}
)
@testing.add_to_marker.memory_intensive
| EnsureZeroed |
python | readthedocs__readthedocs.org | readthedocs/config/tests/test_validation.py | {
"start": 791,
"end": 1400
} | class ____:
def test_it_accepts_valid_choice(self):
result = validate_choice("choice", ("choice", "another_choice"))
assert result == "choice"
with raises(ConfigValidationError) as excinfo:
validate_choice("c", "abc")
assert excinfo.value.message_id == ConfigValidationError.INVALID_LIST
def test_it_rejects_invalid_choice(self):
with raises(ConfigValidationError) as excinfo:
validate_choice("not-a-choice", ("choice", "another_choice"))
assert excinfo.value.message_id == ConfigValidationError.INVALID_CHOICE
| TestValidateChoice |
python | ethereum__web3.py | tests/core/providers/test_provider_init.py | {
"start": 387,
"end": 1145
} | class ____(BaseProvider):
pass
@pytest.mark.parametrize(
"provider_class",
(
AsyncBaseProvider,
ExtendsAsyncBaseProvider,
AsyncHTTPProvider,
AsyncIPCProvider,
WebSocketProvider,
AsyncEthereumTesterProvider,
),
)
def test_init_web3_with_async_provider(provider_class):
with pytest.raises(Web3ValidationError):
Web3(provider_class())
@pytest.mark.parametrize(
"provider_class",
(
BaseProvider,
ExtendsBaseProvider,
HTTPProvider,
IPCProvider,
EthereumTesterProvider,
),
)
def test_init_async_web3_with_sync_provider(provider_class):
with pytest.raises(Web3ValidationError):
AsyncWeb3(provider_class())
| ExtendsBaseProvider |
python | huggingface__transformers | src/transformers/models/llava_next_video/modular_llava_next_video.py | {
"start": 1331,
"end": 7992
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LlavaNextVideoForConditionalGeneration`]. It is used to instantiate an
Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [llava-hf/LLaVA-NeXT-Video-7B-hf](https://huggingface.co/llava-hf/LLaVA-NeXT-Video-7B-hf)
model.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 32001):
The image token index to encode the image prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function used by the multimodal projector.
multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the multimodal projector.
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`):
A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list
of the form `(height, width)`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
video_token_index (`int`, *optional*, defaults to 32000):
The video token index to encode the image prompt.
spatial_pool_mode (`str`, *optional*, defaults to `"average"`):
Pooling mode to use for videos. Can be "average", "max" or "conv".
spatial_pool_stride (`int`, *optional*, defaults to 2):
Stride used in the pooling layer for videos.
image_seq_length (`int`, *optional*, defaults to 576):
Sequence length of one image embedding.
video_seq_length (`int`, *optional*, defaults to 288):
Sequence length of one video embedding.
Example:
```python
>>> from transformers import LlavaNextVideoForConditionalGeneration, LlavaNextVideoConfig, CLIPVisionConfig, LlamaConfig
>>> # Initializing a CLIP-vision config
>>> vision_config = CLIPVisionConfig()
>>> # Initializing a Llama config
>>> text_config = LlamaConfig()
>>> configuration = LlavaNextVideoConfig(vision_config, text_config)
>>> model = LlavaNextVideoForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llava_next_video"
attribute_map = {
"image_token_id": "image_token_index",
"video_token_id": "video_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
image_token_index=32001,
projector_hidden_act="gelu",
multimodal_projector_bias=True,
vision_feature_select_strategy="default",
vision_feature_layer=-2,
image_grid_pinpoints=None,
tie_word_embeddings=False,
video_token_index=32000,
spatial_pool_mode="average",
spatial_pool_stride=2,
image_seq_length=576,
video_seq_length=288,
**kwargs,
):
self.video_token_index = video_token_index
self.spatial_pool_mode = spatial_pool_mode
self.spatial_pool_stride = spatial_pool_stride
self.image_seq_length = image_seq_length
self.video_seq_length = video_seq_length
self.image_token_index = image_token_index
self.projector_hidden_act = projector_hidden_act
self.multimodal_projector_bias = multimodal_projector_bias
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(
"vision_feature_select_strategy should be one of 'default', 'full'."
f"Got: {vision_feature_select_strategy}"
)
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
image_grid_pinpoints = (
image_grid_pinpoints
if image_grid_pinpoints is not None
else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
)
self.image_grid_pinpoints = image_grid_pinpoints
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "clip_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["clip_vision_model"](
intermediate_size=4096,
hidden_size=1024,
patch_size=14,
image_size=336,
num_hidden_layers=24,
num_attention_heads=16,
vocab_size=32000,
projection_dim=768,
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "llama")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["llama"]()
self.text_config = text_config
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
| LlavaNextVideoConfig |
python | pytorch__pytorch | torch/numa/binding.py | {
"start": 519,
"end": 777
} | class ____(str, Enum):
"""
See behavior description for each affinity mode
in torch.distributed.run.
"""
NODE = "node"
SOCKET = "socket"
EXCLUSIVE = "exclusive"
CORE_COMPLEX = "core-complex"
@dataclass(frozen=True)
| AffinityMode |
python | huggingface__transformers | src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py | {
"start": 30221,
"end": 32663
} | class ____(nn.Module):
"""
Convolutional backbone, using either the AutoBackbone API or one from the timm library.
nn.BatchNorm2d layers are replaced by MMGroundingDinoFrozenBatchNorm2d as defined above.
"""
def __init__(self, config):
super().__init__()
self.config = config
if config.use_timm_backbone:
requires_backends(self, ["timm"])
backbone = create_model(
config.backbone,
pretrained=config.use_pretrained_backbone,
features_only=True,
**config.backbone_kwargs,
)
else:
backbone = load_backbone(config)
# replace batch norm by frozen batch norm
with torch.no_grad():
replace_batch_norm(backbone)
self.model = backbone
self.intermediate_channel_sizes = (
self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels
)
backbone_model_type = None
if config.backbone is not None:
backbone_model_type = config.backbone
elif config.backbone_config is not None:
backbone_model_type = config.backbone_config.model_type
else:
raise ValueError("Either `backbone` or `backbone_config` should be provided in the config")
if "resnet" in backbone_model_type:
for name, parameter in self.model.named_parameters():
if config.use_timm_backbone:
if "layer2" not in name and "layer3" not in name and "layer4" not in name:
parameter.requires_grad_(False)
else:
if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name:
parameter.requires_grad_(False)
def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
# send pixel_values through the model to get list of feature maps
features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps
out = []
for feature_map in features:
# downsample pixel_mask to match shape of corresponding feature_map
mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
out.append((feature_map, mask))
return out
| MMGroundingDinoConvEncoder |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/normalize_op_test.py | {
"start": 1775,
"end": 3489
} | class ____(test_lib.TestCase):
pass
def _GetNormalizeOpTest(dtype_, shape_, ord_, axis_):
@test_util.run_in_graph_and_eager_modes
def Test(self):
is_matrix_norm = (isinstance(axis_, tuple) or
isinstance(axis_, list)) and len(axis_) == 2
is_fancy_p_norm = np.isreal(ord_) and np.floor(ord_) != ord_
if ((not is_matrix_norm and ord_ == "fro") or
(is_matrix_norm and is_fancy_p_norm)):
self.skipTest("Not supported by neither numpy.linalg.norm nor tf.norm")
if ord_ == "euclidean" or (axis_ is None and len(shape) > 2):
self.skipTest("Not supported by numpy.linalg.norm")
matrix = np.random.randn(*shape_).astype(dtype_)
if dtype_ in (np.complex64, np.complex128):
matrix += 1j * np.random.randn(*shape_).astype(dtype_)
tf_np_n, _ = self.evaluate(nn_impl.normalize(matrix, ord_, axis_))
np_n = _Normalize(matrix, ord_, axis_)
self.assertAllClose(tf_np_n, np_n, rtol=1e-5, atol=1e-5)
return Test
# pylint: disable=redefined-builtin
if __name__ == "__main__":
for dtype in np.float32, np.float64, np.complex64, np.complex128:
for rows in 2, 5:
for cols in 2, 5:
for batch in [], [2], [2, 3]:
shape = batch + [rows, cols]
for ord in "euclidean", "fro", 0.5, 1, 2, np.inf:
for axis in [
None, (-2, -1), (-1, -2), -len(shape), 0,
len(shape) - 1
]:
name = "%s_%s_ord_%s_axis_%s" % (dtype.__name__, "_".join(
map(str, shape)), ord, axis)
_AddTest(NormalizeOpTest, "Normalize_" + name,
_GetNormalizeOpTest(dtype, shape, ord, axis))
test_lib.main()
| NormalizeOpTest |
python | ansible__ansible | test/units/module_utils/datatag/test_datatag.py | {
"start": 1767,
"end": 1976
} | class ____(AnsibleSingletonTagBase):
def _get_tag_to_propagate(self, src: t.Any, value: object, *, value_type: t.Optional[type] = None) -> t.Self | None:
return None
| ExampleTagThatPreventsPropagation |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_glue.py | {
"start": 16324,
"end": 22453
} | class ____:
RULE_SET_NAME = "TestRuleSet"
RULE_SET = 'Rules=[ColumnLength "review_id" = 15]'
TARGET_TABLE = {"TableName": "TestTable", "DatabaseName": "TestDB"}
@pytest.fixture
def glue_data_quality_hook(self) -> Generator[GlueDataQualityHook, None, None]:
with mock_aws():
hook = GlueDataQualityHook(aws_conn_id="aws_default")
yield hook
def test_init(self):
self.operator = GlueDataQualityOperator(
task_id="create_data_quality_ruleset", name=self.RULE_SET_NAME, ruleset=self.RULE_SET
)
self.operator.defer = mock.MagicMock()
assert self.operator.name == self.RULE_SET_NAME
assert self.operator.ruleset == self.RULE_SET
@mock.patch.object(GlueDataQualityHook, "conn")
def test_execute_create_rule(self, glue_data_quality_mock_conn):
self.operator = GlueDataQualityOperator(
task_id="create_data_quality_ruleset",
name=self.RULE_SET_NAME,
ruleset=self.RULE_SET,
description="create ruleset",
)
self.operator.defer = mock.MagicMock()
self.operator.execute({})
glue_data_quality_mock_conn.create_data_quality_ruleset.assert_called_once_with(
Description="create ruleset",
Name=self.RULE_SET_NAME,
Ruleset=self.RULE_SET,
)
@mock.patch.object(GlueDataQualityHook, "conn")
def test_execute_create_rule_should_fail_if_rule_already_exists(self, glue_data_quality_mock_conn):
self.operator = GlueDataQualityOperator(
task_id="create_data_quality_ruleset",
name=self.RULE_SET_NAME,
ruleset=self.RULE_SET,
description="create ruleset",
)
self.operator.defer = mock.MagicMock()
error_message = f"Another ruleset with the same name already exists: {self.RULE_SET_NAME}"
err_response = {"Error": {"Code": "AlreadyExistsException", "Message": error_message}}
exception = client("glue").exceptions.ClientError(err_response, "test")
returned_exception = type(exception)
glue_data_quality_mock_conn.exceptions.AlreadyExistsException = returned_exception
glue_data_quality_mock_conn.create_data_quality_ruleset.side_effect = exception
with pytest.raises(AirflowException, match=error_message):
self.operator.execute({})
glue_data_quality_mock_conn.create_data_quality_ruleset.assert_called_once_with(
Description="create ruleset",
Name=self.RULE_SET_NAME,
Ruleset=self.RULE_SET,
)
@mock.patch.object(GlueDataQualityHook, "conn")
def test_execute_update_rule(self, glue_data_quality_mock_conn):
self.operator = GlueDataQualityOperator(
task_id="update_data_quality_ruleset",
name=self.RULE_SET_NAME,
ruleset=self.RULE_SET,
description="update ruleset",
update_rule_set=True,
)
self.operator.defer = mock.MagicMock()
self.operator.execute({})
glue_data_quality_mock_conn.update_data_quality_ruleset.assert_called_once_with(
Description="update ruleset", Name=self.RULE_SET_NAME, Ruleset=self.RULE_SET
)
@mock.patch.object(GlueDataQualityHook, "conn")
def test_execute_update_rule_should_fail_if_rule_not_exists(self, glue_data_quality_mock_conn):
self.operator = GlueDataQualityOperator(
task_id="update_data_quality_ruleset",
name=self.RULE_SET_NAME,
ruleset=self.RULE_SET,
description="update ruleset",
update_rule_set=True,
)
self.operator.defer = mock.MagicMock()
error_message = f"Cannot find Data Quality Ruleset in account 1234567 with name {self.RULE_SET_NAME}"
err_response = {"Error": {"Code": "EntityNotFoundException", "Message": error_message}}
exception = client("glue").exceptions.ClientError(err_response, "test")
returned_exception = type(exception)
glue_data_quality_mock_conn.exceptions.EntityNotFoundException = returned_exception
glue_data_quality_mock_conn.update_data_quality_ruleset.side_effect = exception
with pytest.raises(AirflowException, match=error_message):
self.operator.execute({})
glue_data_quality_mock_conn.update_data_quality_ruleset.assert_called_once_with(
Description="update ruleset", Name=self.RULE_SET_NAME, Ruleset=self.RULE_SET
)
def test_validate_inputs(self):
self.operator = GlueDataQualityOperator(
task_id="create_data_quality_ruleset",
name=self.RULE_SET_NAME,
ruleset=self.RULE_SET,
)
assert self.operator.validate_inputs() is None
def test_validate_inputs_error(self):
self.operator = GlueDataQualityOperator(
task_id="create_data_quality_ruleset",
name=self.RULE_SET_NAME,
ruleset='[ColumnLength "review_id" = 15]',
)
with pytest.raises(AttributeError, match="RuleSet must starts with Rules = \\[ and ends with \\]"):
self.operator.validate_inputs()
def test_template_fields(self):
operator = GlueDataQualityOperator(
task_id="create_data_quality_ruleset", name=self.RULE_SET_NAME, ruleset=self.RULE_SET
)
validate_template_fields(operator)
def test_overwritten_conn_passed_to_hook(self):
OVERWRITTEN_CONN = "new-conn-id"
op = GlueDataQualityOperator(
task_id="test_overwritten_conn_passed_to_hook",
name=self.RULE_SET_NAME,
ruleset=self.RULE_SET,
aws_conn_id=OVERWRITTEN_CONN,
)
assert op.hook.aws_conn_id == OVERWRITTEN_CONN
def test_default_conn_passed_to_hook(self):
DEFAULT_CONN = "aws_default"
op = GlueDataQualityOperator(
task_id="test_default_conn_passed_to_hook", name=self.RULE_SET_NAME, ruleset=self.RULE_SET
)
assert op.hook.aws_conn_id == DEFAULT_CONN
| TestGlueDataQualityOperator |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/unit_tests/test_unresolvable_component.py | {
"start": 246,
"end": 1352
} | class ____(dg.Component, dg.Model):
"""This component class does not subclass Resolvable."""
some_field: str
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions: ...
def test_unresolvable_component():
with create_defs_folder_sandbox() as sandbox:
defs_path = sandbox.scaffold_component(
UnresolvableComponent,
defs_yaml_contents={
"type": "dagster_tests.components_tests.unit_tests.test_unresolvable_component.UnresolvableComponent",
# this component is not resolvable and so cannot have attributes
"attributes": {"some_field": "foo"},
},
)
with pytest.raises(ComponentTreeException) as e:
with sandbox.load_component_and_build_defs(defs_path=defs_path) as (component, defs):
...
# the ComponentTreeException wraps the DagsterInvalidDefinitionError
cause = e.value.__cause__
assert isinstance(cause, DagsterInvalidDefinitionError)
assert "subclass of `Resolvable`" in str(cause)
| UnresolvableComponent |
python | run-llama__llama_index | llama-index-core/llama_index/core/query_engine/pandas/pandas_query_engine.py | {
"start": 404,
"end": 1115
} | class ____:
"""
Pandas query engine.
DEPRECATED: Use `PandasQueryEngine` from `llama-index-experimental` instead.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise DeprecationWarning(
"PandasQueryEngine has been moved to `llama-index-experimental`.\n"
"`pip install llama-index-experimental`\n"
"`from llama_index.experimental.query_engine import PandasQueryEngine`\n"
"Note that the PandasQueryEngine allows for arbitrary code execution, \n"
"and should be used in a secure environment."
)
# legacy
NLPandasQueryEngine = PandasQueryEngine
GPTNLPandasQueryEngine = PandasQueryEngine
| PandasQueryEngine |
python | huggingface__transformers | src/transformers/models/zamba2/modeling_zamba2.py | {
"start": 56525,
"end": 60031
} | class ____(GradientCheckpointingLayer):
def __init__(
self, shared_transformer: Zamba2AttentionDecoderLayer, linear: nn.Linear, mamba: Zamba2MambaDecoderLayer
):
super().__init__()
self.linear = linear
self.mamba_decoder = mamba
self.shared_transformer = shared_transformer
def forward(
self,
hidden_states: torch.Tensor,
original_hidden_states: Optional[torch.Tensor] = None,
layer_idx: Optional[int] = None,
attention_mask: Optional[torch.Tensor] = None,
causal_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Zamba2HybridDynamicCache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
position_embeddings: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with
hidden activations to form the input of the shared transformer layer.
layer_idx (`int`): layer number.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
"""
layer_outputs = self.shared_transformer(
hidden_states,
original_hidden_states=original_hidden_states,
layer_idx=layer_idx,
attention_mask=causal_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
position_embeddings=position_embeddings,
position_ids=position_ids,
)
transformer_hidden_states = layer_outputs[0]
if output_attentions:
self_attn_weights = layer_outputs[1]
transformer_hidden_states = self.linear(transformer_hidden_states)
layer_outputs = self.mamba_decoder(
hidden_states,
transformer_hidden_states=transformer_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
position_embeddings=position_embeddings,
)
if output_attentions:
layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:]
return layer_outputs
| Zamba2HybridLayer |
python | ray-project__ray | python/ray/data/_internal/execution/backpressure_policy/concurrency_cap_backpressure_policy.py | {
"start": 630,
"end": 9866
} | class ____(BackpressurePolicy):
"""A backpressure policy that caps the concurrency of each operator.
This policy dynamically limits the number of concurrent tasks per operator
based on the output queue growth rate.
- Maintain asymmetric EWMA of total enqueued output bytes as the
typical level: `level`.
- Maintain asymmetric EWMA of absolute residual vs the *previous* level as a
scale proxy: `dev = EWMA(|q - level_prev|)`.
- Define deadband: Deadband is the acceptable range of the output queue size
around the typical level where the queue size is expected to stay stable.
deadband [lower, upper] = [level - K_DEV*dev, level + K_DEV*dev].
- If q > upper -> target cap = running - BACKOFF_FACTOR (back off)
If q < lower -> target cap = running + RAMPUP_FACTOR (ramp up)
Else -> target cap = running (hold)
- Apply user-configured max concurrency cap, admit iff running < target cap.
NOTE: Only support setting concurrency cap for `TaskPoolMapOperator` for now.
TODO(chengsu): Consolidate with actor scaling logic of `ActorPoolMapOperator`.
"""
# Smoothing factor for the asymmetric EWMA (slow fall, faster rise).
EWMA_ALPHA = env_float("RAY_DATA_CONCURRENCY_CAP_EWMA_ALPHA", 0.2)
EWMA_ALPHA_UP = 1.0 - (1.0 - EWMA_ALPHA) ** 2 # fast rise
# Deadband width in units of the EWMA absolute deviation estimate.
K_DEV = env_float("RAY_DATA_CONCURRENCY_CAP_K_DEV", 2.0)
# Factor to back off when the queue is too large.
BACKOFF_FACTOR = env_float("RAY_DATA_CONCURRENCY_CAP_BACKOFF_FACTOR", 1)
# Factor to ramp up when the queue is too small.
RAMPUP_FACTOR = env_float("RAY_DATA_CONCURRENCY_CAP_RAMPUP_FACTOR", 1)
# Threshold for per-Op object store budget (available) vs total
# (available / total) ratio to enable dynamic output queue size backpressure.
OBJECT_STORE_BUDGET_RATIO = env_float(
"RAY_DATA_CONCURRENCY_CAP_OBJECT_STORE_BUDGET_RATIO", 0.1
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Configured per-operator caps (+inf if unset).
self._concurrency_caps: Dict["PhysicalOperator", float] = {}
# EWMA state for level
self._q_level_nbytes: Dict["PhysicalOperator", float] = defaultdict(float)
# EWMA state for dev
self._q_level_dev: Dict["PhysicalOperator", float] = defaultdict(float)
# Per-operator cached threshold (bootstrapped from first sample).
self._queue_level_thresholds: Dict["PhysicalOperator", int] = defaultdict(int)
# Last effective cap for change logs.
self._last_effective_caps: Dict["PhysicalOperator", int] = {}
# Initialize caps from operators (infinite if unset)
for op, _ in self._topology.items():
if (
isinstance(op, TaskPoolMapOperator)
and op.get_max_concurrency_limit() is not None
):
self._concurrency_caps[op] = op.get_max_concurrency_limit()
else:
self._concurrency_caps[op] = float("inf")
# Whether to cap the concurrency of an operator based on its and downstream's queue size.
self.enable_dynamic_output_queue_size_backpressure = (
self._data_context.enable_dynamic_output_queue_size_backpressure
)
dynamic_output_queue_size_backpressure_configs = ""
if self.enable_dynamic_output_queue_size_backpressure:
dynamic_output_queue_size_backpressure_configs = (
f", EWMA_ALPHA={self.EWMA_ALPHA}, K_DEV={self.K_DEV}, "
f"BACKOFF_FACTOR={self.BACKOFF_FACTOR}, RAMPUP_FACTOR={self.RAMPUP_FACTOR}, "
f"OBJECT_STORE_BUDGET_RATIO={self.OBJECT_STORE_BUDGET_RATIO}"
)
logger.debug(
f"ConcurrencyCapBackpressurePolicy caps: {self._concurrency_caps}, "
f"enabled: {self.enable_dynamic_output_queue_size_backpressure}{dynamic_output_queue_size_backpressure_configs}"
)
def _update_ewma_asymmetric(self, prev_value: float, sample: float) -> float:
"""
Update EWMA with asymmetric behavior: fast rise, slow fall.
Args:
prev_value: Previous EWMA value
sample: New sample value
Returns:
Updated EWMA value
"""
if prev_value <= 0:
return sample
# fast rise if sample > prev_value, slow fall otherwise
alpha = self.EWMA_ALPHA_UP if sample > prev_value else self.EWMA_ALPHA
return (1 - alpha) * prev_value + alpha * sample
def _update_level_and_dev(self, op: "PhysicalOperator", q_bytes: int) -> None:
"""Update EWMA level and dev (residual w.r.t. previous level)."""
q = float(q_bytes)
level_prev = self._q_level_nbytes[op]
dev_prev = self._q_level_dev[op]
# Deviation vs the previous level
dev_sample = abs(q - level_prev) if level_prev > 0 else 0.0
dev = self._update_ewma_asymmetric(dev_prev, dev_sample)
# Now update the level itself
level = self._update_ewma_asymmetric(level_prev, q)
self._q_level_nbytes[op] = level
self._q_level_dev[op] = dev
# For visibility, store the integer center of the band
self._queue_level_thresholds[op] = max(1, int(level))
def can_add_input(self, op: "PhysicalOperator") -> bool:
"""Return whether `op` may accept another input now."""
num_tasks_running = op.metrics.num_tasks_running
# If not a MapOperator or feature disabled, just enforce configured cap.
if (
not isinstance(op, MapOperator)
or not self.enable_dynamic_output_queue_size_backpressure
):
return num_tasks_running < self._concurrency_caps[op]
# For this Op, if the objectstore budget (available) to total
# ratio is below threshold (10%), skip dynamic output queue size backpressure.
op_usage = self._resource_manager.get_op_usage(op)
op_budget = self._resource_manager.get_budget(op)
if op_usage is not None and op_budget is not None:
total_mem = op_usage.object_store_memory + op_budget.object_store_memory
if total_mem == 0 or (
op_budget.object_store_memory / total_mem
> self.OBJECT_STORE_BUDGET_RATIO
):
# If the objectstore budget (available) to total
# ratio is above threshold (10%), skip dynamic output queue size
# backpressure, but still enforce the configured cap.
return num_tasks_running < self._concurrency_caps[op]
# Current total queued bytes (this op + downstream)
current_queue_size_bytes = (
self._resource_manager.get_op_internal_object_store_usage(op)
+ self._resource_manager.get_op_outputs_object_store_usage_with_downstream(
op
)
)
# Update EWMA state (level & dev) and compute effective cap. Note that
# we don't update the EWMA state if the objectstore budget (available) vs total
# ratio is above threshold (10%), because the level and dev adjusts quickly.
self._update_level_and_dev(op, current_queue_size_bytes)
effective_cap = self._effective_cap(
op, num_tasks_running, current_queue_size_bytes
)
last = self._last_effective_caps.get(op, None)
if last != effective_cap:
logger.debug(
f"Cap change {op.name}: {last if last is not None else 'None'} -> "
f"{effective_cap} (running={num_tasks_running}, queue={current_queue_size_bytes}, "
f"thr={self._queue_level_thresholds[op]})"
)
self._last_effective_caps[op] = effective_cap
return num_tasks_running < effective_cap
def _effective_cap(
self,
op: "PhysicalOperator",
num_tasks_running: int,
current_queue_size_bytes: int,
) -> int:
"""A simple controller around EWMA level.
Args:
op: The operator to compute the effective cap for.
num_tasks_running: The number of tasks currently running.
current_queue_size_bytes: Current total queued bytes for this operator + downstream.
Returns:
The effective cap.
"""
cap_cfg = self._concurrency_caps[op]
level = float(self._q_level_nbytes[op])
dev = max(1.0, float(self._q_level_dev[op]))
upper = level + self.K_DEV * dev
lower = level - self.K_DEV * dev
if current_queue_size_bytes > upper:
# back off
target = num_tasks_running - self.BACKOFF_FACTOR
elif current_queue_size_bytes < lower:
# ramp up
target = num_tasks_running + self.RAMPUP_FACTOR
else:
# hold
target = num_tasks_running
# Clamp to [1, configured_cap]
target = max(1, target)
if not math.isinf(cap_cfg):
target = min(target, int(cap_cfg))
return int(target)
| ConcurrencyCapBackpressurePolicy |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_checks/asset_check_evaluation.py | {
"start": 1837,
"end": 5164
} | class ____(
NamedTuple(
"_AssetCheckEvaluation",
[
("asset_key", AssetKey),
("check_name", str),
("passed", bool),
("metadata", Mapping[str, MetadataValue]),
(
"target_materialization_data",
Optional[AssetCheckEvaluationTargetMaterializationData],
),
("severity", AssetCheckSeverity),
("description", Optional[str]),
("blocking", Optional[bool]),
],
)
):
"""Represents the outcome of a evaluating an asset check.
Args:
asset_key (AssetKey):
The asset key that was checked.
check_name (str):
The name of the check.
passed (bool):
The pass/fail result of the check.
metadata (Optional[Mapping[str, MetadataValue]]):
Arbitrary user-provided metadata about the asset. Keys are displayed string labels, and
values are one of the following: string, float, int, JSON-serializable dict, JSON-serializable
list, and one of the data classes returned by a MetadataValue static method.
target_materialization_data (Optional[AssetCheckEvaluationTargetMaterializationData]):
The latest materialization at execution time of the check.
severity (AssetCheckSeverity):
Severity of the check result.
description (Optional[str]):
A text description of the result of the check evaluation.
blocking (Optional[bool]):
Whether the check is blocking.
"""
def __new__(
cls,
asset_key: AssetKey,
check_name: str,
passed: bool,
metadata: Optional[Mapping[str, RawMetadataValue]] = None,
target_materialization_data: Optional[AssetCheckEvaluationTargetMaterializationData] = None,
severity: AssetCheckSeverity = AssetCheckSeverity.ERROR,
description: Optional[str] = None,
blocking: Optional[bool] = None,
):
normed_metadata = normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str),
)
return super().__new__(
cls,
asset_key=check.inst_param(asset_key, "asset_key", AssetKey),
check_name=check.str_param(check_name, "check_name"),
passed=check.bool_param(passed, "passed"),
metadata=normed_metadata,
target_materialization_data=check.opt_inst_param(
target_materialization_data,
"target_materialization_data",
AssetCheckEvaluationTargetMaterializationData,
),
severity=check.inst_param(severity, "severity", AssetCheckSeverity),
description=check.opt_str_param(description, "description"),
blocking=check.opt_bool_param(blocking, "blocking"),
)
@property
def asset_check_key(self) -> AssetCheckKey:
return AssetCheckKey(self.asset_key, self.check_name)
def with_metadata(self, metadata: Mapping[str, RawMetadataValue]) -> "AssetCheckEvaluation":
normed_metadata = normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str),
)
return self._replace(metadata=normed_metadata)
| AssetCheckEvaluation |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/translate.py | {
"start": 12018,
"end": 17847
} | class ____(GoogleCloudBaseOperator):
"""
Translate large volumes of text content, by the inputs provided.
Wraps the Google cloud Translate Text (Advanced) functionality.
See https://cloud.google.com/translate/docs/advanced/batch-translation
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:TranslateTextBatchOperator`.
:param project_id: Optional. The ID of the Google Cloud project that the
service belongs to. If not specified the hook project_id will be used.
:param location: required. The ID of the Google Cloud location, (non-global) that the
service belongs to.
:param source_language_code: Required. Source language code.
:param target_language_codes: Required. Up to 10 language codes allowed here.
:param input_configs: Required. Input configurations.
The total number of files matched should be <=100. The total content size should be <= 100M Unicode codepoints.
The files must use UTF-8 encoding.
:param models: Optional. The models to use for translation. Map's key is
target language code. Map's value is model name. Value can
be a built-in general model, or an AutoML Translation model.
The value format depends on model type:
- AutoML Translation models:
``projects/{project-number-or-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-number-or-id}/locations/{location-id}/models/general/nmt``
If the map is empty or a specific model is not requested for
a language pair, then the default Google model (NMT) is used.
:param output_config: Required. Output configuration.
:param glossaries: Optional. Glossaries to be applied for translation. It's keyed by target language code.
:param labels: Optional. The labels with user-defined metadata.
See https://cloud.google.com/translate/docs/advanced/labels for more information.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
operator_extra_links = (TranslateTextBatchLink(),)
template_fields: Sequence[str] = (
"input_configs",
"target_language_codes",
"source_language_code",
"models",
"glossaries",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str = PROVIDE_PROJECT_ID,
location: str,
target_language_codes: MutableSequence[str],
source_language_code: str,
input_configs: MutableSequence[InputConfig | dict],
output_config: OutputConfig | dict,
models: str | None = None,
glossaries: MutableMapping[str, TranslateTextGlossaryConfig] | None = None,
labels: MutableMapping[str, str] | None = None,
metadata: Sequence[tuple[str, str]] = (),
timeout: float | _MethodDefault = DEFAULT,
retry: Retry | _MethodDefault | None = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.target_language_codes = target_language_codes
self.source_language_code = source_language_code
self.input_configs = input_configs
self.output_config = output_config
self.models = models
self.glossaries = glossaries
self.labels = labels
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = TranslateHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
translate_operation = hook.batch_translate_text(
project_id=self.project_id,
location=self.location,
target_language_codes=self.target_language_codes,
source_language_code=self.source_language_code,
input_configs=self.input_configs,
output_config=self.output_config,
models=self.models,
glossaries=self.glossaries,
labels=self.labels,
metadata=self.metadata,
timeout=self.timeout,
retry=self.retry,
)
self.log.info("Translate text batch job started.")
TranslateTextBatchLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
output_config=self.output_config,
)
hook.wait_for_operation_result(translate_operation)
self.log.info("Translate text batch job finished")
return {"batch_text_translate_results": self.output_config["gcs_destination"]}
| TranslateTextBatchOperator |
python | ray-project__ray | release/nightly_tests/dask_on_ray/large_scale_test.py | {
"start": 1365,
"end": 3093
} | class ____:
def __init__(
self,
num_workers: int,
worker_obj_store_size_in_gb: int,
trigger_object_spill: bool,
error_rate: float,
):
"""
`batch_size` is the # of Dask graphs sent to the cluster
simultaneously for processing.
One element in the batch represents 1 Dask graph.
The Dask graph involves reading 30 arrays (one is 1.44GB)
and concatenating them into a Dask array.
Then, it does FFT computations across chunks of the Dask array.
It saves the FFT-ed version of the Dask array as an output file.
If `trigger_object_spill` is True, then we send work to
the cluster such that each worker gets the number of graphs
that would exceed the worker memory, triggering object spills.
We use the estimated peak memory consumption to determine
how many graphs should be sent.
If `error_rate` is True, we throw an exception at the Data
load layer as per error rate.
"""
self.error_rate = error_rate
if trigger_object_spill:
num_graphs_per_worker = (
int(
math.floor(
worker_obj_store_size_in_gb / PEAK_MEMORY_CONSUMPTION_IN_GB
)
)
+ 1
)
else:
num_graphs_per_worker = int(
math.floor(worker_obj_store_size_in_gb / PEAK_MEMORY_CONSUMPTION_IN_GB)
)
self.batch_size = num_graphs_per_worker * num_workers
def __str__(self):
return "Error rate = {}, Batch Size = {}".format(
self.error_rate, self.batch_size
)
| TestSpec |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 23256,
"end": 25203
} | class ____(AttributeTemplate):
key = types.NumberClass
def resolve___call__(self, classty):
"""
Resolve a NumPy number class's constructor (e.g. calling numpy.int32(...))
"""
ty = classty.instance_type
def typer(val):
if isinstance(val, (types.BaseTuple, types.Sequence)):
# Array constructor, e.g. np.int32([1, 2])
fnty = self.context.resolve_value_type(np.array)
sig = fnty.get_call_type(self.context, (val, types.DType(ty)),
{})
return sig.return_type
elif isinstance(val, (types.Number, types.Boolean, types.IntEnumMember)):
# Scalar constructor, e.g. np.int32(42)
return ty
elif isinstance(val, (types.NPDatetime, types.NPTimedelta)):
# Constructor cast from datetime-like, e.g.
# > np.int64(np.datetime64("2000-01-01"))
if ty.bitwidth == 64:
return ty
else:
msg = (f"Cannot cast {val} to {ty} as {ty} is not 64 bits "
"wide.")
raise errors.TypingError(msg)
else:
if (isinstance(val, types.Array) and val.ndim == 0 and
val.dtype == ty):
# This is 0d array -> scalar degrading
return ty
else:
# unsupported
msg = f"Casting {val} to {ty} directly is unsupported."
if isinstance(val, types.Array):
# array casts are supported a different way.
msg += f" Try doing '<array>.astype(np.{ty})' instead"
raise errors.TypingError(msg)
return types.Function(make_callable_template(key=ty, typer=typer))
@infer_getattr
| NumberClassAttribute |
python | Pylons__pyramid | src/pyramid/config/predicates.py | {
"start": 2932,
"end": 9079
} | class ____:
def __init__(self):
self.sorter = TopologicalSorter()
self.last_added = None
def add(self, name, factory, weighs_more_than=None, weighs_less_than=None):
# Predicates should be added to a predicate list in (presumed)
# computation expense order.
# if weighs_more_than is None and weighs_less_than is None:
# weighs_more_than = self.last_added or FIRST
# weighs_less_than = LAST
self.last_added = name
self.sorter.add(
name, factory, after=weighs_more_than, before=weighs_less_than
)
def names(self):
# Return the list of valid predicate names.
return self.sorter.names
def make(self, config, **kw):
# Given a configurator and a list of keywords, a predicate list is
# computed. Elsewhere in the code, we evaluate predicates using a
# generator expression. All predicates associated with a view or
# route must evaluate true for the view or route to "match" during a
# request. The fastest predicate should be evaluated first, then the
# next fastest, and so on, as if one returns false, the remainder of
# the predicates won't need to be evaluated.
#
# While we compute predicates, we also compute a predicate hash (aka
# phash) that can be used by a caller to identify identical predicate
# lists.
ordered = self.sorter.sorted()
phash = sha256()
weights = []
preds = []
info = PredicateInfo(
package=config.package,
registry=config.registry,
settings=config.get_settings(),
maybe_dotted=config.maybe_dotted,
)
for n, (name, predicate_factory) in enumerate(ordered):
vals = kw.pop(name, None)
if vals is None: # XXX should this be a sentinel other than None?
continue
if not isinstance(vals, predvalseq):
vals = (vals,)
for val in vals:
realval = val
notted = False
if isinstance(val, not_):
realval = val.value
notted = True
pred = predicate_factory(realval, info)
if notted:
pred = Notted(pred)
hashes = pred.phash()
if not is_nonstr_iter(hashes):
hashes = [hashes]
for h in hashes:
phash.update(bytes_(h))
weights.append(1 << n + 1)
preds.append(pred)
if kw:
from difflib import get_close_matches
closest = []
names = [name for name, _ in ordered]
for name in kw:
closest.extend(get_close_matches(name, names, 3))
raise ConfigurationError(
'Unknown predicate values: %r (did you mean %s)'
% (kw, ','.join(closest))
)
# A "order" is computed for the predicate list. An order is
# a scoring.
#
# Each predicate is associated with a weight value. The weight of a
# predicate symbolizes the relative potential "importance" of the
# predicate to all other predicates. A larger weight indicates
# greater importance.
#
# All weights for a given predicate list are bitwise ORed together
# to create a "score"; this score is then subtracted from
# MAX_ORDER and divided by an integer representing the number of
# predicates+1 to determine the order.
#
# For views, the order represents the ordering in which a "multiview"
# ( a collection of views that share the same context/request/name
# triad but differ in other ways via predicates) will attempt to call
# its set of views. Views with lower orders will be tried first.
# The intent is to a) ensure that views with more predicates are
# always evaluated before views with fewer predicates and b) to
# ensure a stable call ordering of views that share the same number
# of predicates. Views which do not have any predicates get an order
# of MAX_ORDER, meaning that they will be tried very last.
score = 0
for bit in weights:
score = score | bit
order = (MAX_ORDER - score) // (len(preds) + 1)
return order, preds, phash.hexdigest()
def normalize_accept_offer(offer):
return str(Accept.parse_offer(offer))
def sort_accept_offers(offers, order=None):
"""
Sort a list of offers by preference.
For a given ``type/subtype`` category of offers, this algorithm will
always sort offers with params higher than the bare offer.
:param offers: A list of offers to be sorted.
:param order: A weighted list of offers where items closer to the start of
the list will be a preferred over items closer to the end.
:return: A list of offers sorted first by specificity (higher to lower)
then by ``order``.
"""
if order is None:
order = []
max_weight = len(offers)
def find_order_index(value, default=None):
return next((i for i, x in enumerate(order) if x == value), default)
def offer_sort_key(value):
"""
(type_weight, params_weight)
type_weight:
- index of specific ``type/subtype`` in order list
- ``max_weight * 2`` if no match is found
params_weight:
- index of specific ``type/subtype;params`` in order list
- ``max_weight`` if not found
- ``max_weight + 1`` if no params at all
"""
parsed = Accept.parse_offer(value)
type_w = find_order_index(
parsed.type + '/' + parsed.subtype, max_weight
)
if parsed.params:
param_w = find_order_index(value, max_weight)
else:
param_w = max_weight + 1
return (type_w, param_w)
return sorted(offers, key=offer_sort_key)
| PredicateList |
python | ansible__ansible | lib/ansible/module_utils/facts/network/darwin.py | {
"start": 845,
"end": 1853
} | class ____(GenericBsdIfconfigNetwork):
"""
This is the Mac macOS Darwin Network Class.
It uses the GenericBsdIfconfigNetwork unchanged
"""
platform = 'Darwin'
# media line is different to the default FreeBSD one
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
# MacOSX sets the media to '<unknown type>' for bridge interface
# and parsing splits this into two words; this if/else helps
if words[1] == '<unknown' and words[2] == 'type>':
current_if['media_select'] = 'Unknown'
current_if['media_type'] = 'unknown type'
else:
current_if['media_type'] = words[2][1:-1]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
| DarwinNetwork |
python | streamlit__streamlit | lib/tests/streamlit/elements/data_editor_test.py | {
"start": 2036,
"end": 18484
} | class ____(unittest.TestCase):
@parameterized.expand(
[
(None, ColumnDataKind.STRING, None),
("hello", ColumnDataKind.STRING, "hello"),
(123, ColumnDataKind.STRING, "123"),
(123.1234, ColumnDataKind.STRING, "123.1234"),
(None, ColumnDataKind.INTEGER, None),
("123", ColumnDataKind.INTEGER, 123),
(123, ColumnDataKind.INTEGER, 123),
(123.1234, ColumnDataKind.INTEGER, 123),
(None, ColumnDataKind.FLOAT, None),
("123.45", ColumnDataKind.FLOAT, 123.45),
(123.45, ColumnDataKind.FLOAT, 123.45),
(123, ColumnDataKind.FLOAT, 123),
(None, ColumnDataKind.BOOLEAN, None),
(True, ColumnDataKind.BOOLEAN, True),
("true", ColumnDataKind.BOOLEAN, True),
(None, ColumnDataKind.DATETIME, None),
(
"2021-01-01T10:20:30",
ColumnDataKind.DATETIME,
pd.Timestamp(
"2021-01-01T10:20:30",
),
),
(
"2021-01-01",
ColumnDataKind.DATETIME,
pd.Timestamp("2021-01-01T00:00:00"),
),
(
"2021-01-01T10:20:30Z",
ColumnDataKind.DATETIME,
pd.Timestamp("2021-01-01T10:20:30Z"),
),
(
"2021-01-01T10:20:30.123456",
ColumnDataKind.DATETIME,
pd.Timestamp("2021-01-01T10:20:30.123456"),
),
(
"2021-01-01T10:20:30.123456Z",
ColumnDataKind.DATETIME,
pd.Timestamp("2021-01-01T10:20:30.123456Z"),
),
(None, ColumnDataKind.TIME, None),
("10:20:30", ColumnDataKind.TIME, datetime.time(10, 20, 30)),
("10:20:30.123456", ColumnDataKind.TIME, datetime.time(10, 20, 30, 123456)),
(
"2021-01-01T10:20:30.123456Z",
ColumnDataKind.TIME,
datetime.time(10, 20, 30, 123456),
),
(
"1970-01-01T10:20:30.123456Z",
ColumnDataKind.TIME,
datetime.time(10, 20, 30, 123456),
),
(None, ColumnDataKind.DATE, None),
("2021-01-01", ColumnDataKind.DATE, datetime.date(2021, 1, 1)),
(
"2021-01-01T10:20:30.123456Z",
ColumnDataKind.DATE,
datetime.date(2021, 1, 1),
),
(
100000,
ColumnDataKind.TIMEDELTA,
pd.Timedelta(100000),
),
(
[1, 2, 3],
ColumnDataKind.LIST,
[1, 2, 3],
),
(
("1", "2", "3"),
ColumnDataKind.LIST,
["1", "2", "3"],
),
(
"foo",
ColumnDataKind.LIST,
["foo"],
),
(
["foo"],
ColumnDataKind.EMPTY,
["foo"],
),
]
)
def test_parse_value(
self,
value: str | int | float | bool | None,
column_data_kind: ColumnDataKind,
expected: Any,
):
"""Test that _parse_value parses the input to the correct type."""
result = _parse_value(value, column_data_kind)
assert result == expected
def test_apply_cell_edits(self):
"""Test applying cell edits to a DataFrame."""
df = pd.DataFrame(
{
"col1": [1, 2, 3],
"col2": ["a", "b", "c"],
"col3": [True, False, True],
"col4": [
datetime.datetime.now(),
datetime.datetime.now(),
datetime.datetime.now(),
],
"col5": [
Decimal("1.1"),
Decimal("-12.3456"),
Decimal(123456),
],
}
)
edited_rows: Mapping[int, Mapping[str, str | int | float | bool | None]] = {
0: {
"col1": 10,
"col2": "foo",
"col3": False,
"col4": "2020-03-20T14:28:23",
"col5": "2.3",
},
1: {"col2": None},
}
_apply_cell_edits(
df, edited_rows, determine_dataframe_schema(df, _get_arrow_schema(df))
)
assert df.iat[0, 0] == 10
assert df.iat[0, 1] == "foo"
assert df.iat[1, 1] is None
assert not df.iat[0, 2]
assert df.iat[0, 3] == pd.Timestamp("2020-03-20T14:28:23")
assert df.iat[0, 4] == Decimal("2.3")
def test_apply_row_additions(self):
"""Test applying row additions to a DataFrame."""
df = pd.DataFrame(
{
"col1": [1, 2, 3],
"col2": ["a", "b", "c"],
"col3": [True, False, True],
"col4": [
datetime.datetime.now(),
datetime.datetime.now(),
datetime.datetime.now(),
],
"col5": [["x"], ["y"], ["z"]],
}
)
added_rows: list[dict[str, Any]] = [
{
"col1": 10,
"col2": "foo",
"col3": False,
"col4": "2020-03-20T14:28:23",
"col5": ["x", "y"],
},
{
"col1": 11,
"col2": "bar",
"col3": True,
"col4": "2023-03-20T14:28:23",
"col5": ["z"],
},
]
_apply_row_additions(
df, added_rows, determine_dataframe_schema(df, _get_arrow_schema(df))
)
assert len(df) == 5
assert df.loc[3, "col5"] == ["x", "y"]
assert df.loc[4, "col5"] == ["z"]
assert pd.api.types.is_bool_dtype(df["col3"])
def test_apply_row_deletions(self):
"""Test applying row deletions to a DataFrame."""
df = pd.DataFrame(
{
"col1": [1, 2, 3],
"col2": ["a", "b", "c"],
"col3": [True, False, True],
}
)
deleted_rows: list[int] = [0, 2]
_apply_row_deletions(df, deleted_rows)
assert len(df) == 1, f"Only one row should be left, but has {len(df)}."
assert df.iloc[0].to_list() == [2, "b", False]
def test_apply_dataframe_edits(self):
"""Test applying edits to a DataFrame."""
df = pd.DataFrame(
{
"col1": [1, 2, 3],
"col2": ["a", "b", "c"],
"col3": [True, False, True],
}
)
deleted_rows: list[int] = [0, 2]
added_rows: list[dict[str, Any]] = [
{"col1": 10, "col2": "foo", "col3": False},
{"col1": 11, "col2": "bar", "col3": True},
]
edited_rows: dict[int, dict[str, str | int | float | bool | None]] = {
1: {
"col1": 123,
}
}
_apply_dataframe_edits(
df,
{
"deleted_rows": deleted_rows,
"added_rows": added_rows,
"edited_rows": edited_rows,
},
determine_dataframe_schema(df, _get_arrow_schema(df)),
)
assert df.to_dict(orient="list") == {
"col1": [123, 10, 11],
"col2": ["b", "foo", "bar"],
"col3": [False, False, True],
}
def test_apply_dataframe_edits_handles_index_changes(self):
"""Test applying edits to a DataFrame correctly handles index changes.
See: https://github.com/streamlit/streamlit/issues/8854
"""
df = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5],
"B": [10, 20, 30, 40, 50],
}
).set_index("A")
deleted_rows: list[int] = [4]
added_rows: list[dict[str, Any]] = [{"_index": 5, "B": 123}]
edited_rows: dict[int, Any] = {}
_apply_dataframe_edits(
df,
{
"deleted_rows": deleted_rows,
"added_rows": added_rows,
"edited_rows": edited_rows,
},
determine_dataframe_schema(df, _get_arrow_schema(df)),
)
assert df.to_dict(orient="list") == {"B": [10, 20, 30, 40, 123]}
def test_apply_row_additions_range_index(self):
"""Test adding rows to a DataFrame with a RangeIndex."""
df = pd.DataFrame({"col1": [1, 2]}, index=pd.RangeIndex(0, 2, 1))
added_rows: list[dict[str, Any]] = [
{"col1": 10},
{"col1": 11},
]
_apply_row_additions(
df, added_rows, determine_dataframe_schema(df, _get_arrow_schema(df))
)
expected_df = pd.DataFrame(
{"col1": [1, 2, 10, 11]}, index=pd.RangeIndex(0, 4, 1)
)
pd.testing.assert_frame_equal(df, expected_df, check_dtype=False)
def test_apply_row_additions_int_index_non_contiguous(self):
"""Test adding rows to a DataFrame with a non-contiguous integer index."""
df = pd.DataFrame({"col1": [1, 3]}, index=pd.Index([0, 2], dtype="int64"))
added_rows: list[dict[str, Any]] = [
{"col1": 10},
{"col1": 11},
]
_apply_row_additions(
df, added_rows, determine_dataframe_schema(df, _get_arrow_schema(df))
)
expected_df = pd.DataFrame(
{"col1": [1, 3, 10, 11]}, index=pd.Index([0, 2, 3, 4], dtype="int64")
)
pd.testing.assert_frame_equal(df, expected_df, check_dtype=False)
def test_apply_row_additions_empty_df(self):
"""Test adding rows to an empty DataFrame."""
df = pd.DataFrame(
{"col1": pd.Series(dtype="int")}, index=pd.RangeIndex(0, 0, 1)
)
assert df.empty
added_rows: list[dict[str, Any]] = [
{"col1": 10},
{"col1": 11},
]
_apply_row_additions(
df, added_rows, determine_dataframe_schema(df, _get_arrow_schema(df))
)
expected_df = pd.DataFrame({"col1": [10, 11]}, index=pd.RangeIndex(0, 2, 1))
pd.testing.assert_frame_equal(df, expected_df, check_dtype=False)
@patch("streamlit.elements.widgets.data_editor._LOGGER")
def test_apply_row_additions_other_index_no_value_logs_warning(self, mock_logger):
"""Test adding to non-auto-increment index without value logs warning."""
df = pd.DataFrame(
{"col1": [1, 2]},
index=pd.to_datetime(["2023-01-01", "2023-01-02"]),
)
added_rows: list[dict[str, Any]] = [
{"col1": 10}, # No _index provided
]
original_len = len(df)
_apply_row_additions(
df, added_rows, determine_dataframe_schema(df, _get_arrow_schema(df))
)
# Verify row was NOT added
assert len(df) == original_len
# Verify warning was logged
mock_logger.warning.assert_called_once()
assert "Cannot automatically add row" in mock_logger.warning.call_args[0][0]
def test_apply_row_additions_other_index_with_value(self):
"""Test adding to non-auto-increment index with provided value."""
index = pd.to_datetime(["2023-01-01", "2023-01-02"])
df = pd.DataFrame({"col1": [1, 2]}, index=index)
added_rows: list[dict[str, Any]] = [
{"_index": "2023-01-03", "col1": 10},
]
_apply_row_additions(
df, added_rows, determine_dataframe_schema(df, _get_arrow_schema(df))
)
expected_index = pd.to_datetime(["2023-01-01", "2023-01-02", "2023-01-03"])
expected_df = pd.DataFrame({"col1": [1, 2, 10]}, index=expected_index)
pd.testing.assert_frame_equal(df, expected_df, check_dtype=False)
def test_apply_row_additions_range_index_with_value(self):
r"""Test adding row to RangeIndex with explicit _index provided
(should still auto-increment)."""
# This tests the `index_type != \"range\"` condition in the first branch.
df = pd.DataFrame({"col1": [1, 2]}, index=pd.RangeIndex(0, 2, 1))
added_rows: list[dict[str, Any]] = [
{"_index": 99, "col1": 10}, # Provide an index value
]
_apply_row_additions(
df, added_rows, determine_dataframe_schema(df, _get_arrow_schema(df))
)
# Even though _index=99 was provided, it should auto-increment the RangeIndex.
expected_df = pd.DataFrame({"col1": [1, 2, 10]}, index=pd.RangeIndex(0, 3, 1))
pd.testing.assert_frame_equal(df, expected_df, check_dtype=False)
def test_apply_dataframe_edits_delete_and_add_range_index(self):
"""Test applying edits involving deletion and addition on a RangeIndex."""
# Initial DF with RangeIndex
df = pd.DataFrame({"col1": [1, 2, 3, 4]}, index=pd.RangeIndex(0, 4, 1))
# Delete row at index 1 (value 2)
deleted_rows: list[int] = [1]
# Add a new row
added_rows: list[dict[str, Any]] = [
{"col1": 10},
]
# No cell edits for this test
edited_rows: dict[int, Any] = {}
# Expected state after edits:
# - Row 1 (value 2) deleted.
# - Index becomes integer index [0, 2, 3].
# - New row added with index max+1 = 4.
# - Final index: integer index [0, 2, 3, 4]
# - Final values: [1, 3, 4, 10]
expected_df = pd.DataFrame(
{"col1": [1, 3, 4, 10]}, index=pd.Index([0, 2, 3, 4], dtype="int64")
)
_apply_dataframe_edits(
df,
{
"deleted_rows": deleted_rows,
"added_rows": added_rows,
"edited_rows": edited_rows,
},
determine_dataframe_schema(df, _get_arrow_schema(df)),
)
# Check dtypes=False because deletion/addition might change column dtypes
pd.testing.assert_frame_equal(df, expected_df, check_dtype=False)
def test_apply_dataframe_edits_string_index_delete_and_edit(self):
"""Test applying edits with string index: delete last two rows and edit first row index.
Related issue: https://github.com/streamlit/streamlit/pull/11448
"""
# Create DataFrame with 10 rows and string index
df = pd.DataFrame(
{"col1": list(range(10)), "col2": [f"value_{i}" for i in range(10)]},
index=[f"row_{i}" for i in range(10)],
)
# Delete the last two rows (indices 8 and 9)
deleted_rows: list[int] = [8, 9]
# Edit the index value of the first row (row 0)
edited_rows: dict[int, dict[str, str | int | float | bool | None]] = {
0: {
INDEX_IDENTIFIER: "edited_row_0",
}
}
# No row additions for this test
added_rows: list[dict[str, Any]] = []
_apply_dataframe_edits(
df,
{
"deleted_rows": deleted_rows,
"added_rows": added_rows,
"edited_rows": edited_rows,
},
determine_dataframe_schema(df, _get_arrow_schema(df)),
)
# Expected results:
# - Rows 8 and 9 should be deleted (original rows with values 8,9)
# - Index of first row should be changed from "row_0" to "edited_row_0"
# - Should have 8 rows remaining (0-7, with 8-9 deleted)
assert len(df) == 8
# Check that the index was properly edited
assert df.index[0] == "edited_row_0"
# Check that the remaining indices are correct (excluding the edited first one)
expected_remaining_indices = ["edited_row_0"] + [
f"row_{i}" for i in range(1, 8)
]
assert df.index.tolist() == expected_remaining_indices
# Check that the data values are correct
expected_col1_values = list(range(8)) # 0-7, since rows 8-9 were deleted
expected_col2_values = [f"value_{i}" for i in range(8)]
assert df["col1"].tolist() == expected_col1_values
assert df["col2"].tolist() == expected_col2_values
| DataEditorUtilTest |
python | pypa__pipenv | pipenv/vendor/tomlkit/items.py | {
"start": 53299,
"end": 53725
} | class ____(Item):
"""
A null item.
"""
def __init__(self) -> None:
super().__init__(Trivia(trail=""))
def unwrap(self) -> None:
return None
@property
def discriminant(self) -> int:
return -1
@property
def value(self) -> None:
return None
def as_string(self) -> str:
return ""
def _getstate(self, protocol=3) -> tuple:
return ()
| Null |
python | numpy__numpy | numpy/random/tests/test_smoke.py | {
"start": 28380,
"end": 28702
} | class ____(RNG):
@classmethod
def _create_rng(cls):
bit_generator = PCG64DXSM
advance = 2**63 + 2**31 + 2**15 + 1
seed = [12345]
rg = Generator(bit_generator(*seed))
seed_vector_bits = 64
return RNGData(bit_generator, advance, seed, rg, seed_vector_bits)
| TestPCG64DXSM |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/summary_ops/summary_ops_test.py | {
"start": 49243,
"end": 61292
} | class ____(test_util.TensorFlowTestCase):
def tearDown(self):
summary_ops.trace_off()
super().tearDown()
def exec_summary_op(self, summary_op_fn):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer_v2(logdir)
with writer.as_default():
summary_op_fn()
writer.close()
events = events_from_logdir(logdir)
return events[1]
def run_metadata(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer_v2(logdir)
with writer.as_default():
summary_ops.run_metadata(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def run_metadata_graphs(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer_v2(logdir)
with writer.as_default():
summary_ops.run_metadata_graphs(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def create_run_metadata(self):
step_stats = step_stats_pb2.StepStats(dev_stats=[
step_stats_pb2.DeviceStepStats(
device='cpu:0',
node_stats=[step_stats_pb2.NodeExecStats(node_name='hello')])
])
return config_pb2.RunMetadata(
function_graphs=[
config_pb2.RunMetadata.FunctionGraphs(
pre_optimization_graph=graph_pb2.GraphDef(
node=[node_def_pb2.NodeDef(name='foo')]))
],
step_stats=step_stats)
def run_trace(self, f, step=1):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer_v2(logdir)
summary_ops.trace_on(graph=True, profiler=False)
with writer.as_default():
f()
summary_ops.trace_export(name='foo', step=step)
writer.close()
events = events_from_logdir(logdir)
return events[1]
@test_util.run_v2_only
def testRunMetadata_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo', skip_on_eager=False):
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadata_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadata_wholeRunMetadata(self):
expected_run_metadata = """
step_stats {
dev_stats {
device: "cpu:0"
node_stats {
node_name: "hello"
}
}
}
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadata_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testRunMetadataGraph_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo', skip_on_eager=False):
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadataGraph_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata_graph"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_runMetadataFragment(self):
expected_run_metadata = """
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata_graphs(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testTrace(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
event = self.run_trace(f)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
# Content of function_graphs is large and, for instance, device can change.
self.assertTrue(hasattr(actual_run_metadata, 'function_graphs'))
@test_util.run_v2_only
def testTrace_cannotEnableTraceInFunction(self):
@def_function.function
def f():
summary_ops.trace_on(graph=True, profiler=False)
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegex(
str(mock_log.call_args), 'Cannot enable trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotEnableTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_on(graph=True, profiler=False)
self.assertRegex(
str(mock_log.call_args), 'Must enable trace in eager mode.')
@test_util.run_v2_only
def testTrace_cannotExportTraceWithoutTrace(self):
with self.assertRaisesRegex(ValueError, 'Must enable trace before export.'):
summary_ops.trace_export(name='foo', step=1)
@test_util.run_v2_only
def testTrace_cannotExportTraceInFunction(self):
summary_ops.trace_on(graph=True, profiler=False)
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
summary_ops.trace_export(name='foo', step=1)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegex(
str(mock_log.call_args), 'Cannot export trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotExportTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_export(name='foo', step=1)
self.assertRegex(
str(mock_log.call_args),
'Can only export trace while executing eagerly.')
@test_util.run_v2_only
def testTrace_usesDefaultStep(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
try:
summary_ops.set_step(42)
event = self.run_trace(f, step=None)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testTrace_withProfiler(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer_v2(logdir)
profiler_outdir = self.get_temp_dir()
summary_ops.trace_on(
graph=True, profiler=True, profiler_outdir=profiler_outdir
)
with writer.as_default():
f()
summary_ops.trace_export(name='foo', step=1)
writer.close()
@test_util.run_v2_only
def testGraph_graph(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
def summary_op_fn():
summary_ops.graph(f.get_concrete_function().graph)
event = self.exec_summary_op(summary_op_fn)
self.assertIsNotNone(event.graph_def)
@test_util.run_v2_only
def testGraph_graphDef(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
def summary_op_fn():
summary_ops.graph(f.get_concrete_function().graph.as_graph_def())
event = self.exec_summary_op(summary_op_fn)
self.assertIsNotNone(event.graph_def)
@test_util.run_v2_only
def testGraph_invalidData(self):
def summary_op_fn():
summary_ops.graph('hello')
with self.assertRaisesRegex(
ValueError,
r'\'graph_data\' is not tf.Graph or tf.compat.v1.GraphDef',
):
self.exec_summary_op(summary_op_fn)
@test_util.run_v2_only
def testGraph_fromGraphMode(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
@def_function.function
def g(graph):
summary_ops.graph(graph)
def summary_op_fn():
graph_def = f.get_concrete_function().graph.as_graph_def(add_shapes=True)
func_graph = constant_op.constant(graph_def.SerializeToString())
g(func_graph)
with self.assertRaisesRegex(
ValueError,
r'graph\(\) cannot be invoked inside a graph context.',
):
self.exec_summary_op(summary_op_fn)
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
def events_from_multifile_logdir(logdir):
"""Returns map of filename to events for all `tfevents` files in the logdir.
Args:
logdir: The directory from which to load events.
Returns:
A dict mapping from relative filenames to lists of tf.Event protos.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = [file for file in gfile.ListDirectory(logdir) if 'tfevents' in file]
return {file: events_from_file(os.path.join(logdir, file)) for file in files}
def to_numpy(summary_value):
return tensor_util.MakeNdarray(summary_value.tensor)
if __name__ == '__main__':
test.main()
| SummaryOpsTest |
python | google__jax | tests/random_lax_test.py | {
"start": 7986,
"end": 56664
} | class ____(RandomTestBase):
"""
Tests of distribution statistics that need only be run with the default PRNG.
We limit this to the default PRNG to avoid repeated execution of very costly
tests. So long as the input bits are valid (as tested in BasicRandomTest) then
the distribution logic tested here will apply correctly.
"""
@jtu.sample_product(dtype=float_dtypes)
def testNormal(self, dtype):
key = lambda: self.make_key(0)
rand = lambda key: random.normal(key, (10000,), dtype)
crand = jax.jit(rand)
uncompiled_samples = rand(key())
compiled_samples = crand(key())
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.norm().cdf)
def testNormalBfloat16(self):
# Passing bfloat16 as dtype string.
# https://github.com/jax-ml/jax/issues/6813
res_bfloat16_str = random.normal(self.make_key(0), dtype='bfloat16')
res_bfloat16 = random.normal(self.make_key(0), dtype=jnp.bfloat16)
self.assertAllClose(res_bfloat16, res_bfloat16_str)
@jtu.sample_product(dtype=complex_dtypes)
def testNormalComplex(self, dtype):
key = lambda: self.make_key(0)
rand = lambda key: random.normal(key, (10000,), dtype)
crand = jax.jit(rand)
uncompiled_samples = rand(key())
compiled_samples = crand(key())
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(jnp.real(samples), scipy.stats.norm(scale=1/np.sqrt(2)).cdf)
self._CheckKolmogorovSmirnovCDF(jnp.imag(samples), scipy.stats.norm(scale=1/np.sqrt(2)).cdf)
self.assertEqual(dtype, samples.dtype)
@jtu.sample_product(dtype=float_dtypes)
def testTruncatedNormal(self, dtype):
key = lambda: self.make_key(0)
rand = lambda key: random.truncated_normal(key, -0.3, 0.3, (10000,), dtype)
crand = jax.jit(rand)
uncompiled_samples = rand(key())
compiled_samples = crand(key())
min_val = np.min(uncompiled_samples)
max_val = np.max(uncompiled_samples)
self.assertTrue(min_val > -0.3)
self.assertTrue(max_val < 0.3)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.truncnorm(-0.3, 0.3).cdf)
@jtu.sample_product(
  [dict(shape=shape, replace=replace, axis=axis,
        input_range_or_shape=input_range_or_shape)
    for shape in [(), (5,), (4, 5)]
    for replace in [True, False]
    for input_range_or_shape in [100, (10, 10), (10, 5, 2), 1, (1, 5)]
    for is_range in [type(input_range_or_shape) is int]
    for ndim in [1 if is_range else len(input_range_or_shape)]
    for axis in range(-ndim, ndim or 1)
    for ninputs in [input_range_or_shape if is_range else input_range_or_shape[axis]]
    if replace or math.prod(shape) <= ninputs
  ],
  dtype=jtu.dtypes.floating + jtu.dtypes.integer,
  weighted=[True, False],
  mode=[None, 'low', 'high']
)
def testChoice(self, dtype, input_range_or_shape, shape, replace, weighted, axis, mode):
  """random.choice: shape/dtype match np.random.Generator.choice and
  without-replacement draws are unique; results agree between np-array and
  jit/static-int call paths."""
  # This is the function API that we test against (note that self.rng().choice differs)
  np_choice = np.random.default_rng(0).choice
  p_dtype = dtypes.to_inexact_dtype(dtype)
  key = lambda: self.make_key(0)
  is_range = type(input_range_or_shape) is int
  # Either an integer population size, or an array shuffled so values are
  # distinct along every axis (needed for the uniqueness check below).
  x = (input_range_or_shape if is_range else
       self.rng().permutation(np.arange(math.prod(
         input_range_or_shape), dtype=dtype)).reshape(input_range_or_shape))
  N = x if is_range else x.shape[axis]
  if weighted:
    # Non-uniform, strictly positive weights summing to 1.
    p = np.arange(N, dtype=p_dtype) + 1
    p /= p.sum()
  else:
    p = None
  rand = lambda key, x: random.choice(key, x, shape, replace, p, axis, mode=mode)
  sample = rand(key(), x)
  if not is_range:
    self.assertEqual(dtype, sample.dtype)
  # NumPy is only used as a shape oracle here, not for values.
  expected_shape = np.shape(np_choice(x, shape or None, replace, p, axis))
  self.assertEqual(expected_shape, sample.shape)
  expected_dtype = dtypes.result_type(int if is_range else x)
  self.assertEqual(expected_dtype, sample.dtype)
  if not replace and shape:
    # Without replacement: after lexicographic sorting along `axis`, the
    # sample must equal its own unique-ification (no duplicates drawn).
    def lsort(x):
      if not math.prod(x.shape): return x
      ind = np.lexsort(np.swapaxes(x, axis, -1).reshape((-1, x.shape[axis])))
      return jnp.take(x, ind, axis)
    self.assertArraysEqual(lsort(sample), lsort(np.unique(sample, axis=axis)))
  # Same key must give the same sample for np input and for jit (with the
  # integer population passed statically).
  self.assertArraysEqual(sample, rand(key(), np.array(x)))
  self.assertArraysEqual(sample, jax.jit(rand, static_argnames=
    'x' if is_range else None)(key(), x))
@jtu.sample_product(
  [dict(range_or_shape=range_or_shape, axis=axis)
    for range_or_shape in [0, 1, 100, (0,), (1,), (100,),
                           (10, 10), (10, 5, 2), (0, 5), (1, 5)]
    for ndim in [1 if type(range_or_shape) is int else len(range_or_shape)]
    for axis in range(-ndim, ndim or 1)
  ],
  dtype=jtu.dtypes.floating + jtu.dtypes.integer,
  independent=[True, False],
)
def testPermutation(self, dtype, range_or_shape, axis, independent):
  """random.permutation: output is a reshuffling of the input along `axis`,
  the input is not mutated, and independent=True shuffles slices
  independently."""
  key = lambda: self.make_key(0)
  is_range = type(range_or_shape) is int
  # Integer size, or an array of distinct values (so equality checks below
  # are meaningful).
  x = (range_or_shape if is_range else
       self.rng().permutation(np.arange(
         math.prod(range_or_shape), dtype=dtype)).reshape(range_or_shape))
  shape = ((range_or_shape,) if is_range else range_or_shape)
  x_ = np.copy(x)  # kept to verify the input is left untouched
  rand = lambda key, x: random.permutation(key, x, axis, independent=independent)
  perm = rand(key(), x)
  if shape[axis] >= 10:
    self.assertFalse(np.all(perm == x))  # seems unlikely!
  arr = np.arange(x) if is_range else x
  # Lexicographic sort along `axis`; sorting both sides makes the
  # comparison order-insensitive (a permutation has the same sorted form).
  def lsort(x):
    if not math.prod(x.shape): return x
    ind = np.lexsort(np.swapaxes(x, axis, -1).reshape((-1, x.shape[axis])))
    return jnp.take(x, ind, axis)
  if not independent:
    self.assertArraysEqual(lsort(arr), lsort(perm), check_dtypes=not is_range)
  if independent and (arr.shape[axis] > 4) and (arr.size // arr.shape[axis] > 4):
    # Check for independent shuffling if there are >4 vectors of size >4.
    # Chance of false positive is 1 in (5!)^4
    with self.assertRaises(AssertionError):
      self.assertArraysEqual(lsort(arr), lsort(perm), check_dtypes=not is_range)
  self.assertArraysEqual(x_, x)
  # Same key gives the same permutation for np input and under jit.
  self.assertArraysEqual(perm, rand(key(), np.array(x)))
  self.assertArraysEqual(perm, jax.jit(rand, static_argnames=
    'x' if is_range else None)(key(), x))
def testPermutationErrors(self):
  """Invalid axis, non-integer scalar, and traced size must all raise."""
  key = self.make_key(0)
  with self.assertRaises(ValueError):
    random.permutation(key, 10, axis=3)  # axis out of bounds for a scalar size
  with self.assertRaises(TypeError):
    random.permutation(key, 10.)  # float sizes are not allowed
  with self.assertRaises(core.ConcretizationTypeError):
    jax.jit(random.permutation)(key, 10)  # size must be concrete under jit
@jtu.sample_product(
  p=[0.1, 0.5, 0.9],
  dtype=jtu.dtypes.floating,
  mode=[None, 'low', 'high'],
)
def testBernoulli(self, p, dtype, mode):
  """Bernoulli draws (eager and jitted) pass a chi-squared test against
  scipy.stats.bernoulli(p)."""
  key = lambda: self.make_key(0)
  p = np.array(p, dtype=dtype)
  kwds = {} if mode is None else {'mode': mode}  # mode=None means default
  rand = lambda key, p: random.bernoulli(key, p, (10000,), **kwds)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), p)
  compiled_samples = crand(key(), p)
  for samples in [uncompiled_samples, compiled_samples]:
    self._CheckChiSquared(samples, scipy.stats.bernoulli(p).pmf)
@jtu.sample_product(
  [dict(p=p, axis=axis)
    for (p, axis) in [
      ([.25] * 4, -1),
      ([.1, .2, .3, .4], -1),
      ([[.5, .5], [.1, .9]], 1),
      ([[.5, .1], [.5, .9]], 0),
    ]
  ],
  sample_shape=[(10000,), (5000, 2)],
  mode=[None, 'low', 'high'],
  dtype=jtu.dtypes.floating,
)
def testCategorical(self, p, axis, dtype, sample_shape, mode):
  """Categorical draws from (unnormalized) logits pass chi-squared tests
  against the given probability vectors, per distribution when batched."""
  key = lambda: self.make_key(0)
  p = np.array(p, dtype=dtype)
  logits = np.log(p) - 42  # test unnormalized
  out_shape = tuple(np.delete(logits.shape, axis))
  shape = sample_shape + out_shape
  rand = partial(random.categorical, shape=shape, axis=axis, mode=mode)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), logits)
  compiled_samples = crand(key(), logits)
  if axis < 0:
    axis += len(logits.shape)  # normalize axis for the transpose logic below
  for samples in [uncompiled_samples, compiled_samples]:
    assert samples.shape == shape
    # Flatten the sample prefix so each column is one distribution's draws.
    samples = jnp.reshape(samples, (10000,) + out_shape)
    if len(p.shape[:-1]) > 0:
      # Batched case: align probability rows with sample columns.
      ps = np.transpose(p, (1, 0)) if axis == 0 else p
      for cat_samples, cat_p in zip(samples.transpose(), ps):
        # pmf clamps the index so out-of-range samples get probability 0.
        pmf = lambda x: np.where(x < len(cat_p), cat_p[np.minimum(len(cat_p) - 1, x)], 0.0)
        self._CheckChiSquared(cat_samples, pmf=pmf)
    else:
      pmf = lambda x: np.where(x < len(p), p[np.minimum(len(p) - 1, x)], 0.0)
      self._CheckChiSquared(samples, pmf=pmf)
@jtu.sample_product(
  logits_shape=[(7,), (8, 9), (10, 11, 12)],
  prefix_shape=[(2,), (3, 4), (5, 6)],
)
def testCategoricalWithoutReplacement(self, logits_shape, prefix_shape):
  """categorical(replace=False): in-range values, no category repeated per
  distribution, and an error when more samples are requested than
  categories exist."""
  key = random.key(0)
  key, subkey = random.split(key)
  logits = random.normal(subkey, logits_shape)
  key, subkey = random.split(key)
  # Randomly pick which axis holds the categories.
  axis = random.randint(subkey, (), -len(logits_shape), len(logits_shape))
  dists_shape = tuple(np.delete(logits_shape, axis))
  n_categories = logits_shape[axis]
  shape = prefix_shape + dists_shape
  prefix_size = math.prod(prefix_shape)
  if n_categories < prefix_size:
    # Cannot draw more distinct categories than exist.
    with self.assertRaisesRegex(ValueError, "Number of samples without replacement"):
      random.categorical(key, logits, axis=axis, shape=shape, replace=False)
  else:
    output = random.categorical(key, logits, axis=axis, shape=shape, replace=False)
    self.assertEqual(output.shape, shape)
    assert (0 <= output).all()
    assert (output < n_categories).all()
    # Count per-distribution occurrences: each category at most once.
    flat = output.reshape((prefix_size, math.prod(dists_shape)))
    counts = jax.vmap(partial(jnp.bincount, length=n_categories), 1)(flat)
    assert (counts <= 1).all()
def testBernoulliShape(self):
  """`shape` broadcasts against a vector of probabilities."""
  key = self.make_key(0)
  with jax.numpy_rank_promotion('allow'):
    x = random.bernoulli(key, np.array([0.2, 0.3]), shape=(3, 2))
  assert x.shape == (3, 2)
def testBernoulliSmallProbabilty(self):
  """Document mode='low' vs mode='high' accuracy for tiny probabilities.

  Regression test for https://github.com/jax-ml/jax/issues/28017
  """
  key = jax.random.key(0)
  # Choose such that N * p is much less than 1.
  p = jnp.float32(1E-10)
  N = int(1E8)
  # mode='low' fails for p<~1E-7 in float32
  samples = jax.random.bernoulli(key, p=p, shape=N, mode='low')
  # The nonzero sum demonstrates the known low-precision failure mode.
  self.assertNotEqual(samples.sum(), 0)
  # mode='high' is good up to p<~1E-14 in float32
  samples = jax.random.bernoulli(key, p=p, shape=N, mode='high')
  self.assertEqual(samples.sum(), 0)
@jtu.sample_product(
  a=[0.2, 5.],
  b=[0.2, 5.],
  dtype=[np.float64],  # NOTE: KS test fails with float32
)
def testBeta(self, a, b, dtype):
  """Beta draws (eager and jitted) match the scipy Beta(a, b) CDF."""
  if not config.enable_x64.value:
    raise SkipTest("skip test except on X64")  # float64 dtype requires x64 mode
  key = lambda: self.make_key(0)
  rand = lambda key, a, b: random.beta(key, a, b, (10000,), dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), a, b)
  compiled_samples = crand(key(), a, b)
  for samples in [uncompiled_samples, compiled_samples]:
    self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.beta(a, b).cdf)
@jtu.skip_on_devices("tpu")  # TPU precision causes issues.
def testBetaSmallParameters(self, dtype=np.float32):
  """Beta with tiny a, b concentrates all mass at the endpoints 0 and 1.

  Regression test for beta version of https://github.com/jax-ml/jax/issues/9896
  """
  key = self.make_key(0)
  a, b = 0.0001, 0.0002
  samples = random.beta(key, a, b, shape=(100,), dtype=dtype)
  # With such small parameters, all samples should be exactly zero or one.
  tol = 5E-2 if jtu.test_device_matches(["tpu"]) else 1E-3
  zeros = samples[samples < 0.5]
  self.assertAllClose(zeros, jnp.zeros_like(zeros), atol=tol)
  ones = samples[samples >= 0.5]
  self.assertAllClose(ones, jnp.ones_like(ones), atol=tol)
@jtu.sample_product(dtype=float_dtypes)
def testCauchy(self, dtype):
  """Cauchy draws (eager and jitted) must match the scipy Cauchy CDF."""
  fresh_key = lambda: self.make_key(0)
  draw = lambda k: random.cauchy(k, (10000,), dtype)
  draw_jit = jax.jit(draw)
  for batch in [draw(fresh_key()), draw_jit(fresh_key())]:
    self._CheckKolmogorovSmirnovCDF(batch, scipy.stats.cauchy().cdf)
@jtu.sample_product(
  alpha=[np.array([0.2, 1., 5.]),],
  dtype=jtu.dtypes.floating,
)
@jtu.skip_on_devices("tpu")  # TODO(mattjj): slow compilation times
def testDirichlet(self, alpha, dtype):
  """Dirichlet draws sum to 1 and each marginal matches the corresponding
  Beta(a_i, sum(alpha) - a_i) distribution."""
  key = lambda: self.make_key(0)
  num_samples = 10000
  rand = lambda key, alpha: random.dirichlet(key, alpha, (num_samples,), dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), alpha)
  compiled_samples = crand(key(), alpha)
  for samples in [uncompiled_samples, compiled_samples]:
    # Samples must lie on the probability simplex.
    self.assertAllClose(samples.sum(-1), np.ones(num_samples, dtype=dtype))
    alpha_sum = sum(alpha)
    for i, a in enumerate(alpha):
      # Marginal of a Dirichlet component is Beta(a, alpha_sum - a).
      self._CheckKolmogorovSmirnovCDF(samples[..., i],
                                      scipy.stats.beta(a, alpha_sum - a).cdf,
                                      pval=0.003)
@jtu.skip_on_devices("tpu")  # lower accuracy leads to failures.
def testDirichletSmallAlpha(self, dtype=np.float32):
  """Tiny alpha concentrates each Dirichlet draw on a single vertex.

  Regression test for https://github.com/jax-ml/jax/issues/9896
  """
  key = self.make_key(0)
  alpha = 0.00001 * jnp.ones(3)
  samples = random.dirichlet(key, alpha, shape=(100,), dtype=dtype)
  # Check that results lie on the simplex.
  self.assertAllClose(samples.sum(1), jnp.ones(samples.shape[0]),
                      check_dtypes=False, rtol=1E-5)
  # Check that results contain 1 in one of the dimensions:
  # this is highly likely to be true when alpha is small.
  self.assertAllClose(samples.max(1), jnp.ones(samples.shape[0]),
                      check_dtypes=False, rtol=1E-4)
@jtu.sample_product(dtype=float_dtypes)
def testExponential(self, dtype):
  """Exponential draws (eager and jitted) must match the scipy Expon CDF."""
  fresh_key = lambda: self.make_key(0)
  draw = lambda k: random.exponential(k, (10000,), dtype)
  draw_jit = jax.jit(draw)
  for batch in [draw(fresh_key()), draw_jit(fresh_key())]:
    self._CheckKolmogorovSmirnovCDF(batch, scipy.stats.expon().cdf)
@jtu.sample_product(
  a=[0.1, 1., 10.],
  dtype=jtu.dtypes.floating,
)
@jtu.skip_on_devices("tpu")  # low accuracy leads to failures.
def testGammaVsLogGamma(self, a, dtype):
  """gamma(key, a) must equal exp(loggamma(key, a)) for the same key,
  both eagerly and under jit."""
  # Test that gamma() and loggamma() produce equivalent samples.
  rand_gamma = lambda key, a: random.gamma(key, a, (100,), dtype)
  rand_loggamma = lambda key, a: random.loggamma(key, a, (100,), dtype)
  crand_loggamma = jax.jit(rand_loggamma)
  tol = {np.float32: 1E-6, np.float64: 1E-12}
  key = lambda: self.make_key(0)
  self.assertAllClose(rand_gamma(key(), a), jnp.exp(rand_loggamma(key(), a)),
                      atol=tol, rtol=tol)
  self.assertAllClose(rand_gamma(key(), a), jnp.exp(crand_loggamma(key(), a)),
                      atol=tol, rtol=tol)
@jtu.sample_product(
  a=[0.1, 1., 10.],
  dtype=jtu.dtypes.floating,
)
def testGamma(self, a, dtype):
  """Gamma draws (eager and jitted) match the scipy Gamma(a) CDF."""
  key = lambda: self.make_key(1)
  rand = lambda key, a: random.gamma(key, a, (10000,), dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), a)
  compiled_samples = crand(key(), a)
  for samples in [uncompiled_samples, compiled_samples]:
    self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.gamma(a).cdf)
def testGammaShape(self):
  """`shape` broadcasts over a vector of shape parameters."""
  out = random.gamma(self.make_key(0), np.array([0.2, 0.3]), shape=(3, 2))
  assert out.shape == (3, 2)
@jtu.sample_product(
  log_space=[True, False],
  alpha=[1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4],
)
def testGammaGrad(self, log_space, alpha):
  """Implicit-reparameterization gradient of gamma sampling matches the
  finite-difference estimate d(sample)/d(alpha) = -dCDF/dalpha / pdf."""
  rng = lambda: self.make_key(0)
  alphas = np.full((100,), alpha)
  z = random.gamma(rng(), alphas)
  if log_space:
    # Differentiating through exp(loggamma) should agree with gamma's grad.
    actual_grad = jax.grad(lambda x: lax.exp(random.loggamma(rng(), x)).sum())(alphas)
  else:
    actual_grad = jax.grad(lambda x: random.gamma(rng(), x).sum())(alphas)
  # Central finite difference of the CDF in alpha, step scaled to alpha.
  eps = 0.01 * alpha / (1.0 + np.sqrt(alpha))
  cdf_dot = (scipy.stats.gamma.cdf(z, alpha + eps)
             - scipy.stats.gamma.cdf(z, alpha - eps)) / (2 * eps)
  with np.errstate(over='ignore'):
    pdf = scipy.stats.gamma.pdf(z, alpha)
  expected_grad = -cdf_dot / pdf
  rtol = 2e-2 if jtu.test_device_matches(["tpu"]) else 7e-4
  self.assertAllClose(actual_grad, expected_grad, check_dtypes=True,
                      rtol=rtol)
def testGammaGradType(self):
  """vjp through random.gamma must not raise a dtype error.

  Regression test for https://github.com/jax-ml/jax/issues/2130
  """
  key = self.make_key(0)
  a = jnp.array(1., dtype=jnp.float32)
  b = jnp.array(3., dtype=jnp.float32)
  f = lambda x, y: random.gamma(key=key, a=x, dtype=jnp.float32) / y
  # Should not crash with a type error.
  jax.vjp(f, a, b)
@jtu.sample_product(
  lam=[0.5, 3, 9, 11, 50, 500],
  dtype=jtu.dtypes.supported([np.int16, np.int32, np.int64]),
)
def testPoisson(self, lam, dtype):
  """Poisson draws pass a chi-squared test and have mean/variance ≈ lam."""
  key = lambda: self.make_key(0)
  rand = lambda key, lam: random.poisson(key, lam, (10000,), dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), lam)
  compiled_samples = crand(key(), lam)
  for samples in [uncompiled_samples, compiled_samples]:
    self._CheckChiSquared(samples, scipy.stats.poisson(lam).pmf)
    # TODO(shoyer): determine error bounds for moments more rigorously (e.g.,
    # based on the central limit theorem).
    self.assertAllClose(samples.mean(), lam, rtol=0.02, check_dtypes=False)
    self.assertAllClose(samples.var(), lam, rtol=0.03, check_dtypes=False)
def testPoissonBatched(self):
  """A vector of rates yields per-element Poisson distributions."""
  key = self.make_key(1)
  lam = jnp.concatenate([2 * jnp.ones(10000), 20 * jnp.ones(10000)])
  samples = random.poisson(key, lam, shape=(20000,))
  # Each half of the batch follows its own rate.
  self._CheckChiSquared(samples[:10000], scipy.stats.poisson(2.0).pmf)
  self._CheckChiSquared(samples[10000:], scipy.stats.poisson(20.0).pmf)
def testPoissonWithoutShape(self):
  """Omitting `shape` infers the output shape from the rate array."""
  key = self.make_key(1)
  lam = 2 * jnp.ones(10000)
  samples = random.poisson(key, lam)
  self._CheckChiSquared(samples, scipy.stats.poisson(2.0).pmf)
def testPoissonShape(self):
  """`shape` broadcasts across a vector of rates."""
  out = random.poisson(self.make_key(0), np.array([2.0, 20.0]), shape=(3, 2))
  assert out.shape == (3, 2)
def testPoissonZeros(self):
  """A zero rate must always produce zero counts."""
  key = self.make_key(0)
  lam = jnp.concatenate([jnp.zeros(10), 20 * jnp.ones(10)])
  samples = random.poisson(key, lam, shape=(2, 20))
  self.assertArraysEqual(samples[:, :10], jnp.zeros_like(samples[:, :10]))
def testPoissonCornerCases(self):
  """Invalid rates (negative, NaN) map to -1; a zero rate maps to 0."""
  key = self.make_key(0)
  lam = jnp.array([-1, 0, jnp.nan])
  samples = random.poisson(key, lam, shape=(3,))
  self.assertArraysEqual(samples, jnp.array([-1, 0, -1]), check_dtypes=False)
@jtu.sample_product(dtype=jtu.dtypes.floating)
def testGumbel(self, dtype):
  """Gumbel draws (eager and jitted) must match the scipy Gumbel-right CDF."""
  fresh_key = lambda: self.make_key(0)
  draw = lambda k: random.gumbel(k, (10000,), dtype)
  draw_jit = jax.jit(draw)
  for batch in [draw(fresh_key()), draw_jit(fresh_key())]:
    self._CheckKolmogorovSmirnovCDF(batch, scipy.stats.gumbel_r().cdf)
def testLowProbabilityGumbel(self):
  """mode='high' Gumbel sampling resolves tail probabilities near the
  bfloat16 resolution limit."""
  dtype = jnp.bfloat16
  nmant = jnp.finfo(dtype).nmant
  # Tail probabilities at multiples of the mantissa resolution.
  probs = [x * 2 ** -nmant for x in [0.125, 0.75, 1.25, 2.125]]
  num_samples = 1024 * 128
  num_groups = 128
  key = jax.random.key(0)
  def compute_counts(key):
    # Count how many draws exceed the Gumbel quantile of each probability.
    v = jax.random.gumbel(key, (num_samples, 1), dtype=dtype, mode="high")
    thresholds = np.array([[-np.log(-np.log(1 - x)) for x in probs]],
                          dtype=dtype)
    return (v > thresholds).sum(axis=0)
  # lax.map over groups keeps peak memory bounded.
  pts = [float(x) for x in jax.lax.map(
      compute_counts, jax.random.split(key, num_groups)).sum(axis=0)]
  cdf_probs = [x / (num_samples * num_groups) for x in pts]
  np.testing.assert_allclose(cdf_probs, probs, rtol=0.25, atol=0)
@jtu.sample_product(dtype=float_dtypes)
def testLaplace(self, dtype):
  """Laplace draws (eager and jitted) must match the scipy Laplace CDF."""
  fresh_key = lambda: self.make_key(0)
  draw = lambda k: random.laplace(k, (10000,), dtype)
  draw_jit = jax.jit(draw)
  for batch in [draw(fresh_key()), draw_jit(fresh_key())]:
    self._CheckKolmogorovSmirnovCDF(batch, scipy.stats.laplace().cdf)
@jtu.sample_product(dtype=float_dtypes)
def testLogistic(self, dtype):
  """Logistic draws (eager and jitted) must match the scipy Logistic CDF."""
  fresh_key = lambda: self.make_key(0)
  draw = lambda k: random.logistic(k, (10000,), dtype)
  draw_jit = jax.jit(draw)
  for batch in [draw(fresh_key()), draw_jit(fresh_key())]:
    self._CheckKolmogorovSmirnovCDF(batch, scipy.stats.logistic().cdf)
@jtu.sample_product(
  n=range(5),
  shape=[(), (5,), (10, 5)],
  dtype=jtu.dtypes.floating + jtu.dtypes.complex,
  m=list(range(5)) + [None],
)
@jax.default_matmul_precision("float32")
def testOrthogonal(self, n, shape, dtype, m):
  """random.orthogonal returns (semi-)orthogonal n x m matrices: q @ q* = I
  when n <= m and q* @ q = I when n >= m."""
  if m is None:
    m = n  # default: square matrices
  key = self.make_key(0)
  q = random.orthogonal(key, n, shape, dtype, m)
  self.assertEqual(q.shape, (*shape, n, m))
  self.assertEqual(q.dtype, dtype)
  qT = jnp.conj(q).mT  # conjugate transpose for the complex case
  if n <= m:
    I_n = jnp.broadcast_to(jnp.eye(n, dtype=dtype), (*shape, n, n))
    self.assertAllClose(jnp.linalg.matmul(q, qT), I_n, atol={jnp.complex128: 1e-14})
  if n >= m:
    I_m = jnp.broadcast_to(jnp.eye(m, dtype=dtype), (*shape, m, m))
    self.assertAllClose(jnp.linalg.matmul(qT, q), I_m, atol={jnp.complex128: 1e-14})
@jtu.sample_product(
  p=[.5, 1., 1.5, 2., 2.5],
  shape=[(), (5,), (10, 5)],
  dtype=jtu.dtypes.floating,
)
def testGeneralizedNormal(self, p, shape, dtype):
  """generalized_normal returns the requested shape and dtype (eager + jit)."""
  key = lambda: self.make_key(2)
  rand = lambda key, p: random.generalized_normal(key, p, shape, dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), p)
  compiled_samples = crand(key(), p)
  for samples in [uncompiled_samples, compiled_samples]:
    self.assertEqual(samples.shape, shape)
    self.assertEqual(samples.dtype, dtype)
@jtu.sample_product(
  p=[.5, 1., 1.5, 2., 2.5],
  shape=[(), (5,), (10, 5)],
  dtype=jtu.dtypes.floating,
)
def testGeneralizedNormalKS(self, p, shape, dtype):
  """KS test of generalized_normal against scipy's gennorm (currently skipped)."""
  self.skipTest(  # test is also sometimes slow, with (300, ...)-shape draws
      "sensitive to random key - https://github.com/jax-ml/jax/issues/18941")
  key = lambda: self.make_key(2)
  rand = lambda key, p: random.generalized_normal(key, p, (300, *shape), dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), p)
  compiled_samples = crand(key(), p)
  for samples in [uncompiled_samples, compiled_samples]:
    self._CheckKolmogorovSmirnovCDF(samples.ravel(), scipy.stats.gennorm(p).cdf)
@jtu.sample_product(
  d=range(1, 5),
  p=[.5, 1., 1.5, 2., 2.5],
  shape=[(), (5,), (10, 5)],
  dtype=jtu.dtypes.floating,
)
@jtu.skip_on_devices("tpu")  # TPU precision causes issues.
def testBall(self, d, p, shape, dtype):
  """random.ball samples lie inside the unit d-dimensional l_p ball."""
  key = lambda: self.make_key(123)
  rand = lambda key, p: random.ball(key, d, p, shape, dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), p)
  compiled_samples = crand(key(), p)
  for samples in [uncompiled_samples, compiled_samples]:
    self.assertEqual(samples.shape, (*shape, d))
    self.assertEqual(samples.dtype, dtype)
    # l_p norm (to the p-th power) must not exceed 1.
    self.assertTrue(((jnp.abs(samples) ** p).sum(-1) <= 1).all())
@jtu.sample_product(
  d=range(1, 5),
  p=[.5, 1., 1.5, 2., 2.5],
  shape=[(), (5,), (10, 5)],
  dtype=jtu.dtypes.floating,
)
@jtu.skip_on_devices("tpu")  # TPU precision causes issues.
def testBallKS(self, d, p, shape, dtype):
  """KS test: l_p-norm^d of ball samples should be uniform (currently skipped)."""
  self.skipTest(
      "sensitive to random key - https://github.com/jax-ml/jax/issues/18932")
  key = lambda: self.make_key(123)
  rand = lambda key, p: random.ball(key, d, p, (100, *shape), dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), p)
  compiled_samples = crand(key(), p)
  for samples in [uncompiled_samples, compiled_samples]:
    # For uniform draws in the l_p ball, norm^d is uniform on [0, 1].
    norms = (jnp.abs(samples) ** p).sum(-1) ** (d / p)
    self._CheckKolmogorovSmirnovCDF(norms.ravel(), scipy.stats.uniform().cdf)
@jtu.sample_product(
  b=[0.1, 1., 10.],
  dtype=jtu.dtypes.floating,
)
def testPareto(self, b, dtype):
  """Pareto draws (eager and jitted) match the scipy Pareto(b) CDF."""
  key = lambda: self.make_key(0)
  rand = lambda key, b: random.pareto(key, b, (10000,), dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), b)
  compiled_samples = crand(key(), b)
  for samples in [uncompiled_samples, compiled_samples]:
    self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.pareto(b).cdf)
def testParetoShape(self):
  """`shape` broadcasts against a vector of shape parameters."""
  key = self.make_key(0)
  with jax.numpy_rank_promotion('allow'):
    x = random.pareto(key, np.array([0.2, 0.3]), shape=(3, 2))
  assert x.shape == (3, 2)
@jtu.sample_product(
  df=[0.1, 1., 10.],
  dtype=jtu.dtypes.floating,
)
@jtu.skip_on_devices("cpu", "tpu")  # TODO(phawkins): slow compilation times
def testT(self, df, dtype):
  """Student's t draws (eager and jitted) match the scipy t(df) CDF."""
  key = lambda: self.make_key(1)
  rand = lambda key, df: random.t(key, df, (10000,), dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), df)
  compiled_samples = crand(key(), df)
  for samples in [uncompiled_samples, compiled_samples]:
    self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.t(df).cdf)
@jtu.sample_product(
  dim=[1, 3, 5],
  dtype=float_dtypes,
  method=['svd', 'eigh', 'cholesky'],
)
def testMultivariateNormal(self, dim, dtype, method):
  """Whitened multivariate-normal draws follow a standard normal, for each
  covariance factorization method."""
  r = self.rng()
  mean = r.randn(dim)
  cov_factor = r.randn(dim, dim)
  # cov_factor @ cov_factor.T is PSD; adding dim * I makes it well-conditioned.
  cov = np.dot(cov_factor, cov_factor.T) + dim * np.eye(dim)
  key = lambda: self.make_key(0)
  rand = partial(random.multivariate_normal, mean=mean, cov=cov,
                 shape=(10000,), method=method)
  crand = jax.jit(rand)
  with jax.numpy_rank_promotion('allow'):
    uncompiled_samples = np.asarray(rand(key()), np.float64)
    compiled_samples = np.asarray(crand(key()), np.float64)
  # Inverse of the Cholesky factor, used to whiten the samples.
  inv_scale = scipy.linalg.lapack.dtrtri(np.linalg.cholesky(cov), lower=True)[0]
  for samples in [uncompiled_samples, compiled_samples]:
    centered = samples - mean
    whitened = np.einsum('nj,ij->ni', centered, inv_scale)
    # This is a quick-and-dirty multivariate normality check that tests that a
    # uniform mixture of the marginals along the covariance matrix's
    # eigenvectors follow a standard normal distribution.
    self._CheckKolmogorovSmirnovCDF(whitened.ravel(), scipy.stats.norm().cdf)
@jtu.sample_product(
  dim=[1, 2, 4],
  mean_batch_size=[(), (3,), (2, 3)],
  cov_batch_size=[(), (3,), (2, 3)],
  shape=[(), (1,), (5,)],
  method=['cholesky', 'svd', 'eigh'],
)
def testMultivariateNormalShapes(self, dim, mean_batch_size, cov_batch_size,
                                 shape, method):
  """Batched mean/cov broadcast to the expected output shape."""
  r = self.rng()
  key = self.make_key(0)
  # The broader of the two batch shapes dominates the output batch.
  eff_batch_size = mean_batch_size \
    if len(mean_batch_size) > len(cov_batch_size) else cov_batch_size
  mean = r.randn(*(mean_batch_size + (dim,)))
  cov_factor = r.randn(*(cov_batch_size + (dim, dim)))
  cov = np.einsum('...ij,...kj->...ik', cov_factor, cov_factor)
  cov += 1e-3 * np.eye(dim)  # regularize to keep cov positive definite
  shape = shape + eff_batch_size
  with jax.numpy_rank_promotion('allow'):
    samples = random.multivariate_normal(key, mean, cov, shape=shape, method=method)
  assert samples.shape == shape + (dim,)
def testMultivariateNormalCovariance(self):
  """Sample variance and covariance agree with NumPy's sampler.

  # test code based on https://github.com/jax-ml/jax/issues/1869
  """
  N = 100000
  mean = jnp.zeros(4)
  cov = jnp.array([[  0.19, 0.00, -0.13,  0.00],
                   [  0.00, 0.29,  0.00, -0.23],
                   [ -0.13, 0.00,  0.39,  0.00],
                   [  0.00, -0.23, 0.00,  0.49]], dtype=mean.dtype)
  out_np = self.rng().multivariate_normal(mean, cov, N)
  key = self.make_key(0)
  with jax.numpy_rank_promotion('allow'):
    out_jnp = random.multivariate_normal(key, mean=mean, cov=cov, shape=(N,))
  # Per-dimension variance check.
  var_np = out_np.var(axis=0)
  var_jnp = out_jnp.var(axis=0)
  self.assertAllClose(var_np, var_jnp, rtol=1e-2, atol=1e-2,
                      check_dtypes=False)
  # Full empirical covariance check.
  var_np = np.cov(out_np, rowvar=False)
  var_jnp = np.cov(out_jnp, rowvar=False)
  self.assertAllClose(var_np, var_jnp, rtol=1e-2, atol=1e-2,
                      check_dtypes=False)
@jtu.sample_product(method=['cholesky', 'eigh', 'svd'])
@jtu.skip_on_devices('gpu', 'tpu')  # Some NaNs on accelerators.
def testMultivariateNormalSingularCovariance(self, method):
  """Singular covariance: eigh/svd sample the degenerate distribution,
  while cholesky produces NaNs.

  # Singular covariance matrix https://github.com/jax-ml/jax/discussions/13293
  """
  mu = jnp.zeros((2,))
  sigma = jnp.ones((2, 2))  # rank-1: both coordinates perfectly correlated
  key = self.make_key(0)
  result = random.multivariate_normal(key, mean=mu, cov=sigma, shape=(10,), method=method)
  self.assertAllClose(result[:, 0], result[:, 1], atol=1e-3, rtol=1e-3)
  # Cholesky fails for singular inputs.
  if method == 'cholesky':
    self.assertTrue(np.all(np.isnan(result)))
  else:
    self.assertFalse(np.any(np.isnan(result)))
def testIssue222(self):
  """randint over the empty range [0, 0) must return the lower bound."""
  sample = random.randint(self.make_key(10003), (), 0, 0)
  assert sample == 0
def testFoldIn(self):
  """fold_in with distinct data must yield distinct keys."""
  key = self.make_key(0)
  keys = [random.key_data(random.fold_in(key, i)) for i in range(10)]
  assert np.unique(keys, axis=0).shape[0] == 10
def testFoldInBig(self):
  """fold_in distinguishes data values near the uint32 upper limit."""
  key = self.make_key(0)
  seeds = [2 ** 32 - 2, 2 ** 32 - 1]
  keys = [random.key_data(random.fold_in(key, seed)) for seed in seeds]
  assert np.unique(keys, axis=0).shape[0] == 2
def testStaticShapeErrors(self):
  """Traced (non-static) shape arguments under jit raise a TypeError."""
  if config.disable_jit.value:
    raise SkipTest("test only relevant when jit enabled")
  @jax.jit
  def feature_map(n, d, sigma=1.0, seed=123):
    # n and d are traced here, so the shape tuples below are not static.
    key = self.make_key(seed)
    W = random.normal(key, (d, n)) / sigma
    w = random.normal(key, (d, )) / sigma
    b = 2 * jnp.pi * random.uniform(key, (d, ))
    phi = lambda x, t: jnp.sqrt(2.0 / d) * jnp.cos(jnp.matmul(W, x) + w*t + b)
    return phi
  self.assertRaisesRegex(TypeError, 'Shapes must be 1D.*',
                         lambda: feature_map(5, 3))
def testIssue756(self):
  """Scalar normal draws use the configured default float dtype."""
  key = self.make_key(0)
  w = random.normal(key, ())
  self.assertEqual(w.dtype, dtypes.default_float_dtype())
def testIssue1789(self):
  """grad through a vmapped gamma sampler must not crash."""
  def f(x):
    return random.gamma(self.make_key(0), x)
  grad(lambda x: jnp.sum(vmap(f)(x)))(jnp.ones(2))
def testDtypeErrorMessage(self):
  """An integer dtype for random.normal raises a descriptive ValueError."""
  with self.assertRaisesRegex(ValueError, r"dtype argument to.*"):
    random.normal(self.make_key(0), (), dtype=jnp.int32)
def testRandomBroadcast(self):
  """Issue 4033"""
  # test for broadcast issue in https://github.com/jax-ml/jax/issues/4033
  key = lambda: self.make_key(0)
  shape = (10, 2)
  with jax.numpy_rank_promotion('allow'):
    # Vector-valued bounds broadcast against the requested shape.
    x1 = random.uniform(key(), shape, minval=jnp.zeros(2), maxval=jnp.ones(2))
    x2 = random.randint(key(), shape, jnp.array([0, 1]), jnp.array([1, 2]))
  assert x1.shape == shape
  assert x2.shape == shape
def testMaxwellSample(self):
  """Maxwell draws match scipy's mean, std, and CDF (eager and jitted)."""
  num_samples = 10**5
  rng = lambda: self.make_key(0)
  rand = lambda x: random.maxwell(x, (num_samples, ))
  crand = jax.jit(rand)
  loc = jtu.to_default_dtype(scipy.stats.maxwell.mean())
  std = jtu.to_default_dtype(scipy.stats.maxwell.std())
  uncompiled_samples = rand(rng())
  compiled_samples = crand(rng())
  for samples in [uncompiled_samples, compiled_samples]:
    # Check first and second moments.
    self.assertEqual((num_samples,), samples.shape)
    self.assertAllClose(np.mean(samples), loc, atol=0., rtol=0.1)
    self.assertAllClose(np.std(samples), std, atol=0., rtol=0.1)
    self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.maxwell().cdf)
@parameterized.named_parameters(
    ('test1', 4.0, 1.0),
    ('test2', 2.0, 3.0))
def testWeibullSample(self, concentration, scale):
  """Weibull-min draws match scipy's mean, std, and CDF (eager and jitted)."""
  num_samples = 10**5
  rng = lambda: self.make_key(0)
  rand = lambda x: random.weibull_min(x, scale, concentration, (num_samples,))
  crand = jax.jit(rand)
  loc = jtu.to_default_dtype(scipy.stats.weibull_min.mean(c=concentration, scale=scale))
  std = jtu.to_default_dtype(scipy.stats.weibull_min.std(c=concentration, scale=scale))
  uncompiled_samples = rand(rng())
  compiled_samples = crand(rng())
  for samples in [uncompiled_samples, compiled_samples]:
    # Check first and second moments.
    self.assertEqual((num_samples,), samples.shape)
    self.assertAllClose(np.mean(samples), loc, atol=0., rtol=0.1)
    self.assertAllClose(np.std(samples), std, atol=0., rtol=0.1)
    self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.weibull_min(
        c=concentration, scale=scale).cdf)
@parameterized.named_parameters(
    ('test1', 4.0, 1.0),
    ('test2', 2.0, 3.0))
def testDoublesidedMaxwellSample(self, loc, scale):
  """Double-sided Maxwell draws match the analytically derived moments and
  CDF, both eagerly and under jit."""
  num_samples = 10**4
  rng = lambda: self.make_key(0)
  # Use the key argument rather than re-deriving it from the closure: the
  # original `lambda key: ... rng() ...` ignored its parameter, so the
  # jitted version traced a baked-in constant key instead of the operand.
  # (Same key value, so the sampled statistics are unchanged.)
  rand = lambda key: random.double_sided_maxwell(
      key, loc, scale, (num_samples,))
  crand = jax.jit(rand)
  mean = loc
  std = np.sqrt(3.) * scale  # one-sided Maxwell has variance 3 pre-scaling
  uncompiled_samples = rand(rng())
  compiled_samples = crand(rng())
  # Compute the double sided maxwell CDF through the one sided maxwell cdf.
  # This is done as follows:
  # P(DSM <= x) = P (loc + scale * rademacher_sample * one_sided_sample <=x) =
  # P (rademacher_sample * one_sided_sample <= (x - loc) / scale) =
  # 1/2 P(one_sided_sample <= (x - loc) / scale)
  #    + 1/2 P( - one_sided_sample <= (x - loc) / scale) =
  #  1/2 P(one_sided_sample <= (x - loc) / scale)
  #    + 1/2 P(one_sided_sample >= - (x - loc) / scale) =
  # 1/2 CDF_one_maxwell((x - loc) / scale))
  #   + 1/2 (1 - CDF_one_maxwell(- (x - loc) / scale)))
  def double_sided_maxwell_cdf(x, loc, scale):
    pos = scipy.stats.maxwell().cdf((x - loc) / scale)
    neg = (1 - scipy.stats.maxwell().cdf((-x + loc) / scale))
    return (pos + neg) / 2
  for samples in [uncompiled_samples, compiled_samples]:
    # Check first and second moments.
    self.assertEqual((num_samples,), samples.shape)
    self.assertAllClose(samples.mean(), jtu.to_default_dtype(mean), atol=0., rtol=0.1)
    self.assertAllClose(samples.std(), jtu.to_default_dtype(std), atol=0., rtol=0.1)
    self._CheckKolmogorovSmirnovCDF(
        samples, lambda x: double_sided_maxwell_cdf(x, loc, scale))
def testRadamacher(self):
  """Rademacher draws take exactly two values with ~equal frequency.

  (The method name's "Radamacher" spelling is historical; renaming would
  change the test id.)
  """
  rng = lambda: self.make_key(0)
  num_samples = 10**5
  rand = lambda x: random.rademacher(x, (num_samples,))
  crand = jax.jit(rand)
  uncompiled_samples = rand(rng())
  compiled_samples = crand(rng())
  for samples in [uncompiled_samples, compiled_samples]:
    unique_values, counts = np.unique(samples, return_counts=True)
    assert len(unique_values) == 2
    assert len(counts) == 2
    # Each of the two values should occur about half the time.
    self.assertAllClose(
        counts[0] / num_samples, 0.5, rtol=1e-02, atol=1e-02)
    self.assertAllClose(
        counts[1] / num_samples, 0.5, rtol=1e-02, atol=1e-02)
def testChoiceShapeIsNotSequenceError(self):
  """Passing a bare int as `shape` to random.choice raises TypeError."""
  key = self.make_key(0)
  with self.assertRaises(TypeError):
    random.choice(key, 5, 2, replace=False)
  with self.assertRaises(TypeError):
    random.choice(key, 5, 2, replace=True)
@jtu.sample_product(dtype=int_dtypes + uint_dtypes)
def test_randint_bounds(self, dtype):
  """Out-of-range bounds are clipped for narrow dtypes, but raise
  OverflowError for dtypes as wide as the default int."""
  min = np.iinfo(dtype).min
  max = np.iinfo(dtype).max
  key = lambda: self.make_key(1701)
  shape = (10,)
  if np.iinfo(dtype).bits < np.iinfo(dtypes.default_int_dtype()).bits:
    # Bounds wider than the dtype are silently clipped to its range.
    expected = random.randint(key(), shape, min, max + 1, dtype)
    self.assertArraysEqual(expected, random.randint(key(), shape, min - 12345, max + 12345, dtype))
  else:
    # Bounds cannot be represented at all, so the call must fail.
    self.assertRaises(OverflowError, random.randint, key(), shape, min - 12345, max + 12345, dtype)
def test_randint_out_of_range(self):
  """randint clamps out-of-range bounds to the dtype's representable range."""
  key = self.make_key(0)
  # Range [255, 256) on uint8 collapses to the single value 255.
  r = random.randint(key, (10,), 255, 256, np.uint8)
  self.assertAllClose(r, jnp.full_like(r, 255))
  key = self.make_key(0)
  # Full int8 range: both endpoints should be hit in 1000 draws.
  r = random.randint(key, (1000,), -128, 128, np.int8)
  self.assertGreater((r == -128).sum(), 0)
  self.assertGreater((r == 127).sum(), 0)
  key = self.make_key(0)
  # Bounds far outside uint8 clamp to [0, 255].
  r = random.randint(key, (1000,), -1000, 1000, np.uint8)
  self.assertGreater((r == 0).sum(), 0)
  self.assertGreater((r == 255).sum(), 0)
@jtu.sample_product(shape=[(3, 4)],
                    logits_shape_base=[(3, 4), (3, 1), (1, 4)],
                    axis=[-3, -2, -1, 0, 1, 2])
def test_categorical_shape_argument(self, shape, logits_shape_base, axis):
  """`shape` is honored for every category-axis placement.

  # https://github.com/jax-ml/jax/issues/13124
  """
  logits_shape = list(logits_shape_base)
  # Insert a 10-category dimension at the position named by `axis`.
  logits_shape.insert(axis % (len(logits_shape_base) + 1), 10)
  assert logits_shape[axis] == 10
  logits = jnp.ones(logits_shape)
  samples = random.categorical(self.make_key(0), logits=logits,
                               axis=axis, shape=shape)
  self.assertEqual(samples.shape, shape)
@jtu.sample_product(
  df = [0.2, 1., 10., 100.],
  dtype=jtu.dtypes.floating)
def testChisquare(self, df, dtype):
  """Chi-square draws (eager and jitted) match the scipy chi2(df) CDF."""
  key = lambda: self.make_key(1)
  def rand(key, df):
    return random.chisquare(key, df, shape=(10000,), dtype=dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key(), df)
  compiled_samples = crand(key(), df)
  for samples in [uncompiled_samples, compiled_samples]:
    self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.chi2(df).cdf)
@jtu.sample_product(
  dfnum = [1., 2., 10. ,100.],
  dfden = [1. ,2., 10., 100.],
  dtype=jtu.dtypes.floating)
def testF(self, dfnum, dfden, dtype):
  """F-distribution draws (eager and jitted) match scipy's F CDF."""
  key = lambda: self.make_key(9)
  rand = lambda key: random.f(key, dfnum, dfden, shape = (10000, ), dtype = dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key())
  compiled_samples = crand(key())
  for samples in [uncompiled_samples, compiled_samples]:
    self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.f(dfnum, dfden).cdf)
@jtu.sample_product(
  scale= [0.2, 1., 2., 10. ,100.],
  dtype=jtu.dtypes.floating)
def testRayleigh(self, scale, dtype):
  """Rayleigh draws (eager and jitted) match scipy's Rayleigh CDF."""
  key = lambda: self.make_key(0)
  rand = lambda key: random.rayleigh(key, scale, shape = (10000, ), dtype = dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key())
  compiled_samples = crand(key())
  for samples in [uncompiled_samples, compiled_samples]:
    self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.rayleigh(scale=scale).cdf)
@jtu.sample_product(
  mean= [0.2, 1., 2., 10. ,100.],
  dtype=jtu.dtypes.floating)
def testWald(self, mean, dtype):
  """Wald (inverse Gaussian) draws match scipy's invgauss CDF."""
  key = lambda: self.make_key(0)
  rand = lambda key: random.wald(key, mean, shape=(10000, ), dtype=dtype)
  crand = jax.jit(rand)
  uncompiled_samples = rand(key())
  compiled_samples = crand(key())
  for samples in [uncompiled_samples, compiled_samples]:
    self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.invgauss(mu=mean).cdf)
@jtu.sample_product(
    p=[0.2, 0.3, 0.4, 0.5, 0.6],
    dtype=jtu.dtypes.supported([np.int16, np.int32, np.int64]))
def testGeometric(self, p, dtype):
  """Geometric samples: chi-squared GOF plus mean/variance sanity checks."""
  def draw(key):
    return random.geometric(key, p, shape=(10000,), dtype=dtype)

  draw_jit = jax.jit(draw)
  for samples in (draw(self.make_key(1)), draw_jit(self.make_key(1))):
    self._CheckChiSquared(samples, scipy.stats.geom(p).pmf)
    # Geometric moments: mean 1/p, variance (1 - p) / p**2.
    self.assertAllClose(samples.mean(), 1 / p, rtol=0.02, check_dtypes=False)
    self.assertAllClose(samples.var(), (1 - p) / (p * p), rtol=0.05,
                        check_dtypes=False)
@jtu.sample_product(
    left=[0.2, 0.5, 1., 2.],
    mode=[3., 5., 8., 9.],
    right=[10., 20., 30., 40.],
    dtype=jtu.dtypes.floating)
def testTriangular(self, left, mode, right, dtype):
  """Triangular samples match scipy's triang CDF, eager and under jit."""
  def draw(key):
    return random.triangular(key, left, mode, right, shape=(10000,),
                             dtype=dtype)

  draw_jit = jax.jit(draw)
  # scipy parameterizes triang by c = (mode - left) / (right - left), with
  # loc/scale mapping the unit interval back onto [left, right].
  reference_cdf = scipy.stats.triang(
      (mode - left) / (right - left), loc=left, scale=right - left).cdf
  for samples in (draw(self.make_key(1)), draw_jit(self.make_key(1))):
    self._CheckKolmogorovSmirnovCDF(samples, reference_cdf)
@jtu.sample_product(
    sigma=[0.2, 0.5, 1., 2.],
    dtype=jtu.dtypes.floating)
def testLogNormal(self, sigma, dtype):
  """Log-normal samples match scipy's lognorm CDF, eager and under jit."""
  def draw(key):
    return random.lognormal(key, sigma, shape=(10000,), dtype=dtype)

  draw_jit = jax.jit(draw)
  reference_cdf = scipy.stats.lognorm(s=sigma).cdf
  for samples in (draw(self.make_key(0)), draw_jit(self.make_key(0))):
    self._CheckKolmogorovSmirnovCDF(samples, reference_cdf)
@jtu.sample_product(
    n=[5, 13, 21, 53, 500],
    p=[0.1, 0.3, 0.5, 0.7, 0.9],
    dtype=jtu.dtypes.floating)
def testBinomialSample(self, n, p, dtype):
  """Binomial samples: chi-squared GOF plus mean/variance sanity checks."""
  def draw(key):
    return random.binomial(key, n, p, shape=(12000,), dtype=dtype)

  draw_jit = jax.jit(draw)
  reference_pmf = scipy.stats.binom(n, p).pmf
  for samples in (draw(self.make_key(12)), draw_jit(self.make_key(12))):
    self._CheckChiSquared(samples.astype(int), reference_pmf, pval=1e-3)
    # Binomial moments: mean n*p, variance n*p*(1 - p).
    self.assertAllClose(samples.mean(), n * p, rtol=0.025, check_dtypes=False)
    self.assertAllClose(samples.var(), n * p * (1 - p), rtol=0.036,
                        check_dtypes=False)
def testBinomialCornerCases(self):
  """Binomial sampling with out-of-domain n or p yields NaN (or 0/inf)."""
  key = lambda: self.make_key(0)
  # corner case n
  n = jnp.array([-1, 0, jnp.nan, jnp.inf])
  samples1 = random.binomial(key(), n, 0.5, shape=(4,))
  # corner case p
  p = jnp.array([jnp.nan, 0, -0.1, 1.1])
  samples2 = random.binomial(key(), 5, p, shape=(4,))
  # corner case n and p
  # expect nan or illegal will lead to nan
  n_cc = jnp.array([jnp.inf, -1, jnp.inf])
  p_cc = jnp.array([jnp.nan, jnp.nan, -0.1])
  samples3 = random.binomial(key(), n_cc, p_cc, shape=(3,))
  # n = -1 -> nan; n = 0 -> 0 successes; nan n -> nan; n = inf, valid p -> inf.
  self.assertArraysAllClose(samples1, jnp.array([jnp.nan, 0., jnp.nan, jnp.inf]), check_dtypes=False)
  # nan p -> nan; p = 0 -> 0 successes; p outside [0, 1] -> nan.
  self.assertArraysAllClose(samples2, jnp.array([jnp.nan, 0., jnp.nan, jnp.nan]), check_dtypes=False)
  # Any illegal n or p combination propagates to nan.
  self.assertArraysAllClose(samples3, jnp.array([jnp.nan, jnp.nan, jnp.nan]), check_dtypes=False)
def test_binomial_dtypes(self):
  """Binomial sampling with float16 n and p must complete without error."""
  # Regression test for https://github.com/jax-ml/jax/pull/25688#discussion_r1938010569
  key = jax.random.key(0)
  n = jax.numpy.float16(100)
  p = jax.numpy.float16(0.5)
  jax.random.binomial(key, n, p)  # doesn't error
def testMultinomialExample(self):
  """Empirical multinomial frequencies approach the given probabilities."""
  key = random.key(0)
  probs = jnp.array([
      [0.5, 0.2, 0.3],
      [0.1, 0.2, 0.7],
      [1.0, 0.0, 0.0],
      [0.0, 1.0, 0.0],
      [0.0, 0.0, 1.0],
      [0.5, 0.0, 0.5],
  ])
  num_trials = 1e5
  counts = random.multinomial(key, num_trials, probs)
  empirical_freqs = counts / num_trials
  self.assertAllClose(empirical_freqs, probs, atol=1e-2)
@jtu.sample_product(
    categories=[1, 2, 3, 5, 7, 11],
    trials=[1, 2, 3, 5, 7, 11],
    dtype=[jnp.float32],
)
def testMultinomialNumpy(
    self,
    categories,
    trials,
    dtype,
    test_samples=10**6,
    tolerance=1e-1,
):
  """Compares JAX multinomial samples to NumPy's via energy distance."""
  # Non-uniform probabilities: squared reversed linspace over [-1, 2],
  # normalized to sum to 1.
  probs = jnp.linspace(-1, 2, categories)[::-1] ** 2
  probs /= probs.sum(-1, keepdims=True)
  rng = np.random.default_rng(0)
  # NumPy reference draws with the same trials/probs, one row per sample.
  counts_numpy = jnp.array(rng.multinomial(trials, probs, size=test_samples), dtype)
  shape = (test_samples,) + probs.shape
  key = random.key(0)
  counts_jax = random.multinomial(key, trials, probs, shape=shape, dtype=dtype)
  assert counts_jax.shape == shape
  # A small energy distance indicates the two batches come from similar
  # distributions; see get_energy_distance below.
  energy_distance = get_energy_distance(counts_numpy, counts_jax)
  assert energy_distance < tolerance
@jtu.sample_product([
    dict(shape=shape, outcomes=outcomes)
    for shape in [(5,), (2, 3), (2, 3, 5)]
    for outcomes in [2, 3, 4]
])
def testMultinomialShape(self, shape, outcomes):
  """Broadcasting probs over leading batch dims preserves frequencies."""
  key = random.key(0)
  key, subkey = random.split(key)
  probs = random.dirichlet(subkey, jnp.ones(outcomes))
  num_trials = 1e5
  counts = random.multinomial(key, num_trials, probs,
                              shape=(*shape, *probs.shape))
  empirical_freqs = counts / num_trials
  expected = jnp.broadcast_to(probs, empirical_freqs.shape)
  self.assertAllClose(empirical_freqs, expected, atol=1e-2)
@jtu.sample_product([
    dict(n_dtype=n_dtype, p_dtype=p_dtype, dtype=dtype)
    for n_dtype in jtu.dtypes.all_floating
    for p_dtype in jtu.dtypes.all_floating
    for dtype in jtu.dtypes.all_floating
])
@jax.numpy_dtype_promotion('standard')
def testMultinomialDtype(self, n_dtype, p_dtype, dtype):
  """multinomial accepts any floating dtypes for n and p without error."""
  key = random.key(0)
  trial_count = jnp.astype(10, n_dtype)
  probabilities = jnp.astype(jnp.ones(3) / 3, p_dtype)
  random.multinomial(key, trial_count, probabilities)
def test_batched_key_errors(self):
  """Single-key samplers reject batched key arrays; key utilities do not."""
  keys = lambda: jax.random.split(self.make_key(0))
  msg = "{} accepts a single key, but was given a key array of shape.*"
  # Check a handful of functions that are expected to error.
  with self.assertRaisesRegex(ValueError, msg.format('bits')):
    jax.random.bits(keys(), shape=(2,))
  with self.assertRaisesRegex(ValueError, msg.format('chisquare')):
    jax.random.chisquare(keys(), 1.0, shape=(2,))
  with self.assertRaisesRegex(ValueError, msg.format('dirichlet')):
    jax.random.dirichlet(keys(), jnp.arange(2.0), shape=(2,))
  with self.assertRaisesRegex(ValueError, msg.format('gamma')):
    jax.random.gamma(keys(), 1.0, shape=(2,))
  with self.assertRaisesRegex(ValueError, msg.format('loggamma')):
    jax.random.loggamma(keys(), 1.0, shape=(2,))
  with self.assertRaisesRegex(ValueError, msg.format('fold_in')):
    jax.random.fold_in(keys(), 0)
  with self.assertRaisesRegex(ValueError, msg.format('split')):
    jax.random.split(keys())
  # Shouldn't error or warn:
  with self.assertNoWarnings():
    jax.random.key_data(keys())
    jax.random.key_impl(keys())
@jtu.sample_product(
    dtype=['int8', 'uint8', 'int16', 'uint16']
)
def test_randint_narrow_int_bias(self, dtype):
  """randint with narrow integer dtypes is unbiased across bins.

  Regression test for https://github.com/jax-ml/jax/issues/27702
  """
  rng_key = self.make_key(7534892)
  num_samples = 100_000
  num_bins = 100
  draws = jax.random.randint(rng_key, (num_samples,), 0, num_bins, dtype=dtype)
  # Under uniformity, per-bin counts are approximately Poisson-distributed
  # around the average count per bin.
  bin_counts = jnp.bincount(draws, length=num_bins).astype(float)
  self._CheckKolmogorovSmirnovCDF(
      bin_counts, scipy.stats.poisson(num_samples / num_bins).cdf)
def get_energy_distance(samples_1, samples_2):
  """Estimate the energy distance between two distributions.

  Each argument is a batch of independent samples; each batch is halved to
  form the two independent copies the estimator requires.
  For more information, see https://en.wikipedia.org/wiki/Energy_distance.
  """
  x, x_prime = jnp.split(samples_1, 2)
  y, y_prime = jnp.split(samples_2, 2)
  cross_term = jnp.linalg.norm(x - y, axis=-1)
  within_first = jnp.linalg.norm(x - x_prime, axis=-1)
  within_second = jnp.linalg.norm(y - y_prime, axis=-1)
  return (2 * cross_term - within_first - within_second).mean(0)
# Aliases for the single-threefry PRNG primitives, used by the
# _double_threefry_* helpers below.
threefry_seed = prng_internal.threefry_seed
threefry_split = prng_internal.threefry_split
threefry_random_bits = prng_internal.threefry_random_bits
threefry_fold_in = prng_internal.threefry_fold_in
def _double_threefry_seed(seed):
  """Build a stacked pair of threefry keys from `seed` and `seed ^ 3`."""
  # Works for both array seeds (use their dtype) and Python ints.
  int_t = seed.dtype.type if hasattr(seed, 'dtype') else type(seed)
  first = threefry_seed(seed)
  second = threefry_seed(seed ^ int_t(3))
  return jnp.vstack([first, second])
def _double_threefry_split(key, shape):
  # vmap over the two stacked sub-keys; out_axes=len(shape) places the
  # mapped pair axis after the split dimensions in the result.
  # NOTE(review): assumes `shape` is a tuple of split dimensions — confirm
  # against PRNGImpl's split contract.
  return vmap(
      threefry_split, (0, None), len(shape))(key, shape)
def _double_threefry_random_bits(key, bit_width, shape):
  """Draw random bits, currently returning only the first sub-key's draw.

  TODO(frostig): Currently this behaves like normal threefry, to
  avoid a few probabilistic test failures. Ideally we might want to
  test different generation behavior here (e.g. `bits0 ^ bits1`).
  """
  first_bits = threefry_random_bits(key[0], bit_width, shape)
  # Drawn from the second sub-key but intentionally discarded; see TODO.
  _ = threefry_random_bits(key[1], bit_width, shape)
  return first_bits
def _double_threefry_fold_in(key, data):
  """Fold `data` into each of the two stacked sub-keys independently."""
  folded = [threefry_fold_in(sub_key, data) for sub_key in (key[0], key[1])]
  return jnp.vstack(folded)
# Custom PRNG implementation whose keys hold two stacked threefry keys
# (key_shape (2, 2)), wired from the _double_threefry_* helpers and
# tagged 'fry2'.
double_threefry_prng_impl = prng_internal.PRNGImpl(
    key_shape=(2, 2),
    seed=_double_threefry_seed,
    split=_double_threefry_split,
    random_bits=_double_threefry_random_bits,
    fold_in=_double_threefry_fold_in,
    tag='fry2')
@jtu.with_config(jax_default_prng_impl='threefry2x32')
| DistributionsTest |
python | spyder-ide__spyder | external-deps/python-lsp-server/pylsp/plugins/pylint_lint.py | {
"start": 1441,
"end": 12372
} | class ____:
last_diags = collections.defaultdict(list)
@classmethod
def lint(cls, document, is_saved, flags=""):
"""Plugin interface to pylsp linter.
Args:
document: The document to be linted.
is_saved: Whether or not the file has been saved to disk.
flags: Additional flags to pass to pylint. Not exposed to
pylsp_lint, but used for testing.
Returns:
A list of dicts with the following format:
{
'source': 'pylint',
'range': {
'start': {
'line': start_line,
'character': start_column,
},
'end': {
'line': end_line,
'character': end_column,
},
}
'message': msg,
'severity': lsp.DiagnosticSeverity.*,
}
"""
if not is_saved:
# Pylint can only be run on files that have been saved to disk.
# Rather than return nothing, return the previous list of
# diagnostics. If we return an empty list, any diagnostics we'd
# previously shown will be cleared until the next save. Instead,
# continue showing (possibly stale) diagnostics until the next
# save.
return cls.last_diags[document.path]
cmd = [
sys.executable,
"-c",
"import sys; from pylint.lint import Run; Run(sys.argv[1:])",
"-f",
"json",
document.path,
] + (shlex.split(str(flags)) if flags else [])
log.debug("Calling pylint with '%s'", " ".join(cmd))
cwd = document._workspace.root_path
if not cwd:
cwd = os.path.dirname(__file__)
with Popen(
cmd, stdout=PIPE, stderr=PIPE, cwd=cwd, universal_newlines=True
) as process:
json_out, err = process.communicate()
if err != "":
log.error("Error calling pylint: '%s'", err)
# pylint prints nothing rather than [] when there are no diagnostics.
# json.loads will not parse an empty string, so just return.
if not json_out.strip():
cls.last_diags[document.path] = []
return []
# Pylint's JSON output is a list of objects with the following format.
#
# {
# "obj": "main",
# "path": "foo.py",
# "message": "Missing function docstring",
# "message-id": "C0111",
# "symbol": "missing-docstring",
# "column": 0,
# "type": "convention",
# "line": 5,
# "module": "foo"
# }
#
# The type can be any of:
#
# * convention
# * information
# * error
# * fatal
# * refactor
# * warning
diagnostics = []
for diag in json.loads(json_out):
# pylint lines index from 1, pylsp lines index from 0
line = diag["line"] - 1
err_range = {
"start": {
"line": line,
# Index columns start from 0
"character": diag["column"],
},
"end": {
"line": line,
# It's possible that we're linting an empty file. Even an empty
# file might fail linting if it isn't named properly.
"character": (
_find_end_of_identifier(document.lines[line], diag["column"])
if document.lines
else 0
),
},
}
if diag["type"] == "convention":
severity = lsp.DiagnosticSeverity.Information
elif diag["type"] == "information":
severity = lsp.DiagnosticSeverity.Information
elif diag["type"] == "error":
severity = lsp.DiagnosticSeverity.Error
elif diag["type"] == "fatal":
severity = lsp.DiagnosticSeverity.Error
elif diag["type"] == "refactor":
severity = lsp.DiagnosticSeverity.Hint
elif diag["type"] == "warning":
severity = lsp.DiagnosticSeverity.Warning
code = diag["message-id"]
diagnostic = {
"source": "pylint",
"range": err_range,
"message": "[{}] {}".format(diag["symbol"], diag["message"]),
"severity": severity,
"code": code,
}
if code in UNNECESSITY_CODES:
diagnostic["tags"] = [lsp.DiagnosticTag.Unnecessary]
if code in DEPRECATION_CODES:
diagnostic["tags"] = [lsp.DiagnosticTag.Deprecated]
diagnostics.append(diagnostic)
cls.last_diags[document.path] = diagnostics
return diagnostics
def _build_pylint_flags(settings):
"""Build arguments for calling pylint."""
pylint_args = settings.get("args")
if pylint_args is None:
return ""
return " ".join(pylint_args)
@hookimpl
def pylsp_settings():
# Default pylint to disabled because it requires a config
# file to be useful.
return {
"plugins": {
"pylint": {
"enabled": False,
"args": [],
# disabled by default as it can slow down the workflow
"executable": None,
}
}
}
@hookimpl
def pylsp_lint(config, workspace, document, is_saved):
"""Run pylint linter."""
with workspace.report_progress("lint: pylint"):
settings = config.plugin_settings("pylint")
log.debug("Got pylint settings: %s", settings)
# pylint >= 2.5.0 is required for working through stdin and only
# available with python3
if settings.get("executable") and sys.version_info[0] >= 3:
flags = build_args_stdio(settings)
pylint_executable = settings.get("executable", "pylint")
return pylint_lint_stdin(pylint_executable, document, flags)
flags = _build_pylint_flags(settings)
return PylintLinter.lint(document, is_saved, flags=flags)
def build_args_stdio(settings):
"""Build arguments for calling pylint.
:param settings: client settings
:type settings: dict
:return: arguments to path to pylint
:rtype: list
"""
pylint_args = settings.get("args")
if pylint_args is None:
return []
return pylint_args
def pylint_lint_stdin(pylint_executable, document, flags):
"""Run pylint linter from stdin.
This runs pylint in a subprocess with popen.
This allows passing the file from stdin and as a result
run pylint on unsaved files. Can slowdown the workflow.
:param pylint_executable: path to pylint executable
:type pylint_executable: string
:param document: document to run pylint on
:type document: pylsp.workspace.Document
:param flags: arguments to path to pylint
:type flags: list
:return: linting diagnostics
:rtype: list
"""
pylint_result = _run_pylint_stdio(pylint_executable, document, flags)
return _parse_pylint_stdio_result(document, pylint_result)
def _run_pylint_stdio(pylint_executable, document, flags):
"""Run pylint in popen.
:param pylint_executable: path to pylint executable
:type pylint_executable: string
:param document: document to run pylint on
:type document: pylsp.workspace.Document
:param flags: arguments to path to pylint
:type flags: list
:return: result of calling pylint
:rtype: string
"""
log.debug("Calling %s with args: '%s'", pylint_executable, flags)
try:
cmd = [pylint_executable]
cmd.extend(flags)
cmd.extend(["--from-stdin", document.path])
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError:
log.debug("Can't execute %s. Trying with 'python -m pylint'", pylint_executable)
cmd = [sys.executable, "-m", "pylint"]
cmd.extend(flags)
cmd.extend(["--from-stdin", document.path])
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate(document.source.encode())
if stderr:
log.error("Error while running pylint '%s'", stderr.decode())
return stdout.decode()
def _parse_pylint_stdio_result(document, stdout):
"""Parse pylint results.
:param document: document to run pylint on
:type document: pylsp.workspace.Document
:param stdout: pylint results to parse
:type stdout: string
:return: linting diagnostics
:rtype: list
"""
diagnostics = []
lines = stdout.splitlines()
for raw_line in lines:
parsed_line = re.match(r"(.*):(\d*):(\d*): (\w*): (.*)", raw_line)
if not parsed_line:
log.debug("Pylint output parser can't parse line '%s'", raw_line)
continue
parsed_line = parsed_line.groups()
if len(parsed_line) != 5:
log.debug("Pylint output parser can't parse line '%s'", raw_line)
continue
_, line, character, code, msg = parsed_line
line = int(line) - 1
character = int(character)
severity_map = {
"C": lsp.DiagnosticSeverity.Information,
"E": lsp.DiagnosticSeverity.Error,
"F": lsp.DiagnosticSeverity.Error,
"I": lsp.DiagnosticSeverity.Information,
"R": lsp.DiagnosticSeverity.Hint,
"W": lsp.DiagnosticSeverity.Warning,
}
severity = severity_map[code[0]]
diagnostic = {
"source": "pylint",
"code": code,
"range": {
"start": {"line": line, "character": character},
"end": {
"line": line,
"character": _find_end_of_identifier(
document.lines[line], character
),
},
},
"message": msg,
"severity": severity,
}
if code in UNNECESSITY_CODES:
diagnostic["tags"] = [lsp.DiagnosticTag.Unnecessary]
if code in DEPRECATION_CODES:
diagnostic["tags"] = [lsp.DiagnosticTag.Deprecated]
diagnostics.append(diagnostic)
return diagnostics
def _find_end_of_identifier(string, start):
"""Find the end of the identifier starting at the given position."""
for i in range(len(string), start, -1):
if string[start:i].isidentifier():
return i
return len(string) - 1
| PylintLinter |
python | lepture__authlib | authlib/oauth2/rfc6749/authenticate_client.py | {
"start": 626,
"end": 3985
} | class ____:
def __init__(self, query_client):
self.query_client = query_client
self._methods = {
"none": authenticate_none,
"client_secret_basic": authenticate_client_secret_basic,
"client_secret_post": authenticate_client_secret_post,
}
def register(self, method, func):
self._methods[method] = func
def authenticate(self, request, methods, endpoint):
for method in methods:
func = self._methods[method]
client = func(self.query_client, request)
if client and client.check_endpoint_auth_method(method, endpoint):
request.auth_method = method
return client
if "client_secret_basic" in methods:
raise InvalidClientError(
status_code=401,
description=f"The client cannot authenticate with methods: {methods}",
)
raise InvalidClientError(
description=f"The client cannot authenticate with methods: {methods}",
)
def __call__(self, request, methods, endpoint="token"):
return self.authenticate(request, methods, endpoint)
def authenticate_client_secret_basic(query_client, request):
"""Authenticate client by ``client_secret_basic`` method. The client
uses HTTP Basic for authentication.
"""
client_id, client_secret = extract_basic_authorization(request.headers)
if client_id and client_secret:
client = _validate_client(query_client, client_id, 401)
if client.check_client_secret(client_secret):
log.debug(f'Authenticate {client_id} via "client_secret_basic" success')
return client
log.debug(f'Authenticate {client_id} via "client_secret_basic" failed')
def authenticate_client_secret_post(query_client, request):
"""Authenticate client by ``client_secret_post`` method. The client
uses POST parameters for authentication.
"""
data = request.form
client_id = data.get("client_id")
client_secret = data.get("client_secret")
if client_id and client_secret:
client = _validate_client(query_client, client_id)
if client.check_client_secret(client_secret):
log.debug(f'Authenticate {client_id} via "client_secret_post" success')
return client
log.debug(f'Authenticate {client_id} via "client_secret_post" failed')
def authenticate_none(query_client, request):
"""Authenticate public client by ``none`` method. The client
does not have a client secret.
"""
client_id = request.payload.client_id
if client_id and not request.payload.data.get("client_secret"):
client = _validate_client(query_client, client_id)
log.debug(f'Authenticate {client_id} via "none" success')
return client
log.debug(f'Authenticate {client_id} via "none" failed')
def _validate_client(query_client, client_id, status_code=400):
if client_id is None:
raise InvalidClientError(
status_code=status_code,
description="Missing 'client_id' parameter.",
)
client = query_client(client_id)
if not client:
raise InvalidClientError(
status_code=status_code,
description="The client does not exist on this server.",
)
return client
| ClientAuthentication |
python | django__django | django/urls/converters.py | {
"start": 189,
"end": 339
} | class ____:
regex = "[^/]+"
def to_python(self, value):
return value
def to_url(self, value):
return value
| StringConverter |
python | jazzband__tablib | tests/test_tablib.py | {
"start": 29107,
"end": 36856
} | class ____(BaseTestCase):
def test_csv_format_detect(self):
"""Test CSV format detection."""
_csv = StringIO(
'1,2,3\n'
'4,5,6\n'
'7,8,9\n'
)
_bunk = StringIO(
'ยกยกยกยกยกยกยกยกยฃโขโยขยฃยงโยงยถโขยถยชโยถโขยชยบโขโขยชโยบยงโขโ โขยงยบยถโขโ ยฅยชโยบโขยงฦรธยฅยจยฉฯฦรธโ หยฅรงยฉยจโรธหยฅโโ ฦยฅรงยฉรธยจรงหยฅฦรงรธยถ'
)
fmt = registry.get_format('csv')
self.assertTrue(fmt.detect(_csv))
self.assertFalse(fmt.detect(_bunk))
def test_csv_import_set(self):
"""Generate and import CSV set serialization."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
def test_csv_import_set_semicolons(self):
"""Test for proper output with semicolon separated CSV."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_csv = data.get_csv(delimiter=';')
data.set_csv(_csv, delimiter=';')
self.assertEqual(_csv, data.get_csv(delimiter=';'))
def test_csv_import_set_with_spaces(self):
"""Generate and import CSV set serialization when row values have
spaces."""
data.append(('Bill Gates', 'Microsoft'))
data.append(('Steve Jobs', 'Apple'))
data.headers = ('Name', 'Company')
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
def test_csv_import_set_semicolon_with_spaces(self):
"""Generate and import semicolon separated CSV set serialization when row values have
spaces."""
data.append(('Bill Gates', 'Microsoft'))
data.append(('Steve Jobs', 'Apple'))
data.headers = ('Name', 'Company')
_csv = data.get_csv(delimiter=';')
data.set_csv(_csv, delimiter=';')
self.assertEqual(_csv, data.get_csv(delimiter=';'))
def test_csv_import_set_with_newlines(self):
"""Generate and import CSV set serialization when row values have
newlines."""
data.append(('Markdown\n=======',
'A cool language\n\nwith paragraphs'))
data.append(('reStructedText\n==============',
'Another cool language\n\nwith paragraphs'))
data.headers = ('title', 'body')
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
def test_csv_import_set_commas_embedded(self):
"""Comma-separated CSV can include commas inside quoted string."""
csv_text = (
'id,name,description,count\r\n'
'12,Smith,"Red, rounded",4\r\n'
)
data.csv = csv_text
self.assertEqual(data[0][2], 'Red, rounded')
self.assertEqual(data.csv, csv_text)
def test_csv_import_set_with_unicode_str(self):
"""Import CSV set with non-ascii characters in unicode literal"""
csv_text = (
"id,givenname,surname,loginname,email,pref_firstname,pref_lastname\n"
"13765,รvar,Arnfjรถrรฐ,testing,test@example.com,รvar,Arnfjรถrรฐ"
)
data.csv = csv_text
self.assertEqual(data.width, 7)
def test_csv_import_set_ragged(self):
"""Import CSV set when not all rows have the same length."""
csv_text = (
"H1,H2,H3\n"
"A,B\n"
"C,D,E\n"
"\n"
"F\n"
)
dataset = tablib.import_set(csv_text, format="csv")
self.assertEqual(
str(dataset),
'H1|H2|H3\n'
'--|--|--\n'
'A |B | \n'
'C |D |E \n'
'F | | '
)
def test_csv_import_set_skip_lines(self):
csv_text = (
'garbage,line\n'
'\n'
'id,name,description\n'
'12,Smith,rounded\n'
)
dataset = tablib.import_set(csv_text, format="csv", skip_lines=2)
self.assertEqual(dataset.headers, ['id', 'name', 'description'])
def test_csv_import_mac_os_lf(self):
csv_text = (
'id,name,description\r'
'12,Smith,rounded\r'
)
dataset = tablib.import_set(csv_text, format="csv")
self.assertEqual('id,name,description\r\n12,Smith,rounded\r\n', dataset.csv)
def test_csv_export(self):
"""Verify exporting dataset object as CSV."""
# Build up the csv string with headers first, followed by each row
csv = ''
for col in self.headers:
csv += col + ','
csv = csv.strip(',') + '\r\n'
for founder in self.founders:
for col in founder:
csv += str(col) + ','
csv = csv.strip(',') + '\r\n'
self.assertEqual(csv, self.founders.csv)
def test_csv_export_options(self):
"""Exporting support csv.writer() parameters."""
data.append(('1. a', '2. b', '3. c'))
result = data.export('csv', delimiter=' ', quotechar='|')
self.assertEqual(result, '|1. a| |2. b| |3. c|\r\n')
def test_csv_stream_export(self):
"""Verify exporting dataset object as CSV from file object."""
# Build up the csv string with headers first, followed by each row
csv = ''
for col in self.headers:
csv += col + ','
csv = csv.strip(',') + '\r\n'
for founder in self.founders:
for col in founder:
csv += str(col) + ','
csv = csv.strip(',') + '\r\n'
frm = registry.get_format('csv')
csv_stream = frm.export_stream_set(self.founders)
self.assertEqual(csv, csv_stream.getvalue())
def test_unicode_csv(self):
"""Check if unicode in csv export doesn't raise."""
data = tablib.Dataset()
data.append(['\xfc', '\xfd'])
data.csv
def test_csv_column_select(self):
"""Build up a CSV and test selecting a column"""
data = tablib.Dataset()
data.csv = self.founders.csv
headers = data.headers
self.assertIsInstance(headers[0], str)
orig_first_name = self.founders[self.headers[0]]
csv_first_name = data[headers[0]]
self.assertEqual(orig_first_name, csv_first_name)
def test_csv_column_delete(self):
"""Build up a CSV and test deleting a column"""
data = tablib.Dataset()
data.csv = self.founders.csv
target_header = data.headers[0]
self.assertIsInstance(target_header, str)
del data[target_header]
self.assertNotIn(target_header, data.headers)
def test_csv_column_sort(self):
"""Build up a CSV and test sorting a column by name"""
data = tablib.Dataset()
data.csv = self.founders.csv
orig_target_header = self.founders.headers[1]
target_header = data.headers[1]
self.founders.sort(orig_target_header)
data.sort(target_header)
self.assertEqual(self.founders[orig_target_header], data[target_header])
def test_csv_formatter_support_kwargs(self):
"""Test CSV import and export with formatter configuration."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
expected = 'first_name;last_name;gpa\nJohn;Adams;90\nGeorge;Washington;67\n'
kwargs = {'delimiter': ';', 'lineterminator': '\n'}
_csv = data.export('csv', **kwargs)
self.assertEqual(expected, _csv)
# the import works but consider default delimiter=','
d1 = tablib.import_set(_csv, format="csv")
self.assertEqual(1, len(d1.headers))
d2 = tablib.import_set(_csv, format="csv", **kwargs)
self.assertEqual(3, len(d2.headers))
| CSVTests |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 40681,
"end": 42348
} | class ____(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| CausalLMOutput |
python | numba__numba | numba/tests/test_extending.py | {
"start": 1510,
"end": 3967
} | class ____(types.Opaque):
def can_convert_to(self, context, toty):
if isinstance(toty, types.Number):
from numba.core.typeconv import Conversion
return Conversion.safe
mydummy_type = MyDummyType("mydummy")
mydummy = MyDummy()
@typeof_impl.register(MyDummy)
def typeof_mydummy(val, c):
return mydummy_type
@lower_cast(MyDummyType, types.Number)
def mydummy_to_number(context, builder, fromty, toty, val):
"""
Implicit conversion from MyDummy to int.
"""
return context.get_constant(toty, 42)
def get_dummy():
return mydummy
register_model(MyDummyType)(models.OpaqueModel)
@unbox(MyDummyType)
def unbox_index(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
# -----------------------------------------------------------------------
# Define a second custom type but w/o implicit cast to Number
def base_dummy_type_factory(name):
class DynType(object):
pass
class DynTypeType(types.Opaque):
pass
dyn_type_type = DynTypeType(name)
@typeof_impl.register(DynType)
def typeof_mydummy(val, c):
return dyn_type_type
register_model(DynTypeType)(models.OpaqueModel)
return DynTypeType, DynType, dyn_type_type
MyDummyType2, MyDummy2, mydummy_type_2 = base_dummy_type_factory("mydummy2")
@unbox(MyDummyType2)
def unbox_index2(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
# -----------------------------------------------------------------------
# Define a function's typing and implementation using the classical
# two-step API
def func1(x=None):
raise NotImplementedError
def type_func1_(context):
def typer(x=None):
if x in (None, types.none):
# 0-arg or 1-arg with None
return types.int32
elif isinstance(x, types.Float):
# 1-arg with float
return x
return typer
type_func1 = type_callable(func1)(type_func1_)
@lower_builtin(func1)
@lower_builtin(func1, types.none)
def func1_nullary(context, builder, sig, args):
return context.get_constant(sig.return_type, 42)
@lower_builtin(func1, types.Float)
def func1_unary(context, builder, sig, args):
def func1_impl(x):
return math.sqrt(2 * x)
return context.compile_internal(builder, func1_impl, sig, args)
# We can do the same for a known internal operation, here "print_item"
# which we extend to support MyDummyType.
@infer
| MyDummyType |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 191077,
"end": 193791
} | class ____(Response):
"""
Response of tasks.dequeue endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
:param dequeued: Number of tasks dequeued (0 or 1)
:type dequeued: int
"""
_service = "tasks"
_action = "dequeue"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"dequeued": {
"description": "Number of tasks dequeued (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self,
updated: Optional[int] = None,
fields: Optional[dict] = None,
dequeued: Optional[int] = None,
**kwargs: Any
) -> None:
super(DequeueResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
self.dequeued = dequeued
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
@schema_property("dequeued")
def dequeued(self) -> Optional[int]:
return self._property_dequeued
@dequeued.setter
def dequeued(self, value: Optional[int]) -> None:
if value is None:
self._property_dequeued = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "dequeued", six.integer_types)
self._property_dequeued = value
| DequeueResponse |
python | davidhalter__jedi | jedi/inference/names.py | {
"start": 21358,
"end": 21408
} | class ____(ImportName):
_level = 1
| SubModuleName |
python | keras-team__keras | keras/src/metrics/hinge_metrics.py | {
"start": 2342,
"end": 3255
} | class ____(reduction_metrics.MeanMetricWrapper):
"""Computes the categorical hinge metric between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.CategoricalHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.4000001
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
1.2
"""
def __init__(self, name="categorical_hinge", dtype=None):
super().__init__(fn=categorical_hinge, name=name, dtype=dtype)
# Metric should be minimized during optimization.
self._direction = "down"
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
| CategoricalHinge |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 6054,
"end": 9094
} | class ____(fixtures.TablesTest, AssertsExecutionResults):
__only_on__ = "postgresql"
__dialect__ = postgresql.dialect()
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", Integer),
)
@classmethod
def insert_data(cls, connection):
data_table = cls.tables.data_table
connection.execute(
data_table.insert().values(
[
{"data": 3},
{"data": 5},
{"data": 7},
{"data": 2},
{"data": 15},
{"data": 12},
{"data": 6},
{"data": 478},
{"data": 52},
{"data": 9},
]
)
)
def test_float_coercion(self, connection):
data_table = self.tables.data_table
for type_, result in [
(Numeric, decimal.Decimal("140.381230939")),
(Float, 140.381230939),
(Float(asdecimal=True), decimal.Decimal("140.381230939")),
(Numeric(asdecimal=False), 140.381230939),
]:
ret = connection.execute(
select(func.stddev_pop(data_table.c.data, type_=type_))
).scalar()
eq_(round_decimal(ret, 9), result)
ret = connection.execute(
select(cast(func.stddev_pop(data_table.c.data), type_))
).scalar()
eq_(round_decimal(ret, 9), result)
def test_arrays_pg(self, connection, metadata):
t1 = Table(
"t",
metadata,
Column("x", postgresql.ARRAY(Float)),
Column("y", postgresql.ARRAY(REAL)),
Column("z", postgresql.ARRAY(postgresql.DOUBLE_PRECISION)),
Column("w", postgresql.ARRAY(Double)),
Column("q", postgresql.ARRAY(Numeric)),
)
metadata.create_all(connection)
connection.execute(
t1.insert(),
dict(x=[5], y=[5], z=[6], w=[7], q=[decimal.Decimal("6.4")]),
)
row = connection.execute(t1.select()).first()
eq_(row, ([5], [5], [6], [7], [decimal.Decimal("6.4")]))
def test_arrays_base(self, connection, metadata):
t1 = Table(
"t",
metadata,
Column("x", sqltypes.ARRAY(Float)),
Column("y", sqltypes.ARRAY(REAL)),
Column("z", sqltypes.ARRAY(postgresql.DOUBLE_PRECISION)),
Column("w", sqltypes.ARRAY(Double)),
Column("q", sqltypes.ARRAY(Numeric)),
)
metadata.create_all(connection)
connection.execute(
t1.insert(),
dict(x=[5], y=[5], z=[6], w=[7], q=[decimal.Decimal("6.4")]),
)
row = connection.execute(t1.select()).first()
eq_(row, ([5], [5], [6], [7], [decimal.Decimal("6.4")]))
| FloatCoercionTest |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 198819,
"end": 199062
} | class ____:
_col_type = INT4MULTIRANGE
_col_str = "INT4MULTIRANGE"
def _data_str(self):
return "{[1,2), [3, 5), [9, 12)}"
def _data_obj(self):
return [Range(1, 2), Range(3, 5), Range(9, 12)]
| _Int4MultiRangeTests |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 3426,
"end": 10377
} | class ____(NonStrictDataModel):
"""
:param binary: Binary to use when running the script
:type binary: str
:param repository: Name of the repository where the script is located
:type repository: str
:param tag: Repository tag
:type tag: str
:param branch: Repository branch ID If not provided and tag not provided,
default repository branch is used.
:type branch: str
:param version_num: Version (changeset) number. Optional (default is head
version) Unused if tag is provided.
:type version_num: str
:param entry_point: Path to execute within the repository
:type entry_point: str
:param working_dir: Path to the folder from which to run the script Default -
root folder of repository
:type working_dir: str
:param requirements: A JSON object containing requirements strings by key
:type requirements: dict
:param diff: Uncommitted changes found in the repository when task was run
:type diff: str
"""
_schema = {
"properties": {
"binary": {
"default": "python",
"description": "Binary to use when running the script",
"type": ["string", "null"],
},
"branch": {
"description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
"type": ["string", "null"],
},
"diff": {
"description": "Uncommitted changes found in the repository when task was run",
"type": ["string", "null"],
},
"entry_point": {
"description": "Path to execute within the repository",
"type": ["string", "null"],
},
"repository": {
"description": "Name of the repository where the script is located",
"type": ["string", "null"],
},
"requirements": {
"description": "A JSON object containing requirements strings by key",
"type": ["object", "null"],
},
"tag": {"description": "Repository tag", "type": ["string", "null"]},
"version_num": {
"description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
"type": ["string", "null"],
},
"working_dir": {
"description": "Path to the folder from which to run the script Default - root folder of repository",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self,
binary: Optional[str] = "python",
repository: Optional[str] = None,
tag: Optional[str] = None,
branch: Optional[str] = None,
version_num: Optional[str] = None,
entry_point: Optional[str] = None,
working_dir: Optional[str] = None,
requirements: Optional[dict] = None,
diff: Optional[str] = None,
**kwargs: Any
) -> None:
super(Script, self).__init__(**kwargs)
self.binary = binary
self.repository = repository
self.tag = tag
self.branch = branch
self.version_num = version_num
self.entry_point = entry_point
self.working_dir = working_dir
self.requirements = requirements
self.diff = diff
@schema_property("binary")
def binary(self) -> Optional[str]:
return self._property_binary
@binary.setter
def binary(self, value: Optional[str]) -> None:
if value is None:
self._property_binary = None
return
self.assert_isinstance(value, "binary", six.string_types)
self._property_binary = value
@schema_property("repository")
def repository(self) -> Optional[str]:
return self._property_repository
@repository.setter
def repository(self, value: Optional[str]) -> None:
if value is None:
self._property_repository = None
return
self.assert_isinstance(value, "repository", six.string_types)
self._property_repository = value
@schema_property("tag")
def tag(self) -> Optional[str]:
return self._property_tag
@tag.setter
def tag(self, value: Optional[str]) -> None:
if value is None:
self._property_tag = None
return
self.assert_isinstance(value, "tag", six.string_types)
self._property_tag = value
@schema_property("branch")
def branch(self) -> Optional[str]:
return self._property_branch
@branch.setter
def branch(self, value: Optional[str]) -> None:
if value is None:
self._property_branch = None
return
self.assert_isinstance(value, "branch", six.string_types)
self._property_branch = value
@schema_property("version_num")
def version_num(self) -> Optional[str]:
return self._property_version_num
@version_num.setter
def version_num(self, value: Optional[str]) -> None:
if value is None:
self._property_version_num = None
return
self.assert_isinstance(value, "version_num", six.string_types)
self._property_version_num = value
@schema_property("entry_point")
def entry_point(self) -> Optional[str]:
return self._property_entry_point
@entry_point.setter
def entry_point(self, value: Optional[str]) -> None:
if value is None:
self._property_entry_point = None
return
self.assert_isinstance(value, "entry_point", six.string_types)
self._property_entry_point = value
@schema_property("working_dir")
def working_dir(self) -> Optional[str]:
return self._property_working_dir
@working_dir.setter
def working_dir(self, value: Optional[str]) -> None:
if value is None:
self._property_working_dir = None
return
self.assert_isinstance(value, "working_dir", six.string_types)
self._property_working_dir = value
@schema_property("requirements")
def requirements(self) -> Optional[dict]:
return self._property_requirements
@requirements.setter
def requirements(self, value: Optional[dict]) -> None:
if value is None:
self._property_requirements = None
return
self.assert_isinstance(value, "requirements", (dict,))
self._property_requirements = value
@schema_property("diff")
def diff(self) -> Optional[str]:
return self._property_diff
@diff.setter
def diff(self, value: Optional[str]) -> None:
if value is None:
self._property_diff = None
return
self.assert_isinstance(value, "diff", six.string_types)
self._property_diff = value
| Script |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 255984,
"end": 256785
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of MoveProjectCard"""
__schema__ = github_schema
__field_names__ = ("card_id", "column_id", "after_card_id", "client_mutation_id")
card_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="cardId")
"""The id of the card to move."""
column_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="columnId")
"""The id of the column to move it into."""
after_card_id = sgqlc.types.Field(ID, graphql_name="afterCardId")
"""Place the new card after the card with this id. Pass null to place
it at the top.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| MoveProjectCardInput |
python | facebook__pyre-check | tools/generate_taint_models/model.py | {
"start": 895,
"end": 1204
} | class ____(abc.ABC):
def __lt__(self, other: "Model") -> bool:
return str(self) < str(other)
@abc.abstractmethod
def __eq__(self) -> int:
...
@abc.abstractmethod
def __hash__(self) -> int:
...
@abc.abstractmethod
def __str__(self) -> str:
...
| Model |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/ecs/utils.py | {
"start": 681,
"end": 5389
} | class ____(Exception): ...
def run_ecs_task(ecs, run_task_kwargs) -> Mapping[str, Any]:
response = ecs.run_task(**run_task_kwargs)
tasks = response["tasks"]
if not tasks:
failures = response["failures"]
failure_messages = []
for failure in failures:
arn = failure.get("arn")
reason = failure.get("reason")
detail = failure.get("detail")
failure_message = (
"Task"
+ (f" {arn}" if arn else "")
+ " failed."
+ (f" Failure reason: {reason}" if reason else "")
+ (f" Failure details: {detail}" if detail else "")
)
failure_messages.append(failure_message)
failure_message = "\n".join(failure_messages) if failure_messages else "Task failed."
if "Capacity is unavailable at this time" in failure_message:
raise RetryableEcsException(failure_message)
raise Exception(failure_message)
return tasks[0]
def get_task_definition_family(
prefix: str,
job_origin: RemoteJobOrigin,
) -> str:
job_name = job_origin.job_name
repo_name = job_origin.repository_origin.repository_name
location_name = job_origin.repository_origin.code_location_origin.location_name
assert len(prefix) < 32
# Truncate the location name if it's too long (but add a unique suffix at the end so that no matter what it's unique)
# Relies on the fact that org name and deployment name are always <= 64 characters long to
# stay well underneath the 255 character limit imposed by ECS
final_family = f"{prefix}_{_get_family_hash(location_name)}_{_get_family_hash(repo_name)}_{_get_family_hash(job_name)}"
assert len(final_family) <= 255
return sanitize_family(final_family)
def task_definitions_match(
desired_task_definition_config: DagsterEcsTaskDefinitionConfig,
existing_task_definition: Mapping[str, Any],
container_name: str,
) -> bool:
if not any(
[
container["name"] == container_name
for container in existing_task_definition["containerDefinitions"]
]
):
return False
existing_task_definition_config = DagsterEcsTaskDefinitionConfig.from_task_definition_dict(
existing_task_definition, container_name
)
return existing_task_definition_config.matches_other_task_definition_config(
desired_task_definition_config
)
def get_task_logs(ecs, logs_client, cluster, task_arn, container_name, limit=10):
task = ecs.describe_tasks(cluster=cluster, tasks=[task_arn]).get("tasks")[0]
task_definition_arn = task.get("taskDefinitionArn")
task_definition = ecs.describe_task_definition(taskDefinition=task_definition_arn).get(
"taskDefinition"
)
matching_container_definitions = [
container_definition
for container_definition in task_definition.get("containerDefinitions", [])
if container_definition["name"] == container_name
]
if not matching_container_definitions:
raise Exception(f"Could not find container with name {container_name}")
container_definition = matching_container_definitions[0]
log_options = container_definition.get("logConfiguration", {}).get("options", {})
log_group = log_options.get("awslogs-group")
log_stream_prefix = log_options.get("awslogs-stream-prefix")
if not log_group or not log_stream_prefix:
return []
container_name = container_definition.get("name")
task_id = task_arn.split("/")[-1]
log_stream = f"{log_stream_prefix}/{container_name}/{task_id}"
events = logs_client.get_log_events(
logGroupName=log_group, logStreamName=log_stream, limit=limit
).get("events")
return [event.get("message") for event in events]
def is_transient_task_stopped_reason(stopped_reason: str) -> bool:
if "Timeout waiting for network interface provisioning to complete" in stopped_reason:
return True
if "Timeout waiting for EphemeralStorage provisioning to complete" in stopped_reason:
return True
if "CannotPullContainerError" in stopped_reason and "i/o timeout" in stopped_reason:
return True
if "CannotPullContainerError" in stopped_reason and (
"invalid argument" in stopped_reason or "EOF" in stopped_reason
):
return True
if (
"Unexpected EC2 error while attempting to Create Network Interface" in stopped_reason
and "AuthFailure" in stopped_reason
):
return True
if "The Service Discovery instance could not be registered" in stopped_reason:
return True
return False
| RetryableEcsException |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 33036,
"end": 33208
} | class ____(Reduction):
# Only supported for Series objects
reduction_aggregate = sum
@staticmethod
def reduction_chunk(ser):
return ser.nbytes
| NBytes |
python | gevent__gevent | src/gevent/resolver/_addresses.py | {
"start": 1243,
"end": 4795
} | class ____(ValueError):
pass
def _ipv4_inet_aton(text):
"""
Convert an IPv4 address in text form to binary struct.
*text*, a ``text``, the IPv4 address in textual form.
Returns a ``binary``.
"""
if not isinstance(text, bytes):
text = text.encode()
parts = text.split(b'.')
if len(parts) != 4:
raise AddressSyntaxError(text)
for part in parts:
if not part.isdigit():
raise AddressSyntaxError
if len(part) > 1 and part[0] == '0':
# No leading zeros
raise AddressSyntaxError(text)
try:
ints = [int(part) for part in parts]
return struct.pack('BBBB', *ints)
except:
raise AddressSyntaxError(text)
def _ipv6_inet_aton(text,
_v4_ending=re.compile(br'(.*):(\d+\.\d+\.\d+\.\d+)$'),
_colon_colon_start=re.compile(br'::.*'),
_colon_colon_end=re.compile(br'.*::$')):
"""
Convert an IPv6 address in text form to binary form.
*text*, a ``text``, the IPv6 address in textual form.
Returns a ``binary``.
"""
# pylint:disable=too-many-branches
#
# Our aim here is not something fast; we just want something that works.
#
if not isinstance(text, bytes):
text = text.encode()
if text == b'::':
text = b'0::'
#
# Get rid of the icky dot-quad syntax if we have it.
#
m = _v4_ending.match(text)
if not m is None:
b = bytearray(_ipv4_inet_aton(m.group(2)))
text = (u"{}:{:02x}{:02x}:{:02x}{:02x}".format(m.group(1).decode(),
b[0], b[1], b[2],
b[3])).encode()
#
# Try to turn '::<whatever>' into ':<whatever>'; if no match try to
# turn '<whatever>::' into '<whatever>:'
#
m = _colon_colon_start.match(text)
if not m is None:
text = text[1:]
else:
m = _colon_colon_end.match(text)
if not m is None:
text = text[:-1]
#
# Now canonicalize into 8 chunks of 4 hex digits each
#
chunks = text.split(b':')
l = len(chunks)
if l > 8:
raise SyntaxError
seen_empty = False
canonical = []
for c in chunks:
if c == b'':
if seen_empty:
raise AddressSyntaxError(text)
seen_empty = True
for _ in range(0, 8 - l + 1):
canonical.append(b'0000')
else:
lc = len(c)
if lc > 4:
raise AddressSyntaxError(text)
if lc != 4:
c = (b'0' * (4 - lc)) + c
canonical.append(c)
if l < 8 and not seen_empty:
raise AddressSyntaxError(text)
text = b''.join(canonical)
#
# Finally we can go to binary.
#
try:
return binascii.unhexlify(text)
except (binascii.Error, TypeError):
raise AddressSyntaxError(text)
def _is_addr(host, parse=_ipv4_inet_aton):
if not host or not isinstance(host, hostname_types):
return False
try:
parse(host)
except AddressSyntaxError:
return False
return True
# Return True if host is a valid IPv4 address
is_ipv4_addr = _is_addr
def is_ipv6_addr(host):
# Return True if host is a valid IPv6 address
if host and isinstance(host, hostname_types):
s = '%' if isinstance(host, str) else b'%'
host = host.split(s, 1)[0]
return _is_addr(host, _ipv6_inet_aton)
| AddressSyntaxError |
python | takluyver__flit | flit_core/flit_core/vendor/tomli/_parser.py | {
"start": 6341,
"end": 7369
} | class ____:
def __init__(self) -> None:
# The parsed content of the TOML document
self.dict: Dict[str, Any] = {}
def get_or_create_nest(
self,
key: Key,
*,
access_lists: bool = True,
) -> dict:
cont: Any = self.dict
for k in key:
if k not in cont:
cont[k] = {}
cont = cont[k]
if access_lists and isinstance(cont, list):
cont = cont[-1]
if not isinstance(cont, dict):
raise KeyError("There is no nest behind this key")
return cont
def append_nest_to_list(self, key: Key) -> None:
cont = self.get_or_create_nest(key[:-1])
last_key = key[-1]
if last_key in cont:
list_ = cont[last_key]
try:
list_.append({})
except AttributeError:
raise KeyError("An object other than list found behind this key")
else:
cont[last_key] = [{}]
| NestedDict |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 7774,
"end": 8008
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("ALL", "DISABLED", "NO_POLICY", "PRIVATE", "PUBLIC")
| EnterpriseMembersCanCreateRepositoriesSettingValue |
python | pytorch__pytorch | test/fx/test_fx_split.py | {
"start": 494,
"end": 4203
} | class ____(TestCase):
def test_split_preserve_node_meta(self):
class TestModule(torch.nn.Module):
def forward(self, x, y):
x = x + x
y = y * y
return x - y
gm = torch.fx.symbolic_trace(TestModule())
for node in gm.graph.nodes:
node.meta["name"] = node.name
if node.name == "add":
node.tag = "a"
elif node.name == "mul":
node.tag = "b"
elif node.name == "sub":
node.tag = "c"
split_gm = split_by_tags(gm, ["a", "b", "c"])
for m in split_gm.children():
for n in m.graph.nodes:
if n.op != "output":
self.assertIn("name", n.meta)
self.assertEqual(n.meta["name"], n.name)
# Validate that metadata is copied correctly for graph placeholder nodes
for node in split_gm.graph.nodes:
if node.op == "placeholder":
self.assertIn("name", node.meta)
self.assertEqual(node.meta["name"], node.name)
def test_dataclass_as_graph_entry(self):
"""
Test that splitting works when the graph entry is a dataclass instance
and a wrapped function is called with it, resulting in a call_function
node with no input dependencies. This tests the edge case fixed in D81232435
where call_function nodes with no dependencies should be handled properly
in the starter_nodes() method.
Graph visualization:
y (input) DummyDataClass(2,3,4) (no input deps, result as a call_function_node)
\ /
\ /
wrapped_add
|
z (output)
""" # noqa: W605
class TestModuleWithFunctionEntry(torch.nn.Module):
def forward(self, y):
# This creates a call_function node with no input dependencies
dummy_data_class = DummyDataClass(2, 3, 4)
z = wrapped_add(dummy_data_class, y)
return z
mod = TestModuleWithFunctionEntry()
gm = torch.fx.symbolic_trace(mod)
# Create custom operator support to mark wrapped_add as supported
class CustomOpSupport(op_support.OperatorSupportBase):
def is_node_supported(self, submodules, node) -> bool:
return node.target is wrapped_add
# Create a simple splitter to test the edge case
class TestSplitter(splitter_base._SplitterBase):
def __init__(self, module, sample_input, operator_support):
settings = splitter_base._SplitterSettingBase()
super().__init__(module, sample_input, operator_support, settings)
# Create splitter instance - this tests the fix where call_function nodes
# with no input dependencies are properly handled in starter_nodes()
splitter = TestSplitter(
module=gm,
sample_input=[torch.randn(2, 3)],
operator_support=CustomOpSupport(),
)
# This should not raise an exception (tests the fix from D81232435)
# The fix allows call_function nodes with no dependencies as valid starter nodes
split_result = splitter()
# Verify the splitting worked correctly
self.assertIsNotNone(split_result)
# Test that the split module produces the same result as the original
test_input = torch.randn(2, 3)
original_result = mod(test_input)
split_module_result = split_result(test_input)
self.assertTrue(torch.equal(original_result, split_module_result))
| TestFXSplit |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mixpanel/source_mixpanel/components.py | {
"start": 15443,
"end": 15812
} | class ____(DpathExtractor):
def extract_records(self, response: requests.Response) -> List[Mapping[str, Any]]:
# We prefer response.iter_lines() to response.text.split_lines() as the later can missparse text properties embeding linebreaks
records = list(iter_dicts(response.iter_lines(decode_unicode=True)))
return records
| ExportDpathExtractor |
python | huggingface__transformers | src/transformers/models/glm46v/modeling_glm46v.py | {
"start": 26056,
"end": 39600
} | class ____(Glm46VPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
# Reference: fix gemma3 grad acc #37208
accepts_loss_kwargs = False
def __init__(self, config):
super().__init__(config)
self.model = Glm46VModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_video_features(
self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
):
return self.model.get_video_features(pixel_values_videos, video_grid_thw)
def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
return self.model.get_image_features(pixel_values, image_grid_thw)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.Tensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Glm46VCausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Glm46VForConditionalGeneration
>>> model = Glm46VForConditionalGeneration.from_pretrained("THUDM/GLM-4.1V-9B-Thinking")
>>> processor = AutoProcessor.from_pretrained("THUDM/GLM-4.1V-9B-Thinking")
>>> messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
>>> inputs = processor(text=[text], images=[image], vision_infos=[vision_infos])
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..."
```"""
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
position_ids=position_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
return Glm46VCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
rope_deltas=outputs.rope_deltas,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
pixel_values=None,
pixel_values_videos=None,
image_grid_thw=None,
video_grid_thw=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
position_ids=position_ids,
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
use_cache=use_cache,
**kwargs,
)
# GLM-4.1V position_ids are prepareed with rope_deltas in forward
model_inputs["position_ids"] = None
if cache_position[0] != 0:
model_inputs["pixel_values"] = None
model_inputs["pixel_values_videos"] = None
return model_inputs
def _get_image_nums_and_video_nums(
self,
input_ids: Optional[torch.LongTensor],
inputs_embeds: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Returns:
image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)
"""
if inputs_embeds is not None:
is_image = (
inputs_embeds
== self.get_input_embeddings()(
torch.tensor(self.config.image_start_token_id, dtype=torch.long, device=inputs_embeds.device)
)
)[..., 0]
is_video_start = (
inputs_embeds
== self.get_input_embeddings()(
torch.tensor(self.config.video_start_token_id, dtype=torch.long, device=inputs_embeds.device)
)
)[..., 0]
is_video_end = (
inputs_embeds
== self.get_input_embeddings()(
torch.tensor(self.config.video_end_token_id, dtype=torch.long, device=inputs_embeds.device)
)
)[..., 0]
else:
is_image = input_ids == self.config.image_start_token_id
is_video_start = input_ids == self.config.video_start_token_id
is_video_end = input_ids == self.config.video_end_token_id
# Cumulative sum to track if we're inside a video span
# We'll assume well-formed video tags (i.e. matching starts and ends)
video_level = torch.cumsum(is_video_start.int() - is_video_end.int(), dim=1)
inside_video = video_level > 0 # shape (batch_size, seq_length)
# Mask out image tokens that are inside video spans
standalone_images = is_image & (~inside_video)
# Count per batch
image_counts = standalone_images.sum(dim=1)
video_counts = is_video_start.sum(dim=1)
return image_counts, video_counts
def _expand_inputs_for_generation(
self,
expand_size: int = 1,
is_encoder_decoder: bool = False,
input_ids: Optional[torch.LongTensor] = None,
**model_kwargs,
) -> tuple[torch.LongTensor, dict[str, Any]]:
# Overwritten -- Support for expanding tensors without a batch size dimension
# e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_t
# pixel_values.shape[0] is sum(seqlen_images for samples)
# image_grid_thw.shape[0] is sum(num_images for samples)
if expand_size == 1:
return input_ids, model_kwargs
visual_keys = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw", "second_per_grid_ts"]
def _expand_dict_for_generation_visual(dict_to_expand):
image_grid_thw = model_kwargs.get("image_grid_thw", None)
video_grid_thw = model_kwargs.get("video_grid_thw", None)
image_nums, video_nums = self._get_image_nums_and_video_nums(
input_ids, inputs_embeds=model_kwargs.get("inputs_embeds", None)
)
def _repeat_interleave_samples(x, lengths, repeat_times):
samples = torch.split(x, lengths)
repeat_args = [repeat_times] + [1] * (x.dim() - 1)
result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0)
return result
for key in dict_to_expand:
if key == "pixel_values":
# split images into samples
samples = torch.split(image_grid_thw, list(image_nums))
# compute the sequence length of images for each sample
lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
dict_to_expand[key] = _repeat_interleave_samples(
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
)
elif key == "image_grid_thw":
# get the num of images for each sample
lengths = list(image_nums)
dict_to_expand[key] = _repeat_interleave_samples(
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
)
elif key == "pixel_values_videos":
samples = torch.split(video_grid_thw, list(video_nums))
lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
dict_to_expand[key] = _repeat_interleave_samples(
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
)
elif key == "video_grid_thw":
lengths = list(video_nums)
dict_to_expand[key] = _repeat_interleave_samples(
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
)
elif key == "second_per_grid_ts":
dict_to_expand[key] = _repeat_interleave_samples(
dict_to_expand[key], lengths=list(video_nums), repeat_times=expand_size
)
return dict_to_expand
def _expand_dict_for_generation(dict_to_expand):
for key in dict_to_expand:
if (
key != "cache_position"
and dict_to_expand[key] is not None
and isinstance(dict_to_expand[key], torch.Tensor)
and key not in visual_keys
):
dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
return dict_to_expand
model_kwargs = _expand_dict_for_generation_visual(model_kwargs)
if input_ids is not None:
input_ids = input_ids.repeat_interleave(expand_size, dim=0)
model_kwargs = _expand_dict_for_generation(model_kwargs)
if is_encoder_decoder:
if model_kwargs.get("encoder_outputs") is None:
raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
return input_ids, model_kwargs
__all__ = ["Glm46VModel", "Glm46VPreTrainedModel", "Glm46VForConditionalGeneration"]
| Glm46VForConditionalGeneration |
python | getsentry__sentry | src/sentry/integrations/slack/message_builder/issues.py | {
"start": 9025,
"end": 17091
} | class ____(TypedDict):
label: Mapping[str, str]
options: Sequence[Mapping[str, Any]]
def get_option_groups(group: Group) -> Sequence[OptionGroup]:
all_members = group.project.get_members_as_rpc_users()
members = list({m.id: m for m in all_members}.values())
teams = group.project.teams.all()
option_groups = []
if teams:
team_option_group: OptionGroup = {
"label": {"type": "plain_text", "text": "Teams"},
"options": format_actor_options_slack(teams),
}
option_groups.append(team_option_group)
if members:
member_option_group: OptionGroup = {
"label": {"type": "plain_text", "text": "People"},
"options": format_actor_options_slack(members),
}
option_groups.append(member_option_group)
return option_groups
def get_suggested_assignees(
project: Project, event: Event | GroupEvent, current_assignee: RpcUser | Team | None
) -> list[str]:
"""Get suggested assignees as a list of formatted strings"""
suggested_assignees, _ = ProjectOwnership.get_owners(project.id, event.data)
try:
suspect_commit_users = Actor.many_from_object(get_suspect_commit_users(project, event))
suggested_assignees.extend(suspect_commit_users)
except (Release.DoesNotExist, Commit.DoesNotExist):
logger.info("Skipping suspect committers because release does not exist.")
except Exception:
logger.exception("Could not get suspect committers. Continuing execution.")
if suggested_assignees:
suggested_assignees = dedupe_suggested_assignees(suggested_assignees)
assignee_texts = []
for assignee in suggested_assignees:
# skip over any suggested assignees that are the current assignee of the issue, if there is any
if assignee.is_team and not (
isinstance(current_assignee, Team) and assignee.id == current_assignee.id
):
assignee_texts.append(f"#{assignee.slug}")
elif assignee.is_user and not (
isinstance(current_assignee, RpcUser) and assignee.id == current_assignee.id
):
assignee_as_user = assignee.resolve()
if isinstance(assignee_as_user, RpcUser):
assignee_texts.append(assignee_as_user.get_display_name())
return assignee_texts
return []
def get_suspect_commit_text(group: Group) -> str | None:
"""Build up the suspect commit text for the given event"""
commit = group.get_suspect_commit()
if not commit:
return None
suspect_commit_text = "Suspect Commit: "
author = commit.author
commit_id = commit.key
if not (author and commit_id): # we need both the author and commit id to continue
return None
author_display = author.name if author.name else author.email
pull_request = PullRequest.objects.filter(
merge_commit_sha=commit.key, organization_id=group.project.organization_id
).first()
if pull_request:
repo = Repository.objects.get(id=pull_request.repository_id)
repo_base = repo.url
provider = repo.provider
if repo_base and provider in SUPPORTED_COMMIT_PROVIDERS:
if IntegrationProviderSlug.BITBUCKET.value in provider:
commit_link = f"<{repo_base}/commits/{commit_id}"
else:
commit_link = f"<{repo_base}/commit/{commit_id}"
commit_link += f"|{commit_id[:6]}>"
suspect_commit_text += f"{commit_link} by {author_display}"
else: # for unsupported providers
suspect_commit_text += f"{commit_id[:6]} by {author_display}"
if pull_request.date_added:
pr_date = time_since(pull_request.date_added)
else:
pr_date = pull_request.date_added
pr_id = pull_request.key
pr_title = pull_request.title
pr_link = pull_request.get_external_url()
if pr_date and pr_id and pr_title and pr_link:
suspect_commit_text += (
f" {pr_date} \n'{pr_title} (#{pr_id})' <{pr_link}|View Pull Request>"
)
else:
suspect_commit_text += f"{commit_id[:6]} by {author_display}"
return suspect_commit_text
def get_action_text(actions: Sequence[Any], identity: RpcIdentity) -> str:
action_text = "\n".join(
[
action_text
for action_text in [build_action_text(identity, action) for action in actions]
if action_text
]
)
return action_text
def build_actions(
group: Group,
project: Project,
text: str,
actions: Sequence[MessageAction | BlockKitMessageAction] | None = None,
identity: RpcIdentity | None = None,
) -> tuple[Sequence[MessageAction], str, bool]:
"""Having actions means a button will be shown on the Slack message e.g. ignore, resolve, assign."""
if actions and identity:
text = get_action_text(actions, identity)
return [], text, True
status = group.get_status()
def _ignore_button() -> MessageAction | None:
if group.issue_category == GroupCategory.FEEDBACK:
return None
if status == GroupStatus.IGNORED:
return MessageAction(
name="status",
label="Mark as Ongoing",
value="unresolved:ongoing",
action_id=encode_action_id(
action=SlackAction.UNRESOLVED_ONGOING,
organization_id=group.organization.id,
project_id=group.project.id,
),
)
return MessageAction(
name="status",
label="Archive",
value="archive_dialog",
action_id=encode_action_id(
action=SlackAction.ARCHIVE_DIALOG,
organization_id=group.organization.id,
project_id=group.project.id,
),
)
def _resolve_button() -> MessageAction:
if status == GroupStatus.RESOLVED:
return MessageAction(
name="unresolved:ongoing",
label="Unresolve",
value="unresolved:ongoing",
action_id=encode_action_id(
action=SlackAction.UNRESOLVED_ONGOING,
organization_id=group.organization.id,
project_id=group.project.id,
),
)
if not project.flags.has_releases:
return MessageAction(
name="status",
label="Resolve",
value="resolved",
action_id=encode_action_id(
action=SlackAction.STATUS,
organization_id=group.organization.id,
project_id=group.project.id,
),
)
return MessageAction(
name="status",
label="Resolve",
value="resolve_dialog",
action_id=encode_action_id(
action=SlackAction.RESOLVE_DIALOG,
organization_id=group.organization.id,
project_id=group.project.id,
),
)
def _assign_button() -> MessageAction:
try:
assignee = group.get_assignee()
except Actor.InvalidActor:
assignee = None
assign_button = MessageAction(
name="assign",
label="Select Assignee...",
type="select",
selected_options=format_actor_options_slack([assignee]) if assignee else [],
action_id=encode_action_id(
action=SlackAction.ASSIGN,
organization_id=group.organization.id,
project_id=group.project.id,
),
)
return assign_button
action_list = [
a
for a in [
_resolve_button(),
_ignore_button(),
_assign_button(),
]
if a is not None
]
return action_list, text, False
| OptionGroup |
python | Pylons__pyramid | src/pyramid/authentication.py | {
"start": 12550,
"end": 14572
} | class ____(CallbackAuthenticationPolicy):
"""A :app:`Pyramid` :term:`authentication policy` which
obtains data from the ``REMOTE_USER`` WSGI environment variable.
Constructor Arguments
``environ_key``
Default: ``REMOTE_USER``. The key in the WSGI environ which
provides the userid.
``callback``
Default: ``None``. A callback passed the userid and the request,
expected to return None if the userid doesn't exist or a sequence of
principal identifiers (possibly empty) representing groups if the
user does exist. If ``callback`` is None, the userid will be assumed
to exist with no group principals.
``debug``
Default: ``False``. If ``debug`` is ``True``, log messages to the
Pyramid debug logger about the results of various authentication
steps. The output from debugging is useful for reporting to maillist
or IRC channels when asking for support.
Objects of this class implement the interface described by
:class:`pyramid.interfaces.IAuthenticationPolicy`.
"""
def __init__(self, environ_key='REMOTE_USER', callback=None, debug=False):
self.environ_key = environ_key
self.callback = callback
self.debug = debug
def unauthenticated_userid(self, request):
"""The ``REMOTE_USER`` value found within the ``environ``."""
return request.environ.get(self.environ_key)
def remember(self, request, userid, **kw):
"""A no-op. The ``REMOTE_USER`` does not provide a protocol for
remembering the user. This will be application-specific and can
be done somewhere else or in a subclass."""
return []
def forget(self, request):
"""A no-op. The ``REMOTE_USER`` does not provide a protocol for
forgetting the user. This will be application-specific and can
be done somewhere else or in a subclass."""
return []
@implementer(IAuthenticationPolicy)
| RemoteUserAuthenticationPolicy |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B024.py | {
"start": 1598,
"end": 1700
} | class ____(notabc.ABC, metaclass=abc.ABCMeta): # safe
def method(self):
foo()
| multi_super_2 |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 11600,
"end": 12079
} | class ____(PrefectBaseModel, OperatorMixin):
"""Filter by `TaskRun.tags`."""
all_: Optional[List[str]] = Field(
default=None,
examples=[["tag-1", "tag-2"]],
description=(
"A list of tags. Task runs will be returned only if their tags are a"
" superset of the list"
),
)
is_null_: Optional[bool] = Field(
default=None, description="If true, only include task runs without tags"
)
| TaskRunFilterTags |
python | huggingface__transformers | src/transformers/models/mllama/modeling_mllama.py | {
"start": 21440,
"end": 24446
} | class ____(nn.Module):
def __init__(self, config: MllamaTextConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.layer_idx = layer_idx
self.is_causal = True
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
position_embeddings: torch.Tensor,
past_key_values=None,
cache_position=None,
**kwargs,
):
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.gemma2.modeling_gemma2.Gemma2MLP with Gemma2->MllamaText
| MllamaTextSelfAttention |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 6958,
"end": 9287
} | class ____:
def test_invalid_oid(self):
with pytest.raises(TypeError):
x509.UnrecognizedExtension(
"notanoid", # type:ignore[arg-type]
b"somedata",
)
def test_eq(self):
ext1 = x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.2.3.4"), b"\x03\x02\x01"
)
ext2 = x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.2.3.4"), b"\x03\x02\x01"
)
assert ext1 == ext2
def test_ne(self):
ext1 = x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.2.3.4"), b"\x03\x02\x01"
)
ext2 = x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.2.3.4"), b"\x03\x02\x02"
)
ext3 = x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.2.3.5"), b"\x03\x02\x01"
)
assert ext1 != ext2
assert ext1 != ext3
assert ext1 != object()
def test_repr(self):
ext1 = x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.2.3.4"), b"\x03\x02\x01"
)
assert repr(ext1) == (
"<UnrecognizedExtension(oid=<ObjectIdentifier(oid=1.2.3.4, "
"name=Unknown OID)>, value=b'\\x03\\x02\\x01')>"
)
def test_hash(self):
ext1 = x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.2.3.4"), b"\x03\x02\x01"
)
ext2 = x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.2.3.4"), b"\x03\x02\x01"
)
ext3 = x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.2.3.5"), b"\x03\x02\x01"
)
assert hash(ext1) == hash(ext2)
assert hash(ext1) != hash(ext3)
def test_public_bytes(self):
ext1 = x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.2.3.5"), b"\x03\x02\x01"
)
assert ext1.public_bytes() == b"\x03\x02\x01"
# The following creates a BasicConstraints extension with an invalid
# value. The serialization code should still handle it correctly by
# special-casing UnrecognizedExtension.
ext2 = x509.UnrecognizedExtension(
x509.oid.ExtensionOID.BASIC_CONSTRAINTS, b"\x03\x02\x01"
)
assert ext2.public_bytes() == b"\x03\x02\x01"
| TestUnrecognizedExtension |
python | dagster-io__dagster | python_modules/libraries/dagster-mysql/dagster_mysql/storage.py | {
"start": 731,
"end": 3847
} | class ____(DagsterStorage, ConfigurableClass):
"""MySQL-backed dagster storage.
Users should not directly instantiate this class; it is instantiated by internal machinery when
``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in
``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.
To use MySQL for storage, you can add a block such as the following to your
``dagster.yaml``:
.. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-mysql.yaml
:caption: dagster.yaml
:language: YAML
Note that the fields in this config are :py:class:`~dagster.StringSource` and
:py:class:`~dagster.IntSource` and can be configured from environment variables.
"""
def __init__(self, mysql_url, inst_data: Optional[ConfigurableClassData] = None):
self.mysql_url = mysql_url
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
self._run_storage = MySQLRunStorage(mysql_url)
self._event_log_storage = MySQLEventLogStorage(mysql_url)
self._schedule_storage = MySQLScheduleStorage(mysql_url)
super().__init__()
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@classmethod
def config_type(cls) -> UserConfigSchema:
return mysql_config()
@classmethod
def from_config_value( # pyright: ignore[reportIncompatibleMethodOverride]
cls, inst_data: Optional[ConfigurableClassData], config_value: MySqlStorageConfig
) -> "DagsterMySQLStorage":
return DagsterMySQLStorage(
inst_data=inst_data,
mysql_url=mysql_url_from_config(config_value),
)
@property
def event_log_storage(self) -> EventLogStorage:
return self._event_log_storage
@property
def run_storage(self) -> RunStorage:
return self._run_storage
@property
def schedule_storage(self) -> ScheduleStorage:
return self._schedule_storage
@property
def event_storage_data(self) -> Optional[ConfigurableClassData]:
return (
ConfigurableClassData(
"dagster_mysql",
"MySQLEventLogStorage",
self.inst_data.config_yaml,
)
if self.inst_data
else None
)
@property
def run_storage_data(self) -> Optional[ConfigurableClassData]:
return (
ConfigurableClassData(
"dagster_mysql",
"MySQLRunStorage",
self.inst_data.config_yaml,
)
if self.inst_data
else None
)
@property
def schedule_storage_data(self) -> Optional[ConfigurableClassData]:
return (
ConfigurableClassData(
"dagster_mysql",
"MySQLScheduleStorage",
self.inst_data.config_yaml,
)
if self.inst_data
else None
)
| DagsterMySQLStorage |
python | django__django | tests/model_fields/tests.py | {
"start": 14130,
"end": 15957
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.foo1 = Foo.objects.create(a="a", d="12.35")
cls.foo2 = Foo.objects.create(a="b", d="12.34")
cls.bar1 = Bar.objects.create(a=cls.foo1, b="b")
cls.bar2 = Bar.objects.create(a=cls.foo2, b="a")
cls.field = Bar._meta.get_field("a")
def assertChoicesEqual(self, choices, objs):
self.assertEqual(choices, [(obj.pk, str(obj)) for obj in objs])
def test_get_choices(self):
self.assertChoicesEqual(
self.field.get_choices(include_blank=False, ordering=("a",)),
[self.foo1, self.foo2],
)
self.assertChoicesEqual(
self.field.get_choices(include_blank=False, ordering=("-a",)),
[self.foo2, self.foo1],
)
def test_get_choices_default_ordering(self):
self.addCleanup(setattr, Foo._meta, "ordering", Foo._meta.ordering)
Foo._meta.ordering = ("d",)
self.assertChoicesEqual(
self.field.get_choices(include_blank=False), [self.foo2, self.foo1]
)
def test_get_choices_reverse_related_field(self):
self.assertChoicesEqual(
self.field.remote_field.get_choices(include_blank=False, ordering=("a",)),
[self.bar1, self.bar2],
)
self.assertChoicesEqual(
self.field.remote_field.get_choices(include_blank=False, ordering=("-a",)),
[self.bar2, self.bar1],
)
def test_get_choices_reverse_related_field_default_ordering(self):
self.addCleanup(setattr, Bar._meta, "ordering", Bar._meta.ordering)
Bar._meta.ordering = ("b",)
self.assertChoicesEqual(
self.field.remote_field.get_choices(include_blank=False),
[self.bar2, self.bar1],
)
| GetChoicesOrderingTests |
python | dagster-io__dagster | python_modules/libraries/dagster-wandb/dagster_wandb/types.py | {
"start": 239,
"end": 646
} | class ____(TypedDict, total=False):
"""W&B Artifacts IO Manager configuration. Useful for type checking."""
name: str
type: str
description: str
aliases: list[str]
add_dirs: list[dict[str, Any]]
add_files: list[dict[str, Any]]
add_references: list[dict[str, Any]]
serialization_module: SerializationModule
partitions: dict[str, dict[str, Any]]
| WandbArtifactConfiguration |
python | ray-project__ray | python/ray/experimental/gpu_object_manager/gpu_object_manager.py | {
"start": 1761,
"end": 3573
} | class ____(NamedTuple):
src_actor: "ray.actor.ActorHandle"
dst_actor: "ray.actor.ActorHandle"
send_ref: Optional[ObjectRef]
recv_ref: ObjectRef
communicator_meta: "CommunicatorMetadata"
backend: str
obj_id: str
timeout: float
# TODO(swang): Uncomment and add an API docs page and example usage.
# @PublicAPI(stability="alpha")
def wait_tensor_freed(tensor: "torch.Tensor", timeout: Optional[float] = None):
"""
Wait for the tensor to be freed.
This function is useful for cases where an actor keeps a reference to a
tensor after returning the tensor from a task annotated with
`@ray.method(tensor_transport=...)`. In this case, Ray will store a
*reference* to the tensor, so any in-place modifications made by the actor
that returned the tensor could be seen by other actors. See
:ref:`Ray Direct Transport (RDT) <direct-transport>` for more details.
Call this function for RDT objects to ensure that all corresponding
`ray.ObjectRefs` have gone out of scope and therefore the tensor is safe to
write to again.
Args:
tensor: The tensor to wait to be freed. This should be a tensor that was
previously returned by a task annotated with
`@ray.method(tensor_transport=...)` or stored via
`ray.put(_tensor_transport="...")`.
timeout: The timeout in seconds to wait for all references to the tensor
to go out of scope. Set to None to wait indefinitely. Note that if
None is used, this function could hang if the `ray.ObjectRefs` that
refer to this tensor never go out of scope.
"""
gpu_object_manager = ray.worker.global_worker.gpu_object_manager
gpu_object_manager.gpu_object_store.wait_tensor_freed(tensor, timeout)
| TransferMetadata |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/tokens.py | {
"start": 7519,
"end": 8147
} | class ____:
__slots__ = ()
def __call__(self, request, refresh_token=False):
raise NotImplementedError('Subclasses must implement this method.')
def validate_request(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
raise NotImplementedError('Subclasses must implement this method.')
def estimate_type(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
raise NotImplementedError('Subclasses must implement this method.')
| TokenBase |
python | getsentry__sentry | tests/sentry/api/test_api_owners.py | {
"start": 76,
"end": 694
} | class ____(TestCase):
teams: set[str] = set()
def setUp(self) -> None:
super().setUp()
code_owners_file = open(".github/CODEOWNERS")
lines = code_owners_file.readlines()
code_owners_file.close()
for line in lines:
if line.startswith("/src/"):
tokens = [s.strip() for s in line.split("@getsentry/")]
self.teams.update(tokens[1:])
def test_api_owner_is_a_valid_code_owner(self) -> None:
for owner in ApiOwner:
if owner != ApiOwner.UNOWNED:
assert owner.value in self.teams
| APIOwnersTestCase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/namedTuple4.py | {
"start": 243,
"end": 397
} | class ____(Class1):
some_class_member = 1
reveal_type(Class2(name="a"), expected_text="Class2")
Class3 = NamedTuple("Class3", [("name", str)])
| Class2 |
python | doocs__leetcode | lcof2/ๅๆ Offer II 011. 0 ๅ 1 ไธชๆฐ็ธๅ็ๅญๆฐ็ป/Solution.py | {
"start": 0,
"end": 304
} | class ____:
def findMaxLength(self, nums: List[int]) -> int:
d = {0: -1}
ans = s = 0
for i, x in enumerate(nums):
s += 1 if x else -1
if s in d:
ans = max(ans, i - d[s])
else:
d[s] = i
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py | {
"start": 54642,
"end": 58022
} | class ____(Wav2Vec2BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Audio frame classification does not support the use of Wav2Vec2Bert adapters (config.add_adapter=True)"
)
self.wav2vec2_bert = Wav2Vec2BertModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.num_labels = config.num_labels
self.post_init()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.wav2vec2_bert.parameters():
param.requires_grad = False
@auto_docstring
def forward(
self,
input_features: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.wav2vec2_bert(
input_features,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
logits = self.classifier(hidden_states)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| Wav2Vec2BertForAudioFrameClassification |
python | graphql-python__graphene | graphene/relay/id_type.py | {
"start": 146,
"end": 539
} | class ____:
"""
Base class that define the required attributes/method for a type.
"""
graphene_type: Type[BaseType] = ID
@classmethod
def resolve_global_id(cls, info, global_id):
# return _type, _id
raise NotImplementedError
@classmethod
def to_global_id(cls, _type, _id):
# return _id
raise NotImplementedError
| BaseGlobalIDType |
python | scipy__scipy | scipy/stats/_distn_infrastructure.py | {
"start": 19254,
"end": 19471
} | class ____(rv_frozen):
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k): # No error
return self.dist.logpmf(k, *self.args, **self.kwds)
| rv_discrete_frozen |
python | pypa__pip | src/pip/_internal/utils/logging.py | {
"start": 4178,
"end": 4712
} | class ____(Console):
def on_broken_pipe(self) -> None:
# Reraise the original exception, rich 13.8.0+ exits by default
# instead, preventing our handler from firing.
raise BrokenPipeError() from None
def get_console(*, stderr: bool = False) -> Console:
if stderr:
assert _stderr_console is not None, "stderr rich console is missing!"
return _stderr_console
else:
assert _stdout_console is not None, "stdout rich console is missing!"
return _stdout_console
| PipConsole |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.