language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/ao/nn/quantized/reference/modules/conv.py | {
"start": 10763,
"end": 13219
} | class ____(_ConvTransposeNd, nn.ConvTranspose2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode="zeros",
device=None,
dtype=None,
weight_qparams: dict[str, Any] | None = None,
):
nn.ConvTranspose2d.__init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
output_padding,
groups,
bias,
dilation,
# pyrefly: ignore [bad-argument-type]
padding_mode,
device,
dtype,
)
self._init_weight_qparams(weight_qparams, device)
def forward(
self, x: torch.Tensor, output_size: list[int] | None = None
) -> torch.Tensor:
"""
we have:
w(float) -- quant - dequant \
x(float) ------------- F.convTranspose2d ---
In the full model, we will see
w(float) -- quant - *dequant \
x -- quant --- *dequant -- *F.convTranspose2d --- *quant - dequant
and the backend should be able to fuse the ops with `*` into a quantized conv2d
"""
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
output_padding = self._output_padding(
input, # type: ignore[arg-type]
output_size,
self.stride, # type: ignore[arg-type]
self.padding, # type: ignore[arg-type]
self.kernel_size, # type: ignore[arg-type]
self.dilation, # type: ignore[arg-type]
)
weight_quant_dequant = self.get_weight()
result = F.conv_transpose2d(
x,
weight_quant_dequant,
self.bias,
self.stride,
self.padding,
output_padding,
self.groups,
self.dilation,
)
return result
def _get_name(self):
return "QuantizedConvTranspose2d(Reference)"
@classmethod
def from_float(cls, float_conv, weight_qparams): # type: ignore[override]
return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams)
| ConvTranspose2d |
python | getsentry__sentry | src/sentry/sentry_apps/token_exchange/util.py | {
"start": 266,
"end": 485
} | class ____:
AUTHORIZATION = AUTHORIZATION
REFRESH = REFRESH
CLIENT_SECRET_JWT = CLIENT_SECRET_JWT
def token_expiration() -> datetime:
return timezone.now() + timedelta(hours=TOKEN_LIFE_IN_HOURS)
| GrantTypes |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 62861,
"end": 63838
} | class ____(MMTemplateConfigMixin):
"""
Ensure that _should_scale_configs is False
"""
# TODO(coconutruben): remove this once all tests work
# with proper scaling on mm_plus_mm
def __init__(self) -> None:
super().__init__()
self.should_scale_configs = False
def _get_template_configs_impl(
self,
kernel_inputs: KernelInputs,
op_name: str,
) -> Generator[dict[str, Any], None, None]:
assert isinstance(kernel_inputs, MMKernelInputs), "Expect MMKernelInputs"
m, n, k = kernel_inputs.mnk_symbolic()
for kwargs in super()._get_template_configs_impl(kernel_inputs, op_name):
# Apply BLOCK_K constraint specific to mm_plus_mm
# see https://github.com/triton-lang/triton/issues/1298
# BLOCK_K = K causes llvm error
if V.graph.sizevars.statically_known_lt(kwargs.get("BLOCK_K", k), k):
yield kwargs
| MMPlusMMTemplateConfigMixin |
python | pypa__pipenv | pipenv/vendor/click/utils.py | {
"start": 2723,
"end": 5597
} | class ____:
"""A lazy file works like a regular file but it does not fully open
the file but it does perform some basic checks early to see if the
filename parameter does make sense. This is useful for safely opening
files for writing.
"""
def __init__(
self,
filename: t.Union[str, "os.PathLike[str]"],
mode: str = "r",
encoding: t.Optional[str] = None,
errors: t.Optional[str] = "strict",
atomic: bool = False,
):
self.name: str = os.fspath(filename)
self.mode = mode
self.encoding = encoding
self.errors = errors
self.atomic = atomic
self._f: t.Optional[t.IO[t.Any]]
self.should_close: bool
if self.name == "-":
self._f, self.should_close = open_stream(filename, mode, encoding, errors)
else:
if "r" in mode:
# Open and close the file in case we're opening it for
# reading so that we can catch at least some errors in
# some cases early.
open(filename, mode).close()
self._f = None
self.should_close = True
def __getattr__(self, name: str) -> t.Any:
return getattr(self.open(), name)
def __repr__(self) -> str:
if self._f is not None:
return repr(self._f)
return f"<unopened file '{format_filename(self.name)}' {self.mode}>"
def open(self) -> t.IO[t.Any]:
"""Opens the file if it's not yet open. This call might fail with
a :exc:`FileError`. Not handling this error will produce an error
that Click shows.
"""
if self._f is not None:
return self._f
try:
rv, self.should_close = open_stream(
self.name, self.mode, self.encoding, self.errors, atomic=self.atomic
)
except OSError as e: # noqa: E402
from .exceptions import FileError
raise FileError(self.name, hint=e.strerror) from e
self._f = rv
return rv
def close(self) -> None:
"""Closes the underlying file, no matter what."""
if self._f is not None:
self._f.close()
def close_intelligently(self) -> None:
"""This function only closes the file if it was opened by the lazy
file wrapper. For instance this will never close stdin.
"""
if self.should_close:
self.close()
def __enter__(self) -> "LazyFile":
return self
def __exit__(
self,
exc_type: t.Optional[t.Type[BaseException]],
exc_value: t.Optional[BaseException],
tb: t.Optional[TracebackType],
) -> None:
self.close_intelligently()
def __iter__(self) -> t.Iterator[t.AnyStr]:
self.open()
return iter(self._f) # type: ignore
| LazyFile |
python | joke2k__faker | faker/providers/automotive/fi_FI/__init__.py | {
"start": 48,
"end": 276
} | class ____(AutomotiveProvider):
"""Implement automotive provider for ``fi_FI`` locale.
Source:
- https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Finland
"""
license_formats = ("???-###",)
| Provider |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py | {
"start": 10513,
"end": 12384
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: HunYuanDenseV1Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = HunYuanDenseV1Attention(config=config, layer_idx=layer_idx)
self.mlp = HunYuanDenseV1MLP(config)
self.input_layernorm = HunYuanDenseV1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = HunYuanDenseV1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.layer_idx = layer_idx
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| HunYuanDenseV1DecoderLayer |
python | pytorch__pytorch | torch/testing/_internal/distributed/common_state_dict.py | {
"start": 4665,
"end": 4953
} | class ____(nn.Module):
def __init__(self, vocab_size: int, fusion_vocab_size: int, embed_dim: int) -> None:
super().__init__()
self.embedding = nn.Embedding(vocab_size, embed_dim)
self.fusion_embedding = nn.Embedding(fusion_vocab_size, embed_dim)
| FusionEmbedding |
python | pypa__setuptools | pkg_resources/__init__.py | {
"start": 68057,
"end": 74565
} | class ____(EggProvider):
"""Resource support for zips and eggs"""
eagers: list[str] | None = None
_zip_manifests = MemoizedZipManifests()
# ZipProvider's loader should always be a zipimporter or equivalent
loader: zipimport.zipimporter
def __init__(self, module: _ZipLoaderModule) -> None:
super().__init__(module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
fspath = fspath.rstrip(os.sep)
if fspath == self.loader.archive:
return ''
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre) :]
raise AssertionError(f"{fspath} is not a subpath of {self.zip_pre}")
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1 :].split(os.sep)
raise AssertionError(f"{fspath} is not a subpath of {self.egg_root}")
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(
self, manager: ResourceManager, resource_name: str
) -> str:
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
# FIXME: 'ZipProvider._extract_resource' is too complex (12)
def _extract_resource(self, manager: ResourceManager, zip_path) -> str: # noqa: C901
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(manager, os.path.join(zip_path, name))
# return the extracted directory name
return os.path.dirname(last)
timestamp, _size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise OSError(
'"os.rename" and "os.unlink" are not supported on this platform'
)
try:
if not self.egg_name:
raise OSError(
'"egg_name" is empty. This likely means no egg could be found from the "module_path".'
)
real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path))
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(
".$extract",
dir=os.path.dirname(real_path),
)
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except OSError:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except OSError:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath) -> bool:
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath) -> bool:
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name: str):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name: str):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
| ZipProvider |
python | django__django | django/contrib/postgres/forms/array.py | {
"start": 3730,
"end": 5857
} | class ____(forms.Widget):
template_name = "postgres/widgets/split_array.html"
def __init__(self, widget, size, **kwargs):
self.widget = widget() if isinstance(widget, type) else widget
self.size = size
super().__init__(**kwargs)
@property
def is_hidden(self):
return self.widget.is_hidden
def value_from_datadict(self, data, files, name):
return [
self.widget.value_from_datadict(data, files, "%s_%s" % (name, index))
for index in range(self.size)
]
def value_omitted_from_data(self, data, files, name):
return all(
self.widget.value_omitted_from_data(data, files, "%s_%s" % (name, index))
for index in range(self.size)
)
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += "_0"
return id_
def get_context(self, name, value, attrs=None):
attrs = {} if attrs is None else attrs
context = super().get_context(name, value, attrs)
if self.is_localized:
self.widget.is_localized = self.is_localized
value = value or []
context["widget"]["subwidgets"] = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get("id")
for i in range(max(len(value), self.size)):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = {**final_attrs, "id": "%s_%s" % (id_, i)}
context["widget"]["subwidgets"].append(
self.widget.get_context(name + "_%s" % i, widget_value, final_attrs)[
"widget"
]
)
return context
@property
def media(self):
return self.widget.media
def __deepcopy__(self, memo):
obj = super().__deepcopy__(memo)
obj.widget = copy.deepcopy(self.widget)
return obj
@property
def needs_multipart_form(self):
return self.widget.needs_multipart_form
| SplitArrayWidget |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/np_interop_test.py | {
"start": 13208,
"end": 13722
} | class ____(InteropTest):
def test(self):
tf_var = tf.Variable(2.0)
value = np.square(tf_var)
self.assertIsInstance(value, np.ndarray)
self.assertAllClose(4.0, value)
with tf.control_dependencies([tf_var.assign_add(value)]):
tf_var_value = tf_var.read_value()
self.assertAllClose(6.0, tf_var_value)
if __name__ == '__main__':
ops.set_dtype_conversion_mode('legacy')
np_math_ops.enable_numpy_methods_on_tensor()
tf.compat.v1.enable_eager_execution()
tf.test.main()
| VariableTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1302783,
"end": 1302994
} | class ____(VegaLiteSchema):
"""TextBaseline schema wrapper."""
_schema = {"$ref": "#/definitions/TextBaseline"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| TextBaseline |
python | lepture__mistune | src/mistune/renderers/rst.py | {
"start": 186,
"end": 5523
} | class ____(BaseRenderer):
"""A renderer for converting Markdown to ReST."""
NAME = "rst"
#: marker symbols for heading
HEADING_MARKERS = {
1: "=",
2: "-",
3: "~",
4: "^",
5: '"',
6: "'",
}
INLINE_IMAGE_PREFIX = "img-"
def iter_tokens(self, tokens: Iterable[Dict[str, Any]], state: BlockState) -> Iterable[str]:
prev = None
for tok in tokens:
# ignore blank line
if tok["type"] == "blank_line":
continue
tok["prev"] = prev
prev = tok
yield self.render_token(tok, state)
def __call__(self, tokens: Iterable[Dict[str, Any]], state: BlockState) -> str:
state.env["inline_images"] = []
out = self.render_tokens(tokens, state)
# special handle for line breaks
out += "\n\n".join(self.render_referrences(state)) + "\n"
return strip_end(out)
def render_referrences(self, state: BlockState) -> Iterable[str]:
images = state.env["inline_images"]
for index, token in enumerate(images):
attrs = token["attrs"]
alt = self.render_children(token, state)
ident = self.INLINE_IMAGE_PREFIX + str(index)
yield ".. |" + ident + "| image:: " + attrs["url"] + "\n :alt: " + alt
def render_children(self, token: Dict[str, Any], state: BlockState) -> str:
children = token["children"]
return self.render_tokens(children, state)
def text(self, token: Dict[str, Any], state: BlockState) -> str:
text = cast(str, token["raw"])
return text.replace("|", r"\|")
def emphasis(self, token: Dict[str, Any], state: BlockState) -> str:
return "*" + self.render_children(token, state) + "*"
def strong(self, token: Dict[str, Any], state: BlockState) -> str:
return "**" + self.render_children(token, state) + "**"
def link(self, token: Dict[str, Any], state: BlockState) -> str:
attrs = token["attrs"]
text = self.render_children(token, state)
return "`" + text + " <" + cast(str, attrs["url"]) + ">`__"
def image(self, token: Dict[str, Any], state: BlockState) -> str:
refs: List[Dict[str, Any]] = state.env["inline_images"]
index = len(refs)
refs.append(token)
return "|" + self.INLINE_IMAGE_PREFIX + str(index) + "|"
def codespan(self, token: Dict[str, Any], state: BlockState) -> str:
return "``" + cast(str, token["raw"]) + "``"
def linebreak(self, token: Dict[str, Any], state: BlockState) -> str:
return "<linebreak>"
def softbreak(self, token: Dict[str, Any], state: BlockState) -> str:
return " "
def inline_html(self, token: Dict[str, Any], state: BlockState) -> str:
# rst does not support inline html
return ""
def paragraph(self, token: Dict[str, Any], state: BlockState) -> str:
children = token["children"]
if len(children) == 1 and children[0]["type"] == "image":
image = children[0]
attrs = image["attrs"]
title = cast(str, attrs.get("title"))
alt = self.render_children(image, state)
text = ".. figure:: " + cast(str, attrs["url"])
if title:
text += "\n :alt: " + title
text += "\n\n" + indent(alt, " ")
else:
text = self.render_tokens(children, state)
lines = text.split("<linebreak>")
if len(lines) > 1:
text = "\n".join("| " + line for line in lines)
return text + "\n\n"
def heading(self, token: Dict[str, Any], state: BlockState) -> str:
attrs = token["attrs"]
text = self.render_children(token, state)
marker = self.HEADING_MARKERS[attrs["level"]]
return text + "\n" + marker * len(text) + "\n\n"
def thematic_break(self, token: Dict[str, Any], state: BlockState) -> str:
return "--------------\n\n"
def block_text(self, token: Dict[str, Any], state: BlockState) -> str:
return self.render_children(token, state) + "\n"
def block_code(self, token: Dict[str, Any], state: BlockState) -> str:
attrs = token.get("attrs", {})
info = cast(str, attrs.get("info"))
code = indent(cast(str, token["raw"]), " ")
if info:
lang = info.split()[0]
return ".. code:: " + lang + "\n\n" + code + "\n"
else:
return "::\n\n" + code + "\n\n"
def block_quote(self, token: Dict[str, Any], state: BlockState) -> str:
text = indent(self.render_children(token, state), " ")
prev = token["prev"]
ignore_blocks = (
"paragraph",
"thematic_break",
"linebreak",
"heading",
)
if prev and prev["type"] not in ignore_blocks:
text = "..\n\n" + text
return text
def block_html(self, token: Dict[str, Any], state: BlockState) -> str:
raw = token["raw"]
return ".. raw:: html\n\n" + indent(raw, " ") + "\n\n"
def block_error(self, token: Dict[str, Any], state: BlockState) -> str:
return ""
def list(self, token: Dict[str, Any], state: BlockState) -> str:
return render_list(self, token, state)
| RSTRenderer |
python | django__django | tests/queries/models.py | {
"start": 11127,
"end": 11363
} | class ____(models.Model):
first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name="first_rel")
second = models.ForeignKey(
SimpleCategory, models.CASCADE, related_name="second_rel"
)
| CategoryRelationship |
python | django__django | tests/gis_tests/test_fields.py | {
"start": 179,
"end": 477
} | class ____(SimpleTestCase):
def test_area_field_deepcopy(self):
field = AreaField(None)
self.assertEqual(copy.deepcopy(field), field)
def test_distance_field_deepcopy(self):
field = DistanceField(None)
self.assertEqual(copy.deepcopy(field), field)
| FieldsTests |
python | huggingface__transformers | src/transformers/models/encodec/modeling_encodec.py | {
"start": 6977,
"end": 9407
} | class ____(nn.Module):
"""ConvTranspose1d with asymmetric or causal padding and normalization."""
def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1):
super().__init__()
self.causal = config.use_causal_conv
self.trim_right_ratio = config.trim_right_ratio
self.norm_type = config.norm_type
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
)
self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride)
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
if config.norm_type == "weight_norm":
self.conv = weight_norm(self.conv)
elif config.norm_type == "time_group_norm":
self.norm = nn.GroupNorm(1, out_channels)
if not (self.causal or self.trim_right_ratio == 1.0):
raise ValueError("`trim_right_ratio` != 1.0 only makes sense for causal convolutions")
def forward(self, hidden_states):
kernel_size = self.conv.kernel_size[0]
stride = self.conv.stride[0]
padding_total = kernel_size - stride
hidden_states = self.conv(hidden_states)
if self.norm_type == "time_group_norm":
hidden_states = self.norm(hidden_states)
# We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
# removed at the very end, when keeping only the right length for the output,
# as removing it here would require also passing the length at the matching layer
# in the encoder.
if self.causal:
# Trim the padding on the right according to the specified ratio
# if trim_right_ratio = 1.0, trim everything from right
padding_right = math.ceil(padding_total * self.trim_right_ratio)
else:
# Asymmetric padding required for odd strides
padding_right = padding_total // 2
padding_left = padding_total - padding_right
# unpad
end = hidden_states.shape[-1] - padding_right
hidden_states = hidden_states[..., padding_left:end]
return hidden_states
| EncodecConvTranspose1d |
python | joke2k__faker | faker/providers/ssn/sv_SE/__init__.py | {
"start": 148,
"end": 3016
} | class ____(SsnProvider):
@staticmethod
def _org_to_vat(org_id: str) -> str:
org_id = org_id.replace("-", "")
if len(org_id) == 10:
org_id = "16" + org_id
return f"SE{org_id}01"
def ssn(
self,
min_age: int = 18,
max_age: int = 90,
long: bool = False,
dash: bool = True,
) -> str:
"""
Returns a 10 or 12 (long=True) digit Swedish SSN, "Personnummer".
It consists of 10 digits in the form (CC)YYMMDD-SSSQ, where
YYMMDD is the date of birth, SSS is a serial number
and Q is a control character (Luhn checksum).
Specifying dash=False will give a purely numeric string, suitable
for writing direct to databases.
http://en.wikipedia.org/wiki/Personal_identity_number_(Sweden)
"""
age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
birthday = datetime.datetime.now() - age
yr_fmt = "%Y" if long else "%y"
pnr_date = f"{birthday:{yr_fmt}%m%d}"
chk_date = pnr_date[2:] if long else pnr_date
suffix = f"{self.generator.random.randrange(0, 999):03}"
luhn_checksum = str(calculate_luhn(int(chk_date + suffix)))
hyphen = "-" if dash else ""
pnr = f"{pnr_date}{hyphen}{suffix}{luhn_checksum}"
return pnr
ORG_ID_DIGIT_1 = (1, 2, 3, 5, 6, 7, 8, 9)
def org_id(self, long: bool = False, dash: bool = True) -> str:
"""
Returns a 10 or 12 digit Organisation ID for a Swedish
company.
(In Swedish) https://sv.wikipedia.org/wiki/Organisationsnummer
"""
first_digits = list(self.ORG_ID_DIGIT_1)
random.shuffle(first_digits)
onr_one = str(first_digits.pop())
onr_one += str(self.generator.random.randrange(0, 9)).zfill(1)
onr_one += str(self.generator.random.randrange(20, 99))
onr_one += str(self.generator.random.randrange(0, 99)).zfill(2)
onr_two = str(self.generator.random.randrange(0, 999)).zfill(3)
luhn_checksum = str(calculate_luhn(int(onr_one + onr_two)))
prefix = "16" if long else ""
hyphen = "-" if dash else ""
org_id = f"{prefix}{onr_one}{hyphen}{onr_two}{luhn_checksum}"
return org_id
def vat_id(self) -> str:
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
:return: A random Swedish VAT ID, based on a valid Org ID
"""
oid = self.org_id(long=True, dash=False)
vid = Provider._org_to_vat(oid)
return vid
def org_and_vat_id(self, long: bool = False, dash: bool = True) -> Tuple[str, str]:
"""Returns matching Org ID and VAT number"""
oid = self.org_id(long=long, dash=dash)
vid = Provider._org_to_vat(oid)
return oid, vid
| Provider |
python | pandas-dev__pandas | pandas/io/formats/style_render.py | {
"start": 1416,
"end": 77674
} | class ____:
"""
Base class to process rendering a Styler with a specified jinja2 template.
"""
this_dir = pathlib.Path(__file__).parent.resolve()
template_dir = this_dir / "templates"
loader = jinja2.FileSystemLoader(template_dir)
env = jinja2.Environment(loader=loader, trim_blocks=True)
template_html = env.get_template("html.tpl")
template_html_table = env.get_template("html_table.tpl")
template_html_style = env.get_template("html_style.tpl")
template_latex = env.get_template("latex.tpl")
template_typst = env.get_template("typst.tpl")
template_string = env.get_template("string.tpl")
def __init__(
self,
data: DataFrame | Series,
uuid: str | None = None,
uuid_len: int = 5,
table_styles: CSSStyles | None = None,
table_attributes: str | None = None,
caption: str | tuple | list | None = None,
cell_ids: bool = True,
precision: int | None = None,
) -> None:
# validate ordered args
if isinstance(data, Series):
data = data.to_frame()
if not isinstance(data, DataFrame):
raise TypeError("``data`` must be a Series or DataFrame")
self.data: DataFrame = data
self.index: Index = data.index
self.columns: Index = data.columns
if not isinstance(uuid_len, int) or uuid_len < 0:
raise TypeError("``uuid_len`` must be an integer in range [0, 32].")
self.uuid = uuid or uuid4().hex[: min(32, uuid_len)]
self.uuid_len = len(self.uuid)
self.table_styles = table_styles
self.table_attributes = table_attributes
self.caption = caption
self.cell_ids = cell_ids
self.css = {
"row_heading": "row_heading",
"col_heading": "col_heading",
"index_name": "index_name",
"col": "col",
"row": "row",
"col_trim": "col_trim",
"row_trim": "row_trim",
"level": "level",
"data": "data",
"blank": "blank",
"foot": "foot",
}
self.concatenated: list[StylerRenderer] = []
# add rendering variables
self.hide_index_names: bool = False
self.hide_column_names: bool = False
self.hide_index_: list = [False] * self.index.nlevels
self.hide_columns_: list = [False] * self.columns.nlevels
self.hidden_rows: Sequence[int] = [] # sequence for specific hidden rows/cols
self.hidden_columns: Sequence[int] = []
self.ctx: DefaultDict[tuple[int, int], CSSList] = defaultdict(list)
self.ctx_index: DefaultDict[tuple[int, int], CSSList] = defaultdict(list)
self.ctx_columns: DefaultDict[tuple[int, int], CSSList] = defaultdict(list)
self.cell_context: DefaultDict[tuple[int, int], str] = defaultdict(str)
self._todo: list[tuple[Callable, tuple, dict]] = []
self.tooltips: Tooltips | None = None
precision = (
get_option("styler.format.precision") if precision is None else precision
)
self._display_funcs: DefaultDict[ # maps (row, col) -> format func
tuple[int, int], Callable[[Any], str]
] = defaultdict(lambda: partial(_default_formatter, precision=precision))
self._display_funcs_index: DefaultDict[ # maps (row, level) -> format func
tuple[int, int], Callable[[Any], str]
] = defaultdict(lambda: partial(_default_formatter, precision=precision))
self._display_funcs_index_names: DefaultDict[ # maps index level -> format func
int, Callable[[Any], str]
] = defaultdict(lambda: partial(_default_formatter, precision=precision))
self._display_funcs_columns: DefaultDict[ # maps (level, col) -> format func
tuple[int, int], Callable[[Any], str]
] = defaultdict(lambda: partial(_default_formatter, precision=precision))
self._display_funcs_column_names: DefaultDict[ # maps col level -> format func
int, Callable[[Any], str]
] = defaultdict(lambda: partial(_default_formatter, precision=precision))
def _render(
self,
sparse_index: bool,
sparse_columns: bool,
max_rows: int | None = None,
max_cols: int | None = None,
blank: str = "",
):
"""
Computes and applies styles and then generates the general render dicts.
Also extends the `ctx` and `ctx_index` attributes with those of concatenated
stylers for use within `_translate_latex`
"""
self._compute()
dxs = []
ctx_len = len(self.index)
for i, concatenated in enumerate(self.concatenated):
concatenated.hide_index_ = self.hide_index_
concatenated.hidden_columns = self.hidden_columns
foot = f"{self.css['foot']}{i}"
concatenated.css = {
**self.css,
"data": f"{foot}_data",
"row_heading": f"{foot}_row_heading",
"row": f"{foot}_row",
"foot": f"{foot}_foot",
}
dx = concatenated._render(
sparse_index, sparse_columns, max_rows, max_cols, blank
)
dxs.append(dx)
for (r, c), v in concatenated.ctx.items():
self.ctx[(r + ctx_len, c)] = v
for (r, c), v in concatenated.ctx_index.items():
self.ctx_index[(r + ctx_len, c)] = v
ctx_len += len(concatenated.index)
d = self._translate(
sparse_index, sparse_columns, max_rows, max_cols, blank, dxs
)
return d
def _render_html(
self,
sparse_index: bool,
sparse_columns: bool,
max_rows: int | None = None,
max_cols: int | None = None,
**kwargs,
) -> str:
"""
Renders the ``Styler`` including all applied styles to HTML.
Generates a dict with necessary kwargs passed to jinja2 template.
"""
d = self._render(sparse_index, sparse_columns, max_rows, max_cols, " ")
d.update(kwargs)
return self.template_html.render(
**d,
html_table_tpl=self.template_html_table,
html_style_tpl=self.template_html_style,
)
def _render_latex(
self, sparse_index: bool, sparse_columns: bool, clines: str | None, **kwargs
) -> str:
"""
Render a Styler in latex format
"""
d = self._render(sparse_index, sparse_columns, None, None)
self._translate_latex(d, clines=clines)
self.template_latex.globals["parse_wrap"] = _parse_latex_table_wrapping
self.template_latex.globals["parse_table"] = _parse_latex_table_styles
self.template_latex.globals["parse_cell"] = _parse_latex_cell_styles
self.template_latex.globals["parse_header"] = _parse_latex_header_span
d.update(kwargs)
return self.template_latex.render(**d)
def _render_typst(
self,
sparse_index: bool,
sparse_columns: bool,
max_rows: int | None = None,
max_cols: int | None = None,
**kwargs,
) -> str:
"""
Render a Styler in typst format
"""
d = self._render(sparse_index, sparse_columns, max_rows, max_cols)
d.update(kwargs)
return self.template_typst.render(**d)
def _render_string(
self,
sparse_index: bool,
sparse_columns: bool,
max_rows: int | None = None,
max_cols: int | None = None,
**kwargs,
) -> str:
"""
Render a Styler in string format
"""
d = self._render(sparse_index, sparse_columns, max_rows, max_cols)
d.update(kwargs)
return self.template_string.render(**d)
def _compute(self):
"""
Execute the style functions built up in `self._todo`.
Relies on the conventions that all style functions go through
.apply or .map. The append styles to apply as tuples of
(application method, *args, **kwargs)
"""
self.ctx.clear()
self.ctx_index.clear()
self.ctx_columns.clear()
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _translate(
self,
sparse_index: bool,
sparse_cols: bool,
max_rows: int | None = None,
max_cols: int | None = None,
blank: str = " ",
dxs: list[dict] | None = None,
):
"""
Process Styler data and settings into a dict for template rendering.
Convert data and settings from ``Styler`` attributes such as ``self.data``,
``self.tooltips`` including applying any methods in ``self._todo``.
Parameters
----------
sparse_index : bool
Whether to sparsify the index or print all hierarchical index elements.
Upstream defaults are typically to `pandas.options.styler.sparse.index`.
sparse_cols : bool
Whether to sparsify the columns or print all hierarchical column elements.
Upstream defaults are typically to `pandas.options.styler.sparse.columns`.
max_rows, max_cols : int, optional
Specific max rows and cols. max_elements always take precedence in render.
blank : str
Entry to top-left blank cells.
dxs : list[dict]
The render dicts of the concatenated Stylers.
Returns
-------
d : dict
The following structure: {uuid, table_styles, caption, head, body,
cellstyle, table_attributes}
"""
if dxs is None:
dxs = []
self.css["blank_value"] = blank
# construct render dict
d = {
"uuid": self.uuid,
"table_styles": format_table_styles(self.table_styles or []),
"caption": self.caption,
}
max_elements = get_option("styler.render.max_elements")
max_rows = max_rows if max_rows else get_option("styler.render.max_rows")
max_cols = max_cols if max_cols else get_option("styler.render.max_columns")
max_rows, max_cols = _get_trimming_maximums(
len(self.data.index),
len(self.data.columns),
max_elements,
max_rows,
max_cols,
)
self.cellstyle_map_columns: DefaultDict[tuple[CSSPair, ...], list[str]] = (
defaultdict(list)
)
head = self._translate_header(sparse_cols, max_cols)
d.update({"head": head})
# for sparsifying a MultiIndex and for use with latex clines
idx_lengths = _get_level_lengths(
self.index, sparse_index, max_rows, self.hidden_rows
)
d.update({"index_lengths": idx_lengths})
self.cellstyle_map: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict(
list
)
self.cellstyle_map_index: DefaultDict[tuple[CSSPair, ...], list[str]] = (
defaultdict(list)
)
body: list = self._translate_body(idx_lengths, max_rows, max_cols)
d.update({"body": body})
ctx_maps = {
"cellstyle": "cellstyle_map",
"cellstyle_index": "cellstyle_map_index",
"cellstyle_columns": "cellstyle_map_columns",
} # add the cell_ids styles map to the render dictionary in right format
for k, attr in ctx_maps.items():
map = [
{"props": list(props), "selectors": selectors}
for props, selectors in getattr(self, attr).items()
]
d.update({k: map})
for dx in dxs: # self.concatenated is not empty
d["body"].extend(dx["body"]) # type: ignore[union-attr]
d["cellstyle"].extend(dx["cellstyle"]) # type: ignore[union-attr]
d["cellstyle_index"].extend( # type: ignore[union-attr]
dx["cellstyle_index"]
)
table_attr = self.table_attributes
if not get_option("styler.html.mathjax"):
table_attr = table_attr or ""
if 'class="' in table_attr:
table_attr = table_attr.replace(
'class="', 'class="tex2jax_ignore mathjax_ignore '
)
else:
table_attr += ' class="tex2jax_ignore mathjax_ignore"'
d.update({"table_attributes": table_attr})
if self.tooltips:
d = self.tooltips._translate(self, d)
return d
def _translate_header(self, sparsify_cols: bool, max_cols: int):
"""
Build each <tr> within table <head> as a list
Using the structure:
+----------------------------+---------------+---------------------------+
| index_blanks ... | column_name_0 | column_headers (level_0) |
1) | .. | .. | .. |
| index_blanks ... | column_name_n | column_headers (level_n) |
+----------------------------+---------------+---------------------------+
2) | index_names (level_0 to level_n) ... | column_blanks ... |
+----------------------------+---------------+---------------------------+
Parameters
----------
sparsify_cols : bool
Whether column_headers section will add colspan attributes (>1) to elements.
max_cols : int
Maximum number of columns to render. If exceeded will contain `...` filler.
Returns
-------
head : list
The associated HTML elements needed for template rendering.
"""
# for sparsifying a MultiIndex
col_lengths = _get_level_lengths(
self.columns, sparsify_cols, max_cols, self.hidden_columns
)
clabels = self.data.columns.tolist()
if self.data.columns.nlevels == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels, strict=True))
head = []
# 1) column headers
for r, hide in enumerate(self.hide_columns_):
if hide or not clabels:
continue
header_row = self._generate_col_header_row(
(r, clabels), max_cols, col_lengths
)
head.append(header_row)
# 2) index names
if (
self.data.index.names
and com.any_not_none(*self.data.index.names)
and not all(self.hide_index_)
and not self.hide_index_names
):
index_names_row = self._generate_index_names_row(
clabels, max_cols, col_lengths
)
head.append(index_names_row)
return head
    def _generate_col_header_row(
        self, iter: Sequence, max_cols: int, col_lengths: dict
    ):
        """
        Generate the row containing column headers:
        +----------------------------+---------------+---------------------------+
        | index_blanks ... | column_name_i | column_headers (level_i) |
        +----------------------------+---------------+---------------------------+
        Parameters
        ----------
        iter : tuple
            Looping variables from outer scope: (level number r, column labels).
        max_cols : int
            Permissible number of columns.
        col_lengths : dict
            Map of (level, column index) -> span length, governing colspans.
        Returns
        -------
        list of elements
        """
        r, clabels = iter
        # number of index blanks is governed by number of hidden index levels
        index_blanks = [
            _element("th", self.css["blank"], self.css["blank_value"], True)
        ] * (self.index.nlevels - sum(self.hide_index_) - 1)
        # the single cell showing the column level's name (or a blank)
        name = self.data.columns.names[r]
        is_display = name is not None and not self.hide_column_names
        value = name if is_display else self.css["blank_value"]
        display_value = (
            self._display_funcs_column_names[r](value) if is_display else None
        )
        column_name = [
            _element(
                "th",
                (
                    f"{self.css['blank']} {self.css['level']}{r}"
                    if name is None
                    else f"{self.css['index_name']} {self.css['level']}{r}"
                ),
                value,
                not all(self.hide_index_),
                display_value=display_value,
            )
        ]
        column_headers: list = []
        visible_col_count: int = 0
        for c, value in enumerate(clabels[r]):
            header_element_visible = _is_visible(c, r, col_lengths)
            if header_element_visible:
                # count the full colspan of this header towards the limit
                visible_col_count += col_lengths.get((r, c), 0)
            # break and append a "..." trimming cell once max_cols is exceeded
            if self._check_trim(
                visible_col_count,
                max_cols,
                column_headers,
                "th",
                f"{self.css['col_heading']} {self.css['level']}{r} "
                f"{self.css['col_trim']}",
            ):
                break
            header_element = _element(
                "th",
                (
                    f"{self.css['col_heading']} {self.css['level']}{r} "
                    f"{self.css['col']}{c}"
                ),
                value,
                header_element_visible,
                display_value=self._display_funcs_columns[(r, c)](value),
                attributes=(
                    f'colspan="{col_lengths.get((r, c), 0)}"'
                    if col_lengths.get((r, c), 0) > 1
                    else ""
                ),
            )
            if self.cell_ids:
                header_element["id"] = f"{self.css['level']}{r}_{self.css['col']}{c}"
            # always add id (and register in the cellstyle map) if a style is set
            if (
                header_element_visible
                and (r, c) in self.ctx_columns
                and self.ctx_columns[r, c]
            ):
                header_element["id"] = f"{self.css['level']}{r}_{self.css['col']}{c}"
                self.cellstyle_map_columns[tuple(self.ctx_columns[r, c])].append(
                    f"{self.css['level']}{r}_{self.css['col']}{c}"
                )
            column_headers.append(header_element)
        return index_blanks + column_name + column_headers
    def _generate_index_names_row(
        self, iter: Sequence, max_cols: int, col_lengths: dict
    ):
        """
        Generate the row containing index names
        +----------------------------+---------------+---------------------------+
        | index_names (level_0 to level_n) ... | column_blanks ... |
        +----------------------------+---------------+---------------------------+
        Parameters
        ----------
        iter : tuple
            Looping variables from outer scope (the transposed column labels).
        max_cols : int
            Permissible number of columns
        col_lengths : dict
            Map of (level, column index) -> span length, used for visibility.
        Returns
        -------
        list of elements
        """
        clabels = iter
        # one <th> per index level, showing the level name (or a blank)
        index_names = [
            _element(
                "th",
                f"{self.css['index_name']} {self.css['level']}{c}",
                self.css["blank_value"] if name is None else name,
                not self.hide_index_[c],
                display_value=(
                    None if name is None else self._display_funcs_index_names[c](name)
                ),
            )
            for c, name in enumerate(self.data.index.names)
        ]
        column_blanks: list = []
        visible_col_count: int = 0
        if clabels:
            last_level = self.columns.nlevels - 1  # use last level since never sparsed
            for c, value in enumerate(clabels[last_level]):
                header_element_visible = _is_visible(c, last_level, col_lengths)
                if header_element_visible:
                    visible_col_count += 1
                # break and append a "..." trimming cell once max_cols is exceeded
                if self._check_trim(
                    visible_col_count,
                    max_cols,
                    column_blanks,
                    "th",
                    f"{self.css['blank']} {self.css['col']}{c} {self.css['col_trim']}",
                    self.css["blank_value"],
                ):
                    break
                column_blanks.append(
                    _element(
                        "th",
                        f"{self.css['blank']} {self.css['col']}{c}",
                        self.css["blank_value"],
                        c not in self.hidden_columns,
                    )
                )
        return index_names + column_blanks
    def _translate_body(self, idx_lengths: dict, max_rows: int, max_cols: int):
        """
        Build each <tr> within table <body> as a list
        Use the following structure:
        +--------------------------------------------+---------------------------+
        | index_header_0 ... index_header_n | data_by_column ... |
        +--------------------------------------------+---------------------------+
        Also add elements to the cellstyle_map for more efficient grouped elements in
        <style></style> block
        Parameters
        ----------
        idx_lengths : dict
            A map of the sparsification structure of the index, governing rowspans.
        max_rows : int
            Maximum number of rows to render; a trimming row is added if exceeded.
        max_cols : int
            Maximum number of columns, forwarded to per-row generation.
        Returns
        -------
        body : list
            The associated HTML elements needed for template rendering.
        """
        rlabels = self.data.index.tolist()
        if not isinstance(self.data.index, MultiIndex):
            # normalise so each row label is a list, matching the MultiIndex shape
            rlabels = [[x] for x in rlabels]
        body: list = []
        visible_row_count: int = 0
        # iterate only non-hidden rows, keeping their original r-index
        for r, row_tup in [
            z for z in enumerate(self.data.itertuples()) if z[0] not in self.hidden_rows
        ]:
            visible_row_count += 1
            # break and append a "..." trimming row once max_rows is exceeded
            if self._check_trim(
                visible_row_count,
                max_rows,
                body,
                "row",
            ):
                break
            body_row = self._generate_body_row(
                (r, row_tup, rlabels), max_cols, idx_lengths
            )
            body.append(body_row)
        return body
def _check_trim(
self,
count: int,
max: int,
obj: list,
element: str,
css: str | None = None,
value: str = "...",
) -> bool:
"""
Indicates whether to break render loops and append a trimming indicator
Parameters
----------
count : int
The loop count of previous visible items.
max : int
The allowable rendered items in the loop.
obj : list
The current render collection of the rendered items.
element : str
The type of element to append in the case a trimming indicator is needed.
css : str, optional
The css to add to the trimming indicator element.
value : str, optional
The value of the elements display if necessary.
Returns
-------
result : bool
Whether a trimming element was required and appended.
"""
if count > max:
if element == "row":
obj.append(self._generate_trimmed_row(max))
else:
obj.append(_element(element, css, value, True, attributes=""))
return True
return False
def _generate_trimmed_row(self, max_cols: int) -> list:
"""
When a render has too many rows we generate a trimming row containing "..."
Parameters
----------
max_cols : int
Number of permissible columns
Returns
-------
list of elements
"""
index_headers = [
_element(
"th",
(
f"{self.css['row_heading']} {self.css['level']}{c} "
f"{self.css['row_trim']}"
),
"...",
not self.hide_index_[c],
attributes="",
)
for c in range(self.data.index.nlevels)
]
data: list = []
visible_col_count: int = 0
for c, _ in enumerate(self.columns):
data_element_visible = c not in self.hidden_columns
if data_element_visible:
visible_col_count += 1
if self._check_trim(
visible_col_count,
max_cols,
data,
"td",
f"{self.css['data']} {self.css['row_trim']} {self.css['col_trim']}",
):
break
data.append(
_element(
"td",
f"{self.css['data']} {self.css['col']}{c} {self.css['row_trim']}",
"...",
data_element_visible,
attributes="",
)
)
return index_headers + data
    def _generate_body_row(
        self,
        iter: tuple,
        max_cols: int,
        idx_lengths: dict,
    ):
        """
        Generate a regular row for the body section of appropriate format.
        +--------------------------------------------+---------------------------+
        | index_header_0 ... index_header_n | data_by_column ... |
        +--------------------------------------------+---------------------------+
        Parameters
        ----------
        iter : tuple
            Iterable from outer scope: row number, row data tuple, row index labels.
        max_cols : int
            Number of permissible columns.
        idx_lengths : dict
            A map of the sparsification structure of the index
        Returns
        -------
        list of elements
        """
        r, row_tup, rlabels = iter
        # 1) the index header cells for this row (one per index level)
        index_headers = []
        for c, value in enumerate(rlabels[r]):
            header_element_visible = (
                _is_visible(r, c, idx_lengths) and not self.hide_index_[c]
            )
            header_element = _element(
                "th",
                (
                    f"{self.css['row_heading']} {self.css['level']}{c} "
                    f"{self.css['row']}{r}"
                ),
                value,
                header_element_visible,
                display_value=self._display_funcs_index[(r, c)](value),
                attributes=(
                    f'rowspan="{idx_lengths.get((c, r), 0)}"'
                    if idx_lengths.get((c, r), 0) > 1
                    else ""
                ),
            )
            if self.cell_ids:
                header_element["id"] = (
                    f"{self.css['level']}{c}_{self.css['row']}{r}"  # id is given
                )
            if (
                header_element_visible
                and (r, c) in self.ctx_index
                and self.ctx_index[r, c]
            ):
                # always add id if a style is specified
                header_element["id"] = f"{self.css['level']}{c}_{self.css['row']}{r}"
                self.cellstyle_map_index[tuple(self.ctx_index[r, c])].append(
                    f"{self.css['level']}{c}_{self.css['row']}{r}"
                )
            index_headers.append(header_element)
        # 2) the data cells, trimmed horizontally at max_cols
        data: list = []
        visible_col_count: int = 0
        for c, value in enumerate(row_tup[1:]):
            data_element_visible = (
                c not in self.hidden_columns and r not in self.hidden_rows
            )
            if data_element_visible:
                visible_col_count += 1
            # break and append a "..." trimming cell once max_cols is exceeded
            if self._check_trim(
                visible_col_count,
                max_cols,
                data,
                "td",
                f"{self.css['data']} {self.css['row']}{r} {self.css['col_trim']}",
            ):
                break
            # add custom classes from cell context
            cls = ""
            if (r, c) in self.cell_context:
                cls = " " + self.cell_context[r, c]
            data_element = _element(
                "td",
                (f"{self.css['data']} {self.css['row']}{r} {self.css['col']}{c}{cls}"),
                value,
                data_element_visible,
                attributes="",
                display_value=self._display_funcs[(r, c)](value),
            )
            if self.cell_ids:
                data_element["id"] = f"{self.css['row']}{r}_{self.css['col']}{c}"
            if data_element_visible and (r, c) in self.ctx and self.ctx[r, c]:
                # always add id if needed due to specified style
                data_element["id"] = f"{self.css['row']}{r}_{self.css['col']}{c}"
                self.cellstyle_map[tuple(self.ctx[r, c])].append(
                    f"{self.css['row']}{r}_{self.css['col']}{c}"
                )
            data.append(data_element)
        return index_headers + data
    def _translate_latex(self, d: dict, clines: str | None) -> None:
        r"""
        Post-process the default render dict for the LaTeX template format.
        Processing items included are:
        - Remove hidden columns from the non-headers part of the body.
        - Place cellstyles directly in td cells rather than use cellstyle_map.
        - Remove hidden indexes or reinsert missing th elements if part of multiindex
          or multirow sparsification (so that \multirow and \multicol work correctly).
        """
        index_levels = self.index.nlevels
        # GH 52218
        visible_index_level_n = max(1, index_levels - sum(self.hide_index_))
        # attach column header styles inline; the column offset skips the
        # leading index-blank cells present in each head row
        d["head"] = [
            [
                {**col, "cellstyle": self.ctx_columns[r, c - visible_index_level_n]}
                for c, col in enumerate(row)
                if col["is_visible"]
            ]
            for r, row in enumerate(d["head"])
        ]
        def _concatenated_visible_rows(obj, n, row_indices):
            """
            Extract all visible row indices recursively from concatenated stylers.
            """
            row_indices.extend(
                [r + n for r in range(len(obj.index)) if r not in obj.hidden_rows]
            )
            n += len(obj.index)
            for concatenated in obj.concatenated:
                n = _concatenated_visible_rows(concatenated, n, row_indices)
            return n
        def concatenated_visible_rows(obj):
            # wrapper seeding the recursion with an empty accumulator
            row_indices: list[int] = []
            _concatenated_visible_rows(obj, 0, row_indices)
            # TODO try to consolidate the concat visible rows
            # methods to a single function / recursion for simplicity
            return row_indices
        body = []
        for r, row in zip(concatenated_visible_rows(self), d["body"], strict=True):
            # note: cannot enumerate d["body"] because rows were dropped if hidden
            # during _translate_body so must zip to acquire the true r-index associated
            # with the ctx obj which contains the cell styles.
            if all(self.hide_index_):
                row_body_headers = []
            else:
                # keep visible index headers, blanking display of sparsified cells
                row_body_headers = [
                    {
                        **col,
                        "display_value": (
                            col["display_value"] if col["is_visible"] else ""
                        ),
                        "cellstyle": self.ctx_index[r, c],
                    }
                    for c, col in enumerate(row[:index_levels])
                    if (col["type"] == "th" and not self.hide_index_[c])
                ]
            # keep visible data cells with their styles attached inline
            row_body_cells = [
                {**col, "cellstyle": self.ctx[r, c]}
                for c, col in enumerate(row[index_levels:])
                if (col["is_visible"] and col["type"] == "td")
            ]
            body.append(row_body_headers + row_body_cells)
        d["body"] = body
        # clines are determined from info on index_lengths and hidden_rows and input
        # to a dict defining which row clines should be added in the template.
        if clines not in [
            None,
            "all;data",
            "all;index",
            "skip-last;data",
            "skip-last;index",
        ]:
            raise ValueError(
                f"`clines` value of {clines} is invalid. Should either be None or one "
                f"of 'all;data', 'all;index', 'skip-last;data', 'skip-last;index'."
            )
        if clines is not None:
            data_len = len(row_body_cells) if "data" in clines and d["body"] else 0
            d["clines"] = defaultdict(list)
            visible_row_indexes: list[int] = [
                r for r in range(len(self.data.index)) if r not in self.hidden_rows
            ]
            visible_index_levels: list[int] = [
                i for i in range(index_levels) if not self.hide_index_[i]
            ]
            for rn, r in enumerate(visible_row_indexes):
                for lvln, lvl in enumerate(visible_index_levels):
                    if lvl == index_levels - 1 and "skip-last" in clines:
                        continue
                    idx_len = d["index_lengths"].get((lvl, r), None)
                    if idx_len is not None:  # i.e. not a sparsified entry
                        d["clines"][rn + idx_len].append(
                            f"\\cline{{{lvln + 1}-{len(visible_index_levels) + data_len}}}"  # noqa: E501
                        )
    def format(
        self,
        formatter: ExtFormatter | None = None,
        subset: Subset | None = None,
        na_rep: str | None = None,
        precision: int | None = None,
        decimal: str = ".",
        thousands: str | None = None,
        escape: str | None = None,
        hyperlinks: str | None = None,
    ) -> StylerRenderer:
        r"""
        Format the text display value of cells.
        Parameters
        ----------
        formatter : str, callable, dict or None
            Object to define how values are displayed. See notes.
        subset : label, array-like, IndexSlice, optional
            A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
            or single key, to `DataFrame.loc[:, <subset>]` where the columns are
            prioritised, to limit ``data`` to *before* applying the function.
        na_rep : str, optional
            Representation for missing values.
            If ``na_rep`` is None, no special formatting is applied.
        precision : int, optional
            Floating point precision to use for display purposes, if not determined by
            the specified ``formatter``.
        decimal : str, default "."
            Character used as decimal separator for floats, complex and integers.
        thousands : str, optional, default None
            Character used as thousands separator for floats, complex and integers.
        escape : str, optional
            Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
            in cell display string with HTML-safe sequences.
            Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
            ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
            LaTeX-safe sequences.
            Use 'latex-math' to replace the characters the same way as in 'latex' mode,
            except for math substrings, which either are surrounded
            by two characters ``$`` or start with the character ``\(`` and
            end with ``\)``. Escaping is done before ``formatter``.
        hyperlinks : {"html", "latex"}, optional
            Convert string patterns containing https://, http://, ftp:// or www. to
            HTML <a> tags as clickable URL hyperlinks if "html", or LaTeX \href
            commands if "latex".
        Returns
        -------
        Styler
            Returns itself for chaining.
        See Also
        --------
        Styler.format_index: Format the text display value of index labels.
        Notes
        -----
        This method assigns a formatting function, ``formatter``, to each cell in the
        DataFrame. If ``formatter`` is ``None``, then the default formatter is used.
        If a callable then that function should take a data value as input and return
        a displayable representation, such as a string. If ``formatter`` is
        given as a string this is assumed to be a valid Python format specification
        and is wrapped to a callable as ``string.format(x)``. If a ``dict`` is given,
        keys should correspond to column names, and values should be string or
        callable, as above.
        The default formatter currently expresses floats and complex numbers with the
        pandas display precision unless using the ``precision`` argument here. The
        default formatter does not adjust the representation of missing values unless
        the ``na_rep`` argument is used.
        The ``subset`` argument defines which region to apply the formatting function
        to. If the ``formatter`` argument is given in dict form but does not include
        all columns within the subset then these columns will have the default formatter
        applied. Any columns in the formatter dict excluded from the subset will
        be ignored.
        When using a ``formatter`` string the dtypes must be compatible, otherwise a
        `ValueError` will be raised.
        When instantiating a Styler, default formatting can be applied by setting the
        ``pandas.options``:
        - ``styler.format.formatter``: default None.
        - ``styler.format.na_rep``: default None.
        - ``styler.format.precision``: default 6.
        - ``styler.format.decimal``: default ".".
        - ``styler.format.thousands``: default None.
        - ``styler.format.escape``: default None.
        .. warning::
           `Styler.format` is ignored when using the output format `Styler.to_excel`,
           since Excel and Python have inherently different formatting structures.
           However, it is possible to use the `number-format` pseudo CSS attribute
           to force Excel permissible formatting. See examples.
        Examples
        --------
        Using ``na_rep`` and ``precision`` with the default ``formatter``
        >>> df = pd.DataFrame([[np.nan, 1.0, 'A'], [2.0, np.nan, 3.0]])
        >>> df.style.format(na_rep='MISS', precision=3)  # doctest: +SKIP
                0       1       2
        0    MISS   1.000       A
        1   2.000    MISS   3.000
        Using a ``formatter`` specification on consistent column dtypes
        >>> df.style.format('{:.2f}', na_rep='MISS', subset=[0, 1])  # doctest: +SKIP
                0      1          2
        0    MISS   1.00          A
        1    2.00   MISS   3.000000
        Using the default ``formatter`` for unspecified columns
        >>> df.style.format({0: '{:.2f}', 1: '£ {:.1f}'},
        ...                 na_rep='MISS', precision=1)  # doctest: +SKIP
                 0      1     2
        0    MISS   £ 1.0     A
        1    2.00    MISS   3.0
        Multiple ``na_rep`` or ``precision`` specifications under the default
        ``formatter``.
        >>> (df.style.format(na_rep='MISS', precision=1, subset=[0]).format(
        ...     na_rep='PASS', precision=2, subset=[1, 2]))  # doctest: +SKIP
                0      1      2
        0    MISS   1.00      A
        1     2.0   PASS   3.00
        Using a callable ``formatter`` function.
        >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT'
        >>> df.style.format({0: '{:.1f}', 2: func},
        ...                 precision=4, na_rep='MISS')  # doctest: +SKIP
                0        1        2
        0    MISS   1.0000   STRING
        1     2.0     MISS    FLOAT
        Using a ``formatter`` with HTML ``escape`` and ``na_rep``.
        >>> df = pd.DataFrame([['<div></div>', '"A&B"', None]])
        >>> s = df.style.format(
        ...     '<a href="a.com/{0}">{0}</a>', escape="html", na_rep="NA")
        >>> s.to_html()  # doctest: +SKIP
        ...
        <td .. ><a href="a.com/&lt;div&gt;&lt;/div&gt;">&lt;div&gt;&lt;/div&gt;</a></td>
        <td .. ><a href="a.com/&#34;A&amp;B&#34;">&#34;A&amp;B&#34;</a></td>
        <td .. >NA</td>
        ...
        Using a ``formatter`` with ``escape`` in 'latex' mode.
        >>> df = pd.DataFrame([["123"], ["~ ^"], ["$%#"]])
        >>> df.style.format("\\textbf{{{}}}",
        ...                 escape="latex").to_latex()  # doctest: +SKIP
        \begin{tabular}{ll}
         & 0 \\
        0 & \textbf{123} \\
        1 & \textbf{\textasciitilde \space \textasciicircum } \\
        2 & \textbf{\$\%\#} \\
        \end{tabular}
        Applying ``escape`` in 'latex-math' mode. In the example below
        we enter math mode using the character ``$``.
        >>> df = pd.DataFrame([
        ...     [r"$\sum_{i=1}^{10} a_i$ a~b $\alpha = \frac{\beta}{\zeta^2}$"],
        ...     [r"%#^ $ \$x^2 $"]])
        >>> df.style.format(escape="latex-math").to_latex()  # doctest: +SKIP
        \begin{tabular}{ll}
         & 0 \\
        0 & $\sum_{i=1}^{10} a_i$ a\textasciitilde b $\alpha = \frac{\beta}{\zeta^2}$ \\
        1 & \%\#\textasciicircum \space $ \$x^2 $ \\
        \end{tabular}
        We can use the character ``\(`` to enter math mode and the character ``\)``
        to close math mode.
        >>> df = pd.DataFrame([
        ...     [r"\(\sum_{i=1}^{10} a_i\) a~b \(\alpha = \frac{\beta}{\zeta^2}\)"],
        ...     [r"%#^ \( \$x^2 \)"]])
        >>> df.style.format(escape="latex-math").to_latex()  # doctest: +SKIP
        \begin{tabular}{ll}
         & 0 \\
        0 & \(\sum_{i=1}^{10} a_i\) a\textasciitilde b \(\alpha
        = \frac{\beta}{\zeta^2}\) \\
        1 & \%\#\textasciicircum \space \( \$x^2 \) \\
        \end{tabular}
        If we have in one DataFrame cell a combination of both shorthands
        for math formulas, the shorthand with the sign ``$`` will be applied.
        >>> df = pd.DataFrame([
        ...     [r"\( x^2 \)  $x^2$"],
        ...     [r"$\frac{\beta}{\zeta}$ \(\frac{\beta}{\zeta}\)"]])
        >>> df.style.format(escape="latex-math").to_latex()  # doctest: +SKIP
        \begin{tabular}{ll}
         & 0 \\
        0 & \textbackslash ( x\textasciicircum 2 \textbackslash )  $x^2$ \\
        1 & $\frac{\beta}{\zeta}$ \textbackslash (\textbackslash
        frac\{\textbackslash beta\}\{\textbackslash zeta\}\textbackslash ) \\
        \end{tabular}
        Pandas defines a `number-format` pseudo CSS attribute instead of the `.format`
        method to create `to_excel` permissible formatting. Note that semi-colons are
        CSS protected characters but used as separators in Excel's format string.
        Replace semi-colons with the section separator character (ASCII-245) when
        defining the formatting here.
        >>> df = pd.DataFrame({"A": [1, 0, -1]})
        >>> pseudo_css = "number-format: 0§[Red](0)§-§@;"
        >>> filename = "formatted_file.xlsx"
        >>> df.style.map(lambda v: pseudo_css).to_excel(filename)  # doctest: +SKIP
        .. figure:: ../../_static/style/format_excel_css.png
        """
        # shortcut: if nothing is specified, clear all display funcs and revert
        # to the default formatter, avoiding the per-cell loop entirely
        if all(
            (
                formatter is None,
                subset is None,
                precision is None,
                decimal == ".",
                thousands is None,
                na_rep is None,
                escape is None,
                hyperlinks is None,
            )
        ):
            self._display_funcs.clear()
            return self  # clear the formatter / revert to default and avoid looping
        subset = slice(None) if subset is None else subset
        subset = non_reducing_slice(subset)
        data = self.data.loc[subset]
        # normalise formatter to a per-column dict (None values get the default)
        if not isinstance(formatter, dict):
            formatter = dict.fromkeys(data.columns, formatter)
        # positional indices of the subset within the full frame
        cis = self.columns.get_indexer_for(data.columns)
        ris = self.index.get_indexer_for(data.index)
        for ci in cis:
            # one wrapped callable per column, shared by all its rows in subset
            format_func = _maybe_wrap_formatter(
                formatter.get(self.columns[ci]),
                na_rep=na_rep,
                precision=precision,
                decimal=decimal,
                thousands=thousands,
                escape=escape,
                hyperlinks=hyperlinks,
            )
            for ri in ris:
                self._display_funcs[(ri, ci)] = format_func
        return self
    def format_index(
        self,
        formatter: ExtFormatter | None = None,
        axis: Axis = 0,
        level: Level | list[Level] | None = None,
        na_rep: str | None = None,
        precision: int | None = None,
        decimal: str = ".",
        thousands: str | None = None,
        escape: str | None = None,
        hyperlinks: str | None = None,
    ) -> StylerRenderer:
        r"""
        Format the text display value of index labels or column headers.
        Parameters
        ----------
        formatter : str, callable, dict or None
            Object to define how values are displayed. See notes.
        axis : {0, "index", 1, "columns"}
            Whether to apply the formatter to the index or column headers.
        level : int, str, list
            The level(s) over which to apply the generic formatter.
        na_rep : str, optional
            Representation for missing values.
            If ``na_rep`` is None, no special formatting is applied.
        precision : int, optional
            Floating point precision to use for display purposes, if not determined by
            the specified ``formatter``.
        decimal : str, default "."
            Character used as decimal separator for floats, complex and integers.
        thousands : str, optional, default None
            Character used as thousands separator for floats, complex and integers.
        escape : str, optional
            Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
            in cell display string with HTML-safe sequences.
            Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
            ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
            LaTeX-safe sequences.
            Escaping is done before ``formatter``.
        hyperlinks : {"html", "latex"}, optional
            Convert string patterns containing https://, http://, ftp:// or www. to
            HTML <a> tags as clickable URL hyperlinks if "html", or LaTeX \href
            commands if "latex".
        Returns
        -------
        Styler
            Returns itself for chaining.
        See Also
        --------
        Styler.format: Format the text display value of data cells.
        Notes
        -----
        This method assigns a formatting function, ``formatter``, to each level label
        in the DataFrame's index or column headers. If ``formatter`` is ``None``,
        then the default formatter is used.
        If a callable then that function should take a label value as input and return
        a displayable representation, such as a string. If ``formatter`` is
        given as a string this is assumed to be a valid Python format specification
        and is wrapped to a callable as ``string.format(x)``. If a ``dict`` is given,
        keys should correspond to MultiIndex level numbers or names, and values should
        be string or callable, as above.
        The default formatter currently expresses floats and complex numbers with the
        pandas display precision unless using the ``precision`` argument here. The
        default formatter does not adjust the representation of missing values unless
        the ``na_rep`` argument is used.
        The ``level`` argument defines which levels of a MultiIndex to apply the
        method to. If the ``formatter`` argument is given in dict form but does
        not include all levels within the level argument then these unspecified levels
        will have the default formatter applied. Any levels in the formatter dict
        specifically excluded from the level argument will be ignored.
        When using a ``formatter`` string the dtypes must be compatible, otherwise a
        `ValueError` will be raised.
        .. warning::
           `Styler.format_index` is ignored when using the output format
           `Styler.to_excel`, since Excel and Python have inherently different
           formatting structures.
           However, it is possible to use the `number-format` pseudo CSS attribute
           to force Excel permissible formatting. See documentation for `Styler.format`.
        Examples
        --------
        Using ``na_rep`` and ``precision`` with the default ``formatter``
        >>> df = pd.DataFrame([[1, 2, 3]], columns=[2.0, np.nan, 4.0])
        >>> df.style.format_index(axis=1, na_rep='MISS', precision=3)  # doctest: +SKIP
            2.000    MISS    4.000
        0       1       2        3
        Using a ``formatter`` specification on consistent dtypes in a level
        >>> df.style.format_index('{:.2f}', axis=1, na_rep='MISS')  # doctest: +SKIP
             2.00    MISS    4.00
        0       1       2       3
        Using the default ``formatter`` for unspecified levels
        >>> df = pd.DataFrame([[1, 2, 3]],
        ...                   columns=pd.MultiIndex.from_arrays(
        ...                       [["a", "a", "b"], [2, np.nan, 4]]))
        >>> df.style.format_index({0: lambda v: v.upper()}, axis=1, precision=1)
        ...  # doctest: +SKIP
                       A       B
              2.0    nan     4.0
        0       1      2       3
        Using a callable ``formatter`` function.
        >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT'
        >>> df.style.format_index(func, axis=1, na_rep='MISS')
        ...  # doctest: +SKIP
                  STRING  STRING
            FLOAT   MISS   FLOAT
        0       1      2       3
        Using a ``formatter`` with HTML ``escape`` and ``na_rep``.
        >>> df = pd.DataFrame([[1, 2, 3]], columns=['"A"', 'A&B', None])
        >>> s = df.style.format_index('$ {0}', axis=1, escape="html", na_rep="NA")
        ...  # doctest: +SKIP
        <th .. >$ &#34;A&#34;</th>
        <th .. >$ A&amp;B</th>
        <th .. >NA</td>
        ...
        Using a ``formatter`` with LaTeX ``escape``.
        >>> df = pd.DataFrame([[1, 2, 3]], columns=["123", "~", "$%#"])
        >>> df.style.format_index("\\textbf{{{}}}", escape="latex", axis=1).to_latex()
        ...  # doctest: +SKIP
        \begin{tabular}{lrrr}
        {} & {\textbf{123}} & {\textbf{\textasciitilde }} & {\textbf{\$\%\#}} \\
        0 & 1 & 2 & 3 \\
        \end{tabular}
        """
        # resolve axis alias to 0 (index) or 1 (columns) and pick the matching
        # display-funcs store and labels object
        axis = self.data._get_axis_number(axis)
        if axis == 0:
            display_funcs_, obj = self._display_funcs_index, self.index
        else:
            display_funcs_, obj = self._display_funcs_columns, self.columns
        levels_ = refactor_levels(level, obj)
        # shortcut: if nothing is specified, clear all display funcs and revert
        # to the default formatter, avoiding the per-label loop entirely
        if all(
            (
                formatter is None,
                level is None,
                precision is None,
                decimal == ".",
                thousands is None,
                na_rep is None,
                escape is None,
                hyperlinks is None,
            )
        ):
            display_funcs_.clear()
            return self  # clear the formatter / revert to default and avoid looping
        # normalise formatter to a {level number: formatter} dict
        if not isinstance(formatter, dict):
            formatter = dict.fromkeys(levels_, formatter)
        else:
            formatter = {
                obj._get_level_number(level): formatter_
                for level, formatter_ in formatter.items()
            }
        for lvl in levels_:
            # one wrapped callable per level, shared by all labels at that level
            format_func = _maybe_wrap_formatter(
                formatter.get(lvl),
                na_rep=na_rep,
                precision=precision,
                decimal=decimal,
                thousands=thousands,
                escape=escape,
                hyperlinks=hyperlinks,
            )
            # keys are (row, level) for the index and (level, col) for columns
            for idx in [(i, lvl) if axis == 0 else (lvl, i) for i in range(len(obj))]:
                display_funcs_[idx] = format_func
        return self
def relabel_index(
self,
labels: Sequence | Index,
axis: Axis = 0,
level: Level | list[Level] | None = None,
) -> StylerRenderer:
r"""
Relabel the index, or column header, keys to display a set of specified values.
Parameters
----------
labels : list-like or Index
New labels to display. Must have same length as the underlying values not
hidden.
axis : {"index", 0, "columns", 1}
Apply to the index or columns.
level : int, str, list, optional
The level(s) over which to apply the new labels. If `None` will apply
to all levels of an Index or MultiIndex which are not hidden.
Returns
-------
Styler
Returns itself for chaining.
See Also
--------
Styler.format_index: Format the text display value of index or column headers.
Styler.hide: Hide the index, column headers, or specified data from display.
Notes
-----
As part of Styler, this method allows the display of an index to be
completely user-specified without affecting the underlying DataFrame data,
index, or column headers. This means that the flexibility of indexing is
maintained whilst the final display is customisable.
Since Styler is designed to be progressively constructed with method chaining,
this method is adapted to react to the **currently specified hidden elements**.
This is useful because it means one does not have to specify all the new
labels if the majority of an index, or column headers, have already been hidden.
The following produce equivalent display (note the length of ``labels`` in
each case).
.. code-block:: python
# relabel first, then hide
df = pd.DataFrame({"col": ["a", "b", "c"]})
df.style.relabel_index(["A", "B", "C"]).hide([0, 1])
# hide first, then relabel
df = pd.DataFrame({"col": ["a", "b", "c"]})
df.style.hide([0, 1]).relabel_index(["C"])
This method should be used, rather than :meth:`Styler.format_index`, in one of
the following cases (see examples):
- A specified set of labels are required which are not a function of the
underlying index keys.
- The function of the underlying index keys requires a counter variable,
such as those available upon enumeration.
Examples
--------
Basic use
>>> df = pd.DataFrame({"col": ["a", "b", "c"]})
>>> df.style.relabel_index(["A", "B", "C"]) # doctest: +SKIP
col
A a
B b
C c
Chaining with pre-hidden elements
>>> df.style.hide([0, 1]).relabel_index(["C"]) # doctest: +SKIP
col
C c
Using a MultiIndex
>>> midx = pd.MultiIndex.from_product([[0, 1], [0, 1], [0, 1]])
>>> df = pd.DataFrame({"col": list(range(8))}, index=midx)
>>> styler = df.style # doctest: +SKIP
col
0 0 0 0
1 1
1 0 2
1 3
1 0 0 4
1 5
1 0 6
1 7
>>> styler.hide(
... (midx.get_level_values(0) == 0) | (midx.get_level_values(1) == 0)
... )
... # doctest: +SKIP
>>> styler.hide(level=[0, 1]) # doctest: +SKIP
>>> styler.relabel_index(["binary6", "binary7"]) # doctest: +SKIP
col
binary6 6
binary7 7
We can also achieve the above by indexing first and then re-labeling
>>> styler = df.loc[[(1, 1, 0), (1, 1, 1)]].style
>>> styler.hide(level=[0, 1]).relabel_index(["binary6", "binary7"])
... # doctest: +SKIP
col
binary6 6
binary7 7
Defining a formatting function which uses an enumeration counter. Also note
that the value of the index key is passed in the case of string labels so it
can also be inserted into the label, using curly brackets (or double curly
brackets if the string if pre-formatted),
>>> df = pd.DataFrame({"samples": np.random.rand(10)})
>>> styler = df.loc[np.random.randint(0, 10, 3)].style
>>> styler.relabel_index([f"sample{i + 1} ({{}})" for i in range(3)])
... # doctest: +SKIP
samples
sample1 (5) 0.315811
sample2 (0) 0.495941
sample3 (2) 0.067946
"""
axis = self.data._get_axis_number(axis)
if axis == 0:
display_funcs_, obj = self._display_funcs_index, self.index
hidden_labels, hidden_lvls = self.hidden_rows, self.hide_index_
else:
display_funcs_, obj = self._display_funcs_columns, self.columns
hidden_labels, hidden_lvls = self.hidden_columns, self.hide_columns_
visible_len = len(obj) - len(set(hidden_labels))
if len(labels) != visible_len:
raise ValueError(
"``labels`` must be of length equal to the number of "
f"visible labels along ``axis`` ({visible_len})."
)
if level is None:
level = [i for i in range(obj.nlevels) if not hidden_lvls[i]]
levels_ = refactor_levels(level, obj)
def alias_(x, value):
if isinstance(value, str):
return value.format(x)
return value
for ai, i in enumerate([i for i in range(len(obj)) if i not in hidden_labels]):
if len(levels_) == 1:
idx = (i, levels_[0]) if axis == 0 else (levels_[0], i)
display_funcs_[idx] = partial(alias_, value=labels[ai])
else:
for aj, lvl in enumerate(levels_):
idx = (i, lvl) if axis == 0 else (lvl, i)
display_funcs_[idx] = partial(alias_, value=labels[ai][aj])
return self
def format_index_names(
self,
formatter: ExtFormatter | None = None,
axis: Axis = 0,
level: Level | list[Level] | None = None,
na_rep: str | None = None,
precision: int | None = None,
decimal: str = ".",
thousands: str | None = None,
escape: str | None = None,
hyperlinks: str | None = None,
) -> StylerRenderer:
r"""
Format the text display value of index names or column names.
.. versionadded:: 3.0
Parameters
----------
formatter : str, callable, dict or None
Object to define how values are displayed. See notes.
axis : {0, "index", 1, "columns"}
Whether to apply the formatter to the index or column headers.
level : int, str, list
The level(s) over which to apply the generic formatter.
na_rep : str, optional
Representation for missing values.
If ``na_rep`` is None, no special formatting is applied.
precision : int, optional
Floating point precision to use for display purposes, if not determined by
the specified ``formatter``.
decimal : str, default "."
Character used as decimal separator for floats, complex and integers.
thousands : str, optional, default None
Character used as thousands separator for floats, complex and integers.
escape : str, optional
Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
in cell display string with HTML-safe sequences.
Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
LaTeX-safe sequences.
Escaping is done before ``formatter``.
hyperlinks : {"html", "latex"}, optional
Convert string patterns containing https://, http://, ftp:// or www. to
HTML <a> tags as clickable URL hyperlinks if "html", or LaTeX \href
commands if "latex".
Returns
-------
Styler
Returns itself for chaining.
Raises
------
ValueError
If the `formatter` is a string and the dtypes are incompatible.
See Also
--------
Styler.format_index: Format the text display value of index labels
or column headers.
Notes
-----
This method has a similar signature to :meth:`Styler.format_index`. Since
`names` are generally label based, and often not numeric, the typical features
expected to be more frequently used here are ``escape`` and ``hyperlinks``.
.. warning::
`Styler.format_index_names` is ignored when using the output format
`Styler.to_excel`, since Excel and Python have inherently different
formatting structures.
Examples
--------
>>> df = pd.DataFrame(
... [[1, 2], [3, 4]],
... index=pd.Index(["a", "b"], name="idx"),
... )
>>> df # doctest: +SKIP
0 1
idx
a 1 2
b 3 4
>>> df.style.format_index_names(lambda x: x.upper(), axis=0) # doctest: +SKIP
0 1
IDX
a 1 2
b 3 4
"""
axis = self.data._get_axis_number(axis)
if axis == 0:
display_funcs_, obj = self._display_funcs_index_names, self.index
else:
display_funcs_, obj = self._display_funcs_column_names, self.columns
levels_ = refactor_levels(level, obj)
if all(
(
formatter is None,
level is None,
precision is None,
decimal == ".",
thousands is None,
na_rep is None,
escape is None,
hyperlinks is None,
)
):
display_funcs_.clear()
return self # clear the formatter / revert to default and avoid looping
if not isinstance(formatter, dict):
formatter = dict.fromkeys(levels_, formatter)
else:
formatter = {
obj._get_level_number(level): formatter_
for level, formatter_ in formatter.items()
}
for lvl in levels_:
format_func = _maybe_wrap_formatter(
formatter.get(lvl),
na_rep=na_rep,
precision=precision,
decimal=decimal,
thousands=thousands,
escape=escape,
hyperlinks=hyperlinks,
)
display_funcs_[lvl] = format_func
return self
def _element(
html_element: str,
html_class: str | None,
value: Any,
is_visible: bool,
**kwargs,
) -> dict:
"""
Template to return container with information for a <td></td> or <th></th> element.
"""
if "display_value" not in kwargs or kwargs["display_value"] is None:
kwargs["display_value"] = value
return {
"type": html_element,
"value": value,
"class": html_class,
"is_visible": is_visible,
**kwargs,
}
def _get_trimming_maximums(
rn,
cn,
max_elements,
max_rows=None,
max_cols=None,
scaling_factor: float = 0.8,
) -> tuple[int, int]:
"""
Recursively reduce the number of rows and columns to satisfy max elements.
Parameters
----------
rn, cn : int
The number of input rows / columns
max_elements : int
The number of allowable elements
max_rows, max_cols : int, optional
Directly specify an initial maximum rows or columns before compression.
scaling_factor : float
Factor at which to reduce the number of rows / columns to fit.
Returns
-------
rn, cn : tuple
New rn and cn values that satisfy the max_elements constraint
"""
def scale_down(rn, cn):
if cn >= rn:
return rn, int(cn * scaling_factor)
else:
return int(rn * scaling_factor), cn
if max_rows:
rn = max_rows if rn > max_rows else rn
if max_cols:
cn = max_cols if cn > max_cols else cn
while rn * cn > max_elements:
rn, cn = scale_down(rn, cn)
return rn, cn
def _get_level_lengths(
index: Index,
sparsify: bool,
max_index: int,
hidden_elements: Sequence[int] | None = None,
):
"""
Given an index, find the level length for each element.
Parameters
----------
index : Index
Index or columns to determine lengths of each element
sparsify : bool
Whether to hide or show each distinct element in a MultiIndex
max_index : int
The maximum number of elements to analyse along the index due to trimming
hidden_elements : sequence of int
Index positions of elements hidden from display in the index affecting
length
Returns
-------
Dict :
Result is a dictionary of (level, initial_position): span
"""
if isinstance(index, MultiIndex):
levels = index._format_multi(sparsify=lib.no_default, include_names=False)
else:
levels = index._format_flat(include_name=False)
if hidden_elements is None:
hidden_elements = []
lengths = {}
if not isinstance(index, MultiIndex):
for i, value in enumerate(levels):
if i not in hidden_elements:
lengths[(0, i)] = 1
return lengths
for i, lvl in enumerate(levels):
visible_row_count = 0 # used to break loop due to display trimming
for j, row in enumerate(lvl):
if visible_row_count > max_index:
break
if not sparsify:
# then lengths will always equal 1 since no aggregation.
if j not in hidden_elements:
lengths[(i, j)] = 1
visible_row_count += 1
elif (row is not lib.no_default) and (j not in hidden_elements):
# this element has not been sparsified so must be the start of section
last_label = j
lengths[(i, last_label)] = 1
visible_row_count += 1
elif row is not lib.no_default:
# even if the above is hidden, keep track of it in case length > 1 and
# later elements are visible
last_label = j
lengths[(i, last_label)] = 0
elif j not in hidden_elements:
# then element must be part of sparsified section and is visible
visible_row_count += 1
if visible_row_count > max_index:
break # do not add a length since the render trim limit reached
if lengths[(i, last_label)] == 0:
# if previous iteration was first-of-section but hidden then offset
last_label = j
lengths[(i, last_label)] = 1
else:
# else add to previous iteration
lengths[(i, last_label)] += 1
non_zero_lengths = {
element: length for element, length in lengths.items() if length >= 1
}
return non_zero_lengths
def _is_visible(idx_row, idx_col, lengths) -> bool:
"""
Index -> {(idx_row, idx_col): bool}).
"""
return (idx_col, idx_row) in lengths
def format_table_styles(styles: CSSStyles) -> CSSStyles:
"""
looks for multiple CSS selectors and separates them:
[{'selector': 'td, th', 'props': 'a:v;'}]
---> [{'selector': 'td', 'props': 'a:v;'},
{'selector': 'th', 'props': 'a:v;'}]
"""
return [
{"selector": selector, "props": css_dict["props"]}
for css_dict in styles
for selector in css_dict["selector"].split(",")
]
def _default_formatter(x: Any, precision: int, thousands: bool = False) -> Any:
"""
Format the display of a value
Parameters
----------
x : Any
Input variable to be formatted
precision : Int
Floating point precision used if ``x`` is float or complex.
thousands : bool, default False
Whether to group digits with thousands separated with ",".
Returns
-------
value : Any
Matches input type, or string if input is float or complex or int with sep.
"""
if is_float(x) or is_complex(x):
return f"{x:,.{precision}f}" if thousands else f"{x:.{precision}f}"
elif is_integer(x):
return f"{x:,}" if thousands else str(x)
return x
def _wrap_decimal_thousands(
formatter: Callable, decimal: str, thousands: str | None
) -> Callable:
"""
Takes a string formatting function and wraps logic to deal with thousands and
decimal parameters, in the case that they are non-standard and that the input
is a (float, complex, int).
"""
def wrapper(x):
if is_float(x) or is_integer(x) or is_complex(x):
if decimal != "." and thousands is not None and thousands != ",":
return (
formatter(x)
.replace(",", "§_§-") # rare string to avoid "," <-> "." clash.
.replace(".", decimal)
.replace("§_§-", thousands)
)
elif decimal != "." and (thousands is None or thousands == ","):
return formatter(x).replace(".", decimal)
elif decimal == "." and thousands is not None and thousands != ",":
return formatter(x).replace(",", thousands)
return formatter(x)
return wrapper
def _str_escape(x, escape):
"""if escaping: only use on str, else return input"""
if isinstance(x, str):
if escape == "html":
return escape_html(x)
elif escape == "latex":
return _escape_latex(x)
elif escape == "latex-math":
return _escape_latex_math(x)
else:
raise ValueError(
f"`escape` only permitted in {{'html', 'latex', 'latex-math'}}, \
got {escape}"
)
return x
def _render_href(x, format):
"""uses regex to detect a common URL pattern and converts to href tag in format."""
if isinstance(x, str):
if format == "html":
href = '<a href="{0}" target="_blank">{0}</a>'
elif format == "latex":
href = r"\href{{{0}}}{{{0}}}"
else:
raise ValueError("``hyperlinks`` format can only be 'html' or 'latex'")
pat = r"((http|ftp)s?:\/\/|www.)[\w/\-?=%.:@]+\.[\w/\-&?=%.,':;~!@#$*()\[\]]+"
return re.sub(pat, lambda m: href.format(m.group(0)), x)
return x
def _maybe_wrap_formatter(
formatter: BaseFormatter | None = None,
na_rep: str | None = None,
precision: int | None = None,
decimal: str = ".",
thousands: str | None = None,
escape: str | None = None,
hyperlinks: str | None = None,
) -> Callable:
"""
Allows formatters to be expressed as str, callable or None, where None returns
a default formatting function. wraps with na_rep, and precision where they are
available.
"""
# Get initial func from input string, input callable, or from default factory
if isinstance(formatter, str):
func_0 = lambda x: formatter.format(x)
elif callable(formatter):
func_0 = formatter
elif formatter is None:
precision = (
get_option("styler.format.precision") if precision is None else precision
)
func_0 = partial(
_default_formatter, precision=precision, thousands=(thousands is not None)
)
else:
raise TypeError(f"'formatter' expected str or callable, got {type(formatter)}")
# Replace chars if escaping
if escape is not None:
func_1 = lambda x: func_0(_str_escape(x, escape=escape))
else:
func_1 = func_0
# Replace decimals and thousands if non-standard inputs detected
if decimal != "." or (thousands is not None and thousands != ","):
func_2 = _wrap_decimal_thousands(func_1, decimal=decimal, thousands=thousands)
else:
func_2 = func_1
# Render links
if hyperlinks is not None:
func_3 = lambda x: func_2(_render_href(x, format=hyperlinks))
else:
func_3 = func_2
# Replace missing values if na_rep
if na_rep is None:
return func_3
else:
return lambda x: na_rep if (isna(x) is True) else func_3(x)
def non_reducing_slice(slice_: Subset):
"""
Ensure that a slice doesn't reduce to a Series or Scalar.
Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
# ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
kinds = (ABCSeries, np.ndarray, Index, list, str)
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part) -> bool:
"""
Returns
-------
bool
True if slice does *not* reduce,
False if `part` is a tuple.
"""
# true when slice does *not* reduce, False when part is a tuple,
# i.e. MultiIndex slice
if isinstance(part, tuple):
# GH#39421 check for sub-slice:
return any((isinstance(s, slice) or is_list_like(s)) for s in part)
else:
return isinstance(part, slice) or is_list_like(part)
if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
# error: Item "slice" of "Union[slice, Sequence[Any]]" has no attribute
# "__iter__" (not iterable) -> is specifically list_like in conditional
slice_ = [p if pred(p) else [p] for p in slice_] # type: ignore[union-attr]
return tuple(slice_)
def maybe_convert_css_to_tuples(style: CSSProperties) -> CSSList:
"""
Convert css-string to sequence of tuples format if needed.
'color:red; border:1px solid black;' -> [('color', 'red'),
('border','1px solid red')]
"""
if isinstance(style, str):
if style and ":" not in style:
raise ValueError(
"Styles supplied as string must follow CSS rule formats, "
f"for example 'attr: val;'. '{style}' was given."
)
s = style.split(";")
return [
(x.split(":")[0].strip(), ":".join(x.split(":")[1:]).strip())
for x in s
if x.strip() != ""
]
return style
def refactor_levels(
level: Level | list[Level] | None,
obj: Index,
) -> list[int]:
"""
Returns a consistent levels arg for use in ``hide_index`` or ``hide_columns``.
Parameters
----------
level : int, str, list
Original ``level`` arg supplied to above methods.
obj:
Either ``self.index`` or ``self.columns``
Returns
-------
list : refactored arg with a list of levels to hide
"""
if level is None:
levels_: list[int] = list(range(obj.nlevels))
elif isinstance(level, int):
levels_ = [level]
elif isinstance(level, str):
levels_ = [obj._get_level_number(level)]
elif isinstance(level, list):
levels_ = [
obj._get_level_number(lev) if not isinstance(lev, int) else lev
for lev in level
]
else:
raise ValueError("`level` must be of type `int`, `str` or list of such")
return levels_
| StylerRenderer |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_relationship.py | {
"start": 85752,
"end": 87659
} | class ____(fixtures.MappedTest):
run_setup_mappers = "once"
run_inserts = None
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("t", String(5)),
)
Table(
"a_sub",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
)
Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("a_id", Integer, ForeignKey("a.id")),
)
@classmethod
def setup_mappers(cls):
class A(cls.Comparable):
pass
class ASub(A):
pass
class B(cls.Comparable):
pass
cls.mapper_registry.map_imperatively(
A,
cls.tables.a,
polymorphic_on=cls.tables.a.c.t,
polymorphic_identity="a",
properties={"bs": relationship(B, cascade="all, delete-orphan")},
)
cls.mapper_registry.map_imperatively(
ASub,
cls.tables.a_sub,
inherits=A,
polymorphic_identity="asub",
properties={"bs": relationship(B, cascade="all, delete-orphan")},
)
cls.mapper_registry.map_imperatively(B, cls.tables.b)
def test_persist(self):
A, ASub, B = self.classes("A", "ASub", "B")
s = Session(testing.db)
s.add_all([A(bs=[B(), B(), B()]), ASub(bs=[B(), B(), B()])])
s.commit()
eq_(s.query(B).count(), 6)
for a in s.query(A):
eq_(len(a.bs), 3)
s.delete(a)
s.commit()
eq_(s.query(B).count(), 0)
| SameNameOnJoined |
python | conda__conda | conda/gateways/logging.py | {
"start": 2020,
"end": 7384
} | class ____(StreamHandler):
"""Log StreamHandler that always writes to the current sys stream."""
terminator = "\n"
def __init__(self, sys_stream):
"""
Args:
sys_stream: stream name, either "stdout" or "stderr" (attribute of module sys)
"""
super().__init__(getattr(sys, sys_stream))
self.sys_stream = sys_stream
del self.stream
def __getattr__(self, attr):
# always get current sys.stdout/sys.stderr, unless self.stream has been set explicitly
if attr == "stream":
return getattr(sys, self.sys_stream)
return super().__getattribute__(attr)
"""
def emit(self, record):
# in contrast to the Python 2.7 StreamHandler, this has no special Unicode handling;
# however, this backports the Python >=3.2 terminator attribute and additionally makes it
# further customizable by giving record an identically named attribute, e.g., via
# logger.log(..., extra={"terminator": ""}) or LoggerAdapter(logger, {"terminator": ""}).
try:
msg = self.format(record)
terminator = getattr(record, "terminator", self.terminator)
stream = self.stream
stream.write(msg)
stream.write(terminator)
self.flush()
except Exception:
self.handleError(record)
"""
# Updated Python 2.7.15's stdlib, with terminator and unicode support.
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
fs = "%s"
stream.write(fs % msg)
terminator = getattr(record, "terminator", self.terminator)
stream.write(terminator)
self.flush()
# How does conda handle Ctrl-C? Find out..
# except (KeyboardInterrupt, SystemExit):
# raise
except Exception:
self.handleError(record)
# Don't use initialize_logging/set_conda_log_level in
# cli.python_api! There we want the user to have control over their logging,
# e.g., using their own levels, handlers, formatters and propagation settings.
@cache
def initialize_logging():
# 'conda' gets level WARN and does not propagate to root.
getLogger("conda").setLevel(WARN)
set_conda_log_level()
initialize_std_loggers()
def initialize_std_loggers():
# Set up special loggers 'conda.stdout'/'conda.stderr' which output directly to the
# corresponding sys streams, filter token urls and don't propagate.
formatter = Formatter("%(message)s")
for stream in ("stdout", "stderr"):
logger = getLogger(f"conda.{stream}")
logger.handlers = []
logger.setLevel(INFO)
handler = StdStreamHandler(stream)
handler.setLevel(INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.addFilter(TokenURLFilter())
logger.propagate = False
stdlog_logger = getLogger(f"conda.{stream}log")
stdlog_logger.handlers = []
stdlog_logger.setLevel(DEBUG)
stdlog_handler = StdStreamHandler(stream)
stdlog_handler.terminator = ""
stdlog_handler.setLevel(DEBUG)
stdlog_handler.setFormatter(formatter)
stdlog_logger.addHandler(stdlog_handler)
stdlog_logger.propagate = False
verbose_logger = getLogger("conda.stdout.verbose")
verbose_logger.handlers = []
verbose_logger.setLevel(INFO)
verbose_handler = StdStreamHandler("stdout")
verbose_handler.setLevel(INFO)
verbose_handler.setFormatter(formatter)
verbose_handler.addFilter(TokenURLFilter())
verbose_logger.addHandler(verbose_handler)
verbose_logger.propagate = False
def set_conda_log_level(level=WARN):
attach_stderr_handler(level=level, logger_name="conda", filters=[TokenURLFilter()])
def set_all_logger_level(level=DEBUG):
formatter = Formatter("%(message)s\n") if level >= INFO else None
attach_stderr_handler(level, formatter=formatter, filters=[TokenURLFilter()])
set_conda_log_level(level)
# 'requests' loggers get their own handlers so that they always output messages in long format
# regardless of the level.
attach_stderr_handler(level, "requests", filters=[TokenURLFilter()])
attach_stderr_handler(
level, "requests.packages.urllib3", filters=[TokenURLFilter()]
)
@cache
def set_file_logging(logger_name=None, level=DEBUG, path=None):
if path is None:
timestamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")
path = f".conda.{timestamp}.log"
conda_logger = getLogger(logger_name)
handler = logging.FileHandler(path)
handler.setFormatter(_FORMATTER)
handler.setLevel(level)
conda_logger.addHandler(handler)
def set_log_level(log_level: int):
set_all_logger_level(log_level)
log.debug("log_level set to %d", log_level)
| StdStreamHandler |
python | walkccc__LeetCode | solutions/2074. Reverse Nodes in Even Length Groups/2074.py | {
"start": 0,
"end": 1156
} | class ____:
def reverseEvenLengthGroups(self, head: ListNode | None) -> ListNode | None:
# prev -> (head -> ... -> tail) -> next -> ...
dummy = ListNode(0, head)
prev = dummy
tail = head
next = head.next
groupLength = 1
def getTailAndLength(head: ListNode | None, groupLength: int) -> tuple[ListNode | None, int]:
length = 1
tail = head
while length < groupLength and tail.next:
tail = tail.next
length += 1
return tail, length
def reverse(head: ListNode | None) -> ListNode | None:
prev = None
while head:
next = head.next
head.next = prev
prev = head
head = next
return prev
while True:
if groupLength % 2 == 1:
prev.next = head
prev = tail
else:
tail.next = None
prev.next = reverse(head)
# Prev -> (tail -> ... -> head) -> next -> ...
head.next = next
prev = head
if not next:
break
head = next
tail, length = getTailAndLength(head, groupLength + 1)
next = tail.next
groupLength = length
return dummy.next
| Solution |
python | astropy__astropy | astropy/modeling/rotations.py | {
"start": 6369,
"end": 8834
} | class ____(_EulerRotation, Model):
"""
Implements Euler angle intrinsic rotations.
Rotates one coordinate system into another (fixed) coordinate system.
All coordinate systems are right-handed. The sign of the angles is
determined by the right-hand rule..
Parameters
----------
phi, theta, psi : float or `~astropy.units.Quantity` ['angle']
"proper" Euler angles in deg.
If floats, they should be in deg.
axes_order : str
A 3 character string, a combination of 'x', 'y' and 'z',
where each character denotes an axis in 3D space.
"""
n_inputs = 2
n_outputs = 2
phi = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="1st Euler angle (Quantity or value in deg)",
)
theta = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="2nd Euler angle (Quantity or value in deg)",
)
psi = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="3rd Euler angle (Quantity or value in deg)",
)
def __init__(self, phi, theta, psi, axes_order, **kwargs):
self.axes = ["x", "y", "z"]
if len(axes_order) != 3:
raise TypeError(
"Expected axes_order to be a character sequence of length 3, "
f"got {axes_order}"
)
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError(
f"Unrecognized axis label {unrecognized}; should be one of {self.axes}"
)
self.axes_order = axes_order
qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]]
if any(qs) and not all(qs):
raise TypeError(
"All parameters should be of the same type - float or Quantity."
)
super().__init__(phi=phi, theta=theta, psi=psi, **kwargs)
self._inputs = ("alpha", "delta")
self._outputs = ("alpha", "delta")
@property
def inverse(self):
return self.__class__(
phi=-self.psi,
theta=-self.theta,
psi=-self.phi,
axes_order=self.axes_order[::-1],
)
def evaluate(self, alpha, delta, phi, theta, psi):
a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order)
return a, b
| EulerAngleRotation |
python | ray-project__ray | rllib/examples/compute_adapted_gae_on_postprocess_trajectory.py | {
"start": 842,
"end": 5695
} | class ____(RLlibCallback):
@override(RLlibCallback)
def on_postprocess_trajectory(
self,
*,
worker,
episode,
agent_id,
policy_id,
policies,
postprocessed_batch,
original_batches,
**kwargs
):
super().on_postprocess_trajectory(
worker=worker,
episode=episode,
agent_id=agent_id,
policy_id=policy_id,
policies=policies,
postprocessed_batch=postprocessed_batch,
original_batches=original_batches,
**kwargs
)
if policies[policy_id].config.get("use_adapted_gae", False):
policy = policies[policy_id]
assert policy.config[
"use_gae"
], "Can't use adapted gae without use_gae=True!"
info_dicts = postprocessed_batch[SampleBatch.INFOS]
assert np.all(
["d_ts" in info_dict for info_dict in info_dicts]
), "Info dicts in sample batch must contain data 'd_ts' \
(=ts[i+1]-ts[i] length of time steps)!"
d_ts = np.array(
[np.float(info_dict.get("d_ts")) for info_dict in info_dicts]
)
assert np.all(
[e.is_integer() for e in d_ts]
), "Elements of 'd_ts' (length of time steps) must be integer!"
# Trajectory is actually complete -> last r=0.0.
if postprocessed_batch[SampleBatch.TERMINATEDS][-1]:
last_r = 0.0
# Trajectory has been truncated -> last r=VF estimate of last obs.
else:
# Input dict is provided to us automatically via the Model's
# requirements. It's a single-timestep (last one in trajectory)
# input_dict.
# Create an input dict according to the Model's requirements.
input_dict = postprocessed_batch.get_single_step_input_dict(
policy.model.view_requirements, index="last"
)
last_r = policy._value(**input_dict)
gamma = policy.config["gamma"]
lambda_ = policy.config["lambda"]
vpred_t = np.concatenate(
[postprocessed_batch[SampleBatch.VF_PREDS], np.array([last_r])]
)
delta_t = (
postprocessed_batch[SampleBatch.REWARDS]
+ gamma**d_ts * vpred_t[1:]
- vpred_t[:-1]
)
# This formula for the advantage is an adaption of
# "Generalized Advantage Estimation"
# (https://arxiv.org/abs/1506.02438) which accounts for time steps
# of irregular length (see proposal here ).
# NOTE: last time step delta is not required
postprocessed_batch[
Postprocessing.ADVANTAGES
] = generalized_discount_cumsum(delta_t, d_ts[:-1], gamma * lambda_)
postprocessed_batch[Postprocessing.VALUE_TARGETS] = (
postprocessed_batch[Postprocessing.ADVANTAGES]
+ postprocessed_batch[SampleBatch.VF_PREDS]
).astype(np.float32)
postprocessed_batch[Postprocessing.ADVANTAGES] = postprocessed_batch[
Postprocessing.ADVANTAGES
].astype(np.float32)
def generalized_discount_cumsum(
x: np.ndarray, deltas: np.ndarray, gamma: float
) -> np.ndarray:
"""Calculates the 'time-dependent' discounted cumulative sum over a
(reward) sequence `x`.
Recursive equations:
y[t] - gamma**deltas[t+1]*y[t+1] = x[t]
reversed(y)[t] - gamma**reversed(deltas)[t-1]*reversed(y)[t-1] =
reversed(x)[t]
Args:
x (np.ndarray): A sequence of rewards or one-step TD residuals.
deltas (np.ndarray): A sequence of time step deltas (length of time
steps).
gamma: The discount factor gamma.
Returns:
np.ndarray: The sequence containing the 'time-dependent' discounted
cumulative sums for each individual element in `x` till the end of
the trajectory.
.. testcode::
:skipif: True
x = np.array([0.0, 1.0, 2.0, 3.0])
deltas = np.array([1.0, 4.0, 15.0])
gamma = 0.9
generalized_discount_cumsum(x, deltas, gamma)
.. testoutput::
array([0.0 + 0.9^1.0*1.0 + 0.9^4.0*2.0 + 0.9^15.0*3.0,
1.0 + 0.9^4.0*2.0 + 0.9^15.0*3.0,
2.0 + 0.9^15.0*3.0,
3.0])
"""
reversed_x = x[::-1]
reversed_deltas = deltas[::-1]
reversed_y = np.empty_like(x)
reversed_y[0] = reversed_x[0]
for i in range(1, x.size):
reversed_y[i] = (
reversed_x[i] + gamma ** reversed_deltas[i - 1] * reversed_y[i - 1]
)
return reversed_y[::-1]
| MyCallbacks |
python | nedbat__coveragepy | tests/test_arcs.py | {
"start": 23941,
"end": 38459
} | class ____(CoverageTest):
"""Arc-measuring tests involving exception handling."""
def test_try_except(self) -> None:
self.check_coverage(
"""\
a, b = 1, 1
try:
a = 3
except:
b = 5
assert a == 3 and b == 1
""",
branchz="",
branchz_missing="",
)
def test_raise_followed_by_statement(self) -> None:
self.check_coverage(
"""\
a, b = 1, 1
try:
a = 3
raise Exception("Yikes!")
a = 5
except:
b = 7
assert a == 3 and b == 7
""",
branchz="",
branchz_missing="",
)
def test_hidden_raise(self) -> None:
self.check_coverage(
"""\
a, b = 1, 1
def oops(x):
if x % 2:
raise Exception("odd")
try:
a = 6
oops(1)
a = 8
except:
b = 10
assert a == 6 and b == 10
""",
branchz="34 3-2",
branchz_missing="3-2",
)
def test_except_with_type(self) -> None:
self.check_coverage(
"""\
a, b = 1, 1
def oops(x):
if x % 2:
raise ValueError("odd")
def try_it(x):
try:
a = 7
oops(x)
a = 9
except ValueError:
b = 11
return a
assert try_it(0) == 9 # C
assert try_it(1) == 7 # D
""",
branchz="34 3-2",
branchz_missing="",
)
def test_try_finally(self) -> None:
self.check_coverage(
"""\
a, c = 1, 1
try:
a = 3
finally:
c = 5
assert a == 3 and c == 5
""",
branchz="",
)
self.check_coverage(
"""\
a, c, d = 1, 1, 1
try:
try:
a = 4
finally:
c = 6
except:
d = 8
assert a == 4 and c == 6 and d == 1 # 9
""",
branchz="",
)
self.check_coverage(
"""\
a, c, d = 1, 1, 1
try:
try:
a = 4
raise Exception("Yikes!")
# line 6
finally:
c = 8
except:
d = 10 # A
assert a == 4 and c == 8 and d == 10 # B
""",
branchz="",
)
def test_finally_in_loop(self) -> None:
self.check_coverage(
"""\
a, c, d, i = 1, 1, 1, 99
try:
for i in range(5):
try:
a = 5
if i > 0:
raise Exception("Yikes!")
a = 8
finally:
c = 10
except:
d = 12 # C
assert a == 5 and c == 10 and d == 12 # D
""",
branchz="34 3D 67 68",
branchz_missing="3D",
)
self.check_coverage(
"""\
a, c, d, i = 1, 1, 1, 99
try:
for i in range(5):
try:
a = 5
if i > 10:
raise Exception("Yikes!")
a = 8
finally:
c = 10
except:
d = 12 # C
assert a == 8 and c == 10 and d == 1 # D
""",
branchz="34 3D 67 68",
branchz_missing="67",
)
def test_break_through_finally(self) -> None:
self.check_coverage(
"""\
a, c, d, i = 1, 1, 1, 99
try:
for i in range(3):
try:
a = 5
if i > 0:
break
a = 8
finally:
c = 10
except:
d = 12 # C
assert a == 5 and c == 10 and d == 1 # D
""",
branchz="34 3D 67 68",
branchz_missing="3D",
)
def test_break_continue_without_finally(self) -> None:
self.check_coverage(
"""\
a, c, d, i = 1, 1, 1, 99
try:
for i in range(3):
try:
a = 5
if i > 0:
break
continue
except:
c = 10
except:
d = 12 # C
assert a == 5 and c == 1 and d == 1 # D
""",
branchz="34 3D 67 68",
branchz_missing="3D",
)
def test_continue_through_finally(self) -> None:
self.check_coverage(
"""\
a, b, c, d, i = 1, 1, 1, 1, 99
try:
for i in range(3):
try:
a = 5
if i > 0:
continue
b = 8
finally:
c = 10
except:
d = 12 # C
assert (a, b, c, d) == (5, 8, 10, 1) # D
""",
branchz="34 3D 67 68",
branchz_missing="",
)
def test_finally_in_loop_bug_92(self) -> None:
self.check_coverage(
"""\
for i in range(5):
try:
j = 3
finally:
f = 5
g = 6
h = 7
""",
branchz="12 17",
branchz_missing="",
)
def test_bug_212(self) -> None:
# "except Exception as e" is crucial here.
# Bug 212 said that the "if exc" line was incorrectly marked as only
# partially covered.
self.check_coverage(
"""\
def b(exc):
try:
while "no peephole".upper():
raise Exception(exc) # 4
except Exception as e:
if exc != 'expected':
raise
q = 8
b('expected')
try:
b('unexpected') # C
except:
pass
""",
branchz="34 3-1 67 68",
branchz_missing="3-1",
)
def test_except_finally(self) -> None:
self.check_coverage(
"""\
a, b, c = 1, 1, 1
try:
a = 3
except:
b = 5
finally:
c = 7
assert a == 3 and b == 1 and c == 7
""",
branchz="",
)
self.check_coverage(
"""\
a, b, c = 1, 1, 1
def oops(x):
if x % 2: raise Exception("odd")
try:
a = 5
oops(1)
a = 7
except:
b = 9
finally:
c = 11
assert a == 5 and b == 9 and c == 11
""",
branchz="",
)
def test_multiple_except_clauses(self) -> None:
self.check_coverage(
"""\
a, b, c = 1, 1, 1
try:
a = 3
except ValueError:
b = 5
except IndexError:
a = 7
finally:
c = 9
assert a == 3 and b == 1 and c == 9
""",
branchz="",
)
self.check_coverage(
"""\
a, b, c = 1, 1, 1
try:
a = int("xyz") # ValueError
except ValueError:
b = 5
except IndexError:
a = 7
finally:
c = 9
assert a == 1 and b == 5 and c == 9
""",
branchz="",
)
self.check_coverage(
"""\
a, b, c = 1, 1, 1
try:
a = [1][3] # IndexError
except ValueError:
b = 5
except IndexError:
a = 7
finally:
c = 9
assert a == 7 and b == 1 and c == 9
""",
branchz="",
)
self.check_coverage(
"""\
a, b, c = 1, 1, 1
try:
try:
a = 4/0 # ZeroDivisionError
except ValueError:
b = 6
except IndexError:
a = 8
finally:
c = 10
except ZeroDivisionError:
pass
assert a == 1 and b == 1 and c == 10
""",
branchz="",
)
def test_return_finally(self) -> None:
self.check_coverage(
"""\
a = [1]
def check_token(data):
if data:
try:
return 5
finally:
a.append(7)
return 8
assert check_token(False) == 8
assert a == [1]
assert check_token(True) == 5
assert a == [1, 7]
""",
branchz="34 38",
branchz_missing="",
)
def test_except_jump_finally(self) -> None:
self.check_coverage(
"""\
def func(x):
a = f = g = 2
try:
for i in range(4):
try:
6/0
except ZeroDivisionError:
if x == 'break':
a = 9
break
elif x == 'continue':
a = 12
continue
elif x == 'return':
a = 15 # F
return a, f, g, i # G
elif x == 'raise': # H
a = 18 # I
raise ValueError() # J
finally:
f = 21 # L
except ValueError: # M
g = 23 # N
return a, f, g, i # O
assert func('break') == (9, 21, 2, 0) # Q
assert func('continue') == (12, 21, 2, 3) # R
assert func('return') == (15, 2, 2, 0) # S
assert func('raise') == (18, 21, 23, 0) # T
assert func('other') == (2, 21, 2, 3) # U 30
""",
branchz="45 4O 89 8B BC BE EF EH HI HL",
branchz_missing="",
)
def test_else_jump_finally(self) -> None:
self.check_coverage(
"""\
def func(x):
a = f = g = 2
try:
for i in range(4):
try:
b = 6
except ZeroDivisionError:
pass
else:
if x == 'break':
a = 11
break
elif x == 'continue':
a = 14
continue
elif x == 'return':
a = 17 # H
return a, f, g, i # I
elif x == 'raise': # J
a = 20 # K
raise ValueError() # L
finally:
f = 23 # N
except ValueError: # O
g = 25 # P
return a, f, g, i # Q
assert func('break') == (11, 23, 2, 0) # S
assert func('continue') == (14, 23, 2, 3) # T
assert func('return') == (17, 2, 2, 0) # U
assert func('raise') == (20, 23, 25, 0) # V
assert func('other') == (2, 23, 2, 3) # W 32
""",
branchz="45 4Q AB AD DE DG GH GJ JK JN",
branchz_missing="",
)
@pytest.mark.skipif(env.PYVERSION < (3, 11), reason="ExceptionGroup is new in Python 3.11")
def test_exception_group(self) -> None:
# PyPy3.11 traces this incorrectly: https://github.com/pypy/pypy/issues/5354
if env.PYPY:
missing = "5, 11"
else:
missing = "5-6, 11-12"
self.check_coverage(
"""\
a = 1
try:
raise ExceptionGroup("Zero!", [ZeroDivisionError()])
except* ValueError:
a = 5
b = 6/0
except* ZeroDivisionError:
a = 8
b = 9
except* Exception:
a = 11
b = 12/0
assert a == 8
assert b == 9
""",
lines=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
missing=missing,
branchz="",
branchz_missing="",
)
| ExceptionArcTest |
python | mkdocs__mkdocs | mkdocs/tests/config/config_options_tests.py | {
"start": 68068,
"end": 81616
} | class ____(TestCase):
def test_plugin_config_without_options(self) -> None:
class Schema(Config):
plugins = c.Plugins()
cfg = {
'plugins': ['sample'],
}
conf = self.get_config(Schema, cfg)
assert_type(conf.plugins, PluginCollection)
self.assertIsInstance(conf.plugins, PluginCollection)
self.assertIn('sample', conf.plugins)
plugin = conf.plugins['sample']
assert_type(plugin, BasePlugin)
self.assertIsInstance(plugin, FakePlugin)
self.assertIsInstance(plugin.config, _FakePluginConfig)
expected = {
'foo': 'default foo',
'bar': 0,
'dir': None,
}
self.assertEqual(plugin.config, expected)
def test_plugin_config_with_options(self) -> None:
class Schema(Config):
plugins = c.Plugins()
cfg = {
'plugins': [
{
'sample': {
'foo': 'foo value',
'bar': 42,
},
}
],
}
conf = self.get_config(Schema, cfg)
self.assertIsInstance(conf.plugins, PluginCollection)
self.assertIn('sample', conf.plugins)
self.assertIsInstance(conf.plugins['sample'], BasePlugin)
expected = {
'foo': 'foo value',
'bar': 42,
'dir': None,
}
self.assertEqual(conf.plugins['sample'].config, expected)
def test_plugin_config_as_dict(self) -> None:
class Schema(Config):
plugins = c.Plugins()
cfg = {
'plugins': {
'sample': {
'foo': 'foo value',
'bar': 42,
},
},
}
conf = self.get_config(Schema, cfg)
self.assertIsInstance(conf.plugins, PluginCollection)
self.assertIn('sample', conf.plugins)
self.assertIsInstance(conf.plugins['sample'], BasePlugin)
expected = {
'foo': 'foo value',
'bar': 42,
'dir': None,
}
self.assertEqual(conf.plugins['sample'].config, expected)
def test_plugin_config_with_explicit_theme_namespace(self) -> None:
class Schema(Config):
theme = c.Theme(default='mkdocs')
plugins = c.Plugins(theme_key='theme')
cfg = {'theme': 'readthedocs', 'plugins': ['readthedocs/sub_plugin']}
conf = self.get_config(Schema, cfg)
self.assertEqual(set(conf.plugins), {'readthedocs/sub_plugin'})
self.assertIsInstance(conf.plugins['readthedocs/sub_plugin'], ThemePlugin)
cfg = {'plugins': ['readthedocs/sub_plugin']}
conf = self.get_config(Schema, cfg)
self.assertEqual(set(conf.plugins), {'readthedocs/sub_plugin'})
self.assertIsInstance(conf.plugins['readthedocs/sub_plugin'], ThemePlugin)
def test_plugin_config_with_deduced_theme_namespace(self) -> None:
class Schema(Config):
theme = c.Theme(default='mkdocs')
plugins = c.Plugins(theme_key='theme')
cfg = {'theme': 'readthedocs', 'plugins': ['sub_plugin']}
conf = self.get_config(Schema, cfg)
self.assertEqual(set(conf.plugins), {'readthedocs/sub_plugin'})
self.assertIsInstance(conf.plugins['readthedocs/sub_plugin'], ThemePlugin)
cfg = {'plugins': ['sub_plugin']}
with self.expect_error(plugins='The "sub_plugin" plugin is not installed'):
self.get_config(Schema, cfg)
def test_plugin_config_with_deduced_theme_namespace_overridden(self) -> None:
class Schema(Config):
theme = c.Theme(default='mkdocs')
plugins = c.Plugins(theme_key='theme')
cfg = {'theme': 'readthedocs', 'plugins': ['overridden']}
conf = self.get_config(Schema, cfg)
self.assertEqual(set(conf.plugins), {'readthedocs/overridden'})
self.assertIsInstance(next(iter(conf.plugins.values())), ThemePlugin2)
cfg = {'plugins': ['overridden']}
conf = self.get_config(Schema, cfg)
self.assertEqual(set(conf.plugins), {'overridden'})
self.assertIsInstance(conf.plugins['overridden'], FakePlugin2)
def test_plugin_config_with_explicit_empty_namespace(self) -> None:
class Schema(Config):
theme = c.Theme(default='mkdocs')
plugins = c.Plugins(theme_key='theme')
cfg = {'theme': 'readthedocs', 'plugins': ['/overridden']}
conf = self.get_config(Schema, cfg)
self.assertEqual(set(conf.plugins), {'overridden'})
self.assertIsInstance(next(iter(conf.plugins.values())), FakePlugin2)
cfg = {'plugins': ['/overridden']}
conf = self.get_config(Schema, cfg)
self.assertEqual(set(conf.plugins), {'overridden'})
self.assertIsInstance(conf.plugins['overridden'], FakePlugin2)
def test_plugin_config_enabled_for_any_plugin(self) -> None:
class Schema(Config):
theme = c.Theme(default='mkdocs')
plugins = c.Plugins(theme_key='theme')
cfg = {'theme': 'readthedocs', 'plugins': {'sample': {'enabled': False, 'bar': 3}}}
conf = self.get_config(Schema, cfg)
self.assertEqual(set(conf.plugins), set())
cfg = {'theme': 'readthedocs', 'plugins': {'sample': {'enabled': True, 'bar': 3}}}
conf = self.get_config(Schema, cfg)
self.assertEqual(set(conf.plugins), {'sample'})
self.assertEqual(conf.plugins['sample'].config.bar, 3)
cfg = {'theme': 'readthedocs', 'plugins': {'sample': {'enabled': 5}}}
with self.expect_error(
plugins="Plugin 'sample' option 'enabled': Expected boolean but received: <class 'int'>"
):
self.get_config(Schema, cfg)
def test_plugin_config_enabled_for_plugin_with_setting(self) -> None:
class Schema(Config):
theme = c.Theme(default='mkdocs')
plugins = c.Plugins(theme_key='theme')
cfg = {'theme': 'readthedocs', 'plugins': {'sample-e': {'enabled': False, 'bar': 3}}}
conf = self.get_config(Schema, cfg)
self.assertEqual(set(conf.plugins), {'sample-e'})
self.assertEqual(conf.plugins['sample-e'].config.enabled, False)
self.assertEqual(conf.plugins['sample-e'].config.bar, 3)
cfg = {'theme': 'readthedocs', 'plugins': {'sample-e': {'enabled': True, 'bar': 3}}}
conf = self.get_config(Schema, cfg)
self.assertEqual(set(conf.plugins), {'sample-e'})
self.assertEqual(conf.plugins['sample-e'].config.enabled, True)
self.assertEqual(conf.plugins['sample-e'].config.bar, 3)
cfg = {'theme': 'readthedocs', 'plugins': {'sample-e': {'enabled': 5}}}
with self.expect_error(
plugins="Plugin 'sample-e' option 'enabled': Expected type: <class 'bool'> but received: <class 'int'>"
):
self.get_config(Schema, cfg)
def test_plugin_config_with_multiple_instances(self) -> None:
class Schema(Config):
theme = c.Theme(default='mkdocs')
plugins = c.Plugins(theme_key='theme')
cfg = {
'plugins': [
{'sample2': {'foo': 'foo value', 'bar': 42}},
{'sample2': {'foo': 'foo2 value'}},
],
}
conf = self.get_config(Schema, cfg)
self.assertEqual(
set(conf.plugins),
{'sample2', 'sample2 #2'},
)
self.assertEqual(conf.plugins['sample2'].config['bar'], 42)
self.assertEqual(conf.plugins['sample2 #2'].config['bar'], 0)
def test_plugin_config_with_multiple_instances_and_warning(self) -> None:
class Schema(Config):
theme = c.Theme(default='mkdocs')
plugins = c.Plugins(theme_key='theme')
test_cfgs: list[dict[str, Any]] = [
{
'theme': 'readthedocs',
'plugins': [{'sub_plugin': {}}, {'sample2': {}}, {'sub_plugin': {}}, 'sample2'],
},
{
'theme': 'readthedocs',
'plugins': ['sub_plugin', 'sample2', 'sample2', 'sub_plugin'],
},
]
for cfg in test_cfgs:
conf = self.get_config(
Schema,
cfg,
warnings=dict(
plugins="Plugin 'readthedocs/sub_plugin' was specified multiple times - "
"this is likely a mistake, because the plugin doesn't declare "
"`supports_multiple_instances`."
),
)
self.assertEqual(
set(conf.plugins),
{'readthedocs/sub_plugin', 'readthedocs/sub_plugin #2', 'sample2', 'sample2 #2'},
)
def test_plugin_config_empty_list_with_empty_default(self) -> None:
class Schema(Config):
plugins = c.Plugins(default=[])
cfg: dict[str, Any] = {'plugins': []}
conf = self.get_config(Schema, cfg)
self.assertIsInstance(conf.plugins, PluginCollection)
self.assertEqual(len(conf.plugins), 0)
def test_plugin_config_empty_list_with_default(self) -> None:
class Schema(Config):
plugins = c.Plugins(default=['sample'])
# Default is ignored
cfg: dict[str, Any] = {'plugins': []}
conf = self.get_config(Schema, cfg)
self.assertIsInstance(conf.plugins, PluginCollection)
self.assertEqual(len(conf.plugins), 0)
def test_plugin_config_none_with_empty_default(self) -> None:
class Schema(Config):
plugins = c.Plugins(default=[])
cfg = {'plugins': None}
conf = self.get_config(Schema, cfg)
self.assertIsInstance(conf.plugins, PluginCollection)
self.assertEqual(len(conf.plugins), 0)
def test_plugin_config_none_with_default(self) -> None:
class Schema(Config):
plugins = c.Plugins(default=['sample'])
# Default is used.
cfg = {'plugins': None}
conf = self.get_config(Schema, cfg)
self.assertIsInstance(conf.plugins, PluginCollection)
self.assertIn('sample', conf.plugins)
self.assertIsInstance(conf.plugins['sample'], BasePlugin)
expected = {
'foo': 'default foo',
'bar': 0,
'dir': None,
}
self.assertEqual(conf.plugins['sample'].config, expected)
def test_plugin_config_uninstalled(self) -> None:
class Schema(Config):
plugins = c.Plugins()
cfg = {'plugins': ['uninstalled']}
with self.expect_error(plugins='The "uninstalled" plugin is not installed'):
self.get_config(Schema, cfg)
def test_plugin_config_not_list(self) -> None:
class Schema(Config):
plugins = c.Plugins()
cfg = {'plugins': 'sample'}
with self.expect_error(plugins="Invalid Plugins configuration. Expected a list or dict."):
self.get_config(Schema, cfg)
def test_plugin_config_multivalue_dict(self) -> None:
class Schema(Config):
plugins = c.Plugins()
cfg = {
'plugins': [
{
'sample': {
'foo': 'foo value',
'bar': 42,
},
'extra_key': 'baz',
}
],
}
with self.expect_error(plugins="Invalid Plugins configuration"):
self.get_config(Schema, cfg)
cfg = {
'plugins': [
{},
],
}
with self.expect_error(plugins="Invalid Plugins configuration"):
self.get_config(Schema, cfg)
def test_plugin_config_not_string_or_dict(self) -> None:
class Schema(Config):
plugins = c.Plugins()
cfg = {
'plugins': [('not a string or dict',)],
}
with self.expect_error(plugins="'('not a string or dict',)' is not a valid plugin name."):
self.get_config(Schema, cfg)
def test_plugin_config_options_not_dict(self) -> None:
class Schema(Config):
plugins = c.Plugins()
cfg = {
'plugins': [{'sample': 'not a dict'}],
}
with self.expect_error(plugins="Invalid config options for the 'sample' plugin."):
self.get_config(Schema, cfg)
def test_plugin_config_sub_error(self) -> None:
class Schema(Config):
plugins = c.Plugins(default=['sample'])
cfg = {
'plugins': {
'sample': {'bar': 'not an int'},
}
}
with self.expect_error(
plugins="Plugin 'sample' option 'bar': Expected type: <class 'int'> but received: <class 'str'>"
):
self.get_config(Schema, cfg)
def test_plugin_config_sub_warning(self) -> None:
class Schema(Config):
plugins = c.Plugins()
cfg = {
'plugins': {
'sample2': {'depr': 'deprecated value'},
}
}
conf = self.get_config(
Schema,
cfg,
warnings=dict(
plugins="Plugin 'sample2' option 'depr': The configuration option "
"'depr' has been deprecated and will be removed in a future release."
),
)
self.assertIsInstance(conf.plugins, PluginCollection)
self.assertIn('sample2', conf.plugins)
| PluginsTest |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_concurrency.py | {
"start": 502,
"end": 2652
} | class ____(fixtures.TestBase):
def teardown_test(self):
clear_mappers()
@classmethod
def make_a(cls, Base):
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(String)
bs = relationship("B")
# need a strong ref so that the class is not gc'ed
cls.A = A
@classmethod
def query_a(cls, Base, result):
s = fixture_session()
time.sleep(random.random() / 100)
A = cls.A
try:
s.query(A).join(A.bs)
except orm_exc.UnmappedClassError as oe:
# this is the failure mode, where B is being handled by
# declarative and is in the registry but not mapped yet.
result[0] = oe
except exc.InvalidRequestError:
# if make_b() starts too slowly, we can reach here, because
# B isn't in the registry yet. We can't guard against this
# case in the library because a class can refer to a name that
# doesn't exist and that has to raise.
result[0] = True
else:
# no conflict
result[0] = True
@classmethod
def make_b(cls, Base):
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
@declared_attr
def data(cls):
time.sleep(0.001)
return Column(String)
a_id = Column(ForeignKey("a.id"))
cls.B = B
def test_concurrent_create(self):
for i in range(50):
Base = declarative_base()
clear_mappers()
self.make_a(Base)
result = [False]
threads = [
threading.Thread(target=self.make_b, args=(Base,)),
threading.Thread(target=self.query_a, args=(Base, result)),
]
for t in threads:
t.start()
for t in threads:
t.join()
if isinstance(result[0], orm_exc.UnmappedClassError):
raise result[0]
| ConcurrentUseDeclMappingTest |
python | astropy__astropy | astropy/table/tests/conftest.py | {
"start": 1343,
"end": 3436
} | class ____(table.Table):
Row = MyRow
Column = MyColumn
MaskedColumn = MyMaskedColumn
TableColumns = MyTableColumns
TableFormatter = MyTableFormatter
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=["unmasked", "masked", "subclass"])
def table_types(request):
class TableTypes:
def __init__(self, request):
if request.param == "unmasked":
self.Table = table.Table
self.Column = table.Column
elif request.param == "masked":
self.Table = MaskedTable
self.Column = table.MaskedColumn
elif request.param == "subclass":
self.Table = MyTable
self.Column = MyColumn
return TableTypes(request)
# Fixture to run all the Column tests for both an unmasked (ndarray)
# and masked (MaskedArray) column.
@pytest.fixture(params=[False, True])
def table_data(request):
class TableData:
def __init__(self, request):
self.Table = MaskedTable if request.param else table.Table
self.Column = table.MaskedColumn if request.param else table.Column
self.COLS = [
self.Column(
name="a",
data=[1, 2, 3],
description="da",
format="%i",
meta={"ma": 1},
unit="ua",
),
self.Column(
name="b",
data=[4, 5, 6],
description="db",
format="%d",
meta={"mb": 1},
unit="ub",
),
self.Column(
name="c",
data=[7, 8, 9],
description="dc",
format="%f",
meta={"mc": 1},
unit="ub",
),
]
self.DATA = self.Table(self.COLS)
return TableData(request)
| MyTable |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_repo_path_parsing.py | {
"start": 410,
"end": 1671
} | class ____(APITestCase):
def setUp(self) -> None:
self.org = self.create_organization(owner=self.user, name="blap")
self.project = self.create_project(
name="foo", organization=self.org, teams=[self.create_team(organization=self.org)]
)
def make_post(
self,
source_url: str,
stack_path: str,
module: str | None = None,
abs_path: str | None = None,
platform: str | None = None,
project: Project | None = None,
user: SentryUser | None = None,
) -> Response:
self.login_as(user=user or self.user)
if not project:
project = self.project
assert project is not None
url = reverse(
"sentry-api-0-project-repo-path-parsing",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
return self.client.post(
url,
data={
"sourceUrl": source_url,
"stackPath": stack_path,
"module": module,
"absPath": abs_path,
"platform": platform,
},
)
| BaseStacktraceLinkTest |
python | getsentry__sentry | src/sentry/api/endpoints/organization_sampling_project_span_counts.py | {
"start": 1076,
"end": 3779
} | class ____(OrganizationEndpoint):
"""Endpoint for retrieving project span counts in all orgs."""
owner = ApiOwner.TELEMETRY_EXPERIENCE
permission_classes = (OrganizationPermission,)
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, organization: Organization) -> Response:
self._check_feature(request, organization)
start, end = get_date_range_from_params(request.GET)
# We are purposely not filtering on team membership, as all users should be able to see the span counts
# in order to show the dynamic sampling settings page with all valid data. Please do not remove this
# without consulting the owner of the endpoint
projects = list(
Project.objects.filter(organization=organization, status=ObjectStatus.ACTIVE)
)
transformer = MetricsAPIQueryResultsTransformer()
# Try to resolve the `target_project_id` tag first, as otherwise the query will
# fail to resolve the column and raise a validation error.
# When the tag is not present, we can simply return with an empty result set, as this
# means that there are no spans ingested yet.
if resolve_weak(UseCaseID.SPANS, organization.id, "target_project_id") == STRING_NOT_FOUND:
results = transformer.transform([])
return Response(status=200, data=results)
mql = f"sum({SpanMRI.COUNT_PER_ROOT_PROJECT.value}) by (project,target_project_id)"
query = MQLQuery(mql=mql, order=QueryOrder.DESC, limit=10000)
results = run_queries(
mql_queries=[query],
start=start,
end=end,
interval=self._interval_from_request(request),
organization=organization,
projects=projects,
environments=self.get_environments(request, organization),
referrer=Referrer.DYNAMIC_SAMPLING_SETTINGS_GET_SPAN_COUNTS.value,
query_type=QueryType.TOTALS,
).apply_transformer(transformer)
return Response(status=200, data=results)
def _check_feature(self, request: Request, organization: Organization) -> None:
if not features.has(
"organizations:dynamic-sampling-custom", organization, actor=request.user
):
raise ResourceDoesNotExist
def _interval_from_request(self, request: Request) -> int:
"""
Extracts the interval of the query from the request payload.
"""
interval = parse_stats_period(request.GET.get("interval", "1h"))
return int(3600 if interval is None else interval.total_seconds())
| OrganizationSamplingProjectSpanCountsEndpoint |
python | huggingface__transformers | src/transformers/integrations/tensor_parallel.py | {
"start": 22612,
"end": 24598
} | class ____(TensorParallelLayer):
"""
This class is used to isolate computation in a TP layer from the rest of the world.
Parameters need to be LOCAL, so not dtensors
"""
@staticmethod
def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh=None):
# annotate module input placements/sharding with input_layouts
input_tensor = inputs[0]
if isinstance(input_tensor, DTensor):
input_tensor = input_tensor.to_local()
return input_tensor
@staticmethod
def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh=None):
# TODO: figure out dynamo support for instance method and switch this to instance method
return outputs
def shard_tensor(
self,
param,
param_type=None,
param_casting_dtype=None,
to_contiguous=None,
rank=None,
device_mesh=None,
tensor_idx=None,
):
mesh = device_mesh or self.device_mesh
parameter = param[...].to(param_casting_dtype)
if mesh is not None:
parameter = parameter / mesh.size()
self.shard = None
return parameter, None
def partition_tensor(self, param, empty_param, param_type, param_casting_dtype, to_contiguous, rank, device_mesh):
param = param[...].to(param_casting_dtype)
if to_contiguous:
param = param.contiguous()
param = param / device_mesh.size() # TODO should be optionable
# TODO: assumes parent module will allreduce the output afterwards (e.g rowlinear bias is IsolatedParallel and parent module is GatherParallel)
return param
def prepare_module_tp(self, module: nn.Module, device_mesh) -> nn.Module:
distribute_module(
module,
device_mesh,
partial(self._prepare_input_fn, None, None),
partial(self._prepare_output_fn, None, None),
)
| IsolatedParallel |
python | bokeh__bokeh | setup.py | {
"start": 6482,
"end": 6732
} | class ____(sdist): # type: ignore
def run(self) -> None:
check_tags()
build_or_install_bokehjs(self.distribution.packages)
super().run()
setup(cmdclass={"build": Build, "editable_wheel": EditableWheel, "sdist": Sdist})
| Sdist |
python | tensorflow__tensorflow | tensorflow/compiler/tests/conv3d_test.py | {
"start": 1637,
"end": 20394
} | class ____(xla_test.XLATestCase, parameterized.TestCase):
def _VerifyValues(
self,
input_sizes=None,
filter_sizes=None,
strides=None,
dilations=None,
padding=None,
data_format_src="NDHWC",
data_format_dst="NDHWC",
expected=None,
op_name="Conv3D",
):
"""Tests that tf.nn.conv3d produces the expected value.
Args:
input_sizes: Input tensor dimensions in [batch, input_rows, input_cols,
input_depth].
filter_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
input_depth, output_depth].
strides: Strides.
dilations: RHS dilations.
padding: Padding type.
data_format_src: Data format input is in.
data_format_dst: Data format verification will run and input is converted
to.
expected: Expected output.
op_name: Name of operation to test (Conv/Conv2D)
"""
total_size_1 = np.prod(input_sizes)
total_size_2 = np.prod(filter_sizes)
x1 = np.reshape(
[f * 1.0 / total_size_1 for f in range(1, total_size_1 + 1)],
input_sizes,
)
x2 = np.reshape(
[f * 1.0 / total_size_2 for f in range(1, total_size_2 + 1)],
filter_sizes,
)
strides = [1] + strides + [1]
if dilations is None:
dilations = [1, 1, 1]
dilations = [1] + dilations + [1]
# Convert between data formats.
expected = test_utils.ConvertBetweenDataFormats(
expected, data_format_src, data_format_dst
)
x1 = test_utils.ConvertBetweenDataFormats(
x1, data_format_src, data_format_dst
)
input_sizes = test_utils.PermuteDimsBetweenDataFormats(
input_sizes, data_format_src, data_format_dst
)
strides = test_utils.PermuteDimsBetweenDataFormats(
strides, data_format_src, data_format_dst
)
dilations = test_utils.PermuteDimsBetweenDataFormats(
dilations, data_format_src, data_format_dst
)
with self.session() as sess:
t1 = array_ops.placeholder(dtypes.bfloat16, shape=input_sizes)
t2 = array_ops.placeholder(dtypes.bfloat16, shape=filter_sizes)
with self.test_scope():
if op_name == "Conv":
conv_format = (
"CHANNELS_LAST"
if data_format_dst == "NDHWC"
else "CHANNELS_FIRST"
)
out = gen_nn_ops.conv(
t1,
t2,
strides=strides,
padding=padding,
data_format=conv_format,
dilations=dilations,
)
elif op_name == "Conv3D":
out = nn_ops.conv3d(
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format_dst,
dilations=dilations,
)
else:
raise ValueError("Invalid op name: %s" % op_name)
value = sess.run(out, {t1: x1, t2: x2})
self.assertAllCloseAccordingToType(expected, value)
@parameterized.named_parameters(*CONV_CONFIGS)
def testConv3D1x1x1Filter(self, data_format, op_name):
expected_output = np.reshape(
[
0.18518518518518517,
0.2222222222222222,
0.25925925925925924,
0.4074074074074074,
0.5,
0.5925925925925926,
0.6296296296296297,
0.7777777777777777,
0.9259259259259259,
0.8518518518518519,
1.0555555555555556,
1.259259259259259,
1.074074074074074,
1.3333333333333333,
1.5925925925925926,
1.2962962962962963,
1.6111111111111112,
1.9259259259259258,
],
[1, 2, 3, 1, 3],
)
# These are equivalent to the Conv2D1x1 case.
self._VerifyValues(
input_sizes=[1, 2, 3, 1, 3],
filter_sizes=[1, 1, 1, 3, 3],
strides=[1, 1, 1],
padding="VALID",
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
self._VerifyValues(
input_sizes=[1, 2, 1, 3, 3],
filter_sizes=[1, 1, 1, 3, 3],
strides=[1, 1, 1],
padding="VALID",
expected=np.reshape(expected_output, [1, 2, 1, 3, 3]),
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
self._VerifyValues(
input_sizes=[1, 1, 2, 3, 3],
filter_sizes=[1, 1, 1, 3, 3],
strides=[1, 1, 1],
padding="VALID",
expected=np.reshape(expected_output, [1, 1, 2, 3, 3]),
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
@parameterized.named_parameters(*CONV_CONFIGS)
def testConv3D1x1x1Filter2x1x1Dilation(self, data_format, op_name):
expected_output = np.reshape(
[
0.05555555555555555,
0.1111111111111111,
0.16666666666666666,
0.2222222222222222,
0.2777777777777778,
0.3333333333333333,
0.3888888888888889,
0.4444444444444444,
0.5,
0.5555555555555556,
0.6111111111111112,
0.6666666666666666,
0.7222222222222222,
0.7777777777777778,
0.8333333333333334,
0.8888888888888888,
0.9444444444444444,
1.0,
],
[1, 3, 6, 1, 1],
)
self._VerifyValues(
input_sizes=[1, 3, 6, 1, 1],
filter_sizes=[1, 1, 1, 1, 1],
strides=[1, 1, 1],
padding="VALID",
dilations=[2, 1, 1],
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
# Expected values computed using scipy's correlate function.
@parameterized.named_parameters(*CONV_CONFIGS)
def testConv3D2x2x2Filter(self, data_format, op_name):
expected_output = np.reshape(
[
3.7719907407407405,
3.850694444444445,
3.929398148148149,
4.265046296296295,
4.357638888888888,
4.450231481481481,
6.730324074074074,
6.892361111111109,
7.054398148148148,
7.223379629629629,
7.399305555555557,
7.575231481481481,
9.688657407407408,
9.934027777777779,
10.17939814814815,
10.181712962962962,
10.440972222222221,
10.700231481481481,
],
[1, 3, 1, 2, 3],
)
# expected_shape = [1, 3, 1, 2, 5]
self._VerifyValues(
input_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin
filter_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout
strides=[1, 1, 1],
padding="VALID",
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
@parameterized.named_parameters(*CONV_CONFIGS)
def testConv3D2x2x2Filter1x2x1Dilation(self, data_format, op_name):
expected_output = np.reshape(
[
1.1388888888888888,
1.2013888888888888,
1.3263888888888888,
1.3888888888888888,
1.5138888888888888,
1.5763888888888888,
1.701388888888889,
1.763888888888889,
2.263888888888889,
2.3263888888888893,
2.451388888888889,
2.513888888888889,
2.6388888888888893,
2.701388888888889,
2.826388888888889,
2.888888888888889,
3.388888888888889,
3.451388888888889,
3.576388888888889,
3.6388888888888884,
3.7638888888888893,
3.8263888888888893,
3.9513888888888893,
4.013888888888889,
],
[1, 3, 4, 2, 1],
)
self._VerifyValues(
input_sizes=[1, 4, 6, 3, 1],
filter_sizes=[2, 2, 2, 1, 1],
strides=[1, 1, 1],
padding="VALID",
dilations=[1, 2, 1],
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
@parameterized.named_parameters(*CONV_CONFIGS)
def testConv3DStrides(self, data_format, op_name):
expected_output = np.reshape(
[
0.06071428571428571,
0.08988095238095238,
0.10238095238095238,
0.11488095238095238,
0.12738095238095237,
0.13988095238095238,
0.08452380952380953,
0.26071428571428573,
0.35238095238095235,
0.36488095238095236,
0.3773809523809524,
0.3898809523809524,
0.4023809523809524,
0.23452380952380952,
0.46071428571428574,
0.6148809523809524,
0.6273809523809524,
0.6398809523809523,
0.6523809523809524,
0.6648809523809525,
0.3845238095238095,
1.1273809523809524,
1.4898809523809524,
1.5023809523809524,
1.5148809523809523,
1.5273809523809523,
1.5398809523809525,
0.8845238095238095,
1.3273809523809526,
1.7523809523809522,
1.764880952380952,
1.7773809523809523,
1.7898809523809525,
1.8023809523809526,
1.0345238095238096,
1.5273809523809525,
2.0148809523809526,
2.0273809523809523,
2.0398809523809525,
2.052380952380952,
2.0648809523809524,
1.1845238095238095,
2.1940476190476192,
2.8898809523809526,
2.9023809523809527,
2.9148809523809525,
2.9273809523809526,
2.9398809523809524,
1.6845238095238095,
2.394047619047619,
3.1523809523809523,
3.1648809523809525,
3.177380952380952,
3.1898809523809524,
3.2023809523809526,
1.8345238095238097,
2.594047619047619,
3.4148809523809525,
3.427380952380952,
3.4398809523809524,
3.4523809523809526,
3.4648809523809523,
1.9845238095238096,
],
[1, 3, 3, 7, 1],
)
self._VerifyValues(
input_sizes=[1, 5, 8, 7, 1],
filter_sizes=[1, 2, 3, 1, 1],
strides=[2, 3, 1], # different stride for each spatial dimension
padding="SAME",
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
@parameterized.named_parameters(*CONV_CONFIGS)
def testConv3D2x2x2FilterStride2(self, data_format, op_name):
expected_output = np.reshape(
[
3.7719907407407405,
3.850694444444445,
3.929398148148149,
9.688657407407408,
9.934027777777779,
10.17939814814815,
],
[1, 2, 1, 1, 3],
)
self._VerifyValues(
input_sizes=[1, 4, 2, 3, 3],
filter_sizes=[2, 2, 2, 3, 3],
strides=[2, 2, 2],
padding="VALID",
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
@parameterized.named_parameters(*CONV_CONFIGS)
def testConv3DStride3(self, data_format, op_name):
expected_output = np.reshape(
[
1.5114087301587302,
1.5716765873015872,
1.6319444444444446,
1.5634920634920635,
1.6267361111111112,
1.6899801587301588,
1.6155753968253967,
1.681795634920635,
1.748015873015873,
1.9280753968253967,
2.012152777777778,
2.096230158730159,
1.9801587301587302,
2.067212301587302,
2.154265873015873,
2.0322420634920637,
2.122271825396825,
2.2123015873015874,
4.428075396825396,
4.65500992063492,
4.881944444444444,
4.480158730158729,
4.710069444444444,
4.939980158730158,
4.532242063492063,
4.7651289682539675,
4.9980158730158735,
4.844742063492064,
5.095486111111112,
5.346230158730158,
4.896825396825397,
5.150545634920635,
5.4042658730158735,
4.94890873015873,
5.205605158730158,
5.462301587301588,
],
[1, 2, 2, 3, 3],
)
self._VerifyValues(
input_sizes=[1, 6, 7, 8, 2],
filter_sizes=[3, 2, 1, 2, 3],
strides=[3, 3, 3],
padding="VALID",
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
@parameterized.named_parameters(*CONV_CONFIGS)
def testConv3D2x2x2FilterStride2Same(self, data_format, op_name):
expected_output = np.reshape(
[
3.7719907407407405,
3.850694444444445,
3.929398148148149,
2.0162037037037037,
2.0659722222222223,
2.1157407407407405,
9.688657407407408,
9.934027777777779,
10.17939814814815,
4.599537037037037,
4.732638888888889,
4.8657407407407405,
],
[1, 2, 1, 2, 3],
)
self._VerifyValues(
input_sizes=[1, 4, 2, 3, 3],
filter_sizes=[2, 2, 2, 3, 3],
strides=[2, 2, 2],
padding="SAME",
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
@parameterized.named_parameters(*CONV_CONFIGS)
def testKernelSmallerThanStride(self, data_format, op_name):
expected_output = np.reshape(
[
0.037037037037037035,
0.1111111111111111,
0.25925925925925924,
0.3333333333333333,
0.7037037037037037,
0.7777777777777778,
0.9259259259259259,
1.0,
],
[1, 2, 2, 2, 1],
)
self._VerifyValues(
input_sizes=[1, 3, 3, 3, 1],
filter_sizes=[1, 1, 1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
self._VerifyValues(
input_sizes=[1, 3, 3, 3, 1],
filter_sizes=[1, 1, 1, 1, 1],
strides=[2, 2, 2],
padding="VALID",
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
expected_output = np.reshape(
[
0.5408163265306123,
0.5801749271137027,
0.28061224489795916,
0.8163265306122448,
0.8556851311953353,
0.4030612244897959,
0.41873177842565595,
0.43403790087463556,
0.19642857142857142,
2.4693877551020407,
2.5087463556851315,
1.1377551020408163,
2.7448979591836733,
2.7842565597667637,
1.260204081632653,
1.168731778425656,
1.1840379008746356,
0.5178571428571429,
1.0951166180758019,
1.1060495626822158,
0.4464285714285714,
1.1716472303206997,
1.1825801749271136,
0.4770408163265306,
0.3691690962099125,
0.37244897959183676,
0.125,
],
[1, 3, 3, 3, 1],
)
self._VerifyValues(
input_sizes=[1, 7, 7, 7, 1],
filter_sizes=[2, 2, 2, 1, 1],
strides=[3, 3, 3],
padding="SAME",
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
expected_output = np.reshape(
[
0.5408163265306123,
0.5801749271137027,
0.8163265306122448,
0.8556851311953353,
2.4693877551020407,
2.5087463556851315,
2.7448979591836733,
2.7842565597667637,
],
[1, 2, 2, 2, 1],
)
self._VerifyValues(
input_sizes=[1, 7, 7, 7, 1],
filter_sizes=[2, 2, 2, 1, 1],
strides=[3, 3, 3],
padding="VALID",
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
@parameterized.named_parameters(*CONV_CONFIGS)
def testKernelSizeMatchesInputSize(self, data_format, op_name):
expected_output = np.reshape([1.5625, 1.875], [1, 1, 1, 1, 2])
self._VerifyValues(
input_sizes=[1, 2, 1, 2, 1],
filter_sizes=[2, 1, 2, 1, 2],
strides=[1, 1, 1],
padding="VALID",
expected=expected_output,
data_format_src="NDHWC",
data_format_dst=data_format,
op_name=op_name,
)
def testConvExpandedBatch(self):
tensor_in_sizes_batch = [10, 2, 3, 1, 3]
tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 1, 3]
batch_dims = 2
filter_in_sizes = [1, 1, 1, 3, 3]
filter_in = np.arange(
1, np.prod(filter_in_sizes) + 1, dtype=np.float32
).reshape(filter_in_sizes)
x1 = np.arange(
1, np.prod(tensor_in_sizes_batch) + 1, dtype=np.float32
).reshape(tensor_in_sizes_batch)
x2 = x1.reshape(tensor_in_sizes_expanded_batch)
with self.session() as sess:
t1 = array_ops.placeholder(dtypes.bfloat16, shape=tensor_in_sizes_batch)
t2 = array_ops.placeholder(
dtypes.bfloat16, shape=tensor_in_sizes_expanded_batch
)
filter_t = array_ops.placeholder(dtypes.bfloat16, shape=filter_in_sizes)
out1 = gen_nn_ops.conv(
t1, filter_t, strides=[1, 1, 1, 1, 1], padding="VALID"
)
out2 = gen_nn_ops.conv(
t2,
filter_t,
strides=[1, 1, 1, 1, 1],
padding="VALID",
batch_dims=batch_dims,
)
value1 = sess.run(out1, {t1: x1, filter_t: filter_in})
value2 = sess.run(out2, {t2: x2, filter_t: filter_in})
self.assertEqual(list(value1.shape), tensor_in_sizes_batch)
self.assertEqual(list(value2.shape), tensor_in_sizes_expanded_batch)
self.assertAllCloseAccordingToType(value1, value2.reshape(value1.shape))
# Test cloned from
# tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py
| Conv3DTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 2058,
"end": 26454
} | class ____(ABC):
"""An AssetSelection defines a query over a set of assets and asset checks, normally all that are defined in a project.
You can use the "|", "&", and "-" operators to create unions, intersections, and differences of selections, respectively.
AssetSelections are typically used with :py:func:`define_asset_job`.
By default, selecting assets will also select all of the asset checks that target those assets.
Examples:
.. code-block:: python
# Select all assets in group "marketing":
AssetSelection.groups("marketing")
# Select all assets in group "marketing", as well as the asset with key "promotion":
AssetSelection.groups("marketing") | AssetSelection.assets("promotion")
# Select all assets in group "marketing" that are downstream of asset "leads":
AssetSelection.groups("marketing") & AssetSelection.assets("leads").downstream()
# Select a list of assets:
AssetSelection.assets(*my_assets_list)
# Select all assets except for those in group "marketing"
AssetSelection.all() - AssetSelection.groups("marketing")
# Select all assets which are materialized by the same op as "projections":
AssetSelection.assets("projections").required_multi_asset_neighbors()
# Select all assets in group "marketing" and exclude their asset checks:
AssetSelection.groups("marketing") - AssetSelection.all_asset_checks()
# Select all asset checks that target a list of assets:
AssetSelection.checks_for_assets(*my_assets_list)
# Select a specific asset check:
AssetSelection.checks(my_asset_check)
"""
@public
@staticmethod
@beta_param(param="include_sources")
def all(include_sources: bool = False) -> "AllSelection":
"""Returns a selection that includes all assets and their asset checks.
Args:
include_sources (bool): If True, then include all external assets.
"""
return AllSelection(include_sources=include_sources)
@public
@staticmethod
def all_asset_checks() -> "AllAssetCheckSelection":
"""Returns a selection that includes all asset checks."""
return AllAssetCheckSelection()
@public
@staticmethod
def assets(
*assets_defs: Union[AssetsDefinition, CoercibleToAssetKey],
) -> "KeysAssetSelection":
"""Returns a selection that includes all of the provided assets and asset checks that target
them.
Args:
*assets_defs (Union[AssetsDefinition, str, Sequence[str], AssetKey]): The assets to
select.
Examples:
.. code-block:: python
AssetSelection.assets(AssetKey(["a"]))
AssetSelection.assets("a")
AssetSelection.assets(AssetKey(["a"]), AssetKey(["b"]))
AssetSelection.assets("a", "b")
@asset
def asset1():
...
AssetSelection.assets(asset1)
asset_key_list = [AssetKey(["a"]), AssetKey(["b"])]
AssetSelection.assets(*asset_key_list)
"""
return KeysAssetSelection(selected_keys=asset_keys_from_defs_and_coercibles(assets_defs))
@public
@staticmethod
@deprecated(
breaking_version="2.0",
additional_warn_text="Use AssetSelection.assets instead.",
)
def keys(*asset_keys: CoercibleToAssetKey) -> "KeysAssetSelection":
"""Returns a selection that includes assets with any of the provided keys and all asset
checks that target them.
Deprecated: use AssetSelection.assets instead.
Examples:
.. code-block:: python
AssetSelection.keys(AssetKey(["a"]))
AssetSelection.keys("a")
AssetSelection.keys(AssetKey(["a"]), AssetKey(["b"]))
AssetSelection.keys("a", "b")
asset_key_list = [AssetKey(["a"]), AssetKey(["b"])]
AssetSelection.keys(*asset_key_list)
"""
_asset_keys = [
AssetKey.from_user_string(key) if isinstance(key, str) else AssetKey.from_coercible(key)
for key in asset_keys
]
return KeysAssetSelection(selected_keys=_asset_keys)
@public
@staticmethod
@beta_param(param="include_sources")
def key_prefixes(
*key_prefixes: CoercibleToAssetKeyPrefix, include_sources: bool = False
) -> "KeyPrefixesAssetSelection":
"""Returns a selection that includes assets that match any of the provided key prefixes and all the asset checks that target them.
Args:
include_sources (bool): If True, then include external assets matching the key prefix(es)
in the selection.
Examples:
.. code-block:: python
# match any asset key where the first segment is equal to "a" or "b"
# e.g. AssetKey(["a", "b", "c"]) would match, but AssetKey(["abc"]) would not.
AssetSelection.key_prefixes("a", "b")
# match any asset key where the first two segments are ["a", "b"] or ["a", "c"]
AssetSelection.key_prefixes(["a", "b"], ["a", "c"])
"""
_asset_key_prefixes = [key_prefix_from_coercible(key_prefix) for key_prefix in key_prefixes]
return KeyPrefixesAssetSelection(
selected_key_prefixes=_asset_key_prefixes,
include_sources=include_sources,
)
@staticmethod
@beta_param(param="include_sources")
def key_substring(
key_substring: str, include_sources: bool = False
) -> "KeySubstringAssetSelection":
"""Returns a selection that includes assets whose string representation contains the provided substring and all the asset checks that target it.
Args:
include_sources (bool): If True, then include external assets matching the substring
in the selection.
Examples:
.. code-block:: python
# match any asset key containing "bc"
# e.g. AssetKey(["a", "bcd"]) would match, but not AssetKey(["ab", "cd"]).
AssetSelection.key_substring("bc")
# match any asset key containing "b/c"
# e.g. AssetKey(["ab", "cd"]) would match.
AssetSelection.key_substring("b/c")
"""
return KeySubstringAssetSelection(
selected_key_substring=key_substring, include_sources=include_sources
)
@public
@staticmethod
@beta_param(param="include_sources")
def groups(*group_strs, include_sources: bool = False) -> "GroupsAssetSelection":
"""Returns a selection that includes materializable assets that belong to any of the
provided groups and all the asset checks that target them.
Args:
include_sources (bool): If True, then include external assets matching the group in the
selection.
"""
check.tuple_param(group_strs, "group_strs", of_type=str)
return GroupsAssetSelection(
selected_groups=list(group_strs), include_sources=include_sources
)
@public
@staticmethod
@beta_param(param="include_sources")
def tag(key: str, value: str, include_sources: bool = False) -> "AssetSelection":
"""Returns a selection that includes materializable assets that have the provided tag, and
all the asset checks that target them.
Args:
include_sources (bool): If True, then include external assets matching the group in the
selection.
"""
return TagAssetSelection(key=key, value=value, include_sources=include_sources)
@staticmethod
def kind(kind: Optional[str], include_sources: bool = False) -> "AssetSelection":
"""Returns a selection that includes materializable assets that have the provided kind, and
all the asset checks that target them.
Args:
kind (str): The kind to select.
include_sources (bool): If True, then include external assets matching the kind in the
selection.
"""
return KindAssetSelection(kind_str=kind, include_sources=include_sources)
@staticmethod
@beta_param(param="include_sources")
def tag_string(string: str, include_sources: bool = False) -> "AssetSelection":
"""Returns a selection that includes materializable assets that have the provided tag, and
all the asset checks that target them.
Args:
include_sources (bool): If True, then include external assets matching the group in the
selection.
"""
split_by_equals_segments = string.split("=")
if len(split_by_equals_segments) == 1:
return TagAssetSelection(key=string, value="", include_sources=include_sources)
elif len(split_by_equals_segments) == 2:
key, value = split_by_equals_segments
return TagAssetSelection(key=key, value=value, include_sources=include_sources)
else:
check.failed(f"Invalid tag selection string: {string}. Must have no more than one '='.")
@staticmethod
def owner(owner: Optional[str]) -> "AssetSelection":
"""Returns a selection that includes assets that have the provided owner, and all the
asset checks that target them.
Args:
owner (str): The owner to select.
"""
return OwnerAssetSelection(selected_owner=owner)
@public
@staticmethod
def checks_for_assets(
*assets_defs: Union[AssetsDefinition, CoercibleToAssetKey],
) -> "AssetChecksForAssetKeysSelection":
"""Returns a selection with the asset checks that target the provided assets.
Args:
*assets_defs (Union[AssetsDefinition, str, Sequence[str], AssetKey]): The assets to
select checks for.
"""
return AssetChecksForAssetKeysSelection(
selected_asset_keys=asset_keys_from_defs_and_coercibles(assets_defs)
)
@public
@staticmethod
def checks(
*assets_defs_or_check_keys: Union[AssetsDefinition, AssetCheckKey],
) -> "AssetCheckKeysSelection":
"""Returns a selection that includes all of the provided asset checks or check keys."""
assets_defs = [ad for ad in assets_defs_or_check_keys if isinstance(ad, AssetsDefinition)]
check_keys = [key for key in assets_defs_or_check_keys if isinstance(key, AssetCheckKey)]
return AssetCheckKeysSelection(
selected_asset_check_keys=[
*(key for ad in assets_defs for key in ad.check_keys),
*check_keys,
]
)
@public
def downstream(
self, depth: Optional[int] = None, include_self: bool = True
) -> "DownstreamAssetSelection":
"""Returns a selection that includes all assets that are downstream of any of the assets in
this selection, selecting the assets in this selection by default. Includes the asset checks targeting the returned assets. Iterates through each
asset in this selection and returns the union of all downstream assets.
depth (Optional[int]): If provided, then only include assets to the given depth. A depth
of 2 means all assets that are children or grandchildren of the assets in this
selection.
include_self (bool): If True, then include the assets in this selection in the result.
If the include_self flag is False, return each downstream asset that is not part of the
original selection. By default, set to True.
"""
check.opt_int_param(depth, "depth")
check.opt_bool_param(include_self, "include_self")
return DownstreamAssetSelection(child=self, depth=depth, include_self=include_self)
@public
def upstream(
self, depth: Optional[int] = None, include_self: bool = True
) -> "UpstreamAssetSelection":
"""Returns a selection that includes all materializable assets that are upstream of any of
the assets in this selection, selecting the assets in this selection by default. Includes
the asset checks targeting the returned assets. Iterates through each asset in this
selection and returns the union of all upstream assets.
Because mixed selections of external and materializable assets are currently not supported,
keys corresponding to external assets will not be included as upstream of regular assets.
Args:
depth (Optional[int]): If provided, then only include assets to the given depth. A depth
of 2 means all assets that are parents or grandparents of the assets in this
selection.
include_self (bool): If True, then include the assets in this selection in the result.
If the include_self flag is False, return each upstream asset that is not part of the
original selection. By default, set to True.
"""
check.opt_int_param(depth, "depth")
check.opt_bool_param(include_self, "include_self")
return UpstreamAssetSelection(child=self, depth=depth, include_self=include_self)
@public
def sinks(self) -> "SinksAssetSelection":
"""Given an asset selection, returns a new asset selection that contains all of the sink
assets within the original asset selection. Includes the asset checks targeting the returned assets.
A sink asset is an asset that has no downstream dependencies within the asset selection.
The sink asset can have downstream dependencies outside of the asset selection.
"""
return SinksAssetSelection(child=self)
@public
def required_multi_asset_neighbors(self) -> "RequiredNeighborsAssetSelection":
"""Given an asset selection in which some assets are output from a multi-asset compute op
which cannot be subset, returns a new asset selection that contains all of the assets
required to execute the original asset selection. Includes the asset checks targeting the returned assets.
"""
return RequiredNeighborsAssetSelection(child=self)
@public
def roots(self) -> "RootsAssetSelection":
"""Given an asset selection, returns a new asset selection that contains all of the root
assets within the original asset selection. Includes the asset checks targeting the returned assets.
A root asset is an asset that has no upstream dependencies within the asset selection.
The root asset can have downstream dependencies outside of the asset selection.
Because mixed selections of external and materializable assets are currently not supported,
keys corresponding to external assets will not be included as roots. To select external assets,
use the `upstream_source_assets` method.
"""
return RootsAssetSelection(child=self)
@public
def materializable(self) -> "MaterializableAssetSelection":
"""Given an asset selection, returns a new asset selection that contains all of the assets
that are materializable. Removes any assets which are not materializable.
"""
return MaterializableAssetSelection(child=self)
@public
@deprecated(breaking_version="2.0", additional_warn_text="Use AssetSelection.roots instead.")
def sources(self) -> "RootsAssetSelection":
"""Given an asset selection, returns a new asset selection that contains all of the root
assets within the original asset selection. Includes the asset checks targeting the returned assets.
A root asset is a materializable asset that has no upstream dependencies within the asset
selection. The root asset can have downstream dependencies outside of the asset selection.
Because mixed selections of external and materializable assets are currently not supported,
keys corresponding to external assets will not be included as roots. To select external assets,
use the `upstream_source_assets` method.
"""
return self.roots()
@public
def upstream_source_assets(self) -> "ParentSourcesAssetSelection":
"""Given an asset selection, returns a new asset selection that contains all of the external
assets that are parents of assets in the original selection. Includes the asset checks
targeting the returned assets.
"""
return ParentSourcesAssetSelection(child=self)
@public
def without_checks(self) -> "AssetSelection":
"""Removes all asset checks in the selection."""
return self - AssetSelection.all_asset_checks()
def __or__(self, other: "AssetSelection") -> "OrAssetSelection":
check.inst_param(other, "other", AssetSelection)
operands = []
for selection in (self, other):
if isinstance(selection, OrAssetSelection):
operands.extend(selection.operands)
else:
operands.append(selection)
return OrAssetSelection(operands=operands)
def __and__(self, other: "AssetSelection") -> "AndAssetSelection":
check.inst_param(other, "other", AssetSelection)
operands = []
for selection in (self, other):
if isinstance(selection, AndAssetSelection):
operands.extend(selection.operands)
else:
operands.append(selection)
return AndAssetSelection(operands=operands)
def __bool__(self):
# Ensure that even if a subclass is a NamedTuple with no fields, it is still truthy
return True
def __sub__(self, other: "AssetSelection") -> "SubtractAssetSelection":
check.inst_param(other, "other", AssetSelection)
return SubtractAssetSelection(left=self, right=other)
def resolve(
self,
all_assets: Union[Iterable[Union[AssetsDefinition, SourceAsset]], BaseAssetGraph],
allow_missing: bool = False,
) -> AbstractSet[AssetKey]:
"""Returns the set of asset keys in all_assets that match this selection.
Args:
allow_missing (bool): If False, will raise an error if any of the leaf selections in the
asset selection target entities that don't exist in the set of provided assets.
"""
if isinstance(all_assets, BaseAssetGraph):
asset_graph = all_assets
else:
check.iterable_param(all_assets, "all_assets", (AssetsDefinition, SourceAsset))
asset_graph = AssetGraph.from_assets(all_assets)
return self.resolve_inner(asset_graph, allow_missing=allow_missing)
@abstractmethod
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
raise NotImplementedError()
def resolve_checks(
self, asset_graph: BaseAssetGraph, allow_missing: bool = False
) -> AbstractSet[AssetCheckKey]:
"""We don't need this method currently, but it makes things consistent with resolve_inner. Currently
we don't store checks in the RemoteAssetGraph, so we only support AssetGraph.
"""
return self.resolve_checks_inner(asset_graph, allow_missing=allow_missing)
def resolve_checks_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetCheckKey]:
"""By default, resolve to checks that target the selected assets. This is overriden for particular selections."""
asset_keys = self.resolve(asset_graph)
return {handle for handle in asset_graph.asset_check_keys if handle.asset_key in asset_keys}
@classmethod
@beta_param(param="include_sources")
def from_string(cls, string: str, include_sources=False) -> "AssetSelection":
from dagster._core.definitions.antlr_asset_selection.antlr_asset_selection import (
AntlrAssetSelectionParser,
)
try:
return AntlrAssetSelectionParser(string, include_sources).asset_selection
except:
pass
if string == "*":
return cls.all()
parts = parse_clause(string)
if parts is not None:
key_selection = cls.assets(parts.item_name)
if parts.up_depth and parts.down_depth:
selection = key_selection.upstream(parts.up_depth) | key_selection.downstream(
parts.down_depth
)
elif parts.up_depth:
selection = key_selection.upstream(parts.up_depth)
elif parts.down_depth:
selection = key_selection.downstream(parts.down_depth)
else:
selection = key_selection
return selection
elif string.startswith("tag:"):
tag_str = string[len("tag:") :]
return cls.tag_string(tag_str)
raise DagsterInvalidAssetSelectionError(f"Invalid selection string: {string}")
@classmethod
def from_coercible(cls, selection: CoercibleToAssetSelection) -> "AssetSelection":
if isinstance(selection, str):
return cls.from_string(selection)
elif isinstance(selection, AssetSelection):
return selection
elif isinstance(selection, collections.abc.Sequence) and all(
isinstance(el, str) for el in selection
):
return reduce(operator.or_, [cls.from_string(cast("str", s)) for s in selection])
elif isinstance(selection, collections.abc.Sequence) and all(
isinstance(el, (AssetsDefinition, SourceAsset)) for el in selection
):
return AssetSelection.assets(
*(
key
for el in selection
for key in (
el.keys
if isinstance(el, AssetsDefinition)
else [cast("SourceAsset", el).key]
)
)
)
elif isinstance(selection, collections.abc.Sequence) and all(
isinstance(el, AssetKey) for el in selection
):
return cls.assets(*cast("Sequence[AssetKey]", selection))
else:
raise DagsterError(
"selection argument must be one of str, Sequence[str], Sequence[AssetKey],"
" Sequence[AssetsDefinition], Sequence[SourceAsset], AssetSelection. Was"
f" {type(selection)}."
)
def to_serializable_asset_selection(self, asset_graph: BaseAssetGraph) -> "AssetSelection":
return KeysAssetSelection(selected_keys=list(self.resolve(asset_graph)))
def needs_parentheses_when_operand(self) -> bool:
"""When generating a string representation of an asset selection and this asset selection
is an operand in a larger expression, whether it needs to be surrounded by parentheses.
"""
return False
def operand_to_selection_str(self) -> str:
"""Returns a string representation of the selection when it is a child of a boolean expression,
for example, in an `AndAssetSelection` or `OrAssetSelection`. The main difference from `to_selection_str`
is that this method may include additional parentheses around the selection to ensure that the
expression is parsed correctly.
"""
return (
f"({self.to_selection_str()})"
if self.needs_parentheses_when_operand()
else self.to_selection_str()
)
def to_selection_str(self) -> str:
"""Returns an Antlr string representation of the selection that can be parsed by `from_string`."""
raise NotImplementedError(
f"{self.__class__.__name__} does not support conversion to a string."
)
def __str__(self) -> str:
# Attempt to use the to-Antlr-selection-string method if it's implemented,
# otherwise fall back to the default Python string representation
try:
return self.to_selection_str()
except NotImplementedError:
return super().__str__()
@whitelist_for_serdes
@record
| AssetSelection |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/uninitializedVariable2.py | {
"start": 687,
"end": 718
} | class ____(Abstract2):
pass
| E |
python | scikit-learn__scikit-learn | sklearn/metrics/_scorer.py | {
"start": 2964,
"end": 7262
} | class ____:
"""Callable for multimetric scoring used to avoid repeated calls
to `predict_proba`, `predict`, and `decision_function`.
`_MultimetricScorer` will return a dictionary of scores corresponding to
the scorers in the dictionary. Note that `_MultimetricScorer` can be
created with a dictionary with one key (i.e. only one actual scorer).
Parameters
----------
scorers : dict
Dictionary mapping names to callable scorers.
raise_exc : bool, default=True
Whether to raise the exception in `__call__` or not. If set to `False`
a formatted string of the exception details is passed as result of
the failing scorer.
"""
def __init__(self, *, scorers, raise_exc=True):
self._scorers = scorers
self._raise_exc = raise_exc
def __call__(self, estimator, *args, **kwargs):
"""Evaluate predicted target values."""
scores = {}
cache = {} if self._use_cache(estimator) else None
cached_call = partial(_cached_call, cache)
if _routing_enabled():
routed_params = process_routing(self, "score", **kwargs)
else:
# Scorers all get the same args, and get all of them except sample_weight.
# Only the ones having `sample_weight` in their signature will receive it.
# This does not work for metadata other than sample_weight, and for those
# users have to enable metadata routing.
common_kwargs = {
arg: value for arg, value in kwargs.items() if arg != "sample_weight"
}
routed_params = Bunch(
**{name: Bunch(score=common_kwargs.copy()) for name in self._scorers}
)
if "sample_weight" in kwargs:
for name, scorer in self._scorers.items():
if scorer._accept_sample_weight():
routed_params[name].score["sample_weight"] = kwargs[
"sample_weight"
]
for name, scorer in self._scorers.items():
try:
if isinstance(scorer, _BaseScorer):
score = scorer._score(
cached_call, estimator, *args, **routed_params.get(name).score
)
else:
score = scorer(estimator, *args, **routed_params.get(name).score)
scores[name] = score
except Exception as e:
if self._raise_exc:
raise e
else:
scores[name] = format_exc()
return scores
def __repr__(self):
scorers = ", ".join([f'"{s}"' for s in self._scorers])
return f"MultiMetricScorer({scorers})"
def _accept_sample_weight(self):
# TODO(slep006): remove when metadata routing is the only way
return any(scorer._accept_sample_weight() for scorer in self._scorers.values())
def _use_cache(self, estimator):
"""Return True if using a cache is beneficial, thus when a response method will
be called several time.
"""
if len(self._scorers) == 1: # Only one scorer
return False
counter = Counter(
[
_check_response_method(estimator, scorer._response_method).__name__
for scorer in self._scorers.values()
if isinstance(scorer, _BaseScorer)
]
)
if any(val > 1 for val in counter.values()):
# The exact same response method or iterable of response methods
# will be called more than once.
return True
return False
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.3
Returns
-------
routing : MetadataRouter
A :class:`~utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
return MetadataRouter(owner=self).add(
**self._scorers,
method_mapping=MethodMapping().add(caller="score", callee="score"),
)
| _MultimetricScorer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 402031,
"end": 402899
} | class ____(sgqlc.types.Interface):
"""Entities that can be minimized."""
__schema__ = github_schema
__field_names__ = ("is_minimized", "minimized_reason", "viewer_can_minimize")
is_minimized = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isMinimized")
"""Returns whether or not a comment has been minimized."""
minimized_reason = sgqlc.types.Field(String, graphql_name="minimizedReason")
"""Returns why the comment was minimized. One of `abuse`, `off-
topic`, `outdated`, `resolved`, `duplicate` and `spam`. Note that
the case and formatting of these values differs from the inputs to
the `MinimizeComment` mutation.
"""
viewer_can_minimize = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanMinimize")
"""Check if the current viewer can minimize this object."""
| Minimizable |
python | pola-rs__polars | py-polars/src/polars/datatype_expr/struct.py | {
"start": 122,
"end": 2008
} | class ____:
"""Namespace for struct datatype expressions."""
_accessor = "struct"
def __init__(self, expr: pl.DataTypeExpr) -> None:
self._pydatatype_expr = expr._pydatatype_expr
def __getitem__(self, item: str | int) -> pl.DataTypeExpr:
if isinstance(item, str):
return self.field_dtype(item)
elif isinstance(item, int):
return pl.DataTypeExpr._from_pydatatype_expr(
self._pydatatype_expr.struct_field_dtype_by_index(item)
)
else:
msg = f"expected type 'int | str', got {qualified_type_name(item)!r} ({item!r})"
raise TypeError(msg)
def field_dtype(self, field_name: str) -> pl.DataTypeExpr:
"""
Get the DataType of field with a specific field name.
Notes
-----
The `struct` namespace has implemented `__getitem__` so you can also access
fields by index:
>>> (
... pl.Struct({"x": pl.Int64, "y": pl.String})
... .to_dtype_expr()
... .struct[1]
... .collect_dtype({})
... )
String
"""
return pl.DataTypeExpr._from_pydatatype_expr(
self._pydatatype_expr.struct_field_dtype_by_name(field_name)
)
def field_names(self) -> pl.Expr:
"""
Get the field names in a struct as a list.
Examples
--------
>>> pl.select(
... pl.Struct({"x": pl.Int64, "y": pl.String})
... .to_dtype_expr()
... .struct.field_names()
... )
shape: (2, 1)
┌─────────┐
│ literal │
│ --- │
│ str │
╞═════════╡
│ x │
│ y │
└─────────┘
"""
return pl.Expr._from_pyexpr(self._pydatatype_expr.struct_field_names())
| DataTypeExprStructNameSpace |
python | cherrypy__cherrypy | cherrypy/process/servers.py | {
"start": 9166,
"end": 9885
} | class ____(object):
"""Adapter for a flup.server.cgi.WSGIServer."""
def __init__(self, *args, **kwargs):
"""Initialize the flup CGI Server plugin."""
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the CGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.cgi import WSGIServer
self.cgiserver = WSGIServer(*self.args, **self.kwargs)
self.ready = True
self.cgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
| FlupCGIServer |
python | wandb__wandb | wandb/vendor/pygments/lexers/haskell.py | {
"start": 22758,
"end": 23443
} | class ____(LiterateLexer):
"""
For Literate Agda source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
.. versionadded:: 2.0
"""
name = 'Literate Agda'
aliases = ['lagda', 'literate-agda']
filenames = ['*.lagda']
mimetypes = ['text/x-literate-agda']
def __init__(self, **options):
agdalexer = AgdaLexer(**options)
LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options)
| LiterateAgdaLexer |
python | yaml__pyyaml | tests/legacy_tests/test_multi_constructor.py | {
"start": 513,
"end": 553
} | class ____(yaml.FullLoader):
pass
| Multi1 |
python | spack__spack | lib/spack/spack/database.py | {
"start": 73730,
"end": 73865
} | class ____(SpackError):
"""Raised to signal Database.reindex that the reindex should happen via spec.json"""
| DatabaseNotReadableError |
python | pydantic__pydantic | pydantic/types.py | {
"start": 74935,
"end": 76047
} | class ____(EncoderProtocol):
"""URL-safe Base64 encoder."""
@classmethod
def decode(cls, data: bytes) -> bytes:
"""Decode the data from base64 encoded bytes to original bytes data.
Args:
data: The data to decode.
Returns:
The decoded data.
"""
try:
return base64.urlsafe_b64decode(data)
except ValueError as e:
raise PydanticCustomError('base64_decode', "Base64 decoding error: '{error}'", {'error': str(e)})
@classmethod
def encode(cls, value: bytes) -> bytes:
"""Encode the data from bytes to a base64 encoded bytes.
Args:
value: The data to encode.
Returns:
The encoded data.
"""
return base64.urlsafe_b64encode(value)
@classmethod
def get_json_format(cls) -> Literal['base64url']:
"""Get the JSON format for the encoded data.
Returns:
The JSON format for the encoded data.
"""
return 'base64url'
@_dataclasses.dataclass(**_internal_dataclass.slots_true)
| Base64UrlEncoder |
python | spack__spack | lib/spack/spack/reporters/junit.py | {
"start": 181,
"end": 1045
} | class ____(Reporter):
"""Generate reports of spec installations for JUnit."""
_jinja_template = "reports/junit.xml"
def concretization_report(self, filename, msg):
pass
def build_report(self, filename, specs):
for spec in specs:
spec.summarize()
if not (os.path.splitext(filename))[1]:
# Ensure the report name will end with the proper extension;
# otherwise, it currently defaults to the "directory" name.
filename = filename + ".xml"
report_data = {"specs": specs}
with open(filename, "w", encoding="utf-8") as f:
env = spack.tengine.make_environment()
t = env.get_template(self._jinja_template)
f.write(t.render(report_data))
def test_report(self, filename, specs):
self.build_report(filename, specs)
| JUnit |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-form-subsequence-with-target-sum.py | {
"start": 2615,
"end": 3452
} | class ____(object):
def minOperations(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
def floor_log2_x(x):
return x.bit_length()-1
if sum(nums) < target:
return -1
cnt = [0]*(floor_log2_x(max(nums))+1)
for x in nums:
cnt[floor_log2_x(x)] += 1
result = i = 0
while i < len(cnt):
if target&(1<<i):
if not cnt[i]:
j = next(j for j in xrange(i, len(cnt)) if cnt[j])
result += j-i
j = i
cnt[i] -= 1
continue
cnt[i] -= 1
if i+1 < len(cnt):
cnt[i+1] += cnt[i]//2
i += 1
return result
| Solution4 |
python | huggingface__transformers | src/transformers/models/rt_detr/modeling_rt_detr.py | {
"start": 24919,
"end": 26120
} | class ____(nn.Module):
"""
Convolutional backbone using the modeling_rt_detr_resnet.py.
nn.BatchNorm2d layers are replaced by RTDetrFrozenBatchNorm2d as defined above.
https://github.com/lyuwenyu/RT-DETR/blob/main/rtdetr_pytorch/src/nn/backbone/presnet.py#L142
"""
def __init__(self, config):
super().__init__()
backbone = load_backbone(config)
if config.freeze_backbone_batch_norms:
# replace batch norm by frozen batch norm
with torch.no_grad():
replace_batch_norm(backbone)
self.model = backbone
self.intermediate_channel_sizes = self.model.channels
def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
# send pixel_values through the model to get list of feature maps
features = self.model(pixel_values).feature_maps
out = []
for feature_map in features:
# downsample pixel_mask to match shape of corresponding feature_map
mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
out.append((feature_map, mask))
return out
| RTDetrConvEncoder |
python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/axislines.py | {
"start": 5356,
"end": 6536
} | class ____(_FixedAxisArtistHelperBase):
def __init__(self, axes, loc):
super().__init__(loc)
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
# TICK
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
angle_normal, angle_tangent = {0: (90, 0), 1: (0, 90)}[self.nth_coord]
major = self.axis.major
major_locs = major.locator()
major_labels = major.formatter.format_ticks(major_locs)
minor = self.axis.minor
minor_locs = minor.locator()
minor_labels = minor.formatter.format_ticks(minor_locs)
tick_to_axes = self.get_tick_transform(axes) - axes.transAxes
def _f(locs, labels):
for loc, label in zip(locs, labels):
c = self._to_xy(loc, const=self._pos)
# check if the tick point is inside axes
c2 = tick_to_axes.transform(c)
if mpl.transforms._interval_contains_close((0, 1), c2[self.nth_coord]):
yield c, angle_normal, angle_tangent, label
return _f(major_locs, major_labels), _f(minor_locs, minor_labels)
| FixedAxisArtistHelperRectilinear |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_mlab.py | {
"start": 35135,
"end": 36665
} | class ____:
def test_kde_integer_input(self):
"""Regression test for #1181."""
x1 = np.arange(5)
kde = mlab.GaussianKDE(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869,
0.13480721]
np.testing.assert_array_almost_equal(kde(x1), y_expected, decimal=6)
def test_gaussian_kde_covariance_caching(self):
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754,
0.01664475]
# set it to the default bandwidth.
kde2 = mlab.GaussianKDE(x1, 'scott')
y2 = kde2(xs)
np.testing.assert_array_almost_equal(y_expected, y2, decimal=7)
def test_kde_bandwidth_method(self):
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = mlab.GaussianKDE(xn)
# Supply a callable
gkde2 = mlab.GaussianKDE(xn, 'scott')
# Supply a scalar
gkde3 = mlab.GaussianKDE(xn, bw_method=gkde.factor)
xs = np.linspace(-7, 7, 51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
assert kdepdf.all() == kdepdf2.all()
kdepdf3 = gkde3.evaluate(xs)
assert kdepdf.all() == kdepdf3.all()
| TestGaussianKDE |
python | marshmallow-code__apispec | tests/test_ext_marshmallow_openapi.py | {
"start": 15290,
"end": 23081
} | class ____:
def test_schema2jsonschema_with_nested_fields(self, spec_fixture):
res = spec_fixture.openapi.schema2jsonschema(PetSchema)
props = res["properties"]
assert props["category"]["items"] == build_ref(
spec_fixture.spec, "schema", "Category"
)
@pytest.mark.parametrize("modifier", ("only", "exclude"))
def test_schema2jsonschema_with_nested_fields_only_exclude(
self, spec_fixture, modifier
):
class Child(Schema):
i = fields.Int()
j = fields.Int()
class Parent(Schema):
child = fields.Nested(Child, **{modifier: ("i",)})
spec_fixture.openapi.schema2jsonschema(Parent)
props = get_schemas(spec_fixture.spec)["Child"]["properties"]
assert ("i" in props) == (modifier == "only")
assert ("j" not in props) == (modifier == "only")
def test_schema2jsonschema_with_plucked_field(self, spec_fixture):
class PetSchema(Schema):
breed = fields.Pluck(CategorySchema, "breed")
category_schema = spec_fixture.openapi.schema2jsonschema(CategorySchema)
pet_schema = spec_fixture.openapi.schema2jsonschema(PetSchema)
assert (
pet_schema["properties"]["breed"] == category_schema["properties"]["breed"]
)
def test_schema2jsonschema_with_nested_fields_with_adhoc_changes(
self, spec_fixture
):
category_schema = CategorySchema()
category_schema.fields["id"].required = True
class PetSchema(Schema):
category = fields.Nested(category_schema, many=True)
name = fields.Str()
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
props = get_schemas(spec_fixture.spec)
assert props["Category"] == spec_fixture.openapi.schema2jsonschema(
category_schema
)
assert set(props["Category"]["required"]) == {"id", "name"}
props["Category"]["required"] = ["name"]
assert props["Category"] == spec_fixture.openapi.schema2jsonschema(
CategorySchema
)
def test_schema2jsonschema_with_plucked_fields_with_adhoc_changes(
self, spec_fixture
):
category_schema = CategorySchema()
category_schema.fields["breed"].dump_only = True
class PetSchema(Schema):
breed = fields.Pluck(category_schema, "breed", many=True)
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
props = get_schemas(spec_fixture.spec)["Pet"]["properties"]
assert props["breed"]["items"]["readOnly"] is True
def test_schema2jsonschema_with_nested_excluded_fields(self, spec):
category_schema = CategorySchema(exclude=("breed",))
class PetSchema(Schema):
category = fields.Nested(category_schema)
spec.components.schema("Pet", schema=PetSchema)
category_props = get_schemas(spec)["Category"]["properties"]
assert "breed" not in category_props
def test_openapi_tools_validate_v2():
ma_plugin = MarshmallowPlugin()
spec = APISpec(
title="Pets", version="0.1", plugins=(ma_plugin,), openapi_version="2.0"
)
openapi = ma_plugin.converter
assert openapi is not None
spec.components.schema("Category", schema=CategorySchema)
spec.components.schema("Pet", {"discriminator": "name"}, schema=PetSchema)
spec.path(
view=None,
path="/category/{category_id}",
operations={
"get": {
"parameters": [
{"name": "q", "in": "query", "type": "string"},
{
"name": "category_id",
"in": "path",
"required": True,
"type": "string",
},
openapi._field2parameter(
field=fields.List(
fields.Str(),
validate=validate.OneOf(["freddie", "roger"]),
),
location="query",
name="body",
),
]
+ openapi.schema2parameters(PageSchema, location="query"),
"responses": {200: {"schema": PetSchema, "description": "A pet"}},
},
"post": {
"parameters": (
[
{
"name": "category_id",
"in": "path",
"required": True,
"type": "string",
}
]
+ openapi.schema2parameters(CategorySchema, location="body")
),
"responses": {201: {"schema": PetSchema, "description": "A pet"}},
},
},
)
try:
validate_spec(spec)
except exceptions.OpenAPIError as error:
pytest.fail(str(error))
def test_openapi_tools_validate_v3():
ma_plugin = MarshmallowPlugin()
spec = APISpec(
title="Pets", version="0.1", plugins=(ma_plugin,), openapi_version="3.0.0"
)
openapi = ma_plugin.converter
assert openapi is not None
spec.components.schema("Category", schema=CategorySchema)
spec.components.schema("Pet", schema=PetSchema)
spec.path(
view=None,
path="/category/{category_id}",
operations={
"get": {
"parameters": [
{"name": "q", "in": "query", "schema": {"type": "string"}},
{
"name": "category_id",
"in": "path",
"required": True,
"schema": {"type": "string"},
},
openapi._field2parameter(
field=fields.List(
fields.Str(),
validate=validate.OneOf(["freddie", "roger"]),
),
location="query",
name="body",
),
]
+ openapi.schema2parameters(PageSchema, location="query"),
"responses": {
200: {
"description": "success",
"content": {"application/json": {"schema": PetSchema}},
}
},
},
"post": {
"parameters": (
[
{
"name": "category_id",
"in": "path",
"required": True,
"schema": {"type": "string"},
}
]
),
"requestBody": {
"content": {"application/json": {"schema": CategorySchema}}
},
"responses": {
201: {
"description": "created",
"content": {"application/json": {"schema": PetSchema}},
}
},
},
},
)
try:
validate_spec(spec)
except exceptions.OpenAPIError as error:
pytest.fail(str(error))
def test_openapi_converter_openapi_version_types():
spec = APISpec(title="Pets", version="0.1", openapi_version="2.0")
converter_with_version = OpenAPIConverter(Version("3.1"), None, spec)
converter_with_str_version = OpenAPIConverter("3.1", None, spec)
assert (
converter_with_version.openapi_version
== converter_with_str_version.openapi_version
)
| TestNesting |
python | great-expectations__great_expectations | great_expectations/execution_engine/pandas_execution_engine.py | {
"start": 2245,
"end": 31078
} | class ____(ExecutionEngine[str]):
"""PandasExecutionEngine instantiates the ExecutionEngine API to support computations using Pandas.
Constructor builds a PandasExecutionEngine, using provided configuration options.
Args:
*args: Positional arguments for configuring PandasExecutionEngine
**kwargs: Keyword arguments for configuring PandasExecutionEngine
For example:
```python
execution_engine: ExecutionEngine = PandasExecutionEngine(batch_data_dict={batch.id: batch.data})
```
--ge-feature-maturity-info--
id: validation_engine_pandas
title: Validation Engine - Pandas
icon:
short_description: Use Pandas DataFrame to validate data
description: Use Pandas DataFrame to validate data
how_to_guide_url:
maturity: Production
maturity_details:
api_stability: Stable
implementation_completeness: Complete
unit_test_coverage: Complete
integration_infrastructure_test_coverage: N/A -> see relevant Datasource evaluation
documentation_completeness: Complete
bug_risk: Low
expectation_completeness: Complete
--ge-feature-maturity-info--
""" # noqa: E501 # FIXME CoP
recognized_batch_spec_defaults = {
"reader_method",
"reader_options",
}
def __init__(self, *args, **kwargs) -> None:
self.discard_subset_failing_expectations = kwargs.pop(
"discard_subset_failing_expectations", False
)
boto3_options: Dict[str, dict] = kwargs.pop("boto3_options", {})
azure_options: Dict[str, dict] = kwargs.pop("azure_options", {})
gcs_options: Dict[str, dict] = kwargs.pop("gcs_options", {})
s3_client = kwargs.pop("s3_client", None)
# Instantiate cloud provider clients as None at first.
# They will be instantiated if/when passed cloud-specific in BatchSpec is passed in
self._s3: BaseClient | None = None
self._azure: azure.BlobServiceClient | None = None
self._gcs: google.Client | None = None
super().__init__(*args, **kwargs)
self._config.update(
{
"discard_subset_failing_expectations": self.discard_subset_failing_expectations,
"boto3_options": boto3_options,
"azure_options": azure_options,
"gcs_options": gcs_options,
"s3_client": s3_client,
}
)
self._data_partitioner = PandasDataPartitioner()
self._data_sampler = PandasDataSampler()
def _instantiate_azure_client(self) -> None:
self._azure = None
if azure.BlobServiceClient: # type: ignore[truthy-function] # False if NotImported
azure_options = self.config.get("azure_options", {})
try:
if "conn_str" in azure_options:
self._azure = azure.BlobServiceClient.from_connection_string(**azure_options)
else:
self._azure = azure.BlobServiceClient(**azure_options)
except (TypeError, AttributeError):
# If exception occurs, then "self._azure = None" remains in effect.
pass
def _instantiate_s3_client(self) -> None:
# If s3_client was passed in (from data source) use it, otherwise create our own
self._s3 = self._config.get("s3_client") or aws.boto3.client(
"s3", **self.config.get("boto3_options", {})
)
def _instantiate_gcs_client(self) -> None:
"""
Helper method for instantiating GCS client when GCSBatchSpec is passed in.
The method accounts for 3 ways that a GCS connection can be configured:
1. setting an environment variable, which is typically GOOGLE_APPLICATION_CREDENTIALS
2. passing in explicit credentials via gcs_options
3. running Great Expectations from within a GCP container, at which you would be able to create a Client
without passing in an additional environment variable or explicit credentials
""" # noqa: E501 # FIXME CoP
gcs_options = self.config.get("gcs_options", {})
try:
credentials = None # If configured with gcloud CLI / env vars
if "filename" in gcs_options:
filename = gcs_options.pop("filename")
credentials = google.service_account.Credentials.from_service_account_file(
filename=filename
)
elif "info" in gcs_options:
info = gcs_options.pop("info")
credentials = google.service_account.Credentials.from_service_account_info(
info=info
)
self._gcs = google.storage.Client(credentials=credentials, **gcs_options)
# This exception handling causes a TypeError if google dependency not installed
except (TypeError, AttributeError, google.DefaultCredentialsError):
self._gcs = None
@override
def configure_validator(self, validator) -> None:
super().configure_validator(validator)
validator.expose_dataframe_methods = True
@override
def load_batch_data(
self,
batch_id: str,
batch_data: Union[PandasBatchData, pd.DataFrame], # type: ignore[override] # FIXME CoP
) -> None:
if isinstance(batch_data, pd.DataFrame):
batch_data = PandasBatchData(self, batch_data)
elif not isinstance(batch_data, PandasBatchData):
raise gx_exceptions.GreatExpectationsError( # noqa: TRY003 # FIXME CoP
"PandasExecutionEngine requires batch data that is either a DataFrame or a PandasBatchData object" # noqa: E501 # FIXME CoP
)
super().load_batch_data(batch_id=batch_id, batch_data=batch_data)
@override
def get_batch_data_and_markers( # noqa: C901, PLR0912, PLR0915 # FIXME CoP
self, batch_spec: BatchSpec | PandasBatchSpecProtocol
) -> Tuple[PandasBatchData, BatchMarkers]: # batch_data
# We need to build a batch_markers to be used in the dataframe
batch_markers = BatchMarkers(
{
"ge_load_time": datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
}
)
batch_data: Any
if isinstance(batch_spec, RuntimeDataBatchSpec):
# batch_data != None is already checked when RuntimeDataBatchSpec is instantiated
batch_data = batch_spec.batch_data
if isinstance(batch_data, str):
raise gx_exceptions.ExecutionEngineError( # noqa: TRY003 # FIXME CoP
f"""PandasExecutionEngine has been passed a string type batch_data, "{batch_data}", which is illegal. Please check your config.
""" # noqa: E501 # FIXME CoP
)
if isinstance(batch_spec.batch_data, pd.DataFrame):
df = batch_spec.batch_data
elif isinstance(batch_spec.batch_data, PandasBatchData):
df = batch_spec.batch_data.dataframe
else:
raise ValueError( # noqa: TRY003, TRY004 # FIXME CoP
"RuntimeDataBatchSpec must provide a Pandas DataFrame or PandasBatchData object." # noqa: E501 # FIXME CoP
)
batch_spec.batch_data = "PandasDataFrame"
elif isinstance(batch_spec, S3BatchSpec):
if self._s3 is None:
self._instantiate_s3_client()
s3_engine = self._s3
try:
reader_method: str = batch_spec.reader_method
reader_options: dict = batch_spec.reader_options or {}
path: str = batch_spec.path
s3_url = S3Url(path)
if "compression" not in reader_options:
inferred_compression_param = sniff_s3_compression(s3_url)
if inferred_compression_param is not None:
reader_options["compression"] = inferred_compression_param
if s3_engine:
s3_object: dict = s3_engine.get_object(Bucket=s3_url.bucket, Key=s3_url.key)
except (
aws.exceptions.ParamValidationError,
aws.exceptions.ClientError,
) as error:
raise gx_exceptions.ExecutionEngineError( # noqa: TRY003 # FIXME CoP
f"""PandasExecutionEngine encountered the following error while trying to read data from S3 Bucket: {error}""" # noqa: E501 # FIXME CoP
)
logger.debug(f"Fetching s3 object. Bucket: {s3_url.bucket} Key: {s3_url.key}")
reader_fn: DataFrameFactoryFn = self._get_reader_fn(reader_method, s3_url.key)
buf = BytesIO(s3_object["Body"].read()) # type: ignore[possibly-undefined] # FIXME
buf.seek(0)
df = reader_fn(buf, **reader_options)
elif isinstance(batch_spec, AzureBatchSpec):
if self._azure is None:
self._instantiate_azure_client()
# if we were not able to instantiate Azure client, then raise error
if self._azure is None:
raise gx_exceptions.ExecutionEngineError( # noqa: TRY003 # FIXME CoP
"""PandasExecutionEngine has been passed a AzureBatchSpec,
but the ExecutionEngine does not have an Azure client configured. Please check your config.""" # noqa: E501 # FIXME CoP
)
azure_engine = self._azure
reader_method = batch_spec.reader_method
reader_options = batch_spec.reader_options or {}
path = batch_spec.path
azure_url = AzureUrl(path)
blob_client = azure_engine.get_blob_client(
container=azure_url.container, blob=azure_url.blob
)
azure_object = blob_client.download_blob()
logger.debug(
f"Fetching Azure blob. Container: {azure_url.container} Blob: {azure_url.blob}"
)
reader_fn = self._get_reader_fn(reader_method, azure_url.blob)
buf = BytesIO(azure_object.readall())
buf.seek(0)
df = reader_fn(buf, **reader_options)
elif isinstance(batch_spec, GCSBatchSpec):
if self._gcs is None:
self._instantiate_gcs_client()
# if we were not able to instantiate GCS client, then raise error
if self._gcs is None:
raise gx_exceptions.ExecutionEngineError( # noqa: TRY003 # FIXME CoP
"""PandasExecutionEngine has been passed a GCSBatchSpec,
but the ExecutionEngine does not have an GCS client configured. Please check your config.""" # noqa: E501 # FIXME CoP
)
gcs_engine = self._gcs
gcs_url = GCSUrl(batch_spec.path)
reader_method = batch_spec.reader_method
reader_options = batch_spec.reader_options or {}
try:
gcs_bucket = gcs_engine.get_bucket(gcs_url.bucket)
gcs_blob = gcs_bucket.blob(gcs_url.blob)
logger.debug(f"Fetching GCS blob. Bucket: {gcs_url.bucket} Blob: {gcs_url.blob}")
except google.GoogleAPIError as error:
raise gx_exceptions.ExecutionEngineError( # noqa: TRY003 # FIXME CoP
f"""PandasExecutionEngine encountered the following error while trying to read data from GCS \
Bucket: {error}""" # noqa: E501 # FIXME CoP
)
reader_fn = self._get_reader_fn(reader_method, gcs_url.blob)
buf = BytesIO(gcs_blob.download_as_bytes())
buf.seek(0)
df = reader_fn(buf, **reader_options)
# Experimental datasources will go down this code path
elif isinstance(batch_spec, PathBatchSpec):
reader_method = batch_spec.reader_method
reader_options = batch_spec.reader_options
path = batch_spec.path
reader_fn = self._get_reader_fn(reader_method, path)
df = reader_fn(path, **reader_options)
elif isinstance(batch_spec, PandasBatchSpec):
reader_method = batch_spec.reader_method
reader_options = batch_spec.reader_options
reader_fn = self._get_reader_fn(reader_method)
reader_fn_result: pd.DataFrame | list[pd.DataFrame] = execute_pandas_reader_fn(
reader_fn, reader_options
)
if isinstance(reader_fn_result, list):
if len(reader_fn_result) > 1:
raise gx_exceptions.ExecutionEngineError( # noqa: TRY003 # FIXME CoP
"Pandas reader method must return a single DataFrame, "
f'but "{reader_method}" returned {len(reader_fn_result)} DataFrames.'
)
else:
df = reader_fn_result[0]
else:
df = reader_fn_result
elif isinstance(batch_spec, FabricBatchSpec):
reader_fn = batch_spec.get_reader_function()
df = reader_fn(**batch_spec.reader_options)
else:
raise gx_exceptions.BatchSpecError( # noqa: TRY003 # FIXME CoP
f"""batch_spec must be of type RuntimeDataBatchSpec, PandasBatchSpec, PathBatchSpec, S3BatchSpec, AzureBatchSpec or FabricBatchSpec \
not {batch_spec.__class__.__name__}""" # noqa: E501 # FIXME CoP
)
df = self._apply_partitioning_and_sampling_methods(batch_spec, df) # type: ignore[arg-type] # FIXME CoP
if df.memory_usage().sum() < HASH_THRESHOLD:
batch_markers["pandas_data_fingerprint"] = hash_pandas_dataframe(df)
typed_batch_data = PandasBatchData(execution_engine=self, dataframe=df)
return typed_batch_data, batch_markers
def _apply_partitioning_and_sampling_methods(
self,
batch_spec: BatchSpec | PandasBatchSpecProtocol,
batch_data: PandasBatchData,
):
# partitioning and sampling not supported for FabricBatchSpec
if isinstance(batch_spec, BatchSpec):
partitioner_method_name: Optional[str] = batch_spec.get("partitioner_method")
if partitioner_method_name:
partitioner_fn: Callable = self._data_partitioner.get_partitioner_method(
partitioner_method_name
)
partitioner_kwargs: dict = batch_spec.get("partitioner_kwargs") or {}
batch_data = partitioner_fn(batch_data, **partitioner_kwargs)
sampler_method_name: Optional[str] = batch_spec.get("sampling_method")
if sampler_method_name:
sampling_fn: Callable = self._data_sampler.get_sampler_method(sampler_method_name)
batch_data = sampling_fn(batch_data, batch_spec)
return batch_data
@property
def dataframe(self) -> pd.DataFrame:
"""Tests whether or not a Batch has been loaded. If the loaded batch does not exist, raises a
ValueError Exception
""" # noqa: E501 # FIXME CoP
# Changed to is None because was breaking prior
if self.batch_manager.active_batch_data is None:
raise ValueError( # noqa: TRY003 # FIXME CoP
"Batch has not been loaded - please run load_batch_data() to load a batch."
)
return cast("PandasBatchData", self.batch_manager.active_batch_data).dataframe
# NOTE Abe 20201105: Any reason this shouldn't be a private method?
@staticmethod
def guess_reader_method_from_path(path: str): # noqa: C901, PLR0911 # FIXME CoP
"""Helper method for deciding which reader to use to read in a certain path.
Args:
path (str): the to use to guess
Returns:
ReaderMethod to use for the filepath
"""
path = path.lower()
if path.endswith(".csv") or path.endswith(".tsv"):
return {"reader_method": "read_csv"}
elif path.endswith(".parquet") or path.endswith(".parq") or path.endswith(".pqt"):
return {"reader_method": "read_parquet"}
elif path.endswith(".xlsx") or path.endswith(".xls"):
return {"reader_method": "read_excel"}
elif path.endswith(".json"):
return {"reader_method": "read_json"}
elif path.endswith(".pkl"):
return {"reader_method": "read_pickle"}
elif path.endswith(".feather"):
return {"reader_method": "read_feather"}
elif path.endswith(".csv.gz") or path.endswith(".tsv.gz"):
return {
"reader_method": "read_csv",
"reader_options": {"compression": "gzip"},
}
elif path.endswith(".sas7bdat") or path.endswith(".xpt"):
return {"reader_method": "read_sas"}
else:
raise gx_exceptions.ExecutionEngineError( # noqa: TRY003 # FIXME CoP
f'Unable to determine reader method from path: "{path}".'
)
@overload
def _get_reader_fn(
self, reader_method: str = ..., path: Optional[str] = ...
) -> DataFrameFactoryFn: ...
@overload
def _get_reader_fn(self, reader_method: None = ..., path: str = ...) -> DataFrameFactoryFn: ...
def _get_reader_fn(
self, reader_method: Optional[str] = None, path: Optional[str] = None
) -> DataFrameFactoryFn:
"""Static helper for parsing reader types. If reader_method is not provided, path will be used to guess the
correct reader_method.
Args:
reader_method (str): the name of the reader method to use, if available.
path (str): the path used to guess
Returns:
ReaderMethod to use for the filepath
""" # noqa: E501 # FIXME CoP
if reader_method is None and path is None:
raise gx_exceptions.ExecutionEngineError( # noqa: TRY003 # FIXME CoP
"Unable to determine pandas reader function without reader_method or path."
)
reader_options = {}
if reader_method is None:
path_guess = self.guess_reader_method_from_path(path) # type: ignore[arg-type] # see overload
reader_method = path_guess["reader_method"]
reader_options = path_guess.get(
"reader_options"
) # This may not be there; use None in that case
try:
reader_fn = getattr(pd, reader_method)
if reader_options:
reader_fn = partial(reader_fn, **reader_options)
return reader_fn
except AttributeError:
raise gx_exceptions.ExecutionEngineError( # noqa: TRY003 # FIXME CoP
f'Unable to find reader_method "{reader_method}" in pandas.'
)
@override
def resolve_metric_bundle(self, metric_fn_bundle) -> dict[MetricConfigurationID, Any]:
"""Resolve a bundle of metrics with the same compute Domain as part of a single trip to the compute engine.""" # noqa: E501 # FIXME CoP
return {} # This is NO-OP for "PandasExecutionEngine" (no bundling for direct execution computational backend). # noqa: E501 # FIXME CoP
def _apply_row_condition_filter(
self, data: pd.DataFrame, row_condition: Any, domain_kwargs: dict
) -> pd.DataFrame:
"""Apply row condition filter to DataFrame.
Args:
data: The DataFrame to filter
row_condition: The condition to apply (can be Condition object, dict, or string)
domain_kwargs: Domain kwargs containing condition_parser if needed
Returns:
Filtered DataFrame
"""
# Convert dict to Condition object if needed
if isinstance(row_condition, dict):
row_condition = deserialize_row_condition(row_condition)
if isinstance(row_condition, PassThroughCondition):
# Use pass_through_filter for pandas query syntax
# Uses DataFrame.query() directly with the pass-through string
return data.query(row_condition.pass_through_filter)
elif isinstance(row_condition, Condition):
# Handle other Condition objects using condition_to_filter_clause
return data.query(self.condition_to_filter_clause(row_condition))
else:
# Legacy string-based conditions
condition_parser = domain_kwargs.get("condition_parser", None)
if (
condition_parser
and condition_parser == CONDITION_PARSER_PANDAS
and isinstance(row_condition, str)
):
return data.query(row_condition, parser=condition_parser)
else:
raise gx_exceptions.ValidationError( # noqa: TRY003 # FIXME CoP
"condition_parser for Pandas is required when setting a row_condition."
)
@override
def get_domain_records( # noqa: C901, PLR0912 # FIXME CoP
self,
domain_kwargs: dict,
) -> pd.DataFrame:
"""Uses the given Domain kwargs (which include row_condition, condition_parser, and ignore_row_if directives) to obtain and/or query a Batch of data.
Args:
domain_kwargs (dict) - A dictionary consisting of the Domain kwargs specifying which data to obtain
Returns:
A DataFrame (the data on which to compute returned in the format of a Pandas DataFrame)
""" # noqa: E501 # FIXME CoP
table = domain_kwargs.get("table", None)
if table:
raise ValueError( # noqa: TRY003 # FIXME CoP
"PandasExecutionEngine does not currently support multiple named tables."
)
batch_id = domain_kwargs.get("batch_id")
if batch_id is None:
# We allow no batch id specified if there is only one batch
if self.batch_manager.active_batch_data_id is not None:
data = cast("PandasBatchData", self.batch_manager.active_batch_data).dataframe
else:
raise gx_exceptions.ValidationError( # noqa: TRY003 # FIXME CoP
"No batch is specified, but could not identify a loaded batch."
)
else: # noqa: PLR5501 # FIXME CoP
if batch_id in self.batch_manager.batch_data_cache:
data = cast(
"PandasBatchData", self.batch_manager.batch_data_cache[batch_id]
).dataframe
else:
raise gx_exceptions.ValidationError( # noqa: TRY003 # FIXME CoP
f"Unable to find batch with batch_id {batch_id}"
)
# Filtering by row condition.
row_condition = domain_kwargs.get("row_condition", None)
if row_condition:
data = self._apply_row_condition_filter(data, row_condition, domain_kwargs)
if "column" in domain_kwargs:
return data
if (
"column_A" in domain_kwargs
and "column_B" in domain_kwargs
and "ignore_row_if" in domain_kwargs
):
# noinspection PyPep8Naming
column_A_name = domain_kwargs["column_A"]
# noinspection PyPep8Naming
column_B_name = domain_kwargs["column_B"]
ignore_row_if = domain_kwargs["ignore_row_if"]
if ignore_row_if == "both_values_are_missing":
data = data.dropna(
axis=0,
how="all",
subset=[column_A_name, column_B_name],
)
elif ignore_row_if == "either_value_is_missing":
data = data.dropna(
axis=0,
how="any",
subset=[column_A_name, column_B_name],
)
else: # noqa: PLR5501 # FIXME CoP
if ignore_row_if != "neither":
raise ValueError(f'Unrecognized value of ignore_row_if ("{ignore_row_if}").') # noqa: TRY003 # FIXME CoP
return data
if "column_list" in domain_kwargs and "ignore_row_if" in domain_kwargs:
column_list = domain_kwargs["column_list"]
ignore_row_if = domain_kwargs["ignore_row_if"]
if ignore_row_if == "all_values_are_missing":
data = data.dropna(
axis=0,
how="all",
subset=column_list,
)
elif ignore_row_if == "any_value_is_missing":
data = data.dropna(
axis=0,
how="any",
subset=column_list,
)
else: # noqa: PLR5501 # FIXME CoP
if ignore_row_if != "never":
raise ValueError(f'Unrecognized value of ignore_row_if ("{ignore_row_if}").') # noqa: TRY003 # FIXME CoP
return data
return data
@override
def get_compute_domain(
self,
domain_kwargs: dict,
domain_type: Union[str, MetricDomainTypes],
accessor_keys: Optional[Iterable[str]] = None,
) -> Tuple[pd.DataFrame, dict, dict]:
"""Uses the given Domain kwargs (which include row_condition, condition_parser, and ignore_row_if directives) to obtain and/or query a batch.
Returns in the format of a Pandas DataFrame along with Domain arguments required for computing. If the Domain \
is a single column, this is added to 'accessor Domain kwargs' and used for later access.
Args:
domain_kwargs (dict): a dictionary consisting of the Domain kwargs specifying which data to obtain
domain_type (str or MetricDomainTypes): an Enum value indicating which metric Domain the user would like \
to be using, or a corresponding string value representing it. String types include "column", \
"column_pair", "table", and "other". Enum types include capitalized versions of these from the class \
MetricDomainTypes.
accessor_keys (str iterable): keys that are part of the compute Domain but should be ignored when \
describing the Domain and simply transferred with their associated values into accessor_domain_kwargs.
Returns:
A tuple including:
- a DataFrame (the data on which to compute)
- a dictionary of compute_domain_kwargs, describing the DataFrame
- a dictionary of accessor_domain_kwargs, describing any accessors needed to
identify the Domain within the compute domain
""" # noqa: E501 # FIXME CoP
table: Optional[str] = domain_kwargs.get("table", None)
if table:
raise ValueError( # noqa: TRY003 # FIXME CoP
"PandasExecutionEngine does not currently support multiple named tables."
)
data: pd.DataFrame = self.get_domain_records(domain_kwargs=domain_kwargs)
partition_domain_kwargs: PartitionDomainKwargs = self._partition_domain_kwargs(
domain_kwargs, domain_type, accessor_keys
)
return data, partition_domain_kwargs.compute, partition_domain_kwargs.accessor
@override
def _comparison_condition_to_filter_clause(self, condition: ComparisonCondition) -> str:
col, op, val = condition.column.name, condition.operator, condition.parameter
if op in (Operator.IN, Operator.NOT_IN):
values = ", ".join(map(repr, val))
connector = "in" if op == Operator.IN else "not in"
return f"{col} {connector} [{values}]"
return f"{col} {op} {val!r}"
@override
def _nullity_condition_to_filter_clause(self, condition: NullityCondition) -> str:
col = condition.column.name
return f"{col}.isnull()" if condition.is_null else f"~{col}.isnull()"
@override
def _and_condition_to_filter_clause(self, condition: AndCondition) -> str:
parts = [self.condition_to_filter_clause(c) for c in condition.conditions]
return "(" + " and ".join(parts) + ")"
@override
def _or_condition_to_filter_clause(self, condition: OrCondition) -> str:
parts = [self.condition_to_filter_clause(c) for c in condition.conditions]
return "(" + " or ".join(parts) + ")"
def hash_pandas_dataframe(df):
try:
obj = pd.util.hash_pandas_object(df, index=True).values
except TypeError:
# In case of facing unhashable objects (like dict), use pickle
obj = pickle.dumps(df, pickle.HIGHEST_PROTOCOL)
return hashlib.md5(obj).hexdigest()
| PandasExecutionEngine |
python | redis__redis-py | tests/test_connection.py | {
"start": 11350,
"end": 13579
} | class ____:
@pytest.mark.parametrize(
"max_conn", (-1, "str"), ids=("non-positive", "wrong type")
)
def test_throws_error_on_incorrect_max_connections(self, max_conn):
with pytest.raises(
ValueError, match='"max_connections" must be a positive integer'
):
ConnectionPool(
max_connections=max_conn,
)
def test_throws_error_on_cache_enable_in_resp2(self):
with pytest.raises(
RedisError, match="Client caching is only supported with RESP version 3"
):
ConnectionPool(protocol=2, cache_config=CacheConfig())
def test_throws_error_on_incorrect_cache_implementation(self):
with pytest.raises(ValueError, match="Cache must implement CacheInterface"):
ConnectionPool(protocol=3, cache="wrong")
def test_returns_custom_cache_implementation(self, mock_cache):
connection_pool = ConnectionPool(protocol=3, cache=mock_cache)
assert mock_cache == connection_pool.cache
connection_pool.disconnect()
def test_creates_cache_with_custom_cache_factory(
self, mock_cache_factory, mock_cache
):
mock_cache_factory.get_cache.return_value = mock_cache
connection_pool = ConnectionPool(
protocol=3,
cache_config=CacheConfig(max_size=5),
cache_factory=mock_cache_factory,
)
assert connection_pool.cache == mock_cache
connection_pool.disconnect()
def test_creates_cache_with_given_configuration(self, mock_cache):
connection_pool = ConnectionPool(
protocol=3, cache_config=CacheConfig(max_size=100)
)
assert isinstance(connection_pool.cache, CacheInterface)
assert connection_pool.cache.config.get_max_size() == 100
assert isinstance(connection_pool.cache.eviction_policy, LRUPolicy)
connection_pool.disconnect()
def test_make_connection_proxy_connection_on_given_cache(self):
connection_pool = ConnectionPool(protocol=3, cache_config=CacheConfig())
assert isinstance(connection_pool.make_connection(), CacheProxyConnection)
connection_pool.disconnect()
| TestUnitConnectionPool |
python | getsentry__sentry | tests/sentry/conf/test_scopes.py | {
"start": 147,
"end": 1248
} | class ____(TestCase):
def test_scope_hierarchy_maintained(self) -> None:
assert "org:superuser" not in SENTRY_SCOPES
for scope in SENTRY_SCOPES:
assert scope in SENTRY_SCOPE_HIERARCHY_MAPPING
# exclude special OAuth scopes
if ":" not in scope:
continue
resource, access_level = scope.split(":")
# check that scope is in its own mapping
assert scope in SENTRY_SCOPE_HIERARCHY_MAPPING[scope]
# check that write grants read
if access_level == "write":
assert resource + ":read" in SENTRY_SCOPE_HIERARCHY_MAPPING[scope]
# # check that admin grants read+write
if access_level == "admin":
assert resource + ":read" in SENTRY_SCOPE_HIERARCHY_MAPPING[scope]
assert resource + ":write" in SENTRY_SCOPE_HIERARCHY_MAPPING[scope]
def test_readonly_scopes(self) -> None:
for scope in SENTRY_SCOPES:
if ":read" in scope:
assert scope in SENTRY_READONLY_SCOPES
| ScopesTest |
python | huggingface__transformers | src/transformers/models/dinov3_vit/modeling_dinov3_vit.py | {
"start": 16083,
"end": 17868
} | class ____(GradientCheckpointingLayer):
"""This corresponds to the Block class in the original implementation."""
def __init__(self, config: DINOv3ViTConfig):
super().__init__()
self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = DINOv3ViTAttention(config)
self.layer_scale1 = DINOv3ViTLayerScale(config)
self.drop_path = DINOv3ViTDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if config.use_gated_mlp:
self.mlp = DINOv3ViTGatedMLP(config)
else:
self.mlp = DINOv3ViTMLP(config)
self.layer_scale2 = DINOv3ViTLayerScale(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
) -> torch.Tensor:
# Attention with residual connection
residual = hidden_states
hidden_states = self.norm1(hidden_states)
hidden_states, _ = self.attention(
hidden_states,
attention_mask=attention_mask,
position_embeddings=position_embeddings,
)
hidden_states = self.layer_scale1(hidden_states)
hidden_states = self.drop_path(hidden_states) + residual
# MLP with residual connection
residual = hidden_states
hidden_states = self.norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = self.layer_scale2(hidden_states)
hidden_states = self.drop_path(hidden_states) + residual
return hidden_states
@auto_docstring
| DINOv3ViTLayer |
python | encode__httpx | httpx/_types.py | {
"start": 2676,
"end": 2965
} | class ____:
async def __aiter__(self) -> AsyncIterator[bytes]:
raise NotImplementedError(
"The '__aiter__' method must be implemented."
) # pragma: no cover
yield b"" # pragma: no cover
async def aclose(self) -> None:
pass
| AsyncByteStream |
python | celery__celery | celery/worker/consumer/agent.py | {
"start": 130,
"end": 525
} | class ____(bootsteps.StartStopStep):
"""Agent starts :pypi:`cell` actors."""
conditional = True
requires = (Connection,)
def __init__(self, c, **kwargs):
self.agent_cls = self.enabled = c.app.conf.worker_agent
super().__init__(c, **kwargs)
def create(self, c):
agent = c.agent = self.instantiate(self.agent_cls, c.connection)
return agent
| Agent |
python | falconry__falcon | tests/test_response_media.py | {
"start": 4337,
"end": 5377
} | class ____:
def test_text(self, client):
client.simulate_get('/')
resp = client.resource.captured_resp
resp.text = 'body'
resp.data = b'data'
resp.media = ['media']
assert resp.render_body() == b'body'
def test_data(self, client):
client.simulate_get('/')
resp = client.resource.captured_resp
resp.data = b'data'
resp.media = ['media']
assert resp.render_body() == b'data'
def test_media(self, client):
client.simulate_get('/')
resp = client.resource.captured_resp
resp.media = ['media']
assert json.loads(resp.render_body().decode('utf-8')) == ['media']
def test_media_rendered_cached(client):
client.simulate_get('/')
resp = client.resource.captured_resp
resp.media = {'foo': 'bar'}
first = resp.render_body()
assert first is resp.render_body()
assert first is resp._media_rendered
resp.media = 123
assert first is not resp.render_body()
| TestRenderBodyPrecedence |
python | Pylons__pyramid | src/pyramid/predicates.py | {
"start": 4572,
"end": 5178
} | class ____:
def __init__(self, val, config):
val = as_sorted_tuple(val)
self.val = val
reqs = [p.split('=', 1) for p in val]
self.reqs = [(x.strip(), y.strip()) for x, y in reqs]
def text(self):
return 'match_param %s' % ','.join([f'{x}={y}' for x, y in self.reqs])
phash = text
def __call__(self, context, request):
if not request.matchdict:
# might be None
return False
for k, v in self.reqs:
if request.matchdict.get(k) != v:
return False
return True
| MatchParamPredicate |
python | python__mypy | mypy/suggestions.py | {
"start": 6434,
"end": 7473
} | class ____(Exception):
pass
def is_explicit_any(typ: AnyType) -> bool:
# Originally I wanted to count as explicit anything derived from an explicit any, but that
# seemed too strict in some testing.
# return (typ.type_of_any == TypeOfAny.explicit
# or (typ.source_any is not None and typ.source_any.type_of_any == TypeOfAny.explicit))
# Important question: what should we do with source_any stuff? Does that count?
# And actually should explicit anys count at all?? Maybe not!
return typ.type_of_any == TypeOfAny.explicit
def is_implicit_any(typ: Type) -> bool:
typ = get_proper_type(typ)
return isinstance(typ, AnyType) and not is_explicit_any(typ)
def _arg_accepts_function(typ: ProperType) -> bool:
return (
# TypeVar / Callable
isinstance(typ, (TypeVarType, CallableType))
or
# Protocol with __call__
isinstance(typ, Instance)
and typ.type.is_protocol
and typ.type.get_method("__call__") is not None
)
| SuggestionFailure |
python | viewflow__viewflow | viewflow/views/create.py | {
"start": 917,
"end": 4673
} | class ____(
FormLayoutMixin, FormDependentSelectMixin, FormAjaxCompleteMixin, generic.CreateView
):
viewset = None
layout = None
form_widgets = None
template_name_suffix = "_create"
def has_add_permission(self, request):
if self.viewset is not None:
return self.viewset.has_add_permission(request.user)
else:
return has_object_perm(request.user, "add", self.model)
def get_object_url(self, obj):
if self.viewset is not None and hasattr(self.viewset, "get_object_url"):
return self.viewset.get_object_url(self.request, obj)
elif hasattr(obj, "get_absolute_url"):
if has_object_perm(self.request.user, "change", obj):
return obj.get_absolute_url()
def message_user(self):
url = self.get_object_url(self.object)
link = ""
if url:
link = format_html('<a href="{}">{}</a>', urlquote(url), _("View"))
message = format_html(
_("The {obj} was added successfully. {link}"),
obj=str(self.object),
link=link,
)
messages.add_message(
self.request, messages.SUCCESS, message, fail_silently=True
)
@viewprop
def queryset(self):
if self.viewset is not None and hasattr(self.viewset, "get_queryset"):
return self.viewset.get_queryset(self.request)
return None
def get_form_widgets(self):
if self.form_widgets is not None:
return self.form_widgets
elif self.viewset and hasattr(self.viewset, "get_create_form_widgets"):
return self.viewset.get_create_form_widgets(self.request)
elif self.viewset and hasattr(self.viewset, "get_form_widgets"):
return self.viewset.get_form_widgets(self.request)
return None
def get_form_class(self):
if self.form_class is not None:
return self.form_class
elif self.viewset and hasattr(self.viewset, "get_create_form_class"):
return self.viewset.get_create_form_class(self.request)
elif self.viewset and hasattr(self.viewset, "get_form_class"):
return self.viewset.get_form_class(self.request)
else:
return modelform_factory(
self.model,
form=ModelForm,
fields=self.fields,
widgets=self.get_form_widgets(),
)
def get_template_names(self):
"""
List of templates for the view.
If no `self.template_name` defined, uses::
[<app_label>/<model_label>_<suffix>.html,
<app_label>/<model_label>_form.html,
'viewflow/views/form.html']
"""
if self.template_name is None:
opts = self.model._meta
return [
"{}/{}{}.html".format(
opts.app_label, opts.model_name, self.template_name_suffix
),
"{}/{}_form.html".format(opts.app_label, opts.model_name),
"viewflow/views/form.html",
]
return [self.template_name]
def form_valid(self, *args, **kwargs):
response = super(CreateModelView, self).form_valid(*args, **kwargs)
self.message_user()
return response
def get_success_url(self):
if self.viewset and hasattr(self.viewset, "get_success_url"):
return self.viewset.get_success_url(self.request, obj=self.object)
return "../"
def dispatch(self, request, *args, **kwargs):
if not self.has_add_permission(self.request):
raise PermissionDenied
return super(CreateModelView, self).dispatch(request, *args, **kwargs)
| CreateModelView |
python | ray-project__ray | release/ray_release/cluster_manager/cluster_manager.py | {
"start": 631,
"end": 5045
} | class ____(abc.ABC):
def __init__(
self,
test: Test,
project_id: str,
sdk: Optional["AnyscaleSDK"] = None,
smoke_test: bool = False,
log_streaming_limit: int = LAST_LOGS_LENGTH,
):
self.sdk = sdk or get_anyscale_sdk()
self.test = test
self.smoke_test = smoke_test
self.project_id = project_id
self.project_name = get_project_name(self.project_id, self.sdk)
self.log_streaming_limit = log_streaming_limit
self.cluster_name = (
f"{test.get_name()}{'-smoke-test' if smoke_test else ''}_{int(time.time())}"
)
self.cluster_id = None
self.cluster_env = None
self.cluster_env_name = None
self.cluster_env_id = None
self.cluster_env_build_id = None
self.cluster_compute = None
self.cluster_compute_name = None
self.cluster_compute_id = None
self.cloud_provider = None
self.autosuspend_minutes = DEFAULT_AUTOSUSPEND_MINS
self.maximum_uptime_minutes = DEFAULT_MAXIMUM_UPTIME_MINS
def set_cluster_env(self):
byod_image_name_normalized = (
self.test.get_anyscale_byod_image()
.replace("/", "_")
.replace(":", "_")
.replace(".", "_")
)
self.cluster_env_name = (
f"{byod_image_name_normalized}"
f"__env__{dict_hash(self.test.get_byod_runtime_env())}"
)
def set_cluster_compute(
self,
cluster_compute: Dict[str, Any],
extra_tags: Optional[Dict[str, str]] = None,
):
extra_tags = extra_tags or {}
self.cluster_compute = cluster_compute
self.cluster_compute.setdefault(
"idle_termination_minutes", self.autosuspend_minutes
)
self.cluster_compute.setdefault(
"maximum_uptime_minutes", self.maximum_uptime_minutes
)
self.cloud_provider = self._get_cloud_provider(cluster_compute)
self.cluster_compute = self._annotate_cluster_compute(
self.cluster_compute,
cloud_provider=self.cloud_provider,
extra_tags=extra_tags,
)
self.cluster_compute_name = (
f"{self.project_name}_{self.project_id[4:8]}"
f"__compute__{self.test.get_name()}__"
f"{dict_hash(self.cluster_compute)}"
)
def _get_cloud_provider(self, cluster_compute: Dict[str, Any]) -> Optional[str]:
if not cluster_compute or "cloud_id" not in cluster_compute:
return None
try:
return self.sdk.get_cloud(cluster_compute["cloud_id"]).result.provider
except Exception as e:
raise CloudInfoError(f"Could not obtain cloud information: {e}") from e
def _annotate_cluster_compute(
self,
cluster_compute: Dict[str, Any],
cloud_provider: str,
extra_tags: Dict[str, str],
) -> Dict[str, Any]:
if not extra_tags or cloud_provider != "AWS":
return cluster_compute
cluster_compute = cluster_compute.copy()
if "aws" in cluster_compute:
raise ValueError(
"aws field is invalid in compute config, "
"use advanced_configurations_json instead"
)
aws = cluster_compute.get("advanced_configurations_json", {})
cluster_compute["advanced_configurations_json"] = add_tags_to_aws_config(
aws, extra_tags, RELEASE_AWS_RESOURCE_TYPES_TO_TRACK_FOR_BILLING
)
return cluster_compute
def build_configs(self, timeout: float = 30.0):
raise NotImplementedError
def delete_configs(self):
raise NotImplementedError
def start_cluster(self, timeout: float = 600.0):
raise NotImplementedError
def terminate_cluster(self, wait: bool = False):
try:
self.terminate_cluster_ex(wait=False)
except Exception as e:
logger.exception(f"Could not terminate cluster: {e}")
def terminate_cluster_ex(self, wait: bool = False):
raise NotImplementedError
def get_cluster_address(self) -> str:
raise NotImplementedError
def get_cluster_url(self) -> Optional[str]:
if not self.project_id or not self.cluster_id:
return None
return anyscale_cluster_url(self.project_id, self.cluster_id)
| ClusterManager |
python | huggingface__transformers | src/transformers/models/afmoe/modeling_afmoe.py | {
"start": 6651,
"end": 7806
} | class ____(nn.Module):
"""
Token-choice top-K router for MoE routing.
This router assigns each token to the top-K experts based on sigmoid scores, matching the released checkpoints.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.top_k = config.num_experts_per_tok
self.num_experts = config.num_experts
self.route_scale = config.route_scale
self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
def forward(self, hidden_states: torch.Tensor, expert_bias: torch.Tensor):
_, _, hidden_dim = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_dim)
scores = torch.sigmoid(self.gate(hidden_states).to(torch.float32))
_, selected_experts = torch.topk(scores + expert_bias, k=self.top_k, dim=1)
top_scores = scores.gather(dim=1, index=selected_experts)
denominator = top_scores.sum(dim=-1, keepdim=True) + 1e-20
top_scores = top_scores / denominator
top_scores = top_scores * self.route_scale
return top_scores, selected_experts
| AfmoeTokenChoiceRouter |
python | ansible__ansible | test/units/module_utils/common/test_sys_info.py | {
"start": 1276,
"end": 4604
} | class ____:
"""Tests for get_distribution that have to find something"""
def test_distro_known(self):
with patch('ansible.module_utils.distro.id', return_value="alpine"):
assert get_distribution() == "Alpine"
with patch('ansible.module_utils.distro.id', return_value="arch"):
assert get_distribution() == "Arch"
with patch('ansible.module_utils.distro.id', return_value="centos"):
assert get_distribution() == "Centos"
with patch('ansible.module_utils.distro.id', return_value="clear-linux-os"):
assert get_distribution() == "Clear-linux-os"
with patch('ansible.module_utils.distro.id', return_value="coreos"):
assert get_distribution() == "Coreos"
with patch('ansible.module_utils.distro.id', return_value="debian"):
assert get_distribution() == "Debian"
with patch('ansible.module_utils.distro.id', return_value="flatcar"):
assert get_distribution() == "Flatcar"
with patch('ansible.module_utils.distro.id', return_value="linuxmint"):
assert get_distribution() == "Linuxmint"
with patch('ansible.module_utils.distro.id', return_value="opensuse"):
assert get_distribution() == "Opensuse"
with patch('ansible.module_utils.distro.id', return_value="oracle"):
assert get_distribution() == "Oracle"
with patch('ansible.module_utils.distro.id', return_value="raspian"):
assert get_distribution() == "Raspian"
with patch('ansible.module_utils.distro.id', return_value="rhel"):
assert get_distribution() == "Redhat"
with patch('ansible.module_utils.distro.id', return_value="ubuntu"):
assert get_distribution() == "Ubuntu"
with patch('ansible.module_utils.distro.id', return_value="virtuozzo"):
assert get_distribution() == "Virtuozzo"
with patch('ansible.module_utils.distro.id', return_value="foo"):
assert get_distribution() == "Foo"
def test_distro_unknown(self):
with patch('ansible.module_utils.distro.id', return_value=""):
assert get_distribution() == "OtherLinux"
def test_distro_amazon_linux_short(self):
with patch('ansible.module_utils.distro.id', return_value="amzn"):
assert get_distribution() == "Amazon"
def test_distro_amazon_linux_long(self):
with patch('ansible.module_utils.distro.id', return_value="amazon"):
assert get_distribution() == "Amazon"
#
# get_distribution_version tests
#
@pytest.mark.parametrize(
('system', 'version'),
(
('Darwin', '19.6.0'),
('SunOS', '11.4'),
('FreeBSD', '12.1'),
),
)
def test_get_distribution_version_not_linux(mocker, system, version):
"""If it's not Linux, then it has no distribution"""
mocker.patch('platform.system', return_value=system)
mocker.patch('ansible.module_utils.common.sys_info.distro.version', return_value=version)
assert get_distribution_version() == version
@pytest.mark.usefixtures("platform_linux")
def test_distro_found():
with patch('ansible.module_utils.distro.version', return_value="1"):
assert get_distribution_version() == "1"
#
# Tests for get_platform_subclass
#
| TestGetDistribution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 225882,
"end": 226242
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "pull_request")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
pull_request = sgqlc.types.Field("PullRequest", graphql_name="pullRequest")
| ClosePullRequestPayload |
python | google__jax | jax/_src/pallas/mosaic/interpret/interpret_pallas_call.py | {
"start": 31599,
"end": 41491
} | class ____:
id: int
src_device_id: int
src_local_core_id: int
src_memory_space: int
src_buffer_id: int
src_transforms: tuple[Any, ...]
dst_device_id: int
dst_local_core_id: int
dst_memory_space: int
dst_buffer_id: int
dst_transforms: tuple[Any, ...]
src_sem: memory.Semaphore | None
dst_sem: memory.Semaphore
virtual_device_id: int
clock: vc.VectorClock
source_info: source_info_util.SourceInfo | None = None
state: DmaState = DmaState.STARTED
data: np.ndarray | None = None
lock: threading.Lock = dataclasses.field(default_factory=threading.Lock)
@property
def data_size(self) -> int:
assert self.data is not None
return self.data.itemsize * self.data.size
@property
def detect_races(self) -> bool:
return self.dst_sem.detect_races
@property
def src_global_core_id(self) -> int:
return self.dst_sem.get_global_core_id(
self.src_device_id, self.src_local_core_id
)
@property
def dst_global_core_id(self) -> int:
return self.dst_sem.get_global_core_id(
self.dst_device_id, self.dst_local_core_id
)
def execute_read(self):
"""Executes the reading part of this DMA.
Note that the caller must not hold the lock on the shared memory (because
`get` is called in this method).
"""
# Must acquire the lock on `self` because:
# - `self.state` is inspected and modified in this method.
# - `self.data` is assigned in this method.
with self.lock:
if self.state != DmaState.STARTED:
return
if self.detect_races:
vc.inc_vector_clock(self.clock, self.virtual_device_id)
self.data = get(
self.src_device_id,
self.src_local_core_id,
self.src_memory_space,
self.src_buffer_id,
self.src_transforms,
clock=vc.copy_vector_clock(self.clock),
src_device_id=self.id,
src_local_core_id=0,
source_info=self.source_info,
)
if self.detect_races:
vc.inc_vector_clock(self.clock, self.virtual_device_id)
# Signal the send semaphore.
if self.src_sem is not None:
self.src_sem.signal(
self.data_size, self.src_global_core_id, clock=self.clock
)
self.state = DmaState.READ
def execute_write(self):
"""Executes the writing part of this DMA.
Note that the caller must not hold the lock on the shared memory (because
`store` is called in this method).
"""
# Must acquire the lock on `self` because:
# - `self.state` is inspected and modified in this method.
# - `self.data` is assigned in this method.
with self.lock:
assert self.state in (DmaState.READ, DmaState.COMPLETED)
if self.state == DmaState.COMPLETED:
return
assert self.data is not None
if self.detect_races:
vc.inc_vector_clock(self.clock, self.virtual_device_id)
store(
self.dst_device_id,
self.dst_local_core_id,
self.dst_memory_space,
self.dst_buffer_id,
self.dst_transforms,
self.data,
clock=vc.copy_vector_clock(self.clock),
src_device_id=self.id,
src_local_core_id=0,
source_info=self.source_info,
)
if self.detect_races:
vc.inc_vector_clock(self.clock, self.virtual_device_id)
self.dst_sem.signal(
self.data_size, self.dst_global_core_id, clock=self.clock
)
self.data = None
self.state = DmaState.COMPLETED
def execute_read_and_write(self):
"""Executes this DMA, bot the reading and writing parts.
Note that the caller must not hold the lock on the shared memory.
"""
self.execute_read()
self.execute_write()
def dma_start(
device_id,
src_local_core_id,
src_memory_space,
src_id,
src_transforms,
dst_memory_space,
dst_id,
dst_transforms,
dst_sem_id,
src_sem_id,
dst_device_id,
source_info=None,
):
shared_memory = _get_shared_memory()
device_id = int(device_id)
src_local_core_id = int(src_local_core_id)
src_global_core_id = shared_memory.get_global_core_id(
device_id, src_local_core_id
)
src_memory_space, src_id = int(src_memory_space), int(src_id)
src_transforms = jax.tree.map(int, src_transforms)
dst_memory_space, dst_id = int(dst_memory_space), int(dst_id)
dst_transforms = jax.tree.map(int, dst_transforms)
dst_sem_id = int(dst_sem_id)
src_sem_id = int(src_sem_id) if src_sem_id is not None else None
if dst_device_id is not None:
dst_device_id = int(dst_device_id)
else:
dst_device_id = device_id
dst_global_core_id = shared_memory.get_global_core_id(
dst_device_id, src_local_core_id # Same core on destination device as on source.
)
(src_sem, dst_sem), clock = shared_memory.get_semaphores_and_increment_clock(
(src_sem_id, dst_sem_id), src_global_core_id
)
assert dma_id_counter is not None
id = dma_id_counter.get_next()
dma = DMA(
id,
device_id,
src_local_core_id,
src_memory_space,
src_id,
src_transforms,
dst_device_id,
src_local_core_id, # Same core on destination device as on source.
dst_memory_space,
dst_id,
dst_transforms,
src_sem,
dst_sem,
virtual_device_id = shared_memory.get_random_virtual_device_id(),
clock=clock,
source_info=source_info,
)
if shared_memory.dma_execution_mode == 'on_wait':
if src_sem_id is None:
shared_memory.append_semaphore_task(
dst_sem_id, dst_global_core_id, dma.execute_read_and_write
)
else:
shared_memory.append_semaphore_task(
src_sem_id, src_global_core_id, dma.execute_read
)
shared_memory.append_semaphore_task(
dst_sem_id,
dst_global_core_id,
# This task for the waiting semaphore with ID `dst_sem_id` may be
# executed before the corresponding DMA task for the sending semaphore
# that does the DMA read. We therefore have to append a read-and-write
# task here, instead of just a write task. If the reading for the DMA
# has already been executed, the DMA's state will indicate this and
# the read-write-task appended here will do the write only.
# (Alternatively, we could have the DMA write task wait on the
# `send_semphore`. This issue with this approach is that we do not
# know the number of bytes transferred that `send_semaphore` should be
# waiting for until after the reader task is done.)
dma.execute_read_and_write,
)
return
assert shared_memory.dma_execution_mode == 'eager'
dma.execute_read_and_write()
def dma_wait(device_id, local_core_id, sem_id, size):
shared_memory = _get_shared_memory()
device_id = int(device_id)
local_core_id = int(local_core_id)
sem_id = int(sem_id)
size = int(size)
global_core_id = shared_memory.get_global_core_id(device_id, local_core_id)
(sem,), _ = shared_memory.get_semaphores_and_increment_clock(
{sem_id}, global_core_id
)
assert sem is not None
sem.wait(size, global_core_id, has_tasks=True)
def semaphore_signal(
device_id,
local_core_id,
sem_id,
inc,
target_device_id,
target_local_core_id,
):
shared_memory = _get_shared_memory()
device_id = int(device_id)
local_core_id = int(local_core_id)
sem_id = int(sem_id)
inc = int(inc)
src_global_core_id = shared_memory.get_global_core_id(
device_id, local_core_id
)
if target_device_id is None:
target_device_id = device_id
else:
target_device_id = int(target_device_id)
if target_local_core_id is None:
target_local_core_id = 0
(sem,), clock = shared_memory.get_semaphores_and_increment_clock(
{sem_id}, src_global_core_id
)
assert sem is not None
sem.signal(
inc,
shared_memory.get_global_core_id(target_device_id, target_local_core_id),
clock,
)
def semaphore_wait(device_id, local_core_id, sem_id, value):
    """Block the calling core until the semaphore has been signalled `value`."""
    mem = _get_shared_memory()
    core = mem.get_global_core_id(int(device_id), int(local_core_id))
    (sem,), _ = mem.get_semaphores_and_increment_clock({int(sem_id)}, core)
    assert sem is not None
    sem.wait(int(value), core)
def _compute_transformed_shape_and_dtype(shape, dtype, transforms):
for transform in transforms:
if transform is None:
continue
shape = transform.transform_shape(shape)
dtype = transform.transform_dtype(dtype)
return shape, dtype
def _device_coords_to_logical_id(device_coords, axis_sizes):
if not isinstance(device_coords, tuple):
device_coords = (device_coords,)
assert len(device_coords) == len(axis_sizes)
sizes = list(axis_sizes.values())
ret = 0
for i in range(len(device_coords)):
ret += device_coords[i] * math.prod(sizes[i+1:])
return ret
def _device_id_to_logical(device_id, device_id_type, axis_sizes):
if device_id is None:
return None
if device_id_type == primitives.DeviceIdType.MESH:
return _device_coords_to_logical_id(device_id, axis_sizes)
elif device_id_type == primitives.DeviceIdType.LOGICAL:
return device_id
else:
raise ValueError(f'Unsupported device ID type: {device_id_type}')
@lu.cache
def _to_jaxpr(flat_fun, in_avals):
    """Trace `flat_fun` at `in_avals` and return it closed over its constants."""
    traced, _, consts = pe.trace_to_jaxpr_dynamic(flat_fun, in_avals)
    return jax_core.ClosedJaxpr(traced, consts)
def _is_any(memory_space):
    """True if `memory_space` is either flavor of the ANY memory space."""
    return memory_space in (
        mosaic_core.MemorySpace.ANY,
        pallas_core.MemorySpace.ANY,
    )
def _is_float(dtype):
    """Return True if `dtype` is a floating-point dtype."""
    return jnp.issubdtype(dtype, jnp.floating)
_SENTINEL = jnp.inf
@dataclasses.dataclass(frozen=True)
| DMA |
python | modin-project__modin | modin/tests/pandas/dataframe/test_default.py | {
"start": 8979,
"end": 55725
} | class ____:
@pytest.mark.parametrize("method", ["pearson", "kendall", "spearman"])
@pytest.mark.parametrize("backend", [None, "pyarrow"])
def test_corr(self, method, backend):
eval_general(
*create_test_dfs(test_data["int_data"], backend=backend),
lambda df: df.corr(method=method),
)
# Modin result may slightly differ from pandas result
# due to floating pointing arithmetic.
eval_general(
*create_test_dfs(test_data["float_nan_data"], backend=backend),
lambda df: df.corr(method=method),
comparator=modin_df_almost_equals_pandas,
)
@pytest.mark.parametrize("min_periods", [1, 3, 5, 6])
def test_corr_min_periods(self, min_periods):
# only 3 valid values (a valid value is considered a row with no NaNs)
eval_general(
*create_test_dfs({"a": [1, 2, 3], "b": [3, 1, 5]}),
lambda df: df.corr(min_periods=min_periods),
)
# only 5 valid values (a valid value is considered a row with no NaNs)
eval_general(
*create_test_dfs(
{"a": [1, 2, 3, 4, 5, np.nan], "b": [1, 2, 1, 4, 5, np.nan]}
),
lambda df: df.corr(min_periods=min_periods),
)
# only 4 valid values (a valid value is considered a row with no NaNs)
eval_general(
*create_test_dfs(
{"a": [1, np.nan, 3, 4, 5, 6], "b": [1, 2, 1, 4, 5, np.nan]}
),
lambda df: df.corr(min_periods=min_periods),
)
if StorageFormat.get() == "Pandas":
# only 4 valid values located in different partitions (a valid value is considered a row with no NaNs)
modin_df, pandas_df = create_test_dfs(
{"a": [1, np.nan, 3, 4, 5, 6], "b": [1, 2, 1, 4, 5, np.nan]}
)
modin_df = pd.concat([modin_df.iloc[:3], modin_df.iloc[3:]])
assert modin_df._query_compiler._modin_frame._partitions.shape == (2, 1)
eval_general(
modin_df, pandas_df, lambda df: df.corr(min_periods=min_periods)
)
@pytest.mark.parametrize("numeric_only", [True, False])
def test_corr_non_numeric(self, numeric_only):
if not numeric_only:
pytest.xfail(reason="https://github.com/modin-project/modin/issues/7023")
eval_general(
*create_test_dfs({"a": [1, 2, 3], "b": [3, 2, 5], "c": ["a", "b", "c"]}),
lambda df: df.corr(numeric_only=numeric_only),
)
@pytest.mark.skipif(
    StorageFormat.get() != "Pandas",
    reason="doesn't make sense for non-partitioned executions",
)
def test_corr_nans_in_different_partitions(self):
    """corr must be NaN-correct no matter which row partition holds the NaNs."""
    # NaN in the first partition
    modin_df, pandas_df = create_test_dfs(
        {"a": [np.nan, 2, 3, 4, 5, 6], "b": [3, 4, 2, 0, 7, 8]}
    )
    modin_df = pd.concat([modin_df.iloc[:2], modin_df.iloc[2:4], modin_df.iloc[4:]])
    assert modin_df._query_compiler._modin_frame._partitions.shape == (3, 1)
    eval_general(modin_df, pandas_df, lambda df: df.corr())
    # NaN in the last partition
    modin_df, pandas_df = create_test_dfs(
        {"a": [1, 2, 3, 4, 5, np.nan], "b": [3, 4, 2, 0, 7, 8]}
    )
    modin_df = pd.concat([modin_df.iloc[:2], modin_df.iloc[2:4], modin_df.iloc[4:]])
    assert modin_df._query_compiler._modin_frame._partitions.shape == (3, 1)
    eval_general(modin_df, pandas_df, lambda df: df.corr())
    # NaN in two partitions
    modin_df, pandas_df = create_test_dfs(
        {"a": [np.nan, 2, 3, 4, 5, 6], "b": [3, 4, 2, 0, 7, np.nan]}
    )
    modin_df = pd.concat([modin_df.iloc[:2], modin_df.iloc[2:4], modin_df.iloc[4:]])
    assert modin_df._query_compiler._modin_frame._partitions.shape == (3, 1)
    eval_general(modin_df, pandas_df, lambda df: df.corr())
    # NaN in all partitions
    modin_df, pandas_df = create_test_dfs(
        {"a": [np.nan, 2, 3, np.nan, 5, 6], "b": [3, 4, 2, 0, 7, np.nan]}
    )
    modin_df = pd.concat([modin_df.iloc[:2], modin_df.iloc[2:4], modin_df.iloc[4:]])
    assert modin_df._query_compiler._modin_frame._partitions.shape == (3, 1)
    eval_general(modin_df, pandas_df, lambda df: df.corr())
@pytest.mark.parametrize("min_periods", [1, 3, 5], ids=lambda x: f"min_periods={x}")
@pytest.mark.parametrize("ddof", [1, 2, 4], ids=lambda x: f"ddof={x}")
@pytest.mark.parametrize("backend", [None, "pyarrow"])
def test_cov(min_periods, ddof, backend):
eval_general(
*create_test_dfs(test_data["int_data"], backend=backend),
lambda df: df.cov(min_periods=min_periods, ddof=ddof),
comparator=df_equals,
)
# Modin result may slightly differ from pandas result
# due to floating pointing arithmetic. That's why we use `modin_df_almost_equals_pandas`.
eval_general(
*create_test_dfs(test_data["float_nan_data"], backend=backend),
lambda df: df.cov(min_periods=min_periods),
comparator=modin_df_almost_equals_pandas,
)
@pytest.mark.parametrize("numeric_only", [True, False])
def test_cov_numeric_only(numeric_only):
if not numeric_only:
pytest.xfail(reason="https://github.com/modin-project/modin/issues/7023")
eval_general(
*create_test_dfs({"a": [1, 2, 3], "b": [3, 2, 5], "c": ["a", "b", "c"]}),
lambda df: df.cov(numeric_only=numeric_only),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=pandas_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test dataframe input
modin_result = modin_df.dot(modin_df.T)
pandas_result = pandas_df.dot(pandas_df.T)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_df.dot(pd.Series(np.arange(col_len)))
# Test case when left dataframe has size (n x 1)
# and right dataframe has size (1 x n)
modin_df = pd.DataFrame(modin_series)
pandas_df = pandas.DataFrame(pandas_series)
modin_result = modin_df.dot(modin_df.T)
pandas_result = pandas_df.dot(pandas_df.T)
df_equals(modin_result, pandas_result)
# Test case when left dataframe has size (1 x 1)
# and right dataframe has size (1 x n)
modin_result = pd.DataFrame([1]).dot(modin_df.T)
pandas_result = pandas.DataFrame([1]).dot(pandas_df.T)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_matmul(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df @ arr
pandas_result = pandas_df @ arr
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_df @ np.arange(col_len + 10)
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=pandas_df.columns)
modin_result = modin_df @ modin_series
pandas_result = pandas_df @ pandas_series
df_equals(modin_result, pandas_result)
# Test dataframe input
modin_result = modin_df @ modin_df.T
pandas_result = pandas_df @ pandas_df.T
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_df @ pd.Series(np.arange(col_len))
def test_first():
    """DataFrame.first is deprecated but must still match pandas (with warning)."""
    idx = pd.date_range("2010-04-09", periods=400, freq="2D")
    data = {"A": list(range(400)), "B": list(range(400))}
    modin_df = pd.DataFrame(data, index=idx)
    pandas_df = pandas.DataFrame(data, index=idx)
    with pytest.warns(FutureWarning, match="first is deprecated and will be removed"):
        modin_result = modin_df.first("3D")
    df_equals(modin_result, pandas_df.first("3D"))
    df_equals(modin_df.first("20D"), pandas_df.first("20D"))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_info_default_param(data):
with io.StringIO() as first, io.StringIO() as second:
eval_general(
pd.DataFrame(data),
pandas.DataFrame(data),
verbose=None,
max_cols=None,
memory_usage=None,
operation=lambda df, **kwargs: df.info(**kwargs),
buf=lambda df: second if isinstance(df, pandas.DataFrame) else first,
)
modin_info = first.getvalue().splitlines()
pandas_info = second.getvalue().splitlines()
assert modin_info[0] == str(pd.DataFrame)
assert pandas_info[0] == str(pandas.DataFrame)
assert modin_info[1:] == pandas_info[1:]
# randint data covers https://github.com/modin-project/modin/issues/5137
@pytest.mark.parametrize(
    "data", [test_data_values[0], np.random.randint(0, 100, (10, 10))]
)
@pytest.mark.parametrize("verbose", [True, False])
@pytest.mark.parametrize("max_cols", [10, 99999999])
@pytest.mark.parametrize("memory_usage", [True, False, "deep"])
@pytest.mark.parametrize("show_counts", [True, False])
def test_info(data, verbose, max_cols, memory_usage, show_counts):
    """info() parity across its display options, ignoring the class header line."""
    with io.StringIO() as first, io.StringIO() as second:
        eval_general(
            pd.DataFrame(data),
            pandas.DataFrame(data),
            operation=lambda df, **kwargs: df.info(**kwargs),
            verbose=verbose,
            max_cols=max_cols,
            memory_usage=memory_usage,
            show_counts=show_counts,
            buf=lambda df: second if isinstance(df, pandas.DataFrame) else first,
        )
        modin_info = first.getvalue().splitlines()
        pandas_info = second.getvalue().splitlines()
        # The first line prints the concrete class name, which legitimately differs.
        assert modin_info[0] == str(pd.DataFrame)
        assert pandas_info[0] == str(pandas.DataFrame)
        assert modin_info[1:] == pandas_info[1:]
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("skipna", [False, True])
@pytest.mark.parametrize("numeric_only", [False, True])
@pytest.mark.parametrize("method", ["kurtosis", "kurt"])
def test_kurt_kurtosis(axis, skipna, numeric_only, method):
data = test_data["float_nan_data"]
eval_general(
*create_test_dfs(data),
lambda df: getattr(df, method)(
axis=axis, skipna=skipna, numeric_only=numeric_only
),
)
def test_last():
    """DataFrame.last is deprecated but must still match pandas (with warning)."""
    modin_index = pd.date_range("2010-04-09", periods=400, freq="2D")
    pandas_index = pandas.date_range("2010-04-09", periods=400, freq="2D")
    modin_df = pd.DataFrame(
        {"A": list(range(400)), "B": list(range(400))}, index=modin_index
    )
    pandas_df = pandas.DataFrame(
        {"A": list(range(400)), "B": list(range(400))}, index=pandas_index
    )
    with pytest.warns(FutureWarning, match="last is deprecated and will be removed"):
        modin_result = modin_df.last("3D")
    df_equals(modin_result, pandas_df.last("3D"))
    df_equals(modin_df.last("20D"), pandas_df.last("20D"))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"id_vars", [lambda df: df.columns[0], lambda df: df.columns[:4], None]
)
@pytest.mark.parametrize(
"value_vars", [lambda df: df.columns[-1], lambda df: df.columns[-4:], None]
)
def test_melt(data, id_vars, value_vars):
def melt(df, *args, **kwargs):
return df.melt(*args, **kwargs).sort_values(["variable", "value"])
eval_general(
*create_test_dfs(data),
lambda df, *args, **kwargs: melt(df, *args, **kwargs).reset_index(drop=True),
id_vars=id_vars,
value_vars=value_vars,
)
# Functional test for BUG:7206
def test_melt_duplicate_col_names():
    """melt must work when column labels are duplicated."""

    def normalized_melt(df, *args, **kwargs):
        melted = df.melt(*args, **kwargs).sort_values(["variable", "value"])
        return melted.reset_index(drop=True)

    eval_general(
        *create_test_dfs(data=[[1, 2], [3, 4]], columns=["dupe", "dupe"]),
        normalized_melt,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index",
[lambda df: df.columns[0], lambda df: df.columns[:2], lib.no_default],
ids=["one_column_index", "several_columns_index", "default"],
)
@pytest.mark.parametrize(
"columns", [lambda df: df.columns[len(df.columns) // 2]], ids=["one_column"]
)
@pytest.mark.parametrize(
"values",
[lambda df: df.columns[-1], lambda df: df.columns[-2:], lib.no_default],
ids=["one_column_values", "several_columns_values", "default"],
)
def test_pivot(data, index, columns, values, request):
current_execution = get_current_execution()
if (
"one_column_values-one_column-default-float_nan_data"
in request.node.callspec.id
or "default-one_column-several_columns_index" in request.node.callspec.id
or "default-one_column-one_column_index" in request.node.callspec.id
or (
(current_execution == "BaseOnPython" or current_execution_is_native())
and index is lib.no_default
)
):
pytest.xfail(reason="https://github.com/modin-project/modin/issues/7010")
expected_exception = None
if index is not lib.no_default:
expected_exception = ValueError(
"Index contains duplicate entries, cannot reshape"
)
eval_general(
*create_test_dfs(data),
lambda df, *args, **kwargs: df.pivot(*args, **kwargs),
index=index,
columns=columns,
values=values,
expected_exception=expected_exception,
)
@pytest.mark.parametrize("data", [test_data["int_data"]], ids=["int_data"])
@pytest.mark.parametrize(
"index",
[
pytest.param(lambda df: df.columns[0], id="single_index_col"),
pytest.param(
lambda df: [*df.columns[0:2], *df.columns[-7:-4]], id="multiple_index_cols"
),
pytest.param(None, id="default_index"),
],
)
@pytest.mark.parametrize(
"columns",
[
pytest.param(lambda df: df.columns[len(df.columns) // 2], id="single_col"),
pytest.param(
lambda df: [
*df.columns[(len(df.columns) // 2) : (len(df.columns) // 2 + 4)],
df.columns[-7],
],
id="multiple_cols",
),
pytest.param(None, id="default_columns"),
],
)
@pytest.mark.parametrize(
"values",
[
pytest.param(lambda df: df.columns[-1], id="single_value_col"),
pytest.param(lambda df: df.columns[-4:-1], id="multiple_value_cols"),
pytest.param(None, id="default_values"),
],
)
@pytest.mark.parametrize(
"aggfunc",
[
pytest.param(np.mean, id="callable_tree_reduce_func"),
pytest.param("mean", id="tree_reduce_func"),
pytest.param("nunique", id="full_axis_func"),
],
)
def test_pivot_table_data(data, index, columns, values, aggfunc, request):
if (
"callable_tree_reduce_func-single_value_col-multiple_cols-multiple_index_cols"
in request.node.callspec.id
or "callable_tree_reduce_func-multiple_value_cols-multiple_cols-multiple_index_cols"
in request.node.callspec.id
or "tree_reduce_func-single_value_col-multiple_cols-multiple_index_cols"
in request.node.callspec.id
or "tree_reduce_func-multiple_value_cols-multiple_cols-multiple_index_cols"
in request.node.callspec.id
or "full_axis_func-single_value_col-multiple_cols-multiple_index_cols"
in request.node.callspec.id
or "full_axis_func-multiple_value_cols-multiple_cols-multiple_index_cols"
in request.node.callspec.id
):
pytest.xfail(reason="https://github.com/modin-project/modin/issues/7011")
md_df, pd_df = create_test_dfs(data)
# when values is None the output will be huge-dimensional,
# so reducing dimension of testing data at that case
if values is None:
md_df, pd_df = md_df.iloc[:42, :42], pd_df.iloc[:42, :42]
expected_exception = None
if "default_columns-default_index" in request.node.callspec.id:
expected_exception = ValueError("No group keys passed!")
elif (
"callable_tree_reduce_func" in request.node.callspec.id
and "int_data" in request.node.callspec.id
):
expected_exception = TypeError("'numpy.float64' object is not callable")
eval_general(
md_df,
pd_df,
operation=lambda df, *args, **kwargs: df.pivot_table(
*args, **kwargs
).sort_index(axis=int(index is not None)),
index=index,
columns=columns,
values=values,
aggfunc=aggfunc,
expected_exception=expected_exception,
)
@pytest.mark.parametrize("data", [test_data["int_data"]], ids=["int_data"])
@pytest.mark.parametrize(
"index",
[
pytest.param([], id="no_index_cols"),
pytest.param(lambda df: df.columns[0], id="single_index_column"),
pytest.param(
lambda df: [df.columns[0], df.columns[len(df.columns) // 2 - 1]],
id="multiple_index_cols",
),
],
)
@pytest.mark.parametrize(
"columns",
[
pytest.param(lambda df: df.columns[len(df.columns) // 2], id="single_column"),
pytest.param(
lambda df: [
*df.columns[(len(df.columns) // 2) : (len(df.columns) // 2 + 4)],
df.columns[-7],
],
id="multiple_cols",
),
],
)
@pytest.mark.parametrize(
"values",
[
pytest.param(lambda df: df.columns[-1], id="single_value"),
pytest.param(lambda df: df.columns[-4:-1], id="multiple_values"),
],
)
@pytest.mark.parametrize(
"aggfunc",
[
pytest.param(["mean", "sum"], id="list_func"),
pytest.param(
lambda df: {df.columns[5]: "mean", df.columns[-5]: "sum"}, id="dict_func"
),
],
)
@pytest.mark.parametrize(
"margins_name",
[pytest.param("Custom name", id="str_name")],
)
@pytest.mark.parametrize("fill_value", [None, 0])
@pytest.mark.parametrize("backend", [None, "pyarrow"])
def test_pivot_table_margins(
data,
index,
columns,
values,
aggfunc,
margins_name,
fill_value,
backend,
request,
):
expected_exception = None
if "dict_func" in request.node.callspec.id:
expected_exception = KeyError("Column(s) ['col28', 'col38'] do not exist")
eval_general(
*create_test_dfs(data, backend=backend),
operation=lambda df, *args, **kwargs: df.pivot_table(*args, **kwargs),
index=index,
columns=columns,
values=values,
aggfunc=aggfunc,
margins=True,
margins_name=margins_name,
fill_value=fill_value,
expected_exception=expected_exception,
)
@pytest.mark.parametrize(
    "aggfunc",
    [
        pytest.param("sum", id="MapReduce_func"),
        pytest.param("nunique", id="FullAxis_func"),
    ],
)
@pytest.mark.parametrize("margins", [True, False])
def test_pivot_table_fill_value(aggfunc, margins):
    """pivot_table(fill_value=10) parity for both aggfunc execution paths."""
    md_df, pd_df = create_test_dfs(test_data["int_data"])
    eval_general(
        md_df,
        pd_df,
        operation=lambda df, *args, **kwargs: df.pivot_table(*args, **kwargs),
        index=md_df.columns[0],
        columns=md_df.columns[1],
        values=md_df.columns[2],
        aggfunc=aggfunc,
        margins=margins,
        fill_value=10,
    )
@pytest.mark.parametrize("data", [test_data["int_data"]], ids=["int_data"])
def test_pivot_table_dropna(data):
eval_general(
*create_test_dfs(data),
operation=lambda df, *args, **kwargs: df.pivot_table(*args, **kwargs),
index=lambda df: df.columns[0],
columns=lambda df: df.columns[1],
values=lambda df: df.columns[-1],
dropna=False,
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
# We have to test this way because equality in plots means same object.
zipped_plot_lines = zip(modin_df.plot().lines, pandas_df.plot().lines)
for left, right in zipped_plot_lines:
if isinstance(left.get_xdata(), np.ma.core.MaskedArray) and isinstance(
right.get_xdata(), np.ma.core.MaskedArray
):
assert all((left.get_xdata() == right.get_xdata()).data)
else:
assert np.array_equal(left.get_xdata(), right.get_xdata())
if isinstance(left.get_ydata(), np.ma.core.MaskedArray) and isinstance(
right.get_ydata(), np.ma.core.MaskedArray
):
assert all((left.get_ydata() == right.get_ydata()).data)
else:
assert np.array_equal(left.get_xdata(), right.get_xdata())
def test_replace():
    """replace parity: dict-of-scalars, nested dict, regex dict/list, inplace."""
    modin_df = pd.DataFrame(
        {"A": [0, 1, 2, 3, 4], "B": [5, 6, 7, 8, 9], "C": ["a", "b", "c", "d", "e"]}
    )
    pandas_df = pandas.DataFrame(
        {"A": [0, 1, 2, 3, 4], "B": [5, 6, 7, 8, 9], "C": ["a", "b", "c", "d", "e"]}
    )
    modin_result = modin_df.replace({"A": 0, "B": 5}, 100)
    pandas_result = pandas_df.replace({"A": 0, "B": 5}, 100)
    df_equals(modin_result, pandas_result)
    modin_result = modin_df.replace({"A": {0: 100, 4: 400}})
    pandas_result = pandas_df.replace({"A": {0: 100, 4: 400}})
    df_equals(modin_result, pandas_result)
    modin_df = pd.DataFrame({"A": ["bat", "foo", "bait"], "B": ["abc", "bar", "xyz"]})
    pandas_df = pandas.DataFrame(
        {"A": ["bat", "foo", "bait"], "B": ["abc", "bar", "xyz"]}
    )
    modin_result = modin_df.replace(regex={r"^ba.$": "new", "foo": "xyz"})
    pandas_result = pandas_df.replace(regex={r"^ba.$": "new", "foo": "xyz"})
    df_equals(modin_result, pandas_result)
    modin_result = modin_df.replace(regex=[r"^ba.$", "foo"], value="new")
    pandas_result = pandas_df.replace(regex=[r"^ba.$", "foo"], value="new")
    df_equals(modin_result, pandas_result)
    modin_df.replace(regex=[r"^ba.$", "foo"], value="new", inplace=True)
    pandas_df.replace(regex=[r"^ba.$", "foo"], value="new", inplace=True)
    df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("rule", ["5min", pandas.offsets.Hour()])
@pytest.mark.parametrize("axis", [0])
def test_resampler(rule, axis):
data, index = (
test_data_resample["data"],
test_data_resample["index"],
)
modin_resampler = pd.DataFrame(data, index=index).resample(rule, axis=axis)
pandas_resampler = pandas.DataFrame(data, index=index).resample(rule, axis=axis)
assert pandas_resampler.indices == modin_resampler.indices
assert pandas_resampler.groups == modin_resampler.groups
df_equals(
modin_resampler.get_group(name=list(modin_resampler.groups)[0]),
pandas_resampler.get_group(name=list(pandas_resampler.groups)[0]),
)
@pytest.mark.parametrize("rule", ["5min"])
@pytest.mark.parametrize("axis", ["index", "columns"])
@pytest.mark.parametrize(
"method",
[
*("count", "sum", "std", "sem", "size", "prod", "ohlc", "quantile"),
*("min", "median", "mean", "max", "last", "first", "nunique", "var"),
*("interpolate", "asfreq", "nearest", "bfill", "ffill"),
],
)
def test_resampler_functions(rule, axis, method):
data, index = (
test_data_resample["data"],
test_data_resample["index"],
)
modin_df = pd.DataFrame(data, index=index)
pandas_df = pandas.DataFrame(data, index=index)
if axis == "columns":
columns = pandas.date_range(
"31/12/2000", periods=len(pandas_df.columns), freq="min"
)
modin_df.columns = columns
pandas_df.columns = columns
expected_exception = None
if method in ("interpolate", "asfreq", "nearest", "bfill", "ffill"):
# It looks like pandas is preparing to completely
# remove `axis` parameter for `resample` function.
expected_exception = AssertionError("axis must be 0")
eval_general(
modin_df,
pandas_df,
lambda df: getattr(df.resample(rule, axis=axis), method)(),
expected_exception=expected_exception,
)
@pytest.mark.parametrize("rule", ["5min"])
@pytest.mark.parametrize("axis", ["index", "columns"])
@pytest.mark.parametrize(
"method_arg",
[
("pipe", lambda x: x.max() - x.min()),
("transform", lambda x: (x - x.mean()) / x.std()),
("apply", ["sum", "mean", "max"]),
("aggregate", ["sum", "mean", "max"]),
],
)
def test_resampler_functions_with_arg(rule, axis, method_arg):
data, index = (
test_data_resample["data"],
test_data_resample["index"],
)
modin_df = pd.DataFrame(data, index=index)
pandas_df = pandas.DataFrame(data, index=index)
if axis == "columns":
columns = pandas.date_range(
"31/12/2000", periods=len(pandas_df.columns), freq="min"
)
modin_df.columns = columns
pandas_df.columns = columns
method, arg = method_arg[0], method_arg[1]
expected_exception = None
if method in ("apply", "aggregate"):
expected_exception = NotImplementedError("axis other than 0 is not supported")
eval_general(
modin_df,
pandas_df,
lambda df: getattr(df.resample(rule, axis=axis), method)(arg),
expected_exception=expected_exception,
)
@pytest.mark.parametrize("rule", ["5min"])
@pytest.mark.parametrize("closed", ["left", "right"])
@pytest.mark.parametrize("label", ["right", "left"])
@pytest.mark.parametrize(
"on",
[
None,
pytest.param(
"DateColumn",
marks=pytest.mark.xfail(
condition=Engine.get() in ("Ray", "Unidist", "Dask", "Python")
and StorageFormat.get() != "Base",
reason="https://github.com/modin-project/modin/issues/6399",
),
),
],
)
@pytest.mark.parametrize("level", [None, 1])
def test_resample_specific(rule, closed, label, on, level):
data, index = (
test_data_resample["data"],
test_data_resample["index"],
)
modin_df = pd.DataFrame(data, index=index)
pandas_df = pandas.DataFrame(data, index=index)
if on is None and level is not None:
index = pandas.MultiIndex.from_product(
[
["a", "b", "c", "d"],
pandas.date_range("31/12/2000", periods=len(pandas_df) // 4, freq="h"),
]
)
pandas_df.index = index
modin_df.index = index
else:
level = None
if on is not None:
pandas_df[on] = pandas.date_range(
"22/06/1941", periods=len(pandas_df), freq="min"
)
modin_df[on] = pandas.date_range(
"22/06/1941", periods=len(modin_df), freq="min"
)
pandas_resampler = pandas_df.resample(
rule,
closed=closed,
label=label,
on=on,
level=level,
)
modin_resampler = modin_df.resample(
rule,
closed=closed,
label=label,
on=on,
level=level,
)
df_equals(modin_resampler.var(0), pandas_resampler.var(0))
if on is None and level is None:
df_equals(
modin_resampler.fillna(method="nearest"),
pandas_resampler.fillna(method="nearest"),
)
@pytest.mark.parametrize(
    "columns",
    [
        "volume",
        "date",
        ["volume"],
        ("volume",),
        pandas.Series(["volume"]),
        pandas.Index(["volume"]),
        ["volume", "volume", "volume"],
        ["volume", "price", "date"],
    ],
    ids=[
        "column",
        "only_missed_column",
        "list",
        "tuple",
        "series",
        "index",
        "duplicate_column",
        "missed_column",
    ],
)
def test_resample_getitem(columns, request):
    """Column selection on a resampler (scalar/list/tuple/Series/Index),
    including the missing-column error messages."""
    index = pandas.date_range("1/1/2013", periods=9, freq="min")
    data = {
        "price": range(9),
        "volume": range(10, 19),
    }
    expected_exception = None
    if "only_missed_column" in request.node.callspec.id:
        expected_exception = KeyError("Column not found: date")
    elif "missed_column" in request.node.callspec.id:
        expected_exception = KeyError("Columns not found: 'date'")
    eval_general(
        *create_test_dfs(data, index=index),
        lambda df: df.resample("3min")[columns].mean(),
        expected_exception=expected_exception,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("index", ["default", "ndarray", "has_duplicates"])
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("periods", [0, 1, -1, 10, -10, 1000000000, -1000000000])
def test_shift(data, index, axis, periods):
modin_df, pandas_df = create_test_dfs(data)
if index == "ndarray":
data_column_length = len(data[next(iter(data))])
modin_df.index = pandas_df.index = np.arange(2, data_column_length + 2)
elif index == "has_duplicates":
modin_df.index = pandas_df.index = list(modin_df.index[:-3]) + [0, 1, 2]
df_equals(
modin_df.shift(periods=periods, axis=axis),
pandas_df.shift(periods=periods, axis=axis),
)
df_equals(
modin_df.shift(periods=periods, axis=axis, fill_value=777),
pandas_df.shift(periods=periods, axis=axis, fill_value=777),
)
@pytest.mark.parametrize("is_multi_idx", [True, False], ids=["idx_multi", "idx_index"])
@pytest.mark.parametrize("is_multi_col", [True, False], ids=["col_multi", "col_index"])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_stack(data, is_multi_idx, is_multi_col):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
if is_multi_idx:
if len(pandas_df.index) == 256:
index = pd.MultiIndex.from_product(
[
["a", "b", "c", "d"],
["x", "y", "z", "last"],
["i", "j", "k", "index"],
[1, 2, 3, 4],
]
)
elif len(pandas_df.index) == 100:
index = pd.MultiIndex.from_product(
[
["x", "y", "z", "last"],
["a", "b", "c", "d", "f"],
["i", "j", "k", "l", "index"],
]
)
else:
index = pd.MultiIndex.from_tuples(
[(i, i * 2, i * 3) for i in range(len(pandas_df.index))]
)
else:
index = pandas_df.index
if is_multi_col:
if len(pandas_df.columns) == 64:
columns = pd.MultiIndex.from_product(
[["A", "B", "C", "D"], ["xx", "yy", "zz", "LAST"], [10, 20, 30, 40]]
)
elif len(pandas_df.columns) == 100:
columns = pd.MultiIndex.from_product(
[
["xx", "yy", "zz", "LAST"],
["A", "B", "C", "D", "F"],
["I", "J", "K", "L", "INDEX"],
]
)
else:
columns = pd.MultiIndex.from_tuples(
[(i, i * 2, i * 3) for i in range(len(pandas_df.columns))]
)
else:
columns = pandas_df.columns
pandas_df.columns = columns
pandas_df.index = index
modin_df.columns = columns
modin_df.index = index
df_equals(modin_df.stack(), pandas_df.stack())
if is_multi_col:
df_equals(modin_df.stack(level=0), pandas_df.stack(level=0))
df_equals(modin_df.stack(level=[0, 1]), pandas_df.stack(level=[0, 1]))
df_equals(modin_df.stack(level=[0, 1, 2]), pandas_df.stack(level=[0, 1, 2]))
@pytest.mark.parametrize("sort", [True, False])
def test_stack_sort(sort):
# Example frame slightly modified from pandas docs to be unsorted
cols = pd.MultiIndex.from_tuples([("weight", "pounds"), ("weight", "kg")])
modin_df, pandas_df = create_test_dfs(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=cols
)
df_equals(modin_df.stack(sort=sort), pandas_df.stack(sort=sort))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis1", [0, 1])
@pytest.mark.parametrize("axis2", [0, 1])
def test_swapaxes(data, axis1, axis2):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.swapaxes(axis1, axis2)
modin_result = modin_df.swapaxes(axis1, axis2)
df_equals(modin_result, pandas_result)
def test_swapaxes_axes_names():
    """Integer and string axis labels must be interchangeable in swapaxes."""
    frame = pd.DataFrame(test_data_values[0])
    df_equals(frame.swapaxes(0, 1), frame.swapaxes("columns", "index"))
def test_swaplevel():
    """swaplevel parity by level name, by default, and by position."""
    data = np.random.randint(1, 100, 12)
    modin_df = pd.DataFrame(
        data,
        index=pd.MultiIndex.from_tuples(
            [
                (num, letter, color)
                for num in range(1, 3)
                for letter in ["a", "b", "c"]
                for color in ["Red", "Green"]
            ],
            names=["Number", "Letter", "Color"],
        ),
    )
    pandas_df = pandas.DataFrame(
        data,
        index=pandas.MultiIndex.from_tuples(
            [
                (num, letter, color)
                for num in range(1, 3)
                for letter in ["a", "b", "c"]
                for color in ["Red", "Green"]
            ],
            names=["Number", "Letter", "Color"],
        ),
    )
    df_equals(
        modin_df.swaplevel("Number", "Color"),
        pandas_df.swaplevel("Number", "Color"),
    )
    df_equals(modin_df.swaplevel(), pandas_df.swaplevel())
    df_equals(modin_df.swaplevel(0, 1), pandas_df.swaplevel(0, 1))
def test_take():
    """take on rows and on columns (axis=1) must match pandas."""
    rows = [
        ("falcon", "bird", 389.0),
        ("parrot", "bird", 24.0),
        ("lion", "mammal", 80.5),
        ("monkey", "mammal", np.nan),
    ]
    labels = ["name", "class", "max_speed"]
    positions = [0, 2, 3, 1]
    modin_df = pd.DataFrame(rows, columns=labels, index=positions)
    pandas_df = pandas.DataFrame(rows, columns=labels, index=positions)
    df_equals(modin_df.take([0, 3]), pandas_df.take([0, 3]))
    df_equals(modin_df.take([2], axis=1), pandas_df.take([2], axis=1))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_records(data):
# `to_records` doesn't work when `index` is among column names
eval_general(
*create_test_dfs(data),
lambda df: (
df.dropna().drop("index", axis=1) if "index" in df.columns else df.dropna()
).to_records(),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_string(data):
eval_general(
*create_test_dfs(data),
lambda df: df.to_string(),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truncate(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
before = 1
after = len(modin_df - 3)
df_equals(modin_df.truncate(before, after), pandas_df.truncate(before, after))
before = 1
after = 3
df_equals(modin_df.truncate(before, after), pandas_df.truncate(before, after))
before = modin_df.columns[1]
after = modin_df.columns[-3]
try:
pandas_result = pandas_df.truncate(before, after, axis=1)
except Exception as err:
with pytest.raises(type(err)):
modin_df.truncate(before, after, axis=1)
else:
modin_result = modin_df.truncate(before, after, axis=1)
df_equals(modin_result, pandas_result)
before = modin_df.columns[1]
after = modin_df.columns[3]
try:
pandas_result = pandas_df.truncate(before, after, axis=1)
except Exception as err:
with pytest.raises(type(err)):
modin_df.truncate(before, after, axis=1)
else:
modin_result = modin_df.truncate(before, after, axis=1)
df_equals(modin_result, pandas_result)
before = None
after = None
df_equals(modin_df.truncate(before, after), pandas_df.truncate(before, after))
try:
pandas_result = pandas_df.truncate(before, after, axis=1)
except Exception as err:
with pytest.raises(type(err)):
modin_df.truncate(before, after, axis=1)
else:
modin_result = modin_df.truncate(before, after, axis=1)
df_equals(modin_result, pandas_result)
def test_truncate_before_greater_than_after():
df = pd.DataFrame([[1, 2, 3]])
with pytest.raises(ValueError, match="Truncate: 1 must be after 2"):
df.truncate(before=2, after=1)
def test_tz_convert():
modin_idx = pd.date_range(
"1/1/2012", periods=500, freq="2D", tz="America/Los_Angeles"
)
pandas_idx = pandas.date_range(
"1/1/2012", periods=500, freq="2D", tz="America/Los_Angeles"
)
data = np.random.randint(0, 100, size=(len(modin_idx), 4))
modin_df = pd.DataFrame(data, index=modin_idx)
pandas_df = pandas.DataFrame(data, index=pandas_idx)
modin_result = modin_df.tz_convert("UTC", axis=0)
pandas_result = pandas_df.tz_convert("UTC", axis=0)
df_equals(modin_result, pandas_result)
modin_multi = pd.MultiIndex.from_arrays([modin_idx, range(len(modin_idx))])
pandas_multi = pandas.MultiIndex.from_arrays([pandas_idx, range(len(modin_idx))])
modin_series = pd.DataFrame(data, index=modin_multi)
pandas_series = pandas.DataFrame(data, index=pandas_multi)
df_equals(
modin_series.tz_convert("UTC", axis=0, level=0),
pandas_series.tz_convert("UTC", axis=0, level=0),
)
def test_tz_localize():
idx = pd.date_range("1/1/2012", periods=400, freq="2D")
data = np.random.randint(0, 100, size=(len(idx), 4))
modin_df = pd.DataFrame(data, index=idx)
pandas_df = pandas.DataFrame(data, index=idx)
df_equals(modin_df.tz_localize("UTC", axis=0), pandas_df.tz_localize("UTC", axis=0))
df_equals(
modin_df.tz_localize("America/Los_Angeles", axis=0),
pandas_df.tz_localize("America/Los_Angeles", axis=0),
)
@pytest.mark.parametrize("is_multi_idx", [True, False], ids=["idx_multi", "idx_index"])
@pytest.mark.parametrize("is_multi_col", [True, False], ids=["col_multi", "col_index"])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_unstack(data, is_multi_idx, is_multi_col):
modin_df, pandas_df = create_test_dfs(data)
if is_multi_idx:
index = generate_multiindex(len(pandas_df), nlevels=4, is_tree_like=True)
else:
index = pandas_df.index
if is_multi_col:
columns = generate_multiindex(
len(pandas_df.columns), nlevels=3, is_tree_like=True
)
else:
columns = pandas_df.columns
pandas_df.columns = modin_df.columns = columns
pandas_df.index = modin_df.index = index
df_equals(modin_df.unstack(), pandas_df.unstack())
df_equals(modin_df.unstack(level=1), pandas_df.unstack(level=1))
if is_multi_idx:
df_equals(modin_df.unstack(level=[0, 1]), pandas_df.unstack(level=[0, 1]))
df_equals(modin_df.unstack(level=[0, 1, 2]), pandas_df.unstack(level=[0, 1, 2]))
df_equals(
modin_df.unstack(level=[0, 1, 2, 3]), pandas_df.unstack(level=[0, 1, 2, 3])
)
@pytest.mark.parametrize(
"multi_col", ["col_multi_tree", "col_multi_not_tree", "col_index"]
)
@pytest.mark.parametrize(
"multi_idx", ["idx_multi_tree", "idx_multi_not_tree", "idx_index"]
)
def test_unstack_multiindex_types(multi_col, multi_idx):
MAX_NROWS = MAX_NCOLS = 36
pandas_df = pandas.DataFrame(test_data["int_data"]).iloc[:MAX_NROWS, :MAX_NCOLS]
modin_df = pd.DataFrame(test_data["int_data"]).iloc[:MAX_NROWS, :MAX_NCOLS]
def get_new_index(index, cond):
if cond == "col_multi_tree" or cond == "idx_multi_tree":
return generate_multiindex(len(index), nlevels=3, is_tree_like=True)
elif cond == "col_multi_not_tree" or cond == "idx_multi_not_tree":
return generate_multiindex(len(index), nlevels=3)
else:
return index
pandas_df.columns = modin_df.columns = get_new_index(pandas_df.columns, multi_col)
pandas_df.index = modin_df.index = get_new_index(pandas_df.index, multi_idx)
df_equals(modin_df.unstack(), pandas_df.unstack())
df_equals(modin_df.unstack(level=1), pandas_df.unstack(level=1))
if multi_idx != "idx_index":
df_equals(modin_df.unstack(level=[0, 1]), pandas_df.unstack(level=[0, 1]))
df_equals(modin_df.unstack(level=[0, 1, 2]), pandas_df.unstack(level=[0, 1, 2]))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("copy_kwargs", ({"copy": True}, {"copy": None}, {}))
@pytest.mark.parametrize(
"get_array, get_array_name",
(
(lambda df, copy_kwargs: df.__array__(**copy_kwargs), "__array__"),
(lambda df, copy_kwargs: np.array(df, **copy_kwargs), "np.array"),
),
)
def test___array__(data, copy_kwargs, get_array, get_array_name):
if (
get_array_name == "np.array"
and Version(np.__version__) < Version("2")
and "copy" in copy_kwargs
and copy_kwargs["copy"] is None
):
pytest.skip(reason="np.array does not support copy=None before numpy 2.0")
assert_array_equal(*(get_array(df, copy_kwargs) for df in create_test_dfs(data)))
@pytest.mark.xfail(
condition=Backend.get() != "Pandas",
raises=AssertionError,
reason="https://github.com/modin-project/modin/issues/4650",
)
def test___array__copy_false_creates_view():
def do_in_place_update_via_copy(df):
array = np.array(df, copy=False)
array[0, 0] += 1
eval_general(
*create_test_dfs([[11]]), do_in_place_update_via_copy, __inplace__=True
)
@pytest.mark.parametrize("data", [[False], [True], [1, 2]])
def test___bool__(data):
eval_general(
*create_test_dfs(data),
lambda df: df.__bool__(),
expected_exception=ValueError(
"The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all()."
),
)
@pytest.mark.parametrize(
"is_sparse_data", [True, False], ids=["is_sparse", "is_not_sparse"]
)
def test_hasattr_sparse(is_sparse_data):
modin_df, pandas_df = (
create_test_dfs(pandas.arrays.SparseArray(test_data["float_nan_data"].values()))
if is_sparse_data
else create_test_dfs(test_data["float_nan_data"])
)
eval_general(modin_df, pandas_df, lambda df: hasattr(df, "sparse"))
def test_setattr_axes():
# Test that setting .index or .columns does not warn
df = pd.DataFrame([[1, 2], [3, 4]])
with warnings.catch_warnings():
if get_current_execution() != "BaseOnPython":
# In BaseOnPython, setting columns raises a warning because get_axis
# defaults to pandas.
warnings.simplefilter("error")
df.index = ["foo", "bar"]
# Check that ensure_index was called
pd.testing.assert_index_equal(df.index, pandas.Index(["foo", "bar"]))
df.columns = [9, 10]
pd.testing.assert_index_equal(df.columns, pandas.Index([9, 10]))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_attrs(data):
modin_df, pandas_df = create_test_dfs(data)
eval_general(modin_df, pandas_df, lambda df: df.attrs)
def test_df_from_series_with_tuple_name():
# Tests that creating a DataFrame from a series with a tuple name results in
# a DataFrame with MultiIndex columns.
pandas_result = pandas.DataFrame(pandas.Series(name=("a", 1)))
# 1. Creating a Modin DF from native pandas Series
df_equals(pd.DataFrame(pandas.Series(name=("a", 1))), pandas_result)
# 2. Creating a Modin DF from Modin Series
df_equals(pd.DataFrame(pd.Series(name=("a", 1))), pandas_result)
def test_large_df_warns_distributing_takes_time():
# https://github.com/modin-project/modin/issues/6574
regex = r"Distributing (.*) object\. This may take some time\."
with pytest.warns(UserWarning, match=regex):
pd.DataFrame(np.random.randint(1_000_000, size=(100_000, 10)))
def test_large_series_warns_distributing_takes_time():
# https://github.com/modin-project/modin/issues/6574
regex = r"Distributing (.*) object\. This may take some time\."
with pytest.warns(UserWarning, match=regex):
pd.Series(np.random.randint(1_000_000, size=(2_500_000)))
def test_df_does_not_warn_distributing_takes_time():
# https://github.com/modin-project/modin/issues/6574
regex = r"Distributing (.*) object\. This may take some time\."
with warnings.catch_warnings():
warnings.filterwarnings("error", regex, UserWarning)
pd.DataFrame(np.random.randint(1_000_000, size=(100_000, 9)))
def test_series_does_not_warn_distributing_takes_time():
# https://github.com/modin-project/modin/issues/6574
regex = r"Distributing (.*) object\. This may take some time\."
with warnings.catch_warnings():
warnings.filterwarnings("error", regex, UserWarning)
pd.Series(np.random.randint(1_000_000, size=(2_400_000)))
@pytest.mark.parametrize("dtype", [np.int64, pd.ArrowDtype(pa.int64())])
def test_empty_df_dtypes(dtype):
df = pd.DataFrame({"A": []}, dtype=dtype)
assert df.dtypes["A"] == dtype
def test_array_ufunc():
modin_df, pandas_df = create_test_dfs([[1, 2], [3, 4]])
eval_general(modin_df, pandas_df, np.sqrt)
modin_ser, pandas_ser = create_test_series([1, 2, 3, 4, 9])
eval_general(modin_ser, pandas_ser, np.sqrt)
| TestCorr |
python | scrapy__scrapy | tests/test_request_cb_kwargs.py | {
"start": 5746,
"end": 7153
} | class ____:
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
@inlineCallbacks
def test_callback_kwargs(self):
crawler = get_crawler(KeywordArgumentsSpider)
with LogCapture() as log:
yield crawler.crawl(mockserver=self.mockserver)
assert all(crawler.spider.checks)
assert len(crawler.spider.checks) == crawler.stats.get_value("boolean_checks")
# check exceptions for argument mismatch
exceptions = {}
for line in log.records:
for key in ("takes_less", "takes_more"):
if key in line.getMessage():
exceptions[key] = line
assert exceptions["takes_less"].exc_info[0] is TypeError
assert str(exceptions["takes_less"].exc_info[1]).endswith(
"parse_takes_less() got an unexpected keyword argument 'number'"
), "Exception message: " + str(exceptions["takes_less"].exc_info[1])
assert exceptions["takes_more"].exc_info[0] is TypeError
assert str(exceptions["takes_more"].exc_info[1]).endswith(
"parse_takes_more() missing 1 required positional argument: 'other'"
), "Exception message: " + str(exceptions["takes_more"].exc_info[1])
| TestCallbackKeywordArguments |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-microsoft-onedrive/source_microsoft_onedrive/stream_reader.py | {
"start": 699,
"end": 770
} | class ____(RemoteFile):
download_url: str
| MicrosoftOneDriveRemoteFile |
python | keras-team__keras | guides/making_new_layers_and_models_via_subclassing.py | {
"start": 2348,
"end": 3309
} | class ____(keras.layers.Layer):
def __init__(self, input_dim):
super().__init__()
self.total = self.add_weight(
initializer="zeros", shape=(input_dim,), trainable=False
)
def call(self, inputs):
self.total.assign_add(ops.sum(inputs, axis=0))
return self.total
x = ops.ones((2, 2))
my_sum = ComputeSum(2)
y = my_sum(x)
print(y.numpy())
y = my_sum(x)
print(y.numpy())
"""
It's part of `layer.weights`, but it gets categorized as a non-trainable weight:
"""
print("weights:", len(my_sum.weights))
print("non-trainable weights:", len(my_sum.non_trainable_weights))
# It's not included in the trainable weights:
print("trainable_weights:", my_sum.trainable_weights)
"""
## Best practice: deferring weight creation until the shape of the inputs is known
Our `Linear` layer above took an `input_dim` argument that was used to compute
the shape of the weights `w` and `b` in `__init__()`:
"""
| ComputeSum |
python | scrapy__scrapy | tests/test_downloadermiddleware_redirect.py | {
"start": 495,
"end": 44181
} | class ____:
class Test:
def test_priority_adjust(self):
req = Request("http://a.com")
rsp = self.get_response(req, "http://a.com/redirected")
req2 = self.mw.process_response(req, rsp)
assert req2.priority > req.priority
def test_dont_redirect(self):
url = "http://www.example.com/301"
url2 = "http://www.example.com/redirected"
req = Request(url, meta={"dont_redirect": True})
rsp = self.get_response(req, url2)
r = self.mw.process_response(req, rsp)
assert isinstance(r, Response)
assert r is rsp
# Test that it redirects when dont_redirect is False
req = Request(url, meta={"dont_redirect": False})
rsp = self.get_response(req, url2)
r = self.mw.process_response(req, rsp)
assert isinstance(r, Request)
def test_post(self):
url = "http://www.example.com/302"
url2 = "http://www.example.com/redirected2"
req = Request(
url,
method="POST",
body="test",
headers={"Content-Type": "text/plain", "Content-length": "4"},
)
rsp = self.get_response(req, url2)
req2 = self.mw.process_response(req, rsp)
assert isinstance(req2, Request)
assert req2.url == url2
assert req2.method == "GET"
assert "Content-Type" not in req2.headers, (
"Content-Type header must not be present in redirected request"
)
assert "Content-Length" not in req2.headers, (
"Content-Length header must not be present in redirected request"
)
assert not req2.body, f"Redirected body must be empty, not '{req2.body}'"
def test_max_redirect_times(self):
self.mw.max_redirect_times = 1
req = Request("http://scrapytest.org/302")
rsp = self.get_response(req, "/redirected")
req = self.mw.process_response(req, rsp)
assert isinstance(req, Request)
assert "redirect_times" in req.meta
assert req.meta["redirect_times"] == 1
with pytest.raises(IgnoreRequest):
self.mw.process_response(req, rsp)
def test_ttl(self):
self.mw.max_redirect_times = 100
req = Request("http://scrapytest.org/302", meta={"redirect_ttl": 1})
rsp = self.get_response(req, "/a")
req = self.mw.process_response(req, rsp)
assert isinstance(req, Request)
with pytest.raises(IgnoreRequest):
self.mw.process_response(req, rsp)
def test_redirect_urls(self):
req1 = Request("http://scrapytest.org/first")
rsp1 = self.get_response(req1, "/redirected")
req2 = self.mw.process_response(req1, rsp1)
rsp2 = self.get_response(req1, "/redirected2")
req3 = self.mw.process_response(req2, rsp2)
assert req2.url == "http://scrapytest.org/redirected"
assert req2.meta["redirect_urls"] == ["http://scrapytest.org/first"]
assert req3.url == "http://scrapytest.org/redirected2"
assert req3.meta["redirect_urls"] == [
"http://scrapytest.org/first",
"http://scrapytest.org/redirected",
]
def test_redirect_reasons(self):
req1 = Request("http://scrapytest.org/first")
rsp1 = self.get_response(req1, "/redirected1")
req2 = self.mw.process_response(req1, rsp1)
rsp2 = self.get_response(req2, "/redirected2")
req3 = self.mw.process_response(req2, rsp2)
assert req2.meta["redirect_reasons"] == [self.reason]
assert req3.meta["redirect_reasons"] == [self.reason, self.reason]
def test_cross_origin_header_dropping(self):
safe_headers = {"A": "B"}
cookie_header = {"Cookie": "a=b"}
authorization_header = {"Authorization": "Bearer 123456"}
original_request = Request(
"https://example.com",
headers={**safe_headers, **cookie_header, **authorization_header},
)
# Redirects to the same origin (same scheme, same domain, same port)
# keep all headers.
internal_response = self.get_response(
original_request, "https://example.com/a"
)
internal_redirect_request = self.mw.process_response(
original_request, internal_response
)
assert isinstance(internal_redirect_request, Request)
assert original_request.headers == internal_redirect_request.headers
# Redirects to the same origin (same scheme, same domain, same port)
# keep all headers also when the scheme is http.
http_request = Request(
"http://example.com",
headers={**safe_headers, **cookie_header, **authorization_header},
)
http_response = self.get_response(http_request, "http://example.com/a")
http_redirect_request = self.mw.process_response(
http_request, http_response
)
assert isinstance(http_redirect_request, Request)
assert http_request.headers == http_redirect_request.headers
# For default ports, whether the port is explicit or implicit does not
# affect the outcome, it is still the same origin.
to_explicit_port_response = self.get_response(
original_request, "https://example.com:443/a"
)
to_explicit_port_redirect_request = self.mw.process_response(
original_request, to_explicit_port_response
)
assert isinstance(to_explicit_port_redirect_request, Request)
assert original_request.headers == to_explicit_port_redirect_request.headers
# For default ports, whether the port is explicit or implicit does not
# affect the outcome, it is still the same origin.
to_implicit_port_response = self.get_response(
original_request, "https://example.com/a"
)
to_implicit_port_redirect_request = self.mw.process_response(
original_request, to_implicit_port_response
)
assert isinstance(to_implicit_port_redirect_request, Request)
assert original_request.headers == to_implicit_port_redirect_request.headers
# A port change drops the Authorization header because the origin
# changes, but keeps the Cookie header because the domain remains the
# same.
different_port_response = self.get_response(
original_request, "https://example.com:8080/a"
)
different_port_redirect_request = self.mw.process_response(
original_request, different_port_response
)
assert isinstance(different_port_redirect_request, Request)
assert {
**safe_headers,
**cookie_header,
} == different_port_redirect_request.headers.to_unicode_dict()
# A domain change drops both the Authorization and the Cookie header.
external_response = self.get_response(
original_request, "https://example.org/a"
)
external_redirect_request = self.mw.process_response(
original_request, external_response
)
assert isinstance(external_redirect_request, Request)
assert safe_headers == external_redirect_request.headers.to_unicode_dict()
# A scheme upgrade (http → https) drops the Authorization header
# because the origin changes, but keeps the Cookie header because the
# domain remains the same.
upgrade_response = self.get_response(http_request, "https://example.com/a")
upgrade_redirect_request = self.mw.process_response(
http_request, upgrade_response
)
assert isinstance(upgrade_redirect_request, Request)
assert {
**safe_headers,
**cookie_header,
} == upgrade_redirect_request.headers.to_unicode_dict()
# A scheme downgrade (https → http) drops the Authorization header
# because the origin changes, and the Cookie header because its value
# cannot indicate whether the cookies were secure (HTTPS-only) or not.
#
# Note: If the Cookie header is set by the cookie management
# middleware, as recommended in the docs, the dropping of Cookie on
# scheme downgrade is not an issue, because the cookie management
# middleware will add again the Cookie header to the new request if
# appropriate.
downgrade_response = self.get_response(
original_request, "http://example.com/a"
)
downgrade_redirect_request = self.mw.process_response(
original_request, downgrade_response
)
assert isinstance(downgrade_redirect_request, Request)
assert safe_headers == downgrade_redirect_request.headers.to_unicode_dict()
def test_meta_proxy_http_absolute(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("http://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "http://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "http://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_meta_proxy_http_relative(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("http://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "/a")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "/a")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_meta_proxy_https_absolute(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("https://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "https://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_meta_proxy_https_relative(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("https://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "/a")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "/a")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_meta_proxy_http_to_https(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("http://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "http://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_meta_proxy_https_to_http(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("https://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "http://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "https://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_http_absolute(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"http_proxy": "https://a:@a.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("http://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "http://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "http://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_http_relative(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"http_proxy": "https://a:@a.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("http://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "/a")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "/a")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_https_absolute(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"https_proxy": "https://a:@a.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("https://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "https://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_https_relative(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"https_proxy": "https://a:@a.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("https://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "/a")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "/a")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_proxied_http_to_proxied_https(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"http_proxy": "https://a:@a.example",
"https_proxy": "https://b:@b.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("http://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic Yjo="
assert request2.meta["_auth_proxy"] == "https://b.example"
assert request2.meta["proxy"] == "https://b.example"
response2 = self.get_response(request2, "http://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_proxied_http_to_unproxied_https(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"http_proxy": "https://a:@a.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("http://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
proxy_mw.process_request(request2)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
response2 = self.get_response(request2, "http://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_unproxied_http_to_proxied_https(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"https_proxy": "https://b:@b.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("http://example.com")
proxy_mw.process_request(request1)
assert "Proxy-Authorization" not in request1.headers
assert "_auth_proxy" not in request1.meta
assert "proxy" not in request1.meta
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic Yjo="
assert request2.meta["_auth_proxy"] == "https://b.example"
assert request2.meta["proxy"] == "https://b.example"
response2 = self.get_response(request2, "http://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
proxy_mw.process_request(request3)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
def test_system_proxy_unproxied_http_to_unproxied_https(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("http://example.com")
proxy_mw.process_request(request1)
assert "Proxy-Authorization" not in request1.headers
assert "_auth_proxy" not in request1.meta
assert "proxy" not in request1.meta
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
proxy_mw.process_request(request2)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
response2 = self.get_response(request2, "http://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
proxy_mw.process_request(request3)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
def test_system_proxy_proxied_https_to_proxied_http(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"http_proxy": "https://a:@a.example",
"https_proxy": "https://b:@b.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("https://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic Yjo="
assert request1.meta["_auth_proxy"] == "https://b.example"
assert request1.meta["proxy"] == "https://b.example"
response1 = self.get_response(request1, "http://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "https://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic Yjo="
assert request3.meta["_auth_proxy"] == "https://b.example"
assert request3.meta["proxy"] == "https://b.example"
def test_system_proxy_proxied_https_to_unproxied_http(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"https_proxy": "https://b:@b.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("https://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic Yjo="
assert request1.meta["_auth_proxy"] == "https://b.example"
assert request1.meta["proxy"] == "https://b.example"
response1 = self.get_response(request1, "http://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
proxy_mw.process_request(request2)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
response2 = self.get_response(request2, "https://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic Yjo="
assert request3.meta["_auth_proxy"] == "https://b.example"
assert request3.meta["proxy"] == "https://b.example"
def test_system_proxy_unproxied_https_to_proxied_http(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"http_proxy": "https://a:@a.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("https://example.com")
proxy_mw.process_request(request1)
assert "Proxy-Authorization" not in request1.headers
assert "_auth_proxy" not in request1.meta
assert "proxy" not in request1.meta
response1 = self.get_response(request1, "http://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "https://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
proxy_mw.process_request(request3)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
def test_system_proxy_unproxied_https_to_unproxied_http(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("https://example.com")
proxy_mw.process_request(request1)
assert "Proxy-Authorization" not in request1.headers
assert "_auth_proxy" not in request1.meta
assert "proxy" not in request1.meta
response1 = self.get_response(request1, "http://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
proxy_mw.process_request(request2)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
response2 = self.get_response(request2, "https://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
proxy_mw.process_request(request3)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
| Base |
python | google__pytype | pytype/attribute_test.py | {
"start": 459,
"end": 4925
} | class ____(test_base.UnitTest):
"""Tests for get_attribute's `valself` parameter."""
def setUp(self):
super().setUp()
options = config.Options.create(
python_version=self.python_version, color="never"
)
self.ctx = test_utils.make_context(options)
self.node = self.ctx.root_node
self.attribute_handler = self.ctx.attribute_handler
def test_instance_no_valself(self):
instance = abstract.Instance(self.ctx.convert.int_type, self.ctx)
_, attr_var = self.attribute_handler.get_attribute(
self.node, instance, "real"
)
(attr_binding,) = attr_var.bindings
self.assertEqual(attr_binding.data.cls, self.ctx.convert.int_type)
# Since `valself` was not passed to get_attribute, a binding to
# `instance` is not among the attribute's origins.
self.assertNotIn(instance, [o.data for o in _get_origins(attr_binding)])
def test_instance_with_valself(self):
instance = abstract.Instance(self.ctx.convert.int_type, self.ctx)
valself = instance.to_binding(self.node)
_, attr_var = self.attribute_handler.get_attribute(
self.node, instance, "real", valself
)
(attr_binding,) = attr_var.bindings
self.assertEqual(attr_binding.data.cls, self.ctx.convert.int_type)
# Since `valself` was passed to get_attribute, it is added to the
# attribute's origins.
self.assertIn(valself, _get_origins(attr_binding))
def test_class_no_valself(self):
meta_members = {"x": self.ctx.convert.none.to_variable(self.node)}
meta = abstract.InterpreterClass(
"M", [], meta_members, None, None, (), self.ctx
)
cls = abstract.InterpreterClass("X", [], {}, meta, None, (), self.ctx)
_, attr_var = self.attribute_handler.get_attribute(self.node, cls, "x")
# Since `valself` was not passed to get_attribute, we do not look at the
# metaclass, so M.x is not returned.
self.assertIsNone(attr_var)
def test_class_with_instance_valself(self):
meta_members = {"x": self.ctx.convert.none.to_variable(self.node)}
meta = abstract.InterpreterClass(
"M", [], meta_members, None, None, (), self.ctx
)
cls = abstract.InterpreterClass("X", [], {}, meta, None, (), self.ctx)
valself = abstract.Instance(cls, self.ctx).to_binding(self.node)
_, attr_var = self.attribute_handler.get_attribute(
self.node, cls, "x", valself
)
# Since `valself` is an instance of X, we do not look at the metaclass, so
# M.x is not returned.
self.assertIsNone(attr_var)
def test_class_with_class_valself(self):
meta_members = {"x": self.ctx.convert.none.to_variable(self.node)}
meta = abstract.InterpreterClass(
"M", [], meta_members, None, None, (), self.ctx
)
cls = abstract.InterpreterClass("X", [], {}, meta, None, (), self.ctx)
valself = cls.to_binding(self.node)
_, attr_var = self.attribute_handler.get_attribute(
self.node, cls, "x", valself
)
# Since `valself` is X itself, we look at the metaclass and return M.x.
self.assertEqual(attr_var.data, [self.ctx.convert.none])
def test_getitem_no_valself(self):
cls = abstract.InterpreterClass("X", [], {}, None, None, (), self.ctx)
_, attr_var = self.attribute_handler.get_attribute(
self.node, cls, "__getitem__"
)
(attr,) = attr_var.data
# Since we looked up __getitem__ on a class without passing in `valself`,
# the class is treated as an annotation.
self.assertIs(attr.func.__func__, abstract.AnnotationClass.getitem_slot)
def test_getitem_with_instance_valself(self):
cls = abstract.InterpreterClass("X", [], {}, None, None, (), self.ctx)
valself = abstract.Instance(cls, self.ctx).to_binding(self.node)
_, attr_var = self.attribute_handler.get_attribute(
self.node, cls, "__getitem__", valself
)
# Since we passed in `valself` for this lookup of __getitem__ on a class,
# it is treated as a normal lookup; X.__getitem__ does not exist.
self.assertIsNone(attr_var)
def test_getitem_with_class_valself(self):
cls = abstract.InterpreterClass("X", [], {}, None, None, (), self.ctx)
valself = cls.to_binding(self.node)
_, attr_var = self.attribute_handler.get_attribute(
self.node, cls, "__getitem__", valself
)
# Since we passed in `valself` for this lookup of __getitem__ on a class,
# it is treated as a normal lookup; X.__getitem__ does not exist.
self.assertIsNone(attr_var)
| ValselfTest |
python | pytorch__pytorch | torch/_export/serde/schema.py | {
"start": 1098,
"end": 1248
} | class ____(IntEnum):
Unknown = 0
ContiguousFormat = 1
ChannelsLast = 2
ChannelsLast3d = 3
PreserveFormat = 4
@dataclass
| MemoryFormat |
python | ApeWorX__ape | src/ape/logging.py | {
"start": 11046,
"end": 12368
} | class ____:
rich_console_map: dict[str, "RichConsole"] = {}
def get_console(self, file: Optional[IO[str]] = None, **kwargs) -> "RichConsole":
# Configure custom file console
file_id = str(file)
if file_id not in self.rich_console_map:
# perf: delay importing from rich, as it is slow.
from rich.console import Console as RichConsole
self.rich_console_map[file_id] = RichConsole(file=file, width=100, **kwargs)
return self.rich_console_map[file_id]
_factory = _RichConsoleFactory()
def get_rich_console(file: Optional[IO[str]] = None, **kwargs) -> "RichConsole":
"""
Get an Ape-configured rich console.
Args:
file (Optional[IO[str]]): The file to output to. Will default
to using stdout.
Returns:
``rich.Console``.
"""
return _factory.get_console(file, **kwargs)
def silenced(func: Callable):
"""
A decorator for ensuring a function does not output any logs.
Args:
func (Callable): The function to call silently.
"""
def wrapper(*args, **kwargs):
with logger.disabled():
return func(*args, **kwargs)
return wrapper
__all__ = ["DEFAULT_LOG_LEVEL", "ApeLogger", "LogLevel", "get_rich_console", "logger", "silenced"]
| _RichConsoleFactory |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 9773,
"end": 11149
} | class ____(TypedDict, total=False):
type: Required[ExpectedSerializationTypes]
def simple_ser_schema(type: ExpectedSerializationTypes) -> SimpleSerSchema:
"""
Returns a schema for serialization with a custom type.
Args:
type: The type to use for serialization
"""
return SimpleSerSchema(type=type)
# (input_value: Any, /) -> Any
GeneralPlainNoInfoSerializerFunction = Callable[[Any], Any]
# (input_value: Any, info: FieldSerializationInfo, /) -> Any
GeneralPlainInfoSerializerFunction = Callable[[Any, SerializationInfo[Any]], Any]
# (model: Any, input_value: Any, /) -> Any
FieldPlainNoInfoSerializerFunction = Callable[[Any, Any], Any]
# (model: Any, input_value: Any, info: FieldSerializationInfo, /) -> Any
FieldPlainInfoSerializerFunction = Callable[[Any, Any, FieldSerializationInfo[Any]], Any]
SerializerFunction = Union[
GeneralPlainNoInfoSerializerFunction,
GeneralPlainInfoSerializerFunction,
FieldPlainNoInfoSerializerFunction,
FieldPlainInfoSerializerFunction,
]
WhenUsed = Literal['always', 'unless-none', 'json', 'json-unless-none']
"""
Values have the following meanings:
* `'always'` means always use
* `'unless-none'` means use unless the value is `None`
* `'json'` means use when serializing to JSON
* `'json-unless-none'` means use when serializing to JSON and the value is not `None`
"""
| SimpleSerSchema |
python | bokeh__bokeh | src/bokeh/models/annotations/arrows.py | {
"start": 2573,
"end": 3169
} | class ____(ArrowHead):
''' Render a closed-body arrow head.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
line_props = Include(LineProps, help="""
The {prop} values for the arrow head outline.
""")
fill_props = Include(FillProps, help="""
The {prop} values for the arrow head interior.
""")
hatch_props = Include(HatchProps, help="""
The {prop} values for the arrow head interior.
""")
fill_color = Override(default="black")
| NormalHead |
python | openai__openai-python | src/openai/cli/_errors.py | {
"start": 161,
"end": 196
} | class ____(OpenAIError): ...
| CLIError |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 93643,
"end": 95959
} | class ____(system_info):
# BLAS_SRC is deprecated, please do not use this!
# Build or install a BLAS library via your package manager or from
# source separately.
section = 'blas_src'
dir_env_var = 'BLAS_SRC'
notfounderror = BlasSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['blas']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'daxpy.f')):
src_dir = d
break
if not src_dir:
#XXX: Get sources from netlib. May be ask first.
return
blas1 = '''
caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
scabs1
'''
blas2 = '''
cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
'''
blas3 = '''
cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
'''
sources = [os.path.join(src_dir, f + '.f') \
for f in (blas1 + blas2 + blas3).split()]
#XXX: should we check here actual existence of source files?
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
| blas_src_info |
python | django__django | tests/test_utils/tests.py | {
"start": 25748,
"end": 39541
} | class ____(SimpleTestCase):
def test_html_parser(self):
element = parse_html("<div><p>Hello</p></div>")
self.assertEqual(len(element.children), 1)
self.assertEqual(element.children[0].name, "p")
self.assertEqual(element.children[0].children[0], "Hello")
parse_html("<p>")
parse_html("<p attr>")
dom = parse_html("<p>foo")
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.name, "p")
self.assertEqual(dom[0], "foo")
def test_parse_html_in_script(self):
parse_html('<script>var a = "<p" + ">";</script>')
parse_html(
"""
<script>
var js_sha_link='<p>***</p>';
</script>
"""
)
# script content will be parsed to text
dom = parse_html(
"""
<script><p>foo</p> '</scr'+'ipt>' <span>bar</span></script>
"""
)
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.children[0], "<p>foo</p> '</scr'+'ipt>' <span>bar</span>")
def test_void_elements(self):
for tag in VOID_ELEMENTS:
with self.subTest(tag):
dom = parse_html("<p>Hello <%s> world</p>" % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], "Hello")
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], "world")
dom = parse_html("<p>Hello <%s /> world</p>" % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], "Hello")
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], "world")
def test_simple_equal_html(self):
self.assertHTMLEqual("", "")
self.assertHTMLEqual("<p></p>", "<p></p>")
self.assertHTMLEqual("<p></p>", " <p> </p> ")
self.assertHTMLEqual("<div><p>Hello</p></div>", "<div><p>Hello</p></div>")
self.assertHTMLEqual("<div><p>Hello</p></div>", "<div> <p>Hello</p> </div>")
self.assertHTMLEqual("<div>\n<p>Hello</p></div>", "<div><p>Hello</p></div>\n")
self.assertHTMLEqual(
"<div><p>Hello\nWorld !</p></div>", "<div><p>Hello World\n!</p></div>"
)
self.assertHTMLEqual(
"<div><p>Hello\nWorld !</p></div>", "<div><p>Hello World\n!</p></div>"
)
self.assertHTMLEqual("<p>Hello World !</p>", "<p>Hello World\n\n!</p>")
self.assertHTMLEqual("<p> </p>", "<p></p>")
self.assertHTMLEqual("<p/>", "<p></p>")
self.assertHTMLEqual("<p />", "<p></p>")
self.assertHTMLEqual("<input checked>", '<input checked="checked">')
self.assertHTMLEqual("<p>Hello", "<p> Hello")
self.assertHTMLEqual("<p>Hello</p>World", "<p>Hello</p> World")
def test_ignore_comments(self):
self.assertHTMLEqual(
"<div>Hello<!-- this is a comment --> World!</div>",
"<div>Hello World!</div>",
)
def test_unequal_html(self):
self.assertHTMLNotEqual("<p>Hello</p>", "<p>Hello!</p>")
self.assertHTMLNotEqual("<p>foobar</p>", "<p>foo bar</p>")
self.assertHTMLNotEqual("<p>foo bar</p>", "<p>foo bar</p>")
self.assertHTMLNotEqual("<p>foo nbsp</p>", "<p>foo </p>")
self.assertHTMLNotEqual("<p>foo #20</p>", "<p>foo </p>")
self.assertHTMLNotEqual(
"<p><span>Hello</span><span>World</span></p>",
"<p><span>Hello</span>World</p>",
)
self.assertHTMLNotEqual(
"<p><span>Hello</span>World</p>",
"<p><span>Hello</span><span>World</span></p>",
)
def test_attributes(self):
self.assertHTMLEqual(
'<input type="text" id="id_name" />', '<input id="id_name" type="text" />'
)
self.assertHTMLEqual(
"""<input type='text' id="id_name" />""",
'<input id="id_name" type="text" />',
)
self.assertHTMLNotEqual(
'<input type="text" id="id_name" />',
'<input type="password" id="id_name" />',
)
def test_class_attribute(self):
pairs = [
('<p class="foo bar"></p>', '<p class="bar foo"></p>'),
('<p class=" foo bar "></p>', '<p class="bar foo"></p>'),
('<p class=" foo bar "></p>', '<p class="bar foo"></p>'),
('<p class="foo\tbar"></p>', '<p class="bar foo"></p>'),
('<p class="\tfoo\tbar\t"></p>', '<p class="bar foo"></p>'),
('<p class="\t\t\tfoo\t\t\tbar\t\t\t"></p>', '<p class="bar foo"></p>'),
('<p class="\t \nfoo \t\nbar\n\t "></p>', '<p class="bar foo"></p>'),
]
for html1, html2 in pairs:
with self.subTest(html1):
self.assertHTMLEqual(html1, html2)
def test_boolean_attribute(self):
html1 = "<input checked>"
html2 = '<input checked="">'
html3 = '<input checked="checked">'
self.assertHTMLEqual(html1, html2)
self.assertHTMLEqual(html1, html3)
self.assertHTMLEqual(html2, html3)
self.assertHTMLNotEqual(html1, '<input checked="invalid">')
self.assertEqual(str(parse_html(html1)), "<input checked>")
self.assertEqual(str(parse_html(html2)), "<input checked>")
self.assertEqual(str(parse_html(html3)), "<input checked>")
def test_non_boolean_attibutes(self):
html1 = "<input value>"
html2 = '<input value="">'
html3 = '<input value="value">'
self.assertHTMLEqual(html1, html2)
self.assertHTMLNotEqual(html1, html3)
self.assertEqual(str(parse_html(html1)), '<input value="">')
self.assertEqual(str(parse_html(html2)), '<input value="">')
def test_normalize_refs(self):
pairs = [
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
]
for pair in pairs:
with self.subTest(repr(pair)):
self.assertHTMLEqual(*pair)
def test_complex_examples(self):
self.assertHTMLEqual(
"""<tr><th><label for="id_first_name">First name:</label></th>
<td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><input type="text" id="id_last_name" name="last_name" value="Lennon" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><input type="text" value="1940-10-9" name="birthday" id="id_birthday" /></td></tr>""", # NOQA
"""
<tr><th>
<label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" value="John" id="id_first_name" />
</td></tr>
<tr><th>
<label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
</td></tr>
<tr><th>
<label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
</td></tr>
""",
)
self.assertHTMLEqual(
"""<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p>
This is a valid paragraph
<div> this is a div AFTER the p</div>
</body>
</html>""",
"""
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p> This is a valid paragraph
<!-- browsers would close the p tag here -->
<div> this is a div AFTER the p</div>
</p> <!-- this is invalid HTML parsing, but it should make no
difference in most cases -->
</body>
</html>""",
)
def test_html_contain(self):
# equal html contains each other
dom1 = parse_html("<p>foo")
dom2 = parse_html("<p>foo</p>")
self.assertIn(dom1, dom2)
self.assertIn(dom2, dom1)
dom2 = parse_html("<div><p>foo</p></div>")
self.assertIn(dom1, dom2)
self.assertNotIn(dom2, dom1)
self.assertNotIn("<p>foo</p>", dom2)
self.assertIn("foo", dom2)
# when a root element is used ...
dom1 = parse_html("<p>foo</p><p>bar</p>")
dom2 = parse_html("<p>foo</p><p>bar</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<p>foo</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<p>bar</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<div><p>foo</p><p>bar</p></div>")
self.assertIn(dom2, dom1)
def test_count(self):
# equal html contains each other one time
dom1 = parse_html("<p>foo")
dom2 = parse_html("<p>foo</p>")
self.assertEqual(dom1.count(dom2), 1)
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo</p><p>bar</p>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo foo</p><p>foo</p>")
self.assertEqual(dom2.count("foo"), 3)
dom2 = parse_html('<p class="bar">foo</p>')
self.assertEqual(dom2.count("bar"), 0)
self.assertEqual(dom2.count("class"), 0)
self.assertEqual(dom2.count("p"), 0)
self.assertEqual(dom2.count("o"), 2)
dom2 = parse_html("<p>foo</p><p>foo</p>")
self.assertEqual(dom2.count(dom1), 2)
dom2 = parse_html('<div><p>foo<input type=""></p><p>foo</p></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<div><div><p>foo</p></div></div>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo<p>foo</p></p>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo<p>bar</p></p>")
self.assertEqual(dom2.count(dom1), 0)
# HTML with a root element contains the same HTML with no root element.
dom1 = parse_html("<p>foo</p><p>bar</p>")
dom2 = parse_html("<div><p>foo</p><p>bar</p></div>")
self.assertEqual(dom2.count(dom1), 1)
# Target of search is a sequence of child elements and appears more
# than once.
dom2 = parse_html("<div><p>foo</p><p>bar</p><p>foo</p><p>bar</p></div>")
self.assertEqual(dom2.count(dom1), 2)
# Searched HTML has additional children.
dom1 = parse_html("<a/><b/>")
dom2 = parse_html("<a/><b/><c/>")
self.assertEqual(dom2.count(dom1), 1)
# No match found in children.
dom1 = parse_html("<b/><a/>")
self.assertEqual(dom2.count(dom1), 0)
# Target of search found among children and grandchildren.
dom1 = parse_html("<b/><b/>")
dom2 = parse_html("<a><b/><b/></a><b/><b/>")
self.assertEqual(dom2.count(dom1), 2)
def test_root_element_escaped_html(self):
html = "<br>"
parsed = parse_html(html)
self.assertEqual(str(parsed), html)
def test_parsing_errors(self):
with self.assertRaises(AssertionError):
self.assertHTMLEqual("<p>", "")
with self.assertRaises(AssertionError):
self.assertHTMLEqual("", "<p>")
error_msg = (
"First argument is not valid HTML:\n"
"('Unexpected end tag `div` (Line 1, Column 6)', (1, 6))"
)
with self.assertRaisesMessage(AssertionError, error_msg):
self.assertHTMLEqual("< div></div>", "<div></div>")
with self.assertRaises(HTMLParseError):
parse_html("</p>")
def test_escaped_html_errors(self):
msg = "<p>\n<foo>\n</p> != <p>\n<foo>\n</p>\n"
with self.assertRaisesMessage(AssertionError, msg):
self.assertHTMLEqual("<p><foo></p>", "<p><foo></p>")
with self.assertRaisesMessage(AssertionError, msg):
self.assertHTMLEqual("<p><foo></p>", "<p><foo></p>")
def test_contains_html(self):
response = HttpResponse(
"""<body>
This is a form: <form method="get">
<input type="text" name="Hello" />
</form></body>"""
)
self.assertNotContains(response, "<input name='Hello' type='text'>")
self.assertContains(response, '<form method="get">')
self.assertContains(response, "<input name='Hello' type='text'>", html=True)
self.assertNotContains(response, '<form method="get">', html=True)
invalid_response = HttpResponse("""<body <bad>>""")
with self.assertRaises(AssertionError):
self.assertContains(invalid_response, "<p></p>")
with self.assertRaises(AssertionError):
self.assertContains(response, '<p "whats" that>')
def test_unicode_handling(self):
response = HttpResponse(
'<p class="help">Some help text for the title (with Unicode ŠĐĆŽćžšđ)</p>'
)
self.assertContains(
response,
'<p class="help">Some help text for the title (with Unicode ŠĐĆŽćžšđ)</p>',
html=True,
)
| HTMLEqualTests |
python | pallets__werkzeug | tests/test_http.py | {
"start": 26198,
"end": 29882
} | class ____:
def test_best_match_works(self):
# was a bug in 0.6
rv = http.parse_accept_header(
"foo=,application/xml,application/xhtml+xml,"
"text/html;q=0.9,text/plain;q=0.8,"
"image/png,*/*;q=0.5",
datastructures.MIMEAccept,
).best_match(["foo/bar"])
assert rv == "foo/bar"
@pytest.mark.parametrize(
"value",
[
"Basic V2Vya3pldWc6V2VrcnpldWc=",
'Digest username=Mufasa, realm="testrealm@host.invalid",'
' nonce=dcd98b7102dd2f0e8b11d0f600bfb0c093, uri="/dir/index.html", qop=auth,'
" nc=00000001, cnonce=0a4f113b, response=6629fae49393a05397450978507c4ef1,"
" opaque=5ccc069c403ebaf9f0171e9517f40e41",
],
)
def test_authorization_to_header(value: str) -> None:
parsed = Authorization.from_header(value)
assert parsed is not None
assert parsed.to_header() == value
@pytest.mark.parametrize(
("value", "expect"),
[
(
"Sun, 06 Nov 1994 08:49:37 GMT ",
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
),
(
"Sunday, 06-Nov-94 08:49:37 GMT",
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
),
(
" Sun Nov 6 08:49:37 1994",
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
),
("foo", None),
(
" Sun 02 Feb 1343 08:49:37 GMT",
datetime(1343, 2, 2, 8, 49, 37, tzinfo=timezone.utc),
),
(
"Thu, 01 Jan 1970 00:00:00 GMT",
datetime(1970, 1, 1, tzinfo=timezone.utc),
),
("Thu, 33 Jan 1970 00:00:00 GMT", None),
],
)
def test_parse_date(value, expect):
assert http.parse_date(value) == expect
@pytest.mark.parametrize(
("value", "expect"),
[
(
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
"Sun, 06 Nov 1994 08:49:37 GMT",
),
(
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone(timedelta(hours=-8))),
"Sun, 06 Nov 1994 16:49:37 GMT",
),
(datetime(1994, 11, 6, 8, 49, 37), "Sun, 06 Nov 1994 08:49:37 GMT"),
(0, "Thu, 01 Jan 1970 00:00:00 GMT"),
(datetime(1970, 1, 1), "Thu, 01 Jan 1970 00:00:00 GMT"),
(datetime(1, 1, 1), "Mon, 01 Jan 0001 00:00:00 GMT"),
(datetime(999, 1, 1), "Tue, 01 Jan 0999 00:00:00 GMT"),
(datetime(1000, 1, 1), "Wed, 01 Jan 1000 00:00:00 GMT"),
(datetime(2020, 1, 1), "Wed, 01 Jan 2020 00:00:00 GMT"),
(date(2020, 1, 1), "Wed, 01 Jan 2020 00:00:00 GMT"),
],
)
def test_http_date(value, expect):
assert http.http_date(value) == expect
@pytest.mark.parametrize("value", [".5", "+0.5", "0.5_1", "🯰.🯵"])
def test_accept_invalid_float(value):
quoted = urllib.parse.quote(value)
if quoted == value:
q = f"q={value}"
else:
q = f"q*=UTF-8''{value}"
a = http.parse_accept_header(f"en,jp;{q}")
assert list(a.values()) == ["en"]
def test_accept_valid_int_one_zero():
assert http.parse_accept_header("en;q=1") == http.parse_accept_header("en;q=1.0")
assert http.parse_accept_header("en;q=0") == http.parse_accept_header("en;q=0.0")
assert http.parse_accept_header("en;q=5") == http.parse_accept_header("en;q=5.0")
@pytest.mark.parametrize("value", ["🯱🯲🯳", "+1-", "1-1_23"])
def test_range_invalid_int(value):
assert http.parse_range_header(value) is None
@pytest.mark.parametrize("value", ["*/🯱🯲🯳", "1-+2/3", "1_23-125/*"])
def test_content_range_invalid_int(value):
assert http.parse_content_range_header(f"bytes {value}") is None
| TestRegression |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 874284,
"end": 875072
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for PullRequestReviewComment."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("PullRequestReviewCommentEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("PullRequestReviewComment"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| PullRequestReviewCommentConnection |
python | kamyu104__LeetCode-Solutions | Python/valid-parentheses.py | {
"start": 29,
"end": 401
} | class ____(object):
# @return a boolean
def isValid(self, s):
stack, lookup = [], {"(": ")", "{": "}", "[": "]"}
for parenthese in s:
if parenthese in lookup:
stack.append(parenthese)
elif len(stack) == 0 or lookup[stack.pop()] != parenthese:
return False
return len(stack) == 0
| Solution |
python | matplotlib__matplotlib | tools/boilerplate.py | {
"start": 2453,
"end": 3349
} | class ____:
"""
Format function default values as needed for inspect.formatargspec.
The interesting part is a hard-coded list of functions used
as defaults in pyplot methods.
"""
def __init__(self, value):
if value is mlab.detrend_none:
self._repr = "mlab.detrend_none"
elif value is mlab.window_hanning:
self._repr = "mlab.window_hanning"
elif value is np.mean:
self._repr = "np.mean"
elif value is _api.deprecation._deprecated_parameter:
self._repr = "_api.deprecation._deprecated_parameter"
elif isinstance(value, Enum):
# Enum str is Class.Name whereas their repr is <Class.Name: value>.
self._repr = f'{type(value).__name__}.{value.name}'
else:
self._repr = repr(value)
def __repr__(self):
return self._repr
| value_formatter |
python | python-poetry__poetry | src/poetry/puzzle/transaction.py | {
"start": 494,
"end": 7842
} | class ____:
def __init__(
self,
current_packages: list[Package],
result_packages: list[Package] | dict[Package, TransitivePackageInfo],
installed_packages: list[Package] | None = None,
root_package: Package | None = None,
marker_env: Mapping[str, Any] | None = None,
groups: set[NormalizedName] | None = None,
) -> None:
self._current_packages = current_packages
self._result_packages = result_packages
if installed_packages is None:
installed_packages = []
self._installed_packages = {pkg.name: pkg for pkg in installed_packages}
self._root_package = root_package
self._marker_env = marker_env
self._groups = groups
def get_solved_packages(self) -> dict[Package, TransitivePackageInfo]:
assert isinstance(self._result_packages, dict)
return self._result_packages
def calculate_operations(
self,
*,
with_uninstalls: bool = True,
synchronize: bool = False,
skip_directory: bool = False,
extras: set[NormalizedName] | None = None,
system_site_packages: set[NormalizedName] | None = None,
) -> list[Operation]:
from poetry.installation.operations import Install
from poetry.installation.operations import Uninstall
from poetry.installation.operations import Update
if not system_site_packages:
system_site_packages = set()
operations: list[Operation] = []
extra_packages: set[NormalizedName] = set()
if self._marker_env:
marker_env_with_extras = dict(self._marker_env)
if extras is not None:
marker_env_with_extras["extra"] = extras
elif extras is not None:
assert self._root_package is not None
extra_packages = get_extra_package_names(
self._result_packages,
{k: [d.name for d in v] for k, v in self._root_package.extras.items()},
extras,
)
if isinstance(self._result_packages, dict):
priorities = {
pkg: info.depth for pkg, info in self._result_packages.items()
}
else:
priorities = defaultdict(int)
relevant_result_packages: set[NormalizedName] = set()
for result_package in self._result_packages:
is_unsolicited_extra = False
if self._marker_env:
assert self._groups is not None
assert isinstance(self._result_packages, dict)
info = self._result_packages[result_package]
if info.groups & self._groups and info.get_marker(
self._groups
).validate(marker_env_with_extras):
relevant_result_packages.add(result_package.name)
elif result_package.optional:
is_unsolicited_extra = True
else:
continue
else:
is_unsolicited_extra = extras is not None and (
result_package.optional
and result_package.name not in extra_packages
)
if not is_unsolicited_extra:
relevant_result_packages.add(result_package.name)
if installed_package := self._installed_packages.get(result_package.name):
# Extras that were not requested are not relevant.
if is_unsolicited_extra:
pass
# We have to perform an update if the version or another
# attribute of the package has changed (source type, url, ref, ...).
elif result_package.version != installed_package.version or (
(
# This has to be done because installed packages cannot
# have type "legacy". If a package with type "legacy"
# is installed, the installed package has no source_type.
# Thus, if installed_package has no source_type and
# the result_package has source_type "legacy" (negation of
# the following condition), update must not be performed.
# This quirk has the side effect that when switching
# from PyPI to legacy (or vice versa),
# no update is performed.
installed_package.source_type
or result_package.source_type != "legacy"
)
and not result_package.is_same_package_as(installed_package)
):
operations.append(
Update(
installed_package,
result_package,
priority=priorities[result_package],
)
)
else:
operations.append(Install(result_package).skip("Already installed"))
elif not (skip_directory and result_package.source_type == "directory"):
op = Install(result_package, priority=priorities[result_package])
if is_unsolicited_extra:
op.skip("Not required")
operations.append(op)
if with_uninstalls:
uninstalls: set[NormalizedName] = set()
result_packages = {package.name for package in self._result_packages}
for current_package in self._current_packages:
if current_package.name not in (result_packages | uninstalls) and (
installed_package := self._installed_packages.get(
current_package.name
)
):
uninstalls.add(installed_package.name)
if installed_package.name not in system_site_packages:
operations.append(Uninstall(installed_package))
if synchronize:
# We preserve pip when not managed by poetry, this is done to avoid
# externally managed virtual environments causing unnecessary removals.
preserved_package_names = {"pip"} - relevant_result_packages
for installed_package in self._installed_packages.values():
if installed_package.name in uninstalls:
continue
if (
self._root_package
and installed_package.name == self._root_package.name
):
continue
if installed_package.name in preserved_package_names:
continue
if installed_package.name not in relevant_result_packages:
uninstalls.add(installed_package.name)
if installed_package.name not in system_site_packages:
operations.append(Uninstall(installed_package))
return sorted(
operations,
key=lambda o: (
-o.priority,
o.package.name,
o.package.version,
),
)
| Transaction |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/kms.py | {
"start": 1553,
"end": 6336
} | class ____(GoogleBaseHook):
"""
Hook for Google Cloud Key Management service.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self._conn: KeyManagementServiceClient | None = None
def get_conn(self) -> KeyManagementServiceClient:
"""
Retrieve connection to Cloud Key Management service.
:return: Cloud Key Management service object
"""
if not self._conn:
self._conn = KeyManagementServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO
)
return self._conn
def encrypt(
self,
key_name: str,
plaintext: bytes,
authenticated_data: bytes | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> str:
"""
Encrypts a plaintext message using Google Cloud KMS.
:param key_name: The Resource Name for the key (or key version)
to be used for encryption. Of the form
``projects/*/locations/*/keyRings/*/cryptoKeys/**``
:param plaintext: The message to be encrypted.
:param authenticated_data: Optional additional authenticated data that
must also be provided to decrypt the message.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: The base 64 encoded ciphertext of the original message.
"""
response = self.get_conn().encrypt(
request={
"name": key_name,
"plaintext": plaintext,
"additional_authenticated_data": authenticated_data,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
ciphertext = _b64encode(response.ciphertext)
return ciphertext
def decrypt(
self,
key_name: str,
ciphertext: str,
authenticated_data: bytes | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> bytes:
"""
Decrypts a ciphertext message using Google Cloud KMS.
:param key_name: The Resource Name for the key to be used for decryption.
Of the form ``projects/*/locations/*/keyRings/*/cryptoKeys/**``
:param ciphertext: The message to be decrypted.
:param authenticated_data: Any additional authenticated data that was
provided when encrypting the message.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: The original message.
"""
response = self.get_conn().decrypt(
request={
"name": key_name,
"ciphertext": _b64decode(ciphertext),
"additional_authenticated_data": authenticated_data,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return response.plaintext
| CloudKMSHook |
python | pydantic__pydantic | pydantic/aliases.py | {
"start": 2683,
"end": 4937
} | class ____:
"""!!! abstract "Usage Documentation"
[Using an `AliasGenerator`](../concepts/alias.md#using-an-aliasgenerator)
A data class used by `alias_generator` as a convenience to create various aliases.
Attributes:
alias: A callable that takes a field name and returns an alias for it.
validation_alias: A callable that takes a field name and returns a validation alias for it.
serialization_alias: A callable that takes a field name and returns a serialization alias for it.
"""
alias: Callable[[str], str] | None = None
validation_alias: Callable[[str], str | AliasPath | AliasChoices] | None = None
serialization_alias: Callable[[str], str] | None = None
def _generate_alias(
self,
alias_kind: Literal['alias', 'validation_alias', 'serialization_alias'],
allowed_types: tuple[type[str] | type[AliasPath] | type[AliasChoices], ...],
field_name: str,
) -> str | AliasPath | AliasChoices | None:
"""Generate an alias of the specified kind. Returns None if the alias generator is None.
Raises:
TypeError: If the alias generator produces an invalid type.
"""
alias = None
if alias_generator := getattr(self, alias_kind):
alias = alias_generator(field_name)
if alias and not isinstance(alias, allowed_types):
raise TypeError(
f'Invalid `{alias_kind}` type. `{alias_kind}` generator must produce one of `{allowed_types}`'
)
return alias
def generate_aliases(self, field_name: str) -> tuple[str | None, str | AliasPath | AliasChoices | None, str | None]:
"""Generate `alias`, `validation_alias`, and `serialization_alias` for a field.
Returns:
A tuple of three aliases - validation, alias, and serialization.
"""
alias = self._generate_alias('alias', (str,), field_name)
validation_alias = self._generate_alias('validation_alias', (str, AliasChoices, AliasPath), field_name)
serialization_alias = self._generate_alias('serialization_alias', (str,), field_name)
return alias, validation_alias, serialization_alias # type: ignore
| AliasGenerator |
python | tensorflow__tensorflow | tensorflow/python/saved_model/save_options.py | {
"start": 998,
"end": 3536
} | class ____(enum.Enum):
"""Enum defining options for variable handling when saving.
NONE
No policy applied: Distributed variables are saved as one variable, with no
device attached.
SAVE_VARIABLE_DEVICES
When saving variables, also save their device assignment.
This is useful if one wants to hardcode devices in saved models, but it also
makes them non-portable if soft device placement is disabled (more details
in `tf.config.set_soft_device_placement`). This is currently not
fully supported by `saved_model.load`, and is mainly intended to be used
when one will be reading the saved model at a lower API level. In the
example below, the graph saved by the call to `saved_model.save` will have
the variable devices correctly specified:
```python
exported = tf.train.Checkpoint()
with tf.device('/GPU:0'):
exported.x_gpu = tf.Variable(1.0)
with tf.device('/CPU:0'):
exported.x_cpu = tf.Variable(1.0)
tf.saved_model.save(exported, export_dir,
options = tf.saved_model.SaveOptions(
experimental_variable_policy=
tf.saved_model.experimental.VariablePolicy.SAVE_VARIABLE_DEVICES))
```
Distributed variables are still saved as one variable under this policy.
EXPAND_DISTRIBUTED_VARIABLES
Distributed variables will be saved with information about their components,
allowing for their restoration on load. Also, the saved graph will contain
references to those variables. This is useful when one wants to use the
model for training in environments where the original distribution strategy
is not available.
"""
NONE = None
SAVE_VARIABLE_DEVICES = "save_variable_devices"
EXPAND_DISTRIBUTED_VARIABLES = "expand_distributed_variables"
def _save_variable_devices(self):
"""Checks whether variable devices should be saved."""
return self != VariablePolicy.NONE
def _expand_distributed_variables(self):
"""Checks whether distributed variables should be expanded."""
return self == VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES
@staticmethod
def from_obj(obj):
"""Tries to convert `obj` to a VariablePolicy instance."""
if obj is None:
return VariablePolicy.NONE
if isinstance(obj, VariablePolicy):
return obj
key = str(obj).lower()
for policy in VariablePolicy:
if key == policy.value:
return policy
raise ValueError(f"Received invalid VariablePolicy value: {obj}.")
@tf_export("saved_model.SaveOptions")
| VariablePolicy |
python | kamyu104__LeetCode-Solutions | Python/task-scheduler.py | {
"start": 71,
"end": 402
} | class ____(object):
def leastInterval(self, tasks, n):
"""
:type tasks: List[str]
:type n: int
:rtype: int
"""
counter = Counter(tasks)
_, max_count = counter.most_common(1)[0]
return max((max_count-1) * (n+1) + counter.values().count(max_count), len(tasks))
| Solution |
python | django__django | tests/admin_views/models.py | {
"start": 23262,
"end": 23363
} | class ____(models.Model):
"""
Simple model with nothing on it for use in testing
"""
| Simple |
python | pydantic__pydantic | pydantic/experimental/pipeline.py | {
"start": 1102,
"end": 1270
} | class ____:
func: Callable[[], type[Any]]
@cached_property
def tp(self) -> type[Any]:
return self.func()
@dataclass(**_slots_frozen)
| _ValidateAsDefer |
python | pytorch__pytorch | benchmarks/dynamo/genai_layers/kernels.py | {
"start": 14149,
"end": 17781
} | class ____(BenchmarkKernel):
def __init__(self, script_args):
super().__init__(script_args)
self.available_backends = [
"eager",
"compiled",
"quack",
"liger",
]
def get_shapes(self) -> tuple[tuple[int, ...], ...]:
# TODO: OOM for (32768, 65536) on h100
return (
(32768, 256),
(32768, 512),
(32768, 1024),
(32768, 2048),
(32768, 4096),
(32768, 8192),
(32768, 16384),
) + extra_shapes_for_norm
def get_memory_bytes(self, args, kwargs) -> int:
x, w, dy = args
# x, dy: [M, N], w: [N]
M, N = x.shape
# Read x, w, dy, write dx, dw
return 3 * M * N * x.dtype.itemsize + 2 * N * w.dtype.itemsize
def rms_norm_ref(self, x, w):
x_f32 = x.float()
return (
x_f32
* torch.rsqrt(torch.mean(x_f32.square(), dim=-1, keepdim=True) + 1e-6)
* w
).to(x.dtype)
def eager(self, args, kwargs=None) -> Any:
assert kwargs is None
x, w, dy = args
y = self.rms_norm_ref(x, w)
return lambda: torch.autograd.grad(
y, [x, w], grad_outputs=dy, retain_graph=True
)
def compiled(self, args, kwargs=None) -> Any:
assert kwargs is None
x, w, dy = args
y = torch.compile(self.rms_norm_ref, mode=self.compile_mode, fullgraph=True)(
x, w
)
return lambda: torch.autograd.grad(
y, [x, w], grad_outputs=dy, retain_graph=True
)
def compute_rstd(self, x, eps):
return torch.rsqrt(torch.mean(x.float().square(), dim=-1, keepdim=True) + eps)
def quack(self, args, kwargs=None) -> Any:
from quack.rmsnorm import _get_sm_count, _rmsnorm_bwd
(
x,
w,
dy,
) = args
M, N = x.shape
rstd = self.compute_rstd(x, eps=1e-6)
dx = torch.empty_like(x)
sm_count = _get_sm_count(x.size(1), x.device)
dw_partial = torch.empty(
sm_count, x.size(1), device=x.device, dtype=torch.float32
)
def quack_bwd():
_rmsnorm_bwd(
x,
w,
dy,
rstd,
dx,
dw_partial,
db_partial=None,
dresidual_out=None,
dresidual=None,
sm_count=sm_count,
)
dw = dw_partial.sum(dim=0).to(w.dtype)
return dx, dw
return quack_bwd
def liger(self, args, kwargs=None) -> Any:
from liger_kernel.transformers.rms_norm import LigerRMSNorm
x, w, dy = args
M, N = x.shape
liger_rmsnorm = LigerRMSNorm(
hidden_size=N, eps=1e-6, casting_mode="gemma"
).cuda()
liger_rmsnorm.weight.data.copy_(w)
y = liger_rmsnorm(x)
return lambda: torch.autograd.grad(
y, [x, liger_rmsnorm.weight], grad_outputs=dy, retain_graph=True
)
def benchmark(self):
for M, N in self.get_shapes():
print(f"Tensor dimensions: [{M}, {N}]")
torch_dtype = cutlass_torch.dtype(cutlass.BFloat16)
x = torch.randn(M, N, device="cuda", dtype=torch_dtype, requires_grad=True)
w = torch.randn(N, device="cuda", dtype=torch.float32, requires_grad=True)
dy = torch.randn(M, N, device="cuda", dtype=torch_dtype)
self.benchmark_single_shape((x, w, dy), setting=f"shape: [{M}, {N}]")
| RMSNormBackward |
python | pypa__pipenv | pipenv/patched/pip/_internal/models/direct_url.py | {
"start": 3974,
"end": 4435
} | class ____:
name: ClassVar = "dir_info"
editable: bool = False
@classmethod
def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["DirInfo"]:
if d is None:
return None
return cls(editable=_get_required(d, bool, "editable", default=False))
def _to_dict(self) -> Dict[str, Any]:
return _filter_none(editable=self.editable or None)
InfoType = Union[ArchiveInfo, DirInfo, VcsInfo]
@dataclass
| DirInfo |
python | cython__cython | tests/run/withstat_py27.py | {
"start": 2590,
"end": 2703
} | class ____(object):
def __enter__(self): pass
def __exit__(self, *exc_info): raise RuntimeError()
| ExitRaises |
python | plotly__plotly.py | plotly/io/_defaults.py | {
"start": 42,
"end": 411
} | class ____(object):
"""
Class to store default settings for image generation.
"""
def __init__(self):
self.default_format = "png"
self.default_width = 700
self.default_height = 500
self.default_scale = 1
self.mathjax = None
self.topojson = None
self.plotlyjs = None
defaults = _Defaults()
| _Defaults |
python | django__django | tests/modeladmin/models.py | {
"start": 75,
"end": 306
} | class ____(models.Model):
name = models.CharField(max_length=100)
bio = models.TextField()
sign_date = models.DateField()
class Meta:
ordering = ("name",)
def __str__(self):
return self.name
| Band |
python | huggingface__transformers | tests/models/qwen2/test_tokenization_qwen2.py | {
"start": 883,
"end": 2971
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "Qwen/Qwen2.5-VL-7B-Instruct"
tokenizer_class = Qwen2Tokenizer
integration_expected_tokens = ['This', 'Ġis', 'Ġa', 'Ġtest', 'ĠðŁĺ', 'Ĭ', 'Ċ', 'I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ', '9', '2', '0', '0', '0', ',', 'Ġand', 'Ġthis', 'Ġis', 'Ġfals', 'é', '.Ċ', 'çĶŁæ´»çļĦ', '羣', 'è°Ľ', 'æĺ¯', 'Ċ', 'Hi', 'Ġ', 'ĠHello', 'Ċ', 'Hi', 'ĠĠ', 'ĠHello', 'ĊĊ', 'ĠĊĠĠĊ', 'ĠHello', 'Ċ', '<s', '>Ċ', 'hi', '<s', '>', 'there', 'Ċ', 'The', 'Ġfollowing', 'Ġstring', 'Ġshould', 'Ġbe', 'Ġproperly', 'Ġencoded', ':', 'ĠHello', '.Ċ', 'But', 'Ġ', 'ird', 'Ġand', 'Ġ', 'à¸Ľ', 'ี', 'ĠĠ', 'Ġ', 'ird', 'ĠĠ', 'Ġ', 'à¸Ķ', 'Ċ', 'Hey', 'Ġhow', 'Ġare', 'Ġyou', 'Ġdoing'] # fmt: skip
integration_expected_token_ids = [1986, 374, 264, 1273, 26525, 232, 198, 40, 572, 9223, 304, 220, 24, 17, 15, 15, 15, 11, 323, 419, 374, 31932, 963, 624, 105301, 88051, 116109, 20412, 198, 13048, 220, 21927, 198, 13048, 256, 21927, 271, 48426, 21927, 198, 44047, 397, 6023, 44047, 29, 18532, 198, 785, 2701, 914, 1265, 387, 10277, 20498, 25, 21927, 624, 3983, 220, 2603, 323, 220, 54684, 28319, 256, 220, 2603, 256, 220, 37033, 198, 18665, 1246, 525, 498, 3730] # fmt: skip
expected_tokens_from_ids = ['This', 'Ġis', 'Ġa', 'Ġtest', 'ĠðŁĺ', 'Ĭ', 'Ċ', 'I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ', '9', '2', '0', '0', '0', ',', 'Ġand', 'Ġthis', 'Ġis', 'Ġfals', 'é', '.Ċ', 'çĶŁæ´»çļĦ', '羣', 'è°Ľ', 'æĺ¯', 'Ċ', 'Hi', 'Ġ', 'ĠHello', 'Ċ', 'Hi', 'ĠĠ', 'ĠHello', 'ĊĊ', 'ĠĊĠĠĊ', 'ĠHello', 'Ċ', '<s', '>Ċ', 'hi', '<s', '>', 'there', 'Ċ', 'The', 'Ġfollowing', 'Ġstring', 'Ġshould', 'Ġbe', 'Ġproperly', 'Ġencoded', ':', 'ĠHello', '.Ċ', 'But', 'Ġ', 'ird', 'Ġand', 'Ġ', 'à¸Ľ', 'ี', 'ĠĠ', 'Ġ', 'ird', 'ĠĠ', 'Ġ', 'à¸Ķ', 'Ċ', 'Hey', 'Ġhow', 'Ġare', 'Ġyou', 'Ġdoing'] # fmt: skip
integration_expected_decoded_text = "This is a test 😊\nI was born in 92000, and this is falsé.\n生活的真谛是\nHi Hello\nHi Hello\n\n \n \n Hello\n<s>\nhi<s>there\nThe following string should be properly encoded: Hello.\nBut ird and ปี ird ด\nHey how are you doing"
| Qwen2TokenizationTest |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 37241,
"end": 37421
} | class ____(axis_ticks_major_y, axis_ticks_minor_y):
"""
y major and minor axis tick lines
Parameters
----------
theme_element : element_line
"""
| axis_ticks_y |
python | wandb__wandb | wandb/sdk/artifacts/_generated/project_artifact_type.py | {
"start": 343,
"end": 549
} | class ____(GQLResult):
artifact_type: Optional[ArtifactTypeFragment] = Field(alias="artifactType")
ProjectArtifactType.model_rebuild()
ProjectArtifactTypeProject.model_rebuild()
| ProjectArtifactTypeProject |
python | fastapi__sqlmodel | docs_src/tutorial/connect/select/tutorial003_py310.py | {
"start": 222,
"end": 2147
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(default=None, foreign_key="team.id")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
session.add(team_preventers)
session.add(team_z_force)
session.commit()
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team_id=team_z_force.id
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
team_id=team_preventers.id,
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
def select_heroes():
with Session(engine) as session:
statement = select(Hero, Team).join(Team, isouter=True)
results = session.exec(statement)
for hero, team in results:
print("Hero:", hero, "Team:", team)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | kamyu104__LeetCode-Solutions | Python/number-of-music-playlists.py | {
"start": 33,
"end": 611
} | class ____(object):
def numMusicPlaylists(self, N, L, K):
"""
:type N: int
:type L: int
:type K: int
:rtype: int
"""
M = 10**9+7
dp = [[0 for _ in xrange(1+L)] for _ in xrange(2)]
dp[0][0] = dp[1][1] = 1
for n in xrange(1, N+1):
dp[n % 2][n] = (dp[(n-1) % 2][n-1] * n) % M
for l in xrange(n+1, L+1):
dp[n % 2][l] = ((dp[n % 2][l-1] * max(n-K, 0)) % M + \
(dp[(n-1) % 2][l-1] * n) % M) % M
return dp[N % 2][L]
| Solution |
python | ray-project__ray | python/ray/air/tests/mocked_wandb_integration.py | {
"start": 899,
"end": 1688
} | class ____:
"""Thread-safe.
Note: Not implemented to mock re-init behavior properly. Proceed with caution."""
def __init__(self):
self.logs = []
self.config = _FakeConfig()
def init(self, *args, **kwargs):
mock = Mock()
mock.args = args
mock.kwargs = kwargs
if "config" in kwargs:
self.config.update(kwargs["config"])
return mock
def log(self, data, step=None):
try:
json_dumps_safer(data)
except Exception:
self.logs.append("serialization error")
else:
self.logs.append(data)
def finish(self):
pass
def get_logs(self):
return self.logs
def get_config(self):
return self.config.config
| _MockWandbAPI |
python | getsentry__sentry | src/sentry/models/projecttemplate.py | {
"start": 283,
"end": 1125
} | class ____(DefaultFieldsModelExisting):
"""
Identifies a project template that can be used to create new projects.
This model links the project template options to the organization that owns them.
"""
__relocation_scope__ = RelocationScope.Organization
name = models.CharField(max_length=200)
organization = FlexibleForeignKey("sentry.Organization")
class Meta:
app_label = "sentry"
db_table = "sentry_projecttemplate"
constraints = [
models.UniqueConstraint(
fields=["name", "organization"], name="unique_projecttemplate_name_per_org"
)
]
__repr__ = sane_repr("name", "organization_id")
def get_audit_log_data(self) -> dict[str, Any]:
return {"name": self.name, "organization_id": self.organization_id}
| ProjectTemplate |
python | pytorch__pytorch | torch/_inductor/loop_body.py | {
"start": 785,
"end": 1809
} | class ____(torch.fx.Interpreter):
@staticmethod
@functools.cache
def _dummy_gm():
return torch.fx.symbolic_trace(identity)
def __init__(self, graph, submodules):
# call super() with a placeholder to avoid constructing a
# GraphModule which is very expensive (it does codegen).
super().__init__(self._dummy_gm(), garbage_collect_values=False)
self.module = self # type: ignore[assignment]
self.graph = graph
self.submodules = submodules
self.extra_traceback = False
self.fetch_attr = submodules.__getitem__ # type: ignore[method-assign]
self.current_node = None
def run_node(self, n: torch.fx.Node) -> Any:
# pyrefly: ignore [bad-assignment]
self.current_node = n
return super().run_node(n)
def run(self, *args, **kwargs):
with V.set_interpreter_handler(self):
return super().run(*args, **kwargs)
# We don't need the nn.Module and constant handling in Tracer
| InterpreterShim |
python | pypa__hatch | tests/env/plugin/test_interface.py | {
"start": 20012,
"end": 24857
} | class ____:
def test_default(self, isolation, isolated_data_dir, platform, global_application):
config = {"project": {"name": "my_app", "version": "0.0.1"}}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.features == environment.features == []
def test_invalid_type(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"envs": {"default": {"features": 9000}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(TypeError, match="Field `tool.hatch.envs.default.features` must be an array of strings"):
_ = environment.features
def test_correct(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "optional-dependencies": {"foo-bar": [], "baz": []}},
"tool": {"hatch": {"envs": {"default": {"features": ["Foo...Bar", "Baz", "baZ"]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.features == ["baz", "foo-bar"]
def test_feature_not_string(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "optional-dependencies": {"foo": [], "bar": []}},
"tool": {"hatch": {"envs": {"default": {"features": [9000]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(TypeError, match="Feature #1 of field `tool.hatch.envs.default.features` must be a string"):
_ = environment.features
def test_feature_empty_string(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "optional-dependencies": {"foo": [], "bar": []}},
"tool": {"hatch": {"envs": {"default": {"features": [""]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
ValueError, match="Feature #1 of field `tool.hatch.envs.default.features` cannot be an empty string"
):
_ = environment.features
def test_feature_undefined(self, isolation, isolated_data_dir, platform, global_application):
config = {
"project": {"name": "my_app", "version": "0.0.1", "optional-dependencies": {"foo": []}},
"tool": {"hatch": {"envs": {"default": {"features": ["foo", "bar", ""]}}}},
}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with pytest.raises(
ValueError,
match=(
"Feature `bar` of field `tool.hatch.envs.default.features` is not defined in "
"field `project.optional-dependencies`"
),
):
_ = environment.features
| TestFeatures |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.