language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
huggingface__transformers
|
src/transformers/models/esm/modeling_esmfold.py
|
{
"start": 72237,
"end": 85514
}
|
class ____(EsmPreTrainedModel):
_no_split_modules = ["EsmFoldStructureModule", "EsmFoldTriangularSelfAttentionBlock"]
_supports_flash_attn = False
_supports_sdpa = False
_supports_attention_backend = False
_can_record_outputs = None
def __init__(self, config):
    """Build the ESMFold model: a frozen ESM-2 language model feeding a
    folding trunk plus distogram/LM/pLDDT/pTM prediction heads.
    """
    super().__init__(config)
    self.config = config
    # number of distance bins shared by the distogram and pTM heads
    self.distogram_bins = 64
    # frozen language model backbone (no pooling; we use all hidden states)
    self.esm = EsmModel(config, add_pooling_layer=False)
    self.esm.requires_grad_(False)
    if self.config.esmfold_config.fp16_esm:
        # run the (frozen) LM in half precision to save memory
        self.esm.half()
    self.esm_feats = self.config.hidden_size
    self.esm_attns = self.config.num_hidden_layers * self.config.num_attention_heads
    self.esm_layers = self.config.num_hidden_layers
    # lookup table mapping AF2 residue indices to ESM vocab indices
    self.register_buffer("af2_to_esm", self._af2_to_esm_from_vocab_list(config.vocab_list))
    # learned softmax weights combining the LM's per-layer hidden states
    self.esm_s_combine = nn.Parameter(torch.zeros(self.esm_layers + 1))
    trunk_config = self.config.esmfold_config.trunk
    c_s = trunk_config.sequence_state_dim
    c_z = trunk_config.pairwise_state_dim
    # projects combined LM features into the trunk's sequence state space
    self.esm_s_mlp = nn.Sequential(
        LayerNorm(self.esm_feats),
        nn.Linear(self.esm_feats, c_s),
        nn.ReLU(),
        nn.Linear(c_s, c_s),
    )
    # 0 is padding, N is unknown residues, N + 1 is mask.
    self.n_tokens_embed = residue_constants.restype_num + 3
    self.pad_idx = 0
    self.unk_idx = self.n_tokens_embed - 2
    self.mask_idx = self.n_tokens_embed - 1
    # special-token indices resolved from the checkpoint's vocab list
    self.esm_dict_cls_idx = self.config.vocab_list.index("<cls>")
    self.esm_dict_mask_idx = self.config.vocab_list.index("<mask>")
    self.esm_dict_eos_idx = self.config.vocab_list.index("<eos>")
    self.esm_dict_padding_idx = self.config.vocab_list.index("<pad>")
    if self.config.esmfold_config.embed_aa:
        self.embedding = nn.Embedding(self.n_tokens_embed, c_s, padding_idx=0)
    self.trunk = EsmFoldingTrunk(trunk_config)
    self.distogram_head = nn.Linear(c_z, self.distogram_bins)
    self.ptm_head = nn.Linear(c_z, self.distogram_bins)
    self.lm_head = nn.Linear(c_s, self.n_tokens_embed)
    self.lddt_bins = 50
    structure_module_config = trunk_config.structure_module
    # per-residue, per-atom (37) lDDT logits head
    self.lddt_head = nn.Sequential(
        nn.LayerNorm(structure_module_config.sequence_dim),
        nn.Linear(structure_module_config.sequence_dim, self.config.esmfold_config.lddt_head_hid_dim),
        nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, self.config.esmfold_config.lddt_head_hid_dim),
        nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, 37 * self.lddt_bins),
    )
    self.post_init()
@staticmethod
def _af2_to_esm_from_vocab_list(vocab_list: list[str]) -> torch.Tensor:
    """Build the AF2-index -> ESM-vocab-index lookup tensor.

    Position 0 maps to ``<pad>`` because the AF2 residue indices used by
    this model are shifted by one (0 is reserved for padding).
    """
    pad_index = vocab_list.index("<pad>")
    residue_indices = [vocab_list.index(residue) for residue in residue_constants.restypes_with_x]
    return torch.tensor([pad_index] + residue_indices)
@auto_docstring
def forward(
    self,
    input_ids: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.Tensor] = None,
    masking_pattern: Optional[torch.Tensor] = None,
    num_recycles: Optional[int] = None,
    output_hidden_states: Optional[bool] = False,
) -> EsmForProteinFoldingOutput:
    r"""
    masking_pattern (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
        Locations of tokens to mask during training as a form of regularization. Mask values selected in `[0, 1]`.
    num_recycles (`int`, *optional*, defaults to `None`):
        Number of times to recycle the input sequence. If `None`, defaults to `config.num_recycles`. "Recycling"
        consists of passing the output of the folding trunk back in as input to the trunk. During training, the
        number of recycles should vary with each batch, to ensure that the model learns to output valid predictions
        after each recycle. During inference, num_recycles should be set to the highest value that the model was
        trained with for maximum accuracy. Accordingly, when this value is set to `None`, config.max_recycles is
        used.

    Example:

    ```python
    >>> from transformers import AutoTokenizer, EsmForProteinFolding

    >>> model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
    >>> inputs = tokenizer(["MLKNVQVQLV"], return_tensors="pt", add_special_tokens=False)  # A tiny random peptide
    >>> outputs = model(**inputs)
    >>> folded_positions = outputs.positions
    ```
    """
    cfg = self.config.esmfold_config

    aa = input_ids  # B x L
    B = aa.shape[0]
    L = aa.shape[1]
    device = input_ids.device
    if attention_mask is None:
        attention_mask = torch.ones_like(aa, device=device)
    if position_ids is None:
        position_ids = torch.arange(L, device=device).expand_as(input_ids)

    # === ESM ===
    esmaa = self.af2_idx_to_esm_idx(aa, attention_mask)

    if masking_pattern is not None:
        masked_aa, esmaa, mlm_targets = self.bert_mask(aa, esmaa, attention_mask, masking_pattern)
    else:
        masked_aa = aa
        mlm_targets = None

    # We get sequence and pair representations from whatever version of ESM /
    # configuration we are using. The sequence representation esm_s is always
    # present. The pair embedding esm_z may be present depending on the
    # configuration of the model. If esm_z is not used by the model then it
    # is returned as None here.
    esm_s = self.compute_language_model_representations(esmaa)

    # Convert esm_s and esm_z, if present, to the precision used by the trunk and
    # the structure module. These tensors may be a lower precision if, for example,
    # we're running the language model in fp16 precision.
    esm_s = esm_s.to(self.esm_s_combine.dtype)

    if cfg.esm_ablate_sequence:
        esm_s = esm_s * 0

    esm_s = esm_s.detach()

    # === preprocessing ===
    # softmax-weighted combination of the LM's per-layer hidden states
    esm_s = (self.esm_s_combine.softmax(0).unsqueeze(0) @ esm_s).squeeze(2)
    s_s_0 = self.esm_s_mlp(esm_s)

    s_z_0 = s_s_0.new_zeros(B, L, L, cfg.trunk.pairwise_state_dim)

    if self.config.esmfold_config.embed_aa:
        s_s_0 += self.embedding(masked_aa)

    structure: dict = self.trunk(s_s_0, s_z_0, aa, position_ids, attention_mask, no_recycles=num_recycles)
    # Documenting what we expect:
    structure = {
        k: v
        for k, v in structure.items()
        if k
        in [
            "s_z",
            "s_s",
            "frames",
            "sidechain_frames",
            "unnormalized_angles",
            "angles",
            "positions",
            "states",
        ]
    }

    # Add BERT mask for the loss to use, if available.
    # Bug fix: `mlm_targets` is a (B, L) tensor; `if mlm_targets:` would raise
    # "Boolean value of Tensor with more than one element is ambiguous"
    # whenever a masking_pattern is supplied, so test against None instead.
    if mlm_targets is not None:
        structure["mlm_targets"] = mlm_targets

    disto_logits = self.distogram_head(structure["s_z"])
    # symmetrize: distance i->j must equal j->i
    disto_logits = (disto_logits + disto_logits.transpose(1, 2)) / 2
    structure["distogram_logits"] = disto_logits

    lm_logits = self.lm_head(structure["s_s"])
    structure["lm_logits"] = lm_logits

    structure["aatype"] = aa
    make_atom14_masks(structure)
    # Of course, this doesn't respect the true mask because it doesn't know about it...
    # We're not going to properly mask change of index tensors:
    #    "residx_atom14_to_atom37",
    #    "residx_atom37_to_atom14",
    for k in [
        "atom14_atom_exists",
        "atom37_atom_exists",
    ]:
        structure[k] *= attention_mask.unsqueeze(-1)
    structure["residue_index"] = position_ids

    # per-recycle lDDT logits: (num_recycles, B, L, 37, lddt_bins)
    lddt_head = self.lddt_head(structure["states"]).reshape(structure["states"].shape[0], B, L, -1, self.lddt_bins)
    structure["lddt_head"] = lddt_head
    # pLDDT is computed from the last recycle only
    plddt = categorical_lddt(lddt_head[-1], bins=self.lddt_bins)
    structure["plddt"] = plddt

    ptm_logits = self.ptm_head(structure["s_z"])
    structure["ptm_logits"] = ptm_logits
    structure["ptm"] = compute_tm(ptm_logits, max_bin=31, no_bins=self.distogram_bins)
    structure.update(compute_predicted_aligned_error(ptm_logits, max_bin=31, no_bins=self.distogram_bins))

    return EsmForProteinFoldingOutput(**structure)
def af2_idx_to_esm_idx(self, aa, mask):
    """Translate AF2 residue indices into ESM vocabulary indices.

    Padding positions (mask != 1) are sent to index 0 of the lookup
    table, which maps to the ESM ``<pad>`` token.
    """
    # keep the lookup table on the same device as the indices to avoid
    # cross-device indexing
    if self.af2_to_esm.device != aa.device:
        self.af2_to_esm = self.af2_to_esm.to(aa.device)
    shifted = (aa + 1).masked_fill(mask != 1, 0)
    return self.af2_to_esm[shifted]
def compute_language_model_representations(self, esmaa: torch.Tensor) -> torch.Tensor:
    """Run the frozen ESM language model over ESM-vocab token ids.

    Returns the stacked per-layer hidden states with shape
    ``(B, L, num_layers + 1, hidden_size)`` (BOS/EOS positions stripped).
    """
    device = next(self.parameters()).device
    B, L = esmaa.shape  # B = batch size, L = sequence length.

    if self.config.esmfold_config.bypass_lm:
        # Bug fix: the original called `self.esm_s_combine.size[0]` (TypeError:
        # `size` is a method) and passed `-1` to torch.zeros (invalid dim).
        # Return the correctly-shaped all-zero representation instead.
        esm_s = torch.zeros(B, L, self.esm_s_combine.size(0), self.esm_feats, device=device)
        return esm_s

    bosi, eosi = self.esm_dict_cls_idx, self.esm_dict_eos_idx
    bos = esmaa.new_full((B, 1), bosi)
    # append a padding column; the true EOS position is written below
    eos = esmaa.new_full((B, 1), self.esm_dict_padding_idx)
    esmaa = torch.cat([bos, esmaa, eos], dim=1)
    # Use the first padding index as eos during inference.
    # Consistency fix: use the pad index resolved from the vocab in __init__
    # instead of the hard-coded literal 1 (same value for released checkpoints).
    esmaa[range(B), (esmaa != self.esm_dict_padding_idx).sum(1)] = eosi

    # _, esm_z, esm_s = self.esm(esmaa, return_pairs=self.config.esmfold_config.use_esm_attn_map)
    # Because we do not support use_esm_attn_map in the HF port as it is not used in any public models,
    # esm_z is always None
    esm_hidden_states = self.esm(
        esmaa, attention_mask=esmaa != self.esm_dict_padding_idx, output_hidden_states=True
    )["hidden_states"]
    esm_s = torch.stack(esm_hidden_states, dim=2)

    esm_s = esm_s[:, 1:-1]  # B, L, nLayers, C

    return esm_s
def bert_mask(self, aa, esmaa, mask, pattern):
    """Apply a BERT-style masking *pattern* for MLM regularization.

    Returns ``(masked_aa, masked_esmaa, mlm_targets)`` where the targets
    are zeroed everywhere except at masked positions.
    """
    masked_aa = aa.clone()
    masked_esmaa = esmaa.clone()
    mlm_targets = aa.clone()
    # replace masked positions with each vocabulary's mask token
    masked_aa[pattern == 1] = self.mask_idx
    masked_esmaa[pattern == 1] = self.esm_dict_mask_idx
    # targets are only defined where the pattern masks
    mlm_targets[pattern != 1] = 0
    return masked_aa, masked_esmaa, mlm_targets
@torch.no_grad()
def infer(
    self,
    seqs: Union[str, list[str]],
    position_ids=None,
):
    """Fold raw amino-acid sequence strings and return the raw model output.

    Sequences are tokenized with the AF2 residue mapping (unknown residues
    map to X), padded into a dense batch, and passed through `forward`.
    """
    if isinstance(seqs, str):
        lst = [seqs]
    else:
        lst = seqs
    # Returns the raw outputs of the model given an input sequence.
    device = next(self.parameters()).device
    # one-hot encode each sequence then argmax back to dense AF2 indices
    aatype = collate_dense_tensors(
        [
            torch.from_numpy(
                residue_constants.sequence_to_onehot(
                    sequence=seq,
                    mapping=residue_constants.restype_order_with_x,
                    map_unknown_to_x=True,
                )
            )
            .to(device)
            .argmax(dim=1)
            for seq in lst
        ]
    )  # B=1 x L
    # 1 for real residues, 0 for padding introduced by collation
    mask = collate_dense_tensors([aatype.new_ones(len(seq)) for seq in lst])
    # default position ids are 0..L-1, broadcast over the batch
    position_ids = (
        torch.arange(aatype.shape[1], device=device).expand(len(lst), -1)
        if position_ids is None
        else position_ids.to(device)
    )
    if position_ids.ndim == 1:
        position_ids = position_ids.unsqueeze(0)
    return self.forward(
        aatype,
        mask,
        position_ids=position_ids,
    )
@staticmethod
def output_to_pdb(output: dict) -> list[str]:
    """Returns the pdb (file) string from the model given the model output."""
    # move everything to CPU numpy before building protein objects
    output = {k: v.to("cpu").numpy() for k, v in output.items()}
    pdbs = []
    # convert 14-atom representation (last recycle) to the full 37-atom layout
    final_atom_positions = atom14_to_atom37(output["positions"][-1], output)
    final_atom_mask = output["atom37_atom_exists"]
    # build one PDB string per batch element
    for i in range(output["aatype"].shape[0]):
        aa = output["aatype"][i]
        pred_pos = final_atom_positions[i]
        mask = final_atom_mask[i]
        # PDB residue numbering is 1-based
        resid = output["residue_index"][i] + 1
        pred = OFProtein(
            aatype=aa,
            atom_positions=pred_pos,
            atom_mask=mask,
            residue_index=resid,
            # store per-residue pLDDT in the B-factor column, as is conventional
            b_factors=output["plddt"][i],
        )
        pdbs.append(to_pdb(pred))
    return pdbs
def infer_pdb(self, seqs, *args, **kwargs) -> str:
    """Returns the pdb (file) string from the model given an input sequence."""
    # Validate explicitly instead of `assert`, which is stripped under `-O`.
    if not isinstance(seqs, str):
        raise TypeError("`infer_pdb` expects a single sequence string; use `infer_pdbs` for a list of sequences")
    output = self.infer(seqs, *args, **kwargs)
    return self.output_to_pdb(output)[0]
def infer_pdbs(self, seqs: list[str], *args, **kwargs) -> list[str]:
    """Returns the pdb (file) strings from the model given input sequences."""
    folding_output = self.infer(seqs, *args, **kwargs)
    return self.output_to_pdb(folding_output)
__all__ = ["EsmForProteinFolding", "EsmFoldPreTrainedModel"]
|
EsmForProteinFolding
|
python
|
Netflix__metaflow
|
metaflow/_vendor/click/core.py
|
{
"start": 54653,
"end": 63228
}
|
class ____(object):
r"""A parameter to a command comes in two versions: they are either
:class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
not supported by design as some of the internals for parsing are
intentionally not finalized.
Some settings are supported by both options and arguments.
:param param_decls: the parameter declarations for this option or
argument. This is a list of flags or argument
names.
:param type: the type that should be used. Either a :class:`ParamType`
or a Python type. The latter is converted into the former
automatically if supported.
:param required: controls if this is optional or not.
:param default: the default value if omitted. This can also be a callable,
in which case it's invoked when the default is needed
without any arguments.
:param callback: a callback that should be executed after the parameter
was matched. This is called as ``fn(ctx, param,
value)`` and needs to return the value.
:param nargs: the number of arguments to match. If not ``1`` the return
value is a tuple instead of single value. The default for
nargs is ``1`` (except if the type is a tuple, then it's
the arity of the tuple).
:param metavar: how the value is represented in the help page.
:param expose_value: if this is `True` then the value is passed onwards
to the command callback and stored on the context,
otherwise it's skipped.
:param is_eager: eager values are processed before non eager ones. This
should not be set for arguments or it will inverse the
order of processing.
:param envvar: a string or list of strings that are environment variables
that should be checked.
.. versionchanged:: 7.1
Empty environment variables are ignored rather than taking the
empty string value. This makes it possible for scripts to clear
variables if they can't unset them.
.. versionchanged:: 2.0
Changed signature for parameter callback to also be passed the
parameter. The old callback format will still work, but it will
raise a warning to give you a chance to migrate the code easier.
"""
param_type_name = "parameter"
def __init__(
    self,
    param_decls=None,
    type=None,
    required=False,
    default=None,
    callback=None,
    nargs=None,
    metavar=None,
    expose_value=True,
    is_eager=False,
    envvar=None,
    autocompletion=None,
):
    # NOTE: `type` intentionally shadows the builtin to match click's
    # public API.
    self.name, self.opts, self.secondary_opts = self._parse_decls(
        param_decls or (), expose_value
    )
    # normalize the declared type (or infer one from the default)
    self.type = convert_type(type, default)

    # Default nargs to what the type tells us if we have that
    # information available.
    if nargs is None:
        if self.type.is_composite:
            nargs = self.type.arity
        else:
            nargs = 1

    self.required = required
    self.callback = callback
    self.nargs = nargs
    # `multiple` is only ever enabled by Option; kept here so shared
    # processing code can check it uniformly.
    self.multiple = False
    self.expose_value = expose_value
    self.default = default
    self.is_eager = is_eager
    self.metavar = metavar
    self.envvar = envvar
    self.autocompletion = autocompletion
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.name)
@property
def human_readable_name(self):
"""Returns the human readable name of this parameter. This is the
same as the name for options, but the metavar for arguments.
"""
return self.name
def make_metavar(self):
if self.metavar is not None:
return self.metavar
metavar = self.type.get_metavar(self)
if metavar is None:
metavar = self.type.name.upper()
if self.nargs != 1:
metavar += "..."
return metavar
def get_default(self, ctx):
"""Given a context variable this calculates the default value."""
# Otherwise go with the regular default.
if callable(self.default):
rv = self.default()
else:
rv = self.default
return self.type_cast_value(ctx, rv)
def add_to_parser(self, parser, ctx):
    """Register this parameter with *parser*.

    Base implementation is a no-op; Option and Argument override it to
    add their flags/positionals.
    """
    pass
def consume_value(self, ctx, opts):
value = opts.get(self.name)
if value is None:
value = self.value_from_envvar(ctx)
if value is None:
value = ctx.lookup_default(self.name)
return value
def type_cast_value(self, ctx, value):
    """Given a value this runs it properly through the type system.
    This automatically handles things like `nargs` and `multiple` as
    well as composite types.
    """
    if self.type.is_composite:
        # Composite types consume several raw arguments at once, so they
        # only make sense with a fixed nargs > 1.
        if self.nargs <= 1:
            raise TypeError(
                "Attempted to invoke composite type but nargs has"
                " been set to {}. This is not supported; nargs"
                " needs to be set to a fixed value > 1.".format(self.nargs)
            )
        if self.multiple:
            return tuple(self.type(x or (), self, ctx) for x in value or ())
        return self.type(value or (), self, ctx)

    def _convert(value, level):
        # `level` counts remaining layers of tuple nesting before the
        # scalar conversion applies (0 = convert the value directly).
        if level == 0:
            return self.type(value, self, ctx)
        return tuple(_convert(x, level - 1) for x in value or ())

    # One nesting level for nargs != 1, plus one for multiple invocations.
    return _convert(value, (self.nargs != 1) + bool(self.multiple))
def process_value(self, ctx, value):
"""Given a value and context this runs the logic to convert the
value as necessary.
"""
# If the value we were given is None we do nothing. This way
# code that calls this can easily figure out if something was
# not provided. Otherwise it would be converted into an empty
# tuple for multiple invocations which is inconvenient.
if value is not None:
return self.type_cast_value(ctx, value)
def value_is_missing(self, value):
if value is None:
return True
if (self.nargs != 1 or self.multiple) and value == ():
return True
return False
def full_process_value(self, ctx, value):
value = self.process_value(ctx, value)
if value is None and not ctx.resilient_parsing:
value = self.get_default(ctx)
if self.required and self.value_is_missing(value):
raise MissingParameter(ctx=ctx, param=self)
return value
def resolve_envvar_value(self, ctx):
if self.envvar is None:
return
if isinstance(self.envvar, (tuple, list)):
for envvar in self.envvar:
rv = os.environ.get(envvar)
if rv is not None:
return rv
else:
rv = os.environ.get(self.envvar)
if rv != "":
return rv
def value_from_envvar(self, ctx):
rv = self.resolve_envvar_value(ctx)
if rv is not None and self.nargs != 1:
rv = self.type.split_envvar_value(rv)
return rv
def handle_parse_result(self, ctx, opts, args):
    """Consume, convert, and store this parameter's value after parsing.

    Returns ``(value, remaining_args)``.
    """
    with augment_usage_errors(ctx, param=self):
        value = self.consume_value(ctx, opts)
        try:
            value = self.full_process_value(ctx, value)
        except Exception:
            # During resilient parsing (e.g. shell completion) errors are
            # swallowed and the value degrades to None.
            if not ctx.resilient_parsing:
                raise
            value = None
        if self.callback is not None:
            try:
                value = invoke_param_callback(self.callback, ctx, self, value)
            except Exception:
                if not ctx.resilient_parsing:
                    raise

    if self.expose_value:
        ctx.params[self.name] = value
    return value, args
def get_help_record(self, ctx):
    """Return a ``(term, help)`` tuple for the help page, or ``None``.

    Base implementation provides no help record; Option overrides this.
    """
    pass
def get_usage_pieces(self, ctx):
return []
def get_error_hint(self, ctx):
"""Get a stringified version of the param for use in error messages to
indicate which param caused the error.
"""
hint_list = self.opts or [self.human_readable_name]
return " / ".join(repr(x) for x in hint_list)
|
Parameter
|
python
|
allegroai__clearml
|
clearml/backend_interface/task/repo/scriptinfo.py
|
{
"start": 27517,
"end": 62785
}
|
class ____(object):
_sagemaker_metadata_path = "/opt/ml/metadata/resource-metadata.json"
max_diff_size_bytes = 500000
plugins = [GitEnvDetector(), HgEnvDetector(), HgDetector(), GitDetector()]
""" Script info detection plugins, in order of priority """
@classmethod
def _get_logger(cls) -> logging.Logger:
    """Logger shared by all repository-detection helpers."""
    detection_logger = get_logger("Repository Detection")
    return detection_logger
@classmethod
def _jupyter_install_post_store_hook(
    cls,
    jupyter_notebook_filename: str,
    notebook_name: str = None,
    log_history: bool = False,
) -> None:
    """Register IPython cell hooks that sync the notebook to disk.

    Best-effort: any failure (no IPython, no running shell) is silently
    ignored by design, since notebook syncing is optional.
    """
    # noinspection PyBroadException
    try:
        if "IPython" in sys.modules:
            # noinspection PyPackageRequirements
            from IPython import get_ipython

            if get_ipython():
                _JupyterObserver.observer(
                    jupyter_notebook_filename,
                    notebook_name=notebook_name,
                    log_history=log_history,
                )
                # sync before each cell runs; with history also after it runs
                get_ipython().events.register("pre_run_cell", _JupyterObserver.signal_sync)
                if log_history:
                    get_ipython().events.register("post_run_cell", _JupyterObserver.signal_sync)
    except Exception:
        pass
@classmethod
def _get_jupyter_notebook_filename(cls) -> str:
    """Locate the .py entry-point for the currently running Jupyter notebook.

    Tries, in order: the VSCode-Jupyter injected variable, the running
    Jupyter/JupyterServer sessions matched by kernel id, SageMaker
    metadata, and Google Colab. Installs the notebook post-store hook on
    success. Returns the .py script path, an ``error_notebook_not_found.py``
    placeholder when a server was found but the notebook was not, or
    ``None`` when not running under a notebook at all.
    """
    # check if we are running in vscode, we have the jupyter notebook defined:
    if "IPython" in sys.modules:
        # noinspection PyBroadException
        try:
            from IPython import get_ipython  # noqa

            ip = get_ipython()
            # vscode-jupyter PR #8531 added this variable
            local_ipynb_file = ip.__dict__.get("user_ns", {}).get("__vsc_ipynb_file__") if ip else None
            if local_ipynb_file:
                # now replace the .ipynb with .py
                # we assume we will have that file available for monitoring
                local_ipynb_file = Path(local_ipynb_file)
                script_entry_point = local_ipynb_file.with_suffix(".py").as_posix()

                # install the post store hook,
                # notice that if we do not have a local file we serialize/write every time the entire notebook
                cls._jupyter_install_post_store_hook(local_ipynb_file.as_posix(), log_history=False)

                return script_entry_point
        except Exception:
            pass

    # bail out unless argv looks like an ipykernel/colab launcher with a
    # kernel connection json file as its third argument
    if (
        not (
            sys.argv[0].endswith(os.path.sep + "ipykernel_launcher.py")
            or sys.argv[0].endswith(os.path.join(os.path.sep, "ipykernel", "__main__.py"))
            or sys.argv[0].endswith(os.path.sep + "colab_kernel_launcher.py")
        )
        or len(sys.argv) < 3
        or not sys.argv[2].endswith(".json")
    ):
        return None

    # we can safely assume that we can import the notebook package here
    # noinspection PyBroadException
    try:
        jupyter_servers = []
        # collect running servers from the legacy Notebook (<= v6) API
        # noinspection PyBroadException
        try:
            # noinspection PyPackageRequirements
            from notebook.notebookapp import (
                list_running_servers,
            )  # noqa <= Notebook v6

            # noinspection PyBroadException
            try:
                jupyter_servers += list(list_running_servers())
            except Exception:
                server_info = cls.__legacy_jupyter_notebook_server_json_parsing()
                if server_info:
                    jupyter_servers += [server_info]
        except Exception:
            pass
        # and from the newer jupyter_server API
        # noinspection PyBroadException
        try:
            # noinspection PyPackageRequirements
            from jupyter_server.serverapp import list_running_servers  # noqa

            # noinspection PyBroadException
            try:
                jupyter_servers += list(list_running_servers())
            except Exception:
                server_info = cls.__legacy_jupyter_notebook_server_json_parsing()
                if server_info:
                    jupyter_servers += [server_info]
        except Exception:
            pass

        # kernel id comes from the connection file name: kernel-<id>.json
        current_kernel = sys.argv[2].split(os.path.sep)[-1].replace("kernel-", "").replace(".json", "")

        notebook_path = None
        notebook_name = None

        for server_index, server_info in enumerate(jupyter_servers):
            cookies = None
            password = None
            if server_info and server_info.get("password"):
                password, cookies = cls._authenticate_jupyter(server_info)
                if not password:
                    return os.path.join(os.getcwd(), "error_notebook_not_found.py")

            # get api token from ENV - if not defined then from server info
            auth_token = os.getenv("JUPYTERHUB_API_TOKEN") or server_info.get("token") or ""
            verify = True
            try:
                r = requests.get(
                    url=server_info["url"] + "api/sessions",
                    cookies=cookies,
                    headers={
                        "Authorization": "token {}".format(auth_token),
                    },
                    verify=verify,
                )
            except requests.exceptions.SSLError:
                verify = False
                # disable SSL check warning
                from urllib3.exceptions import InsecureRequestWarning

                # noinspection PyUnresolvedReferences
                requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
                # fire request
                r = requests.get(
                    url=server_info["url"] + "api/sessions",
                    cookies=cookies,
                    headers={
                        "Authorization": "token {}".format(auth_token),
                    },
                    verify=verify,
                )
                # enable SSL check warning
                import warnings

                warnings.simplefilter("default", InsecureRequestWarning)

            if r.status_code == 403 and server_info.get("password", False) is False:
                # server info json might set password=False even tho that is not the case
                # retry the request
                password, cookies = cls._authenticate_jupyter(server_info)
                if not password:
                    return os.path.join(os.getcwd(), "error_notebook_not_found.py")
                r = requests.get(
                    url=server_info["url"] + "api/sessions",
                    cookies=cookies,
                    headers={
                        "Authorization": "token {}".format(auth_token),
                    },
                    verify=verify,
                )

            # send request to the jupyter server
            try:
                r.raise_for_status()
            except Exception as ex:
                # raise on last one only
                if server_index == len(jupyter_servers) - 1:
                    cls._get_logger().warning(
                        "Failed accessing the jupyter server{}: {}".format(
                            " [password={}]".format(password) if server_info.get("password") else "",
                            ex,
                        )
                    )
                    return os.path.join(os.getcwd(), "error_notebook_not_found.py")
            notebooks = r.json()

            # match the session whose kernel id equals ours
            cur_notebook = None
            for n in notebooks:
                if n["kernel"]["id"] == current_kernel:
                    cur_notebook = n
                    break

            # notebook not found
            if not cur_notebook:
                continue

            notebook_path = cur_notebook["notebook"].get("path", "")
            notebook_name = cur_notebook["notebook"].get("name", "")
            if notebook_path:
                break

        if (not notebook_name or not notebook_path) and ScriptInfo.is_sagemaker():
            notebook_path, notebook_name = ScriptInfo._get_sagemaker_notebook(current_kernel)

        is_google_colab = False
        log_history = False
        colab_name = None
        # check if this is `google.colab`, then there is no local file
        is_google_colab = ScriptInfo.is_google_colab()

        if is_google_colab:
            # check if we can get the notebook
            colab_name, colab_notebook = cls._get_colab_notebook()
            if colab_name is not None:
                notebook_name = colab_name
                log_history = False

            script_entry_point = (
                str(notebook_name or "notebook").replace(">", "_").replace("<", "_").replace(".ipynb", ".py")
            )
            if not script_entry_point.lower().endswith(".py"):
                script_entry_point += ".py"
            local_ipynb_file = None
        elif notebook_path is not None:
            # always slash, because this is from uri (so never backslash not even on windows)
            entry_point_filename = notebook_path.split("/")[-1]

            # now we should try to find the actual file
            entry_point = (Path.cwd() / entry_point_filename).absolute()
            if not entry_point.is_file():
                entry_point = (Path.cwd() / notebook_path).absolute()

            # fix for VSCode pushing uuid at the end of the notebook name.
            if not entry_point.exists():
                # noinspection PyBroadException
                try:
                    alternative_entry_point = "-".join(entry_point_filename.split("-")[:-5]) + ".ipynb"
                    # now we should try to find the actual file
                    entry_point_alternative = (Path.cwd() / alternative_entry_point).absolute()
                    if not entry_point_alternative.is_file():
                        entry_point_alternative = (Path.cwd() / alternative_entry_point).absolute()
                    # If we found it replace it
                    if entry_point_alternative.exists():
                        entry_point = entry_point_alternative
                except Exception as ex:
                    cls._get_logger().warning("Failed accessing jupyter notebook {}: {}".format(notebook_path, ex))

            # if we failed to get something we can access, print an error
            if not entry_point.exists():
                cls._get_logger().warning(
                    "Jupyter Notebook auto-logging failed, could not access: {}".format(entry_point)
                )
                return "error_notebook_not_found.py"

            # get local ipynb for observer
            local_ipynb_file = entry_point.as_posix()
            # now replace the .ipynb with .py
            # we assume we will have that file available with the Jupyter notebook plugin
            entry_point = entry_point.with_suffix(".py")
            script_entry_point = entry_point.as_posix()
        else:
            # we could not find and access any jupyter server
            cls._get_logger().warning("Failed accessing the jupyter server(s): {}".format(jupyter_servers))
            return None  # 'error_notebook_not_found.py'

        # install the post store hook,
        # notice that if we do not have a local file we serialize/write every time the entire notebook
        cls._jupyter_install_post_store_hook(local_ipynb_file, notebook_name=colab_name, log_history=log_history)

        return script_entry_point
    except Exception:
        return None
@classmethod
def _authenticate_jupyter(cls, server_info: Dict[str, Any]) -> Tuple[Optional[str], Optional[Dict[str, Any]]]:
    """
    Authenticate to the Jupyter server using a password.
    The password is fetched from `CLEARML_JUPYTER_PASSWORD` env var or the
    `sdk.development.jupyter_server_password` configuration entry. The env var
    has a higher priority than the configuration entry.

    :param server_info: A dictionary containing Jupyter server information
    :return: If the authentication succeeded, return a tuple containing the password
        used for authentication and the cookies obtained after authenticating.
        Otherwise, return a tuple of Nones
    """
    cookies = None
    password = None
    # we need to get the password
    from ....config import config
    from ....config.defs import JUPYTER_PASSWORD

    password = JUPYTER_PASSWORD.get(default=config.get("development.jupyter_server_password", ""))
    if not password:
        cls._get_logger().warning(
            "Password protected Jupyter Notebook server was found! "
            "Add `sdk.development.jupyter_server_password=<jupyter_password>` to ~/clearml.conf"
        )
        return None, None

    # fetch the login page first to obtain the XSRF token cookie
    r = requests.get(url=server_info["url"] + "login")
    cookies = {"_xsrf": r.cookies.get("_xsrf", "")}
    r = requests.post(
        server_info["url"] + "login?next",
        cookies=cookies,
        data={"_xsrf": cookies["_xsrf"], "password": password},
    )
    # merge the session cookies returned by the login response
    cookies.update(r.cookies)
    return password, cookies
@classmethod
def is_sagemaker(cls) -> bool:
    """True when running inside a SageMaker notebook environment,
    detected by the presence of the resource-metadata file.
    """
    metadata_file = Path(cls._sagemaker_metadata_path)
    return metadata_file.is_file()
@classmethod
def _get_sagemaker_notebook(cls, current_kernel: str, timeout: int = 30) -> Tuple[Optional[str], Optional[str]]:
    """Resolve (path, name) of the running notebook in a SageMaker Studio
    environment by matching *current_kernel* against the Jupyter sessions
    behind a presigned domain URL; (None, None) on any failure.
    """
    # noinspection PyBroadException
    try:
        # we expect to find boto3 in the sagemaker env
        import boto3

        with open(cls._sagemaker_metadata_path) as f:
            notebook_data = json.load(f)
        client = boto3.client("sagemaker")
        # presigned URL authenticates the session; its scheme+netloc is then
        # reused for the (cookie-authenticated) sessions API call
        response = client.create_presigned_domain_url(
            DomainId=notebook_data["DomainId"],
            UserProfileName=notebook_data["UserProfileName"],
        )
        authorized_url = response["AuthorizedUrl"]
        authorized_url_parsed = urlparse(authorized_url)
        unauthorized_url = authorized_url_parsed.scheme + "://" + authorized_url_parsed.netloc
        with requests.Session() as s:
            # hitting the authorized URL first stores auth cookies on the session
            s.get(authorized_url, timeout=timeout)
            jupyter_sessions = s.get(unauthorized_url + "/jupyter/default/api/sessions", timeout=timeout).json()
        for jupyter_session in jupyter_sessions:
            if jupyter_session.get("kernel", {}).get("id") == current_kernel:
                return jupyter_session.get("path", ""), jupyter_session.get("name", "")
    except Exception as e:
        cls._get_logger().warning("Failed finding Notebook in SageMaker environment. Error is: '{}'".format(e))
    return None, None
@classmethod
def _get_colab_notebook(cls, timeout: int = 30) -> Tuple[Optional[str], Optional[str]]:
    # returns tuple (notebook name, raw string notebook)
    # None, None if fails
    try:
        from google.colab import _message  # noqa

        # ask the Colab frontend for the live notebook content
        notebook = _message.blocking_request("get_ipynb", timeout_sec=timeout)["ipynb"]
        notebook_name = notebook.get("metadata", {}).get("colab", {}).get("name", "colab.ipynb")
        if not notebook_name.endswith(".ipynb"):
            notebook_name += ".ipynb"
        # encoding to json
        return notebook_name, json.dumps(notebook)
    # bare except kept on purpose: any failure (including frontend timeout
    # interrupts) must degrade to "no notebook"
    except:  # noqa
        return None, None
@classmethod
def _get_entry_point(cls, repo_root: str, script_path: str) -> str:
    """Path of the running script relative to the repository working
    directory; an explicit VCS_ENTRY_POINT override wins.
    """
    if VCS_ENTRY_POINT.get():
        return VCS_ENTRY_POINT.get()

    abs_repo_root = Path(repo_root).absolute()
    script = Path(script_path)
    try:
        # Use os.path.relpath as it calculates up dir movements (../)
        real_script = str(os.path.realpath(script.as_posix()))
        working_dir = str(cls._get_working_dir(abs_repo_root, return_abs=True))
        relative_entry = os.path.relpath(real_script, working_dir)
    except ValueError:
        # Working directory not under repository root
        relative_entry = script.relative_to(abs_repo_root)
    return Path(relative_entry).as_posix()
@classmethod
def _cwd(cls) -> Path:
    """Current working directory, undoing hydra's cwd change when hydra
    is loaded.
    """
    if not sys.modules.get("hydra"):
        return Path.cwd().absolute()
    # noinspection PyBroadException
    try:
        # noinspection PyPackageRequirements
        import hydra  # noqa

        return Path(hydra.utils.get_original_cwd()).absolute()
    except Exception:
        return Path.cwd().absolute()
@classmethod
def _get_working_dir(cls, repo_root: str, return_abs: bool = False) -> str:
    """Repository working directory, relative to the repo root (or
    absolute when *return_abs* is set); honors the VCS_WORK_DIR override.
    """
    if VCS_WORK_DIR.get():
        if return_abs and repo_root:
            return (Path(repo_root) / VCS_WORK_DIR.get()).absolute().as_posix()
        return VCS_WORK_DIR.get()

    # the repository working directory might differ from the actual cwd
    root = Path(repo_root).absolute()
    current = cls._cwd()
    try:
        # relative_to raises ValueError when cwd is not under the repo root
        rel = current.relative_to(root).as_posix()
    except ValueError:
        # not under the repository root: default to the repo root itself
        return root.as_posix() if return_abs else "."
    return current.as_posix() if return_abs else rel
@classmethod
def _absolute_path(cls, file_path: str, cwd: str) -> str:
    """Absolutize *file_path* against *cwd*, squashing any
    ``path/../folder`` segments.
    """
    candidate = Path(file_path)
    if candidate.is_absolute():
        return candidate.as_posix()
    joined = (Path(cwd).absolute() / candidate).as_posix()
    return os.path.abspath(joined)
@classmethod
def _get_script_code(cls, script_path: str) -> str:
    """Source text of the script, preferring the VCS_DIFF override;
    empty string when the file cannot be read.
    """
    # allow to override with env variable
    # noinspection PyBroadException
    try:
        override_diff = VCS_DIFF.get()
        if override_diff:
            return override_diff
    except Exception:
        pass

    # noinspection PyBroadException
    try:
        with open(script_path, "r", encoding="utf-8") as fh:
            return fh.read()
    except Exception:
        pass

    return ""
    @classmethod
    def _get_script_info(
        cls,
        filepaths: List[str],
        check_uncommitted: bool = True,
        create_requirements: bool = True,
        log: Optional[logging.Logger] = None,
        uncommitted_from_remote: bool = False,
        detect_jupyter_notebook: bool = True,
        add_missing_installed_packages: bool = False,
        detailed_req_report: Optional[bool] = None,
        force_single_script: bool = False,
    ) -> Tuple["ScriptInfoResult", Optional[ScriptRequirements]]:
        """Detect repository/script execution details for the given script paths.

        Resolves the running script (or Jupyter notebook), queries a VCS plugin
        for repo/branch/commit/diff, rewrites the entry point for distributed
        launchers, and optionally builds pip/conda requirements.

        :param filepaths: candidate script paths; the first existing one is used
        :param check_uncommitted: when True, store the git diff (or full script
            code when no repo/commit is available)
        :param force_single_script: skip VCS plugin detection entirely
        :return: (ScriptInfoResult, ScriptRequirements or None)
        :raises ScriptInfoError: when no script file can be found, or when a
            python console (stdin) is detected
        """
        # Jupyter takes precedence: the notebook file itself becomes the script
        jupyter_filepath = cls._get_jupyter_notebook_filename() if detect_jupyter_notebook else None
        if jupyter_filepath:
            scripts_path = [Path(os.path.normpath(jupyter_filepath)).absolute()]
        else:
            cwd = cls._cwd()
            scripts_path = [Path(cls._absolute_path(os.path.normpath(f), cwd)) for f in filepaths if f]
            scripts_path = [f for f in scripts_path if f.exists()]
            if not scripts_path:
                for f in filepaths or []:
                    if f and f.endswith("/<stdin>"):
                        raise ScriptInfoError("python console detected")
                raise ScriptInfoError("Script file {} could not be found".format(filepaths))
        scripts_dir = [f if f.is_dir() else f.parent for f in scripts_path]

        def _log(msg: str, *args: Any, **kwargs: Any) -> None:
            # local helper: warn via the caller-provided logger, if any
            if not log:
                return
            log.warning("Failed auto-detecting task repository: {}".format(msg.format(*args, **kwargs)))

        script_dir = scripts_dir[0]
        script_path = scripts_path[0]
        if force_single_script:
            plugin = None
        else:
            # first VCS plugin that recognizes the script directory wins
            plugin = next((p for p in cls.plugins if p.exists(script_dir)), None)
        repo_info = DetectionResult()
        messages = []
        auxiliary_git_diff = None
        # print logs
        if jupyter_filepath:
            if log:
                log.info("Storing jupyter notebook directly as code")
        elif not plugin:
            if log:
                log.info("No repository found, storing script code instead")
        # if we found a vcs plugin use it
        if plugin:
            try:
                repo_info = plugin.get_info(
                    str(script_dir),
                    include_diff=check_uncommitted,
                    diff_from_remote=uncommitted_from_remote,
                )
            except SystemExit:
                raise
            except Exception as ex:
                _log("no info for {} ({})", scripts_dir, ex)
            else:
                if repo_info.is_empty():
                    _log("no info for {}", scripts_dir)
        repo_root = repo_info.root or script_dir
        if not plugin:
            working_dir = VCS_WORK_DIR.get() or "."
            entry_point = VCS_ENTRY_POINT.get() or str(script_path.name)
        else:
            # allow to override the VCS working directory (notice relative to the git repo)
            # because we can have a sync folder on remote pycharm sessions
            # not syncing from the Git repo, but from a subfolder, so the pycharm plugin need to pass the override
            working_dir = cls._get_working_dir(repo_root)
            entry_point = cls._get_entry_point(repo_root, script_path)
        # check if we are running with torch distributed, or transformers accelerate
        # make sure we change the entry point to reflect it.
        entry_point = cls._detect_distributed_execution(entry_point, log)
        if check_uncommitted:
            # if we have a jupyter notebook, always store the entire notebook (instead of the git diff)
            if jupyter_filepath:
                diff = cls._get_script_code(script_path.as_posix())
            else:
                # no repo or no commit -> store the whole script, otherwise the diff
                diff = (
                    cls._get_script_code(script_path.as_posix())
                    if not plugin or not repo_info.commit
                    else repo_info.diff
                )
            # explicit external override always wins
            if VCS_DIFF.exists():
                diff = VCS_DIFF.get() or ""
            # make sure diff is not too big:
            if len(diff) > cls.max_diff_size_bytes:
                messages.append(
                    "======> WARNING! Git diff too large to store "
                    "({}kb), skipping uncommitted changes <======".format(len(diff) // 1024)
                )
                # keep the oversized diff aside so it can still be uploaded as an artifact
                auxiliary_git_diff = diff
                diff = (
                    "# WARNING! git diff too large to store, clear this section to execute without it.\n"
                    "# full git diff available in Artifacts/auxiliary_git_diff\n"
                    "# Clear the section before enqueueing Task!\n"
                )
        else:
            diff = ""
        # if this is not jupyter, get the requirements.txt
        requirements = ""
        conda_requirements = ""
        # create requirements if backend supports requirements
        # if jupyter is present, requirements will be created in the background, when saving a snapshot
        if not jupyter_filepath and Session.check_min_api_version("2.2"):
            script_requirements = ScriptRequirements(
                Path(repo_root).as_posix() if repo_info.url else script_path.as_posix()
            )
            if create_requirements:
                requirements, conda_requirements = script_requirements.get_requirements(
                    entry_point_filename=script_path.as_posix()
                    if not repo_info.url and script_path.is_file()
                    else None,
                    add_missing_installed_packages=add_missing_installed_packages,
                    detailed_req_report=detailed_req_report,
                )
        else:
            script_requirements = None
        ide = ScriptInfo.get_ide(jupyter_status=isinstance(jupyter_filepath, str))
        script_info = dict(
            repository=remove_user_pass_from_url(repo_info.url),
            branch=repo_info.branch,
            version_num=repo_info.commit,
            entry_point=entry_point,
            working_dir=working_dir,
            diff=diff,
            ide=ide,
            requirements={"pip": requirements, "conda": conda_requirements} if requirements else None,
            binary="python{}.{}".format(sys.version_info.major, sys.version_info.minor),
            repo_root=repo_root,
            jupyter_filepath=jupyter_filepath,
        )
        # if repo_info.modified:
        #     messages.append(
        #         "======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======".format(
        #             script_info.get("repository", "")
        #         )
        #     )
        # an all-empty dict means nothing was detected at all
        if not any(script_info.values()):
            script_info = None
        return (
            ScriptInfoResult(
                script=script_info,
                warning_messages=messages,
                auxiliary_git_diff=auxiliary_git_diff,
            ),
            script_requirements,
        )
    @classmethod
    def _detect_distributed_execution(cls, entry_point: str, log: logging.Logger) -> str:
        """Rewrite *entry_point* as a ``-m <launcher> ...`` call when launched by
        torch.distributed or HuggingFace accelerate.

        Detection is env-var based (``TORCHELASTIC_RUN_ID`` /
        ``ACCELERATE_DYNAMO_MODE``); the launcher command line is recovered from
        the parent process via psutil. On any failure the original entry point
        is returned unchanged.
        """
        # check if we are running with torch distributed, or transformers accelerate
        # make sure we change the entry point to reflect it.
        is_torch_distributed = os.environ.get("TORCHELASTIC_RUN_ID") is not None
        is_transformers_distributed = os.environ.get("ACCELERATE_DYNAMO_MODE") is not None
        if not is_torch_distributed and not is_transformers_distributed:
            return entry_point
        # this torch distributed
        # noinspection PyBroadException
        try:
            from psutil import Process  # noqa

            # the launcher's argv lives in the parent process, not in ours
            cmdline = Process().parent().cmdline()
            # first find the torch model call "torch.distributed.run" or "torch.distributed.launch"
            if is_torch_distributed:
                cmdstart_i = next(i for i, c in enumerate(cmdline) if c.lower().startswith("torch.distributed."))
            elif is_transformers_distributed:
                cmdstart_i = next(i for i, c in enumerate(cmdline) if c.lower().startswith("accelerate.commands."))
            else:
                raise Exception()  # we should not get here
            cmdline = cmdline[cmdstart_i:]
            # reverse look into the paths
            cmdend_i = next(i for i, c in enumerate(cmdline) if Path(c).stem == Path(entry_point).stem)
            filearg = cmdline[cmdend_i]
            # notice --args (script args) are passed on the Args section, we skip detecting them here
            # we are also already removing the filearg from the cmd (it is the last before script args)
            new_cmd = cmdline[:cmdend_i]
            # we assume our entrypoint is the last parameter of the execution cmd line
            if Path(filearg).stem == Path(entry_point).stem:
                entry_point = "-m {} {}".format(" ".join(new_cmd), entry_point)
            if log:
                log.info(
                    "{} execution detected: adjusting entrypoint to "
                    "reflect distributed execution arguments".format(
                        "Torch Distributed" if is_torch_distributed else "Transformers Accelerate"
                    )
                )
        except Exception:
            if log:
                log.warning(
                    "{} execution detected: Failed Detecting launch arguments, skipping".format(
                        "Torch Distributed" if is_torch_distributed else "Transformers Accelerate"
                    )
                )
        return entry_point
    @staticmethod
    def __legacy_jupyter_notebook_server_json_parsing() -> Optional[Dict[str, Any]]:
        """Manually locate and parse a Jupyter ``*server-*.json`` connection file.

        Fallback for notebook versions where the standard server lookup crashes.
        Returns the first successfully parsed, non-empty server-info dict
        found next to the kernel connection file, or None.
        """
        # noinspection PyBroadException
        try:
            # on some jupyter notebook versions this function can crash on parsing the json file,
            # we will parse it manually here
            # noinspection PyPackageRequirements
            import ipykernel
            from glob import glob
            import json

            for f in glob(os.path.join(os.path.dirname(ipykernel.get_connection_file()), "??server-*.json")):
                # noinspection PyBroadException
                try:
                    with open(f, "r") as json_data:
                        server_info = json.load(json_data)
                except Exception:
                    # unreadable/corrupt candidate file -> try the next one
                    continue
                if server_info:
                    return server_info
        except Exception:
            pass
        return None
    @classmethod
    def get(
        cls,
        filepaths: Optional[List[str]] = None,
        check_uncommitted: bool = True,
        create_requirements: bool = True,
        log: Optional[logging.Logger] = None,
        uncommitted_from_remote: bool = False,
        detect_jupyter_notebook: bool = True,
        add_missing_installed_packages: bool = False,
        detailed_req_report: Optional[bool] = None,
        force_single_script: bool = False,
    ) -> Tuple["ScriptInfoResult", Optional["ScriptRequirements"]]:
        """Public, never-raising wrapper around :meth:`_get_script_info`.

        Defaults *filepaths* to ``sys.argv[0]`` and converts any detection
        failure into an empty ``(ScriptInfoResult(), None)`` result, logging a
        warning when a logger is provided.
        """
        try:
            if not filepaths:
                filepaths = [
                    sys.argv[0],
                ]
            return cls._get_script_info(
                filepaths=filepaths,
                check_uncommitted=check_uncommitted,
                create_requirements=create_requirements,
                log=log,
                uncommitted_from_remote=uncommitted_from_remote,
                detect_jupyter_notebook=detect_jupyter_notebook,
                add_missing_installed_packages=add_missing_installed_packages,
                detailed_req_report=detailed_req_report,
                force_single_script=force_single_script,
            )
        except SystemExit:
            # deliberate interpreter exit: fall through to the empty result
            pass
        except BaseException as ex:
            if log:
                log.warning("Failed auto-detecting task repository: {}".format(ex))
        return ScriptInfoResult(), None
@classmethod
def is_running_from_module(cls) -> bool:
# noinspection PyBroadException
try:
return "__main__" in sys.modules and vars(sys.modules["__main__"])["__package__"]
except Exception:
return False
    @classmethod
    def detect_running_module(cls, script_dict: Dict[str, Any]) -> Dict[str, Any]:
        """If launched via ``python -m``, rewrite ``script_dict['entry_point']``
        to the ``-m <module> <argv...>`` form.

        Script arguments pointing at files inside the repo are re-expressed
        relative to the repo's working dir. Best-effort: any failure leaves the
        dict untouched. Returns the (possibly updated) *script_dict*.
        """
        if not script_dict:
            return script_dict
        # noinspection PyBroadException
        try:
            # If this is jupyter, do not try to detect the running module, we know what we have.
            if script_dict.get("jupyter_filepath"):
                return script_dict
            if cls.is_running_from_module():
                argvs = ""
                git_root = os.path.abspath(str(script_dict["repo_root"])) if script_dict["repo_root"] else None
                for a in sys.argv[1:]:
                    if git_root and os.path.exists(a):
                        # check if common to project:
                        a_abs = os.path.abspath(a)
                        if os.path.commonpath([a_abs, git_root]) == git_root:
                            # adjust path relative to working dir inside git repo
                            a = " " + os.path.relpath(
                                a_abs,
                                os.path.join(git_root, str(script_dict["working_dir"])),
                            )
                    argvs += " {}".format(a)
                # noinspection PyBroadException
                try:
                    module_name = vars(sys.modules["__main__"])["__spec__"].name
                except Exception:
                    # older launch paths may lack __spec__; fall back to the package name
                    module_name = vars(sys.modules["__main__"])["__package__"]
                # update the script entry point to match the real argv and module call
                # NOTE(review): argvs already starts with a space, so this produces a
                # double space after the module name — confirm this is intended
                script_dict["entry_point"] = "-m {}{}".format(module_name, (" " + argvs) if argvs else "")
        except Exception:
            pass
        return script_dict
@staticmethod
def is_google_colab() -> bool:
"""Know if the script is running from Google Colab"""
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython() and "google.colab" in get_ipython().extension_manager.loaded:
return True
except Exception:
pass
return False
@staticmethod
def is_vscode() -> bool:
"""Know if the script is running from VSCode"""
if os.environ.get("TERM_PROGRAM") == "vscode":
return True
for key in os.environ.keys():
if key.startswith("VSCODE_"):
return True
return False
@staticmethod
def is_pycharm() -> bool:
"""Know if the script is running from PyCharm"""
# youtrack.jetbrains.com ISSUE #PY-4853 added this variables
if os.environ.get("PYCHARM_HOSTED"):
return True
if os.environ.get("TERMINAL_EMULATOR") == "JetBrains-JediTerm":
return True
return False
    @staticmethod
    def is_jupyter() -> bool:
        """Know if the script is running from Jupyter"""
        # a str filename is only returned when a notebook server was detected
        if isinstance(ScriptInfo._get_jupyter_notebook_filename(), str):
            return True
        return False
@staticmethod
def get_ide(jupyter_status: bool = False) -> str:
"""
Get the details of ide script is running from
:param jupyter_status: Jupyter status (default False)
:type jupyter_status: bool
:return: Name of the IDE
:rtype: str
"""
if ScriptInfo.is_pycharm():
ide_str = "PyCharm{}".format("_Jupyter" if jupyter_status else "")
elif ScriptInfo.is_vscode():
ide_str = "VSCode{}".format("_Jupyter" if jupyter_status else "")
elif ScriptInfo.is_google_colab():
ide_str = "Google_Colab"
else:
ide_str = "Jupyter" if jupyter_status else ""
return ide_str
    @classmethod
    def close(cls) -> None:
        # shut down the background Jupyter observer (safe if it was never started —
        # presumably a no-op then; confirm against _JupyterObserver.close)
        _JupyterObserver.close()
@attr.s
|
ScriptInfo
|
python
|
apache__airflow
|
providers/alibaba/tests/unit/alibaba/cloud/sensors/test_analyticdb_spark.py
|
{
"start": 1278,
"end": 2811
}
|
class ____:
    """Unit tests for the AnalyticDB Spark sensor: hook wiring and poke() results."""

    def setup_method(self):
        # fresh sensor per test, wired to the module's mock connection/region constants
        self.sensor = AnalyticDBSparkSensor(
            app_id=MOCK_ADB_SPARK_ID,
            adb_spark_conn_id=MOCK_ADB_SPARK_CONN_ID,
            region=MOCK_REGION,
            task_id=MOCK_SENSOR_TASK_ID,
        )

    @mock.patch(ADB_SPARK_SENSOR_STRING.format("AnalyticDBSparkHook"))
    def test_get_hook(self, mock_service):
        """Test get_hook function works as expected."""
        # accessing the cached property is what triggers hook construction
        self.sensor.hook
        mock_service.assert_called_once_with(adb_spark_conn_id=MOCK_ADB_SPARK_CONN_ID, region=MOCK_REGION)

    @mock.patch(ADB_SPARK_SENSOR_STRING.format("AnalyticDBSparkSensor.hook"))
    def test_poke_terminal_state(self, mock_service):
        """Test poke_terminal_state works as expected with COMPLETED application."""
        # Given
        mock_service.get_spark_state.return_value = "COMPLETED"
        # When
        res = self.sensor.poke(None)
        # Then
        assert res is True
        mock_service.get_spark_state.assert_called_once_with(MOCK_ADB_SPARK_ID)

    @mock.patch(ADB_SPARK_SENSOR_STRING.format("AnalyticDBSparkSensor.hook"))
    def test_poke_non_terminal_state(self, mock_service):
        """Test poke_terminal_state works as expected with RUNNING application."""
        # Given
        mock_service.get_spark_state.return_value = "RUNNING"
        # When
        res = self.sensor.poke(None)
        # Then
        assert res is False
        mock_service.get_spark_state.assert_called_once_with(MOCK_ADB_SPARK_ID)
|
TestAnalyticDBSparkSensor
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/optimizer.py
|
{
"start": 7651,
"end": 8882
}
|
class ____(_OptimizableVariable):
    """Processor for ordinary Tensors.

    Even though a Tensor can't really be updated, sometimes it is useful to
    compute the gradients with respect to a Tensor using the optimizer. Updating
    the Tensor is, of course, unsupported.
    """

    def __init__(self, v):
        # the wrapped Tensor
        self._v = v

    def target(self):
        # the raw Tensor is itself the differentiation target
        return self._v

    def update_op(self, optimizer, g):
        # Tensors are immutable; applying a gradient update is meaningless here
        raise NotImplementedError("Trying to update a Tensor ", self._v)
def _get_processor(v):
    """The processor of v: wrap *v* in the _OptimizableVariable subclass
    matching its type and execution mode."""
    # Eager mode: plain Tensors get a TensorProcessor, anything else is treated
    # as a (dense) resource variable.
    if context.executing_eagerly():
        if isinstance(v, tensor.Tensor):
            return _TensorProcessor(v)
        else:
            return _DenseResourceVariableProcessor(v)
    if resource_variable_ops.is_resource_variable(v) and not v._in_graph_mode:  # pylint: disable=protected-access
        # True if and only if `v` was initialized eagerly.
        return _DenseResourceVariableProcessor(v)
    if v.op.type == "VarHandleOp":
        return _DenseResourceVariableProcessor(v)
    if isinstance(v, variables.Variable):
        return _RefVariableProcessor(v)
    if isinstance(v, tensor.Tensor):
        return _TensorProcessor(v)
    raise NotImplementedError("Trying to optimize unsupported type ", v)
@tf_export(v1=["train.Optimizer"])
|
_TensorProcessor
|
python
|
walkccc__LeetCode
|
solutions/1017. Convert to Base -2/1017.py
|
{
"start": 0,
"end": 185
}
|
class ____:
    def baseNeg2(self, n: int) -> str:
        """Return the base -2 representation of n (e.g. 2 -> '110', 0 -> '0')."""
        if n == 0:
            return '0'
        digits = []
        while n:
            # low bit is the next base -2 digit; n % 2 == n & 1 for any int
            digits.append(str(n & 1))
            # divide by -2, rounding toward negative infinity via the shift
            n = -(n >> 1)
        return ''.join(reversed(digits))
|
Solution
|
python
|
ray-project__ray
|
python/ray/llm/tests/serve/cpu/deployments/test_prefix_aware_request_router.py
|
{
"start": 2440,
"end": 3069
}
|
class ____:
    """Minimal request wrapper that just carries a list of chat messages."""

    def __init__(self, messages):
        # the chat messages payload — structure not enforced here
        self.messages = messages
def fake_pending_request(prompt=None, messages=None) -> PendingRequest:
    """Build a minimal PendingRequest for tests.

    Exactly one of *prompt* / *messages* selects the payload type
    (PromptRequest vs ChatRequest); with neither, args is empty.
    Metadata gets fresh request ids and an empty multiplexed model id.
    """
    if prompt is not None:
        args = [PromptRequest(prompt)]
    elif messages is not None:
        args = [ChatRequest(messages)]
    else:
        args = []
    return PendingRequest(
        args=args,
        kwargs={},
        metadata=RequestMetadata(
            request_id=generate_request_id(),
            internal_request_id=generate_request_id(),
            multiplexed_model_id="",
        ),
        created_at=time.time(),
    )
# === Tests ===
|
ChatRequest
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 945740,
"end": 946134
}
|
class ____(sgqlc.types.Type):
    """An edge in a connection."""

    # generated sgqlc GraphQL type: schema binding plus the exposed field names
    __schema__ = github_schema
    __field_names__ = ("cursor", "node")
    cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
    """A cursor for use in pagination."""
    node = sgqlc.types.Field("RepositoryTopic", graphql_name="node")
    """The item at the end of the edge."""
|
RepositoryTopicEdge
|
python
|
getsentry__sentry
|
tests/sentry/auth_v2/endpoints/test_auth_merge_user_accounts.py
|
{
"start": 1949,
"end": 5655
}
|
class ____(APITestCase):
    """Endpoint tests for merging/deleting user accounts sharing a primary email.

    Fixture: three users with the same email (user1 is the logged-in session
    user, holding a valid verification code) plus one unrelated user.
    """

    endpoint = "sentry-api-0-auth-merge-accounts"
    method = "post"

    def setUp(self) -> None:
        self.user1 = self.create_user(username="powerful mifu", email="mifu@email.com")
        self.user2 = self.create_user(username="transcendent mifu", email="mifu@email.com")
        self.user3 = self.create_user(username="garden variety mifu", email="mifu@email.com")
        org1 = self.create_organization(name="hojicha")
        org2 = self.create_organization(name="matcha")
        org3 = self.create_organization(name="oolong")
        self.create_member(user=self.user1, organization=org1)
        self.create_member(user=self.user1, organization=org2)
        self.create_member(user=self.user2, organization=org3)
        # different email -> must be rejected by the endpoint
        self.unrelated_user = self.create_user(email="foo@bar.com")
        self.login_as(self.user1)
        self.verification_code = UserMergeVerificationCode.objects.create(user_id=self.user1.id)

    def test_simple(self) -> None:
        # happy path: merged and deleted users are both removed
        data = {
            "ids_to_merge": [self.user2.id],
            "ids_to_delete": [self.user3.id],
            "verification_code": self.verification_code.token,
        }
        self.get_success_response(**data)
        assert not User.objects.filter(id=self.user2.id).exists()
        assert not User.objects.filter(id=self.user3.id).exists()

    def test_incorrect_code(self) -> None:
        data = {
            "ids_to_merge": [self.user2.id],
            "ids_to_delete": [self.user3.id],
            "verification_code": "hello",
        }
        response = self.get_error_response(**data)
        assert response.status_code == 403
        assert response.data == {"error": "Incorrect verification code"}

    def test_merge_unrelated_account(self) -> None:
        data = {
            "ids_to_merge": [self.unrelated_user.id],
            "ids_to_delete": [self.user2.id, self.user3.id],
            "verification_code": self.verification_code.token,
        }
        response = self.get_error_response(**data)
        assert response.status_code == 403
        assert response.data == {
            "error": "One or more of the accounts in your request does not share your primary email address"
        }

    def test_related_and_unrelated_accounts(self) -> None:
        # one bad account in the list poisons the whole request
        data = {
            "ids_to_merge": [self.user2.id, self.unrelated_user.id],
            "ids_to_delete": [self.user3.id],
            "verification_code": self.verification_code.token,
        }
        response = self.get_error_response(**data)
        assert response.status_code == 403
        assert response.data == {
            "error": "One or more of the accounts in your request does not share your primary email address"
        }

    def test_pass_current_user_id(self) -> None:
        data = {
            "ids_to_merge": [self.user1.id],
            "ids_to_delete": [self.user2.id, self.user3.id],
            "verification_code": self.verification_code.token,
        }
        response = self.get_error_response(**data)
        assert response.status_code == 400
        assert response.data == {
            "error": "You may not merge the user attached to your current session"
        }

    def test_not_disjoint(self) -> None:
        # user2 appears in both lists -> rejected
        data = {
            "ids_to_merge": [self.user2.id],
            "ids_to_delete": [self.user2.id, self.user3.id],
            "verification_code": self.verification_code.token,
        }
        response = self.get_error_response(**data)
        assert response.status_code == 400
        assert response.data == {
            "error": "The set of IDs to merge and the set of IDs to delete must be disjoint"
        }
|
MergeUserAccountsWithSharedEmailTest
|
python
|
redis__redis-py
|
tests/test_search.py
|
{
"start": 1982,
"end": 4577
}
|
class ____:
    """Shared helpers and fixtures for RediSearch tests: index readiness
    polling, index creation, and bulk document loading."""

    @staticmethod
    def waitForIndex(env, idx, timeout=None):
        # poll FT.INFO until the "indexing" counter reaches 0, the reply becomes
        # unparseable, or the optional timeout expires
        delay = 0.1
        while True:
            try:
                res = env.execute_command("FT.INFO", idx)
                if int(res[res.index("indexing") + 1]) == 0:
                    break
            except ValueError:
                break
            except AttributeError:
                # dict-shaped reply (RESP3) — read the counter by key instead
                try:
                    if int(res["indexing"]) == 0:
                        break
                except ValueError:
                    break
            except ResponseError:
                # index doesn't exist yet
                # continue to sleep and try again
                pass
            time.sleep(delay)
            if timeout is not None:
                timeout -= delay
                if timeout <= 0:
                    break

    @staticmethod
    def getClient(client):
        """
        Gets a client client attached to an index name which is ready to be
        created
        """
        return client

    @staticmethod
    def createIndex(client, num_docs=100, definition=None):
        # create the index; if it already exists, drop it and retry once
        try:
            client.create_index(
                (
                    TextField("play", weight=5.0),
                    TextField("txt"),
                    NumericField("chapter"),
                ),
                definition=definition,
            )
        except redis.ResponseError:
            client.dropindex(delete_documents=True)
            return SearchTestsBase.createIndex(
                client, num_docs=num_docs, definition=definition
            )
        # aggregate the bz2-compressed plays CSV into one doc per play chapter
        chapters = {}
        bzfp = TextIOWrapper(bz2.BZ2File(WILL_PLAY_TEXT), encoding="utf8")
        r = csv.reader(bzfp, delimiter=";")
        for n, line in enumerate(r):
            play, chapter, _, text = line[1], line[2], line[4], line[5]
            key = f"{play}:{chapter}".lower()
            d = chapters.setdefault(key, {})
            d["play"] = play
            d["txt"] = d.get("txt", "") + " " + text
            d["chapter"] = int(chapter or 0)
            if len(chapters) == num_docs:
                break
        # load the documents through the batch indexer in chunks of 50
        indexer = client.batch_indexer(chunk_size=50)
        assert isinstance(indexer, Search.BatchIndexer)
        assert 50 == indexer.chunk_size
        for key, doc in chapters.items():
            indexer.client.client.hset(key, mapping=doc)
        indexer.commit()

    @pytest.fixture
    def client(self, request, stack_url):
        # fresh decoded-response client against an empty database per test
        r = _get_client(redis.Redis, request, decode_responses=True, from_url=stack_url)
        r.flushdb()
        return r
|
SearchTestsBase
|
python
|
ApeWorX__ape
|
src/ape/api/query.py
|
{
"start": 5641,
"end": 7242
}
|
class ____(BaseModel, BaseInterface):
    """
    Contract-creation metadata, such as the transaction
    and deployer. Useful for contract-verification,
    ``block_identifier=`` usage, and other use-cases.

    To get contract-creation metadata, you need a query engine
    that can provide it, such as the ``ape-etherscan`` plugin
    or a node connected to the OTS namespace.
    """

    txn_hash: str
    """
    The transaction hash of the deploy transaction.
    """

    block: int
    """
    The block number of the deploy transaction.
    """

    deployer: AddressType
    """
    The contract deployer address.
    """

    factory: Optional[AddressType] = None
    """
    The address of the factory contract, if there is one
    and it is known (depends on the query provider!).
    """

    @property
    def receipt(self) -> ReceiptAPI:
        """
        The deploy transaction :class:`~ape.api.transactions.ReceiptAPI`.
        """
        # fetched lazily from the chain manager on each access (not cached here)
        return self.chain_manager.get_receipt(self.txn_hash)

    @classmethod
    def from_receipt(cls, receipt: ReceiptAPI) -> "ContractCreation":
        """
        Create a metadata class.

        Args:
            receipt (:class:`~ape.api.transactions.ReceiptAPI`): The receipt
              of the deploy transaction.

        Returns:
            :class:`~ape.api.query.ContractCreation`
        """
        return cls(
            txn_hash=receipt.txn_hash,
            block=receipt.block_number,
            deployer=receipt.sender,
            # factory is not detected since this is meant for eoa deployments
        )
|
ContractCreation
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_reductions.py
|
{
"start": 35685,
"end": 36695
}
|
class ____(ArrayReduction):
    """Tree reduction computing an order-``order`` statistical moment per column
    (chunk/combine/aggregate delegated to dask's moment_* helpers)."""

    _parameters = ["frame", "order"]

    @functools.cached_property
    def _meta(self):
        # Use var as proxy for result dimension
        return make_meta(meta_nonempty(self.frame._meta).var())

    @property
    def chunk_kwargs(self):
        return dict(order=self.order)

    @property
    def combine_kwargs(self):
        # combine uses the same kwargs as the per-chunk step
        return self.chunk_kwargs

    @property
    def aggregate_kwargs(self):
        return dict(
            order=self.order,
            meta=self._meta,
            index=self.frame.columns,
        )

    @classmethod
    def reduction_chunk(cls, x, order):
        # upcast to float64 before accumulating partial moments
        values = x.values.astype("f8")
        return moment_chunk(values, order=order, axis=(0,), keepdims=True)

    @classmethod
    def reduction_combine(cls, parts, order):
        return moment_combine(parts, order=order, axis=(0,))

    @classmethod
    def reduction_aggregate(cls, vals, order):
        result = moment_agg(vals, order=order, axis=(0,))
        return result
|
Moment
|
python
|
django__django
|
tests/admin_docs/test_views.py
|
{
"start": 8977,
"end": 9751
}
|
class ____(AdminDocViewTests):
    """Re-run the admindocs view tests with a multi-engine TEMPLATES setup,
    overriding the two index views that would otherwise fail."""

    def test_templatefilter_index(self):
        # Overridden because non-trivial TEMPLATES settings aren't supported
        # but the page shouldn't crash (#24125).
        response = self.client.get(reverse("django-admindocs-filters"))
        self.assertContains(response, "<title>Template filters</title>", html=True)

    def test_templatetag_index(self):
        # Overridden because non-trivial TEMPLATES settings aren't supported
        # but the page shouldn't crash (#24125).
        response = self.client.get(reverse("django-admindocs-tags"))
        self.assertContains(response, "<title>Template tags</title>", html=True)
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
|
AdminDocViewWithMultipleEngines
|
python
|
realpython__materials
|
python-string-interpolation/article.py
|
{
"start": 0,
"end": 784
}
|
class ____:
    """A published article: title, author, and publication date."""

    def __init__(self, title, author, pub_date):
        self.title = title
        self.author = author
        self.pub_date = pub_date

    def __str__(self):
        # one labeled line per attribute, each newline-terminated
        lines = (
            f"Article: {self.title}",
            f"Author: {self.author}",
            f"Published: {self.pub_date}",
        )
        return "".join(line + "\n" for line in lines)

    def __repr__(self):
        cls_name = type(self).__name__
        return (
            f"{cls_name}(title={self.title!r}, "
            f"author={self.author!r}, pub_date={self.pub_date!r})"
        )
if __name__ == "__main__":
article = Article(
title="String Interpolation in Python: Exploring Available Tools",
author="Real Python",
pub_date="2024-05-15",
)
print(f"{article!s}")
print(f"{article!r}")
|
Article
|
python
|
scipy__scipy
|
scipy/optimize/_trustregion_dogleg.py
|
{
"start": 1222,
"end": 4389
}
|
class ____(BaseQuadraticSubproblem):
    """Quadratic subproblem solved by the dogleg method"""

    def cauchy_point(self):
        """
        The Cauchy point is minimal along the direction of steepest descent.
        """
        # lazily computed and cached: p_c = -(g.g / g.Bg) * g
        if self._cauchy_point is None:
            g = self.jac
            Bg = self.hessp(g)
            self._cauchy_point = -(np.dot(g, g) / np.dot(g, Bg)) * g
        return self._cauchy_point

    def newton_point(self):
        """
        The Newton point is a global minimum of the approximate function.
        """
        # lazily computed and cached: solve B p = -g via Cholesky
        # (requires the Hessian to be positive definite)
        if self._newton_point is None:
            g = self.jac
            B = self.hess
            cho_info = scipy.linalg.cho_factor(B)
            self._newton_point = -scipy.linalg.cho_solve(cho_info, g)
        return self._newton_point

    def solve(self, trust_radius):
        """
        Minimize a function using the dog-leg trust-region algorithm.

        This algorithm requires function values and first and second derivatives.
        It also performs a costly Hessian decomposition for most iterations,
        and the Hessian is required to be positive definite.

        Parameters
        ----------
        trust_radius : float
            We are allowed to wander only this far away from the origin.

        Returns
        -------
        p : ndarray
            The proposed step.
        hits_boundary : bool
            True if the proposed step is on the boundary of the trust region.

        Notes
        -----
        The Hessian is required to be positive definite.

        References
        ----------
        .. [1] Jorge Nocedal and Stephen Wright,
               Numerical Optimization, second edition,
               Springer-Verlag, 2006, page 73.
        """
        # Compute the Newton point.
        # This is the optimum for the quadratic model function.
        # If it is inside the trust radius then return this point.
        p_best = self.newton_point()
        if scipy.linalg.norm(p_best) < trust_radius:
            hits_boundary = False
            return p_best, hits_boundary
        # Compute the Cauchy point.
        # This is the predicted optimum along the direction of steepest descent.
        p_u = self.cauchy_point()
        # If the Cauchy point is outside the trust region,
        # then return the point where the path intersects the boundary.
        p_u_norm = scipy.linalg.norm(p_u)
        if p_u_norm >= trust_radius:
            p_boundary = p_u * (trust_radius / p_u_norm)
            hits_boundary = True
            return p_boundary, hits_boundary
        # Compute the intersection of the trust region boundary
        # and the line segment connecting the Cauchy and Newton points.
        # This requires solving a quadratic equation.
        # ||p_u + t*(p_best - p_u)||**2 == trust_radius**2
        # Solve this for positive time t using the quadratic formula.
        _, tb = self.get_boundaries_intersections(p_u, p_best - p_u,
                                                  trust_radius)
        p_boundary = p_u + tb * (p_best - p_u)
        hits_boundary = True
        return p_boundary, hits_boundary
|
DoglegSubproblem
|
python
|
pytorch__pytorch
|
test/nn/test_load_state_dict.py
|
{
"start": 22468,
"end": 24632
}
|
class ____(TestCase):
    """Tests load_state_dict's tensor-swap path with tensor subclasses on both
    the module side and the state-dict side."""

    @skipIfCrossRef
    @skipIfTorchDynamo("Can't swap with dynamo as dynamo installs weakrefs")
    @swap([True])
    @parametrize("assign", [True, False])
    def test_swap_subclass(self, assign):
        def _create_model(subclass=None):
            # Linear + a buffer; optionally wrap both params/buffers in subclass
            m = torch.nn.Linear(2, 3, bias=False)
            m.buf = torch.nn.Buffer(torch.randn(2, 3))
            if subclass is not None:
                m.weight = torch.nn.Parameter(subclass(m.weight))
                m.buf = subclass(m.buf)
            return m

        def _test(m_subclass=None, sd_subclass=None):
            m = _create_model(m_subclass)
            sd = _create_model(sd_subclass).state_dict()
            m.load_state_dict(sd, assign=assign)
            self.assertEqual(m.weight, sd["weight"])
            self.assertEqual(m.buf, sd["buf"])
            self.assertTrue(isinstance(m.weight, torch.nn.Parameter))
            self.assertTrue(not isinstance(m.buf, torch.nn.Parameter))
            # with assign=True the state-dict's subclass wins, otherwise the
            # module's own subclass is preserved
            weight_type, buf_type = (torch.nn.Parameter, torch.Tensor)
            if assign:
                if sd_subclass is not None:
                    weight_type, buf_type = (sd_subclass, sd_subclass)
            else:
                if m_subclass is not None:
                    weight_type, buf_type = (m_subclass, m_subclass)
            self.assertTrue(type(m.weight) is weight_type)
            self.assertTrue(type(m.buf) is buf_type)

        # (MyLoadTensor, MyWrapperLoadTensor) tests the behavior of (superclass, subclass)
        subclasses = [None, MyLoadTensor, MyLoadTensor2, MyWrapperLoadTensor]
        for m_s, sd_s in product(subclasses, subclasses):
            _test(m_s, sd_s)

        # MyBrokenLoadTensor should error since its module_load doesn't call .detach()
        with self.assertRaisesRegex(
            RuntimeError, re.escape("Error(s) in loading state_dict for Linear:")
        ):
            _test(None, MyBrokenLoadTensor)
instantiate_parametrized_tests(TestLoadStateDict)
instantiate_parametrized_tests(TestLoadStateDictSwap)
if __name__ == "__main__":
TestCase._default_dtype_check_enabled = True
run_tests()
|
TestLoadStateDictSwap
|
python
|
fluentpython__example-code-2e
|
15-more-types/cafeteria/cafeteria.py
|
{
"start": 476,
"end": 541
}
|
class ____(Garbage):
    """Biodegradable garbage.

    Marker subclass: adds no behavior beyond the Garbage base.
    """
|
Biodegradable
|
python
|
huggingface__transformers
|
src/transformers/models/glm/modeling_glm.py
|
{
"start": 9769,
"end": 12932
}
|
class ____(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GlmConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # head_dim may be set explicitly in the config; otherwise derived
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        # grouped-query attention: how many query heads share one KV head
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        # NOTE(review): stored but forward() calls apply_rotary_pos_emb directly —
        # confirm whether rotary_fn is meant to be used/overridable
        self.rotary_fn = apply_rotary_pos_emb

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # Returns (attn_output, attn_weights); attn_weights may be None for
        # backends that don't materialize them — annotation is optimistic.
        input_shape = hidden_states.shape[:-1]
        # split hidden dim into (num_heads, head_dim) and move heads before seq
        hidden_shape = (*input_shape, -1, self.head_dim)
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        # dispatch to the configured attention backend (eager / sdpa / flash ...)
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        # merge heads back into the hidden dimension and project out
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
@use_kernel_forward_from_hub("RMSNorm")
|
GlmAttention
|
python
|
apache__airflow
|
providers/apache/beam/tests/unit/apache/beam/operators/test_beam.py
|
{
"start": 40872,
"end": 46986
}
|
class ____:
@pytest.fixture(autouse=True)
def setup_test_cases(self, default_options, pipeline_options, py_options):
self.default_op_kwargs = {
"task_id": TASK_ID,
"py_file": PY_FILE,
"py_options": copy.deepcopy(py_options),
"default_pipeline_options": copy.deepcopy(default_options),
"pipeline_options": copy.deepcopy(pipeline_options),
"deferrable": True,
}
def test_init(self, default_options, pipeline_options, py_options):
"""Test BeamRunPythonPipelineOperator instance is properly initialized."""
op = BeamRunPythonPipelineOperator(**self.default_op_kwargs, dataflow_config={})
assert op.task_id == TASK_ID
assert op.runner == DEFAULT_RUNNER
assert op.py_file == PY_FILE
assert op.py_interpreter == PY_INTERPRETER
assert op.py_options == py_options
assert op.default_pipeline_options == default_options
assert op.pipeline_options == pipeline_options
assert op.dataflow_config == {}
assert op.deferrable is True
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_async_execute_should_execute_successfully(self, gcs_hook, beam_hook_mock):
"""
Asserts that a task is deferred and the BeamPythonPipelineTrigger will be fired
when the BeamRunPythonPipelineOperator is executed in deferrable mode when deferrable=True.
"""
op = BeamRunPythonPipelineOperator(**self.default_op_kwargs)
with pytest.raises(TaskDeferred) as exc:
op.execute(context=mock.MagicMock())
assert isinstance(exc.value.trigger, BeamPythonPipelineTrigger), (
"Trigger is not a BeamPythonPipelineTrigger"
)
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
def test_async_execute_direct_runner(self, beam_hook_mock):
"""
Test BeamHook is created and the right args are passed to
start_python_workflow when executing direct runner.
"""
op = BeamRunPythonPipelineOperator(**self.default_op_kwargs)
with pytest.raises(TaskDeferred):
op.execute(context=mock.MagicMock())
beam_hook_mock.assert_called_once_with(runner=DEFAULT_RUNNER)
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_exec_dataflow_runner(self, gcs_hook_mock, dataflow_hook_mock, beam_hook_mock):
"""
Test DataflowHook is created and the right args are passed to
start_python_dataflow when executing Dataflow runner.
"""
dataflow_config = DataflowConfiguration(impersonation_chain=TEST_IMPERSONATION_ACCOUNT)
op = BeamRunPythonPipelineOperator(
runner="DataflowRunner",
dataflow_config=dataflow_config,
**self.default_op_kwargs,
)
magic_mock = mock.MagicMock()
if AIRFLOW_V_3_0_PLUS:
with pytest.raises(TaskDeferred):
op.execute(context=magic_mock)
else:
exception_msg = (
"GoogleBaseLink.persist method call with no extra value is Deprecated for Airflow 3."
" The method calls (only with context) needs to be removed after the Airflow 3 Migration"
" completed!"
)
with (
pytest.raises(TaskDeferred),
pytest.warns(AirflowProviderDeprecationWarning, match=exception_msg),
):
op.execute(context=magic_mock)
dataflow_hook_mock.assert_called_once_with(
gcp_conn_id=dataflow_config.gcp_conn_id,
poll_sleep=dataflow_config.poll_sleep,
impersonation_chain=dataflow_config.impersonation_chain,
drain_pipeline=dataflow_config.drain_pipeline,
cancel_timeout=dataflow_config.cancel_timeout,
wait_until_finished=dataflow_config.wait_until_finished,
)
beam_hook_mock.return_value.start_python_pipeline.assert_called_once()
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowJobLink.persist"))
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
def test_on_kill_dataflow_runner(self, dataflow_hook_mock, _, __, ___):
op = BeamRunPythonPipelineOperator(runner="DataflowRunner", **self.default_op_kwargs)
dataflow_cancel_job = dataflow_hook_mock.return_value.cancel_job
with pytest.raises(TaskDeferred):
op.execute(context=mock.MagicMock())
op.dataflow_job_id = JOB_ID
op.on_kill()
dataflow_cancel_job.assert_called_once_with(
job_id=JOB_ID, project_id=op.dataflow_config.project_id, location=op.dataflow_config.location
)
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_on_kill_direct_runner(self, _, dataflow_mock, __):
dataflow_cancel_job = dataflow_mock.return_value.cancel_job
op = BeamRunPythonPipelineOperator(runner="DataflowRunner", **self.default_op_kwargs)
if AIRFLOW_V_3_0_PLUS:
with pytest.raises(TaskDeferred):
op.execute(mock.MagicMock())
else:
exception_msg = (
"GoogleBaseLink.persist method call with no extra value is Deprecated for Airflow 3."
" The method calls (only with context) needs to be removed after the Airflow 3 Migration"
" completed!"
)
with (
pytest.raises(TaskDeferred),
pytest.warns(AirflowProviderDeprecationWarning, match=exception_msg),
):
op.execute(mock.MagicMock())
op.on_kill()
dataflow_cancel_job.assert_not_called()
|
TestBeamRunPythonPipelineOperatorAsync
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_merge.py
|
{
"start": 1293,
"end": 18842
}
|
class ____(Expr):
"""Merge / join two dataframes
This is an abstract class. It will be transformed into a concrete
implementation before graph construction.
See Also
--------
BlockwiseMerge
Repartition
Shuffle
"""
_parameters = [
"left",
"right",
"how",
"left_on",
"right_on",
"left_index",
"right_index",
"suffixes",
"indicator",
"shuffle_method",
"_npartitions",
"broadcast",
]
_defaults = {
"how": "inner",
"left_on": None,
"right_on": None,
"left_index": False,
"right_index": False,
"suffixes": ("_x", "_y"),
"indicator": False,
"shuffle_method": None,
"_npartitions": None,
"broadcast": None,
}
@property
def _filter_passthrough(self):
raise NotImplementedError(
"please use _filter_passthrough_available to make this decision"
)
def _filter_passthrough_available(self, parent, dependents):
if is_filter_pushdown_available(self, parent, dependents):
predicate = parent.predicate
# This protects against recursion, no need to separate ands if the first
# condition violates the join direction
while isinstance(predicate, And):
predicate = predicate.left
predicate_columns = self._predicate_columns(predicate)
if predicate_columns is None:
return False
if predicate_columns.issubset(self.left.columns):
return self.how in ("left", "inner", "leftsemi")
elif predicate_columns.issubset(self.right.columns):
return self.how in ("right", "inner")
elif len(predicate_columns) > 0:
return False
return True
elif isinstance(parent.predicate, And):
# If we can make that transformation then we should do it to further
# align filters that sit on top of merges
new = Filter(self, parent.predicate.left)
return new._name in {
x()._name for x in dependents[self._name] if x() is not None
}
return False
def _predicate_columns(self, predicate):
if isinstance(predicate, (Projection, Unaryop, Isin)):
return self._get_original_predicate_columns(predicate)
elif isinstance(predicate, Binop):
if isinstance(predicate, And):
return None
if not isinstance(predicate.right, Expr):
return self._get_original_predicate_columns(predicate.left)
elif isinstance(predicate.right, Elemwise):
return self._get_original_predicate_columns(predicate)
else:
return None
else:
# Unsupported predicate type
return None
def _get_original_predicate_columns(self, predicate):
predicate_columns = set()
stack = [predicate]
seen = set()
while stack:
e = stack.pop()
if self._name == e._name:
continue
if e._name in seen:
continue
seen.add(e._name)
if isinstance(e, _DelayedExpr):
continue
dependencies = e.dependencies()
stack.extend(dependencies)
if any(d._name == self._name for d in dependencies):
predicate_columns.update(e.columns)
return predicate_columns
def __str__(self):
return f"{type(self).__qualname__}({self._name[-7:]})"
@property
def unique_partition_mapping_columns_from_shuffle(self):
if self._is_single_partition_broadcast:
result = self.left.unique_partition_mapping_columns_from_shuffle.copy()
result.update(self.right.unique_partition_mapping_columns_from_shuffle)
return result
return {
tuple(self.left_on) if isinstance(self.left_on, list) else self.left_on,
tuple(self.right_on) if isinstance(self.right_on, list) else self.right_on,
}
@property
def kwargs(self):
return {
k: self.operand(k)
for k in [
"how",
"left_on",
"right_on",
"left_index",
"right_index",
"suffixes",
"indicator",
]
}
@functools.cached_property
def _meta(self):
left = meta_nonempty(self.left._meta)
right = meta_nonempty(self.right._meta)
kwargs = self.kwargs.copy()
if kwargs["how"] == "leftsemi":
kwargs["how"] = "left"
return make_meta(left.merge(right, **kwargs))
@functools.cached_property
def _npartitions(self):
if self.operand("_npartitions") is not None:
return self.operand("_npartitions")
return len(self._divisions()) - 1
@property
def _bcast_left(self):
if self.operand("_npartitions") is not None:
if self.broadcast_side == "right":
return Repartition(self.left, new_partitions=self._npartitions)
return self.left
@property
def _bcast_right(self):
if self.operand("_npartitions") is not None:
if self.broadcast_side == "left":
return Repartition(self.right, new_partitions=self._npartitions)
return self.right
def _divisions(self):
return self._lower().divisions
@functools.cached_property
def broadcast_side(self):
return "left" if self.left.npartitions < self.right.npartitions else "right"
@functools.cached_property
def is_broadcast_join(self):
broadcast_bias, broadcast = 0.5, None
broadcast_side = self.broadcast_side
if isinstance(self.broadcast, float):
broadcast_bias = self.broadcast
elif isinstance(self.broadcast, bool):
broadcast = self.broadcast
s_method = self.shuffle_method or get_default_shuffle_method()
if (
s_method in ("disk", "tasks", "p2p")
and self.how in ("inner", "left", "right", "leftsemi")
and self.how != broadcast_side
and broadcast is not False
):
n_low = min(self.left.npartitions, self.right.npartitions)
n_high = max(self.left.npartitions, self.right.npartitions)
if broadcast or (n_low < math.log2(n_high) * broadcast_bias):
return True
return False
@functools.cached_property
def _is_single_partition_broadcast(self):
_npartitions = max(self.left.npartitions, self.right.npartitions)
return (
_npartitions == 1
or self.left.npartitions == 1
and self.how in ("right", "inner")
or self.right.npartitions == 1
and self.how in ("left", "inner", "leftsemi")
)
@functools.cached_property
def merge_indexed_left(self):
return (
self.left_index or _contains_index_name(self.left, self.left_on)
) and self.left.known_divisions
@functools.cached_property
def merge_indexed_right(self):
return (
self.right_index or _contains_index_name(self.right, self.right_on)
) and self.right.known_divisions
def _on_condition_alread_partitioned(self, expr, on):
if not isinstance(on, list):
result = (
on in expr.unique_partition_mapping_columns_from_shuffle
or (on,) in expr.unique_partition_mapping_columns_from_shuffle
)
else:
result = tuple(on) in expr.unique_partition_mapping_columns_from_shuffle
return result
def _lower(self):
# Lower from an abstract expression
left = self.left
right = self.right
left_on = self.left_on
right_on = self.right_on
left_index = self.left_index
right_index = self.right_index
shuffle_method = self.shuffle_method
# TODO: capture index-merge as well
left_already_partitioned = self._on_condition_alread_partitioned(left, left_on)
right_already_partitioned = self._on_condition_alread_partitioned(
right, right_on
)
# TODO:
# 1. Add/leverage partition statistics
# Check for "trivial" broadcast (single partition)
if self._is_single_partition_broadcast:
return BlockwiseMerge(left, right, **self.kwargs)
# NOTE: Merging on an index is fragile. Pandas behavior
# depends on the actual data, and so we cannot use `meta`
# to accurately predict the output columns. Once general
# partition statistics are available, it may make sense
# to drop support for left_index and right_index.
shuffle_left_on = left_on
shuffle_right_on = right_on
if self.merge_indexed_left and self.merge_indexed_right:
# fully-indexed merge
divisions = list(unique(merge_sorted(left.divisions, right.divisions)))
if len(divisions) == 1:
divisions = (divisions[0], divisions[0])
right = Repartition(right, new_divisions=divisions, force=True)
left = Repartition(left, new_divisions=divisions, force=True)
shuffle_left_on = shuffle_right_on = None
# TODO:
# - Need 'rearrange_by_divisions' equivalent
# to avoid shuffle when we are merging on known
# divisions on one side only.
else:
if left_index:
shuffle_left_on = left.index._meta.name
if shuffle_left_on is None:
# placeholder for unnamed index merge
shuffle_left_on = "_index"
if right_index:
shuffle_right_on = right.index._meta.name
if shuffle_right_on is None:
shuffle_right_on = "_index"
if self.is_broadcast_join:
left, right = self._bcast_left, self._bcast_right
if self.how != "inner":
if self.broadcast_side == "left":
left = RearrangeByColumn(
left,
shuffle_left_on,
npartitions_out=left.npartitions,
)
else:
right = RearrangeByColumn(
right,
shuffle_right_on,
npartitions_out=right.npartitions,
)
return BroadcastJoin(
left,
right,
self.how,
left_on,
right_on,
left_index,
right_index,
self.suffixes,
self.indicator,
)
shuffle_npartitions = self.operand("_npartitions") or max(
self.left.npartitions, self.right.npartitions
)
if (shuffle_left_on or shuffle_right_on) and (
shuffle_method == "p2p"
or shuffle_method is None
and get_default_shuffle_method() == "p2p"
and not left_already_partitioned
and not right_already_partitioned
):
return HashJoinP2P(
left,
right,
how=self.how,
left_on=left_on,
right_on=right_on,
suffixes=self.suffixes,
indicator=self.indicator,
left_index=left_index,
right_index=right_index,
shuffle_left_on=shuffle_left_on,
shuffle_right_on=shuffle_right_on,
_npartitions=shuffle_npartitions,
)
if shuffle_left_on and not (
left_already_partitioned and self.left.npartitions == shuffle_npartitions
):
# Shuffle left
left = RearrangeByColumn(
left,
shuffle_left_on,
npartitions_out=shuffle_npartitions,
method=shuffle_method,
index_shuffle=left_index,
)
if shuffle_right_on and not (
right_already_partitioned and self.right.npartitions == shuffle_npartitions
):
# Shuffle right
right = RearrangeByColumn(
right,
shuffle_right_on,
npartitions_out=shuffle_npartitions,
method=shuffle_method,
index_shuffle=right_index,
)
# Blockwise merge
return BlockwiseMerge(left, right, **self.kwargs)
def _simplify_up(self, parent, dependents):
if isinstance(parent, Filter):
if not self._filter_passthrough_available(parent, dependents):
return
predicate = parent.predicate
if isinstance(predicate, And):
new = Filter(self, predicate.left)
new_pred = predicate.right.substitute(self, new)
return Filter(new, new_pred)
predicate_cols = self._predicate_columns(parent.predicate)
new_left, new_right = self.left, self.right
left_suffix, right_suffix = self.suffixes[0], self.suffixes[1]
if predicate_cols and predicate_cols.issubset(self.left.columns):
if left_suffix != "" and any(
f"{col}{left_suffix}" in self.columns and col in self.right.columns
for col in predicate_cols
):
# column was renamed so the predicate must go into the other side
pass
else:
left_filter = predicate.substitute(self, self.left)
new_left = self.left[left_filter]
if predicate_cols and predicate_cols.issubset(self.right.columns):
if right_suffix != "" and any(
f"{col}{right_suffix}" in self.columns and col in self.left.columns
for col in predicate_cols
):
# column was renamed so the predicate must go into the other side
pass
else:
right_filter = predicate.substitute(self, self.right)
new_right = self.right[right_filter]
if new_right is self.right and new_left is self.left:
# don't drop the filter
return
return type(self)(new_left, new_right, *self.operands[2:])
if isinstance(parent, (Projection, Index)):
# Reorder the column projection to
# occur before the Merge
columns = determine_column_projection(self, parent, dependents)
columns = _convert_to_list(columns)
if isinstance(parent, Index):
# Index creates an empty column projection
projection, parent_columns = columns, None
else:
projection, parent_columns = columns, parent.operand("columns")
if is_scalar(projection):
projection = [projection]
left, right = self.left, self.right
left_on = _convert_to_list(self.left_on)
if left_on is None:
left_on = []
right_on = _convert_to_list(self.right_on)
if right_on is None:
right_on = []
left_suffix, right_suffix = self.suffixes[0], self.suffixes[1]
project_left, project_right = [], []
right_suff_columns, left_suff_columns = [], []
# Find columns to project on the left
for col in left.columns:
if col in left_on or col in projection:
project_left.append(col)
elif f"{col}{left_suffix}" in projection:
project_left.append(col)
if col in right.columns:
# Right column must be present
# for the suffix to be applied
right_suff_columns.append(col)
# Find columns to project on the right
for col in right.columns:
if col in right_on or col in projection:
project_right.append(col)
elif f"{col}{right_suffix}" in projection:
project_right.append(col)
if col in left.columns and col not in project_left:
# Left column must be present
# for the suffix to be applied
left_suff_columns.append(col)
project_left.extend([c for c in left_suff_columns if c not in project_left])
project_right.extend(
[c for c in right_suff_columns if c not in project_right]
)
if set(project_left) < set(left.columns) or set(project_right) < set(
right.columns
):
result = type(self)(
left[project_left], right[project_right], *self.operands[2:]
)
if parent_columns is None:
return type(parent)(result)
return result[parent_columns]
|
Merge
|
python
|
doocs__leetcode
|
solution/2000-2099/2035.Partition Array Into Two Arrays to Minimize Sum Difference/Solution.py
|
{
"start": 0,
"end": 1211
}
|
class ____:
def minimumDifference(self, nums: List[int]) -> int:
n = len(nums) >> 1
f = defaultdict(set)
g = defaultdict(set)
for i in range(1 << n):
s = cnt = 0
s1 = cnt1 = 0
for j in range(n):
if (i & (1 << j)) != 0:
s += nums[j]
cnt += 1
s1 += nums[n + j]
cnt1 += 1
else:
s -= nums[j]
s1 -= nums[n + j]
f[cnt].add(s)
g[cnt1].add(s1)
ans = inf
for i in range(n + 1):
fi, gi = sorted(list(f[i])), sorted(list(g[n - i]))
# min(abs(f[i] + g[n - i]))
for a in fi:
left, right = 0, len(gi) - 1
b = -a
while left < right:
mid = (left + right) >> 1
if gi[mid] >= b:
right = mid
else:
left = mid + 1
ans = min(ans, abs(a + gi[left]))
if left > 0:
ans = min(ans, abs(a + gi[left - 1]))
return ans
|
Solution
|
python
|
django__django
|
django/contrib/humanize/templatetags/humanize.py
|
{
"start": 7417,
"end": 12859
}
|
class ____:
time_strings = {
# Translators: delta will contain a string like '2 months' or
# '1 month, 2 weeks'
"past-day": gettext_lazy("%(delta)s ago"),
# Translators: please keep a non-breaking space (U+00A0) between count
# and time unit.
"past-hour": ngettext_lazy("an hour ago", "%(count)s hours ago", "count"),
# Translators: please keep a non-breaking space (U+00A0) between count
# and time unit.
"past-minute": ngettext_lazy("a minute ago", "%(count)s minutes ago", "count"),
# Translators: please keep a non-breaking space (U+00A0) between count
# and time unit.
"past-second": ngettext_lazy("a second ago", "%(count)s seconds ago", "count"),
"now": gettext_lazy("now"),
# fmt: off
# fmt turned off to avoid black splitting the ngettext_lazy calls to
# multiple lines, as this results in gettext missing the 'Translators:'
# comments.
"future-second": ngettext_lazy(
# Translators: please keep a non-breaking space (U+00A0) between
# count and time unit.
"a second from now", "%(count)s seconds from now", "count"
),
"future-minute": ngettext_lazy(
# Translators: please keep a non-breaking space (U+00A0) between
# count and time unit.
"a minute from now", "%(count)s minutes from now", "count",
),
"future-hour": ngettext_lazy(
# Translators: please keep a non-breaking space (U+00A0) between
# count and time unit.
"an hour from now", "%(count)s hours from now", "count",
),
# fmt: on
# Translators: delta will contain a string like '2 months' or
# '1 month, 2 weeks'
"future-day": gettext_lazy("%(delta)s from now"),
}
past_substrings = {
# fmt: off
"year": npgettext_lazy(
# Translators: 'naturaltime-past' strings will be included in
# '%(delta)s ago'
"naturaltime-past", "%(num)d year", "%(num)d years", "num",
),
# fmt:on
"month": npgettext_lazy(
"naturaltime-past", "%(num)d month", "%(num)d months", "num"
),
"week": npgettext_lazy(
"naturaltime-past", "%(num)d week", "%(num)d weeks", "num"
),
"day": npgettext_lazy("naturaltime-past", "%(num)d day", "%(num)d days", "num"),
"hour": npgettext_lazy(
"naturaltime-past", "%(num)d hour", "%(num)d hours", "num"
),
"minute": npgettext_lazy(
"naturaltime-past", "%(num)d minute", "%(num)d minutes", "num"
),
}
future_substrings = {
# fmt: off
"year": npgettext_lazy(
# Translators: 'naturaltime-future' strings will be included in
# '%(delta)s from now'.
"naturaltime-future", "%(num)d year", "%(num)d years", "num",
),
# fmt: on
"month": npgettext_lazy(
"naturaltime-future", "%(num)d month", "%(num)d months", "num"
),
"week": npgettext_lazy(
"naturaltime-future", "%(num)d week", "%(num)d weeks", "num"
),
"day": npgettext_lazy(
"naturaltime-future", "%(num)d day", "%(num)d days", "num"
),
"hour": npgettext_lazy(
"naturaltime-future", "%(num)d hour", "%(num)d hours", "num"
),
"minute": npgettext_lazy(
"naturaltime-future", "%(num)d minute", "%(num)d minutes", "num"
),
}
@classmethod
def string_for(cls, value):
if not isinstance(value, date): # datetime is a subclass of date
return value
now = datetime.now(UTC if is_aware(value) else None)
if value < now:
delta = now - value
if delta.days != 0:
return cls.time_strings["past-day"] % {
"delta": defaultfilters.timesince(
value, now, time_strings=cls.past_substrings
),
}
elif delta.seconds == 0:
return cls.time_strings["now"]
elif delta.seconds < 60:
return cls.time_strings["past-second"] % {"count": delta.seconds}
elif delta.seconds // 60 < 60:
count = delta.seconds // 60
return cls.time_strings["past-minute"] % {"count": count}
else:
count = delta.seconds // 60 // 60
return cls.time_strings["past-hour"] % {"count": count}
else:
delta = value - now
if delta.days != 0:
return cls.time_strings["future-day"] % {
"delta": defaultfilters.timeuntil(
value, now, time_strings=cls.future_substrings
),
}
elif delta.seconds == 0:
return cls.time_strings["now"]
elif delta.seconds < 60:
return cls.time_strings["future-second"] % {"count": delta.seconds}
elif delta.seconds // 60 < 60:
count = delta.seconds // 60
return cls.time_strings["future-minute"] % {"count": count}
else:
count = delta.seconds // 60 // 60
return cls.time_strings["future-hour"] % {"count": count}
|
NaturalTimeFormatter
|
python
|
Pylons__pyramid
|
tests/test_session.py
|
{
"start": 87,
"end": 10731
}
|
class ____:
def test_ctor_no_cookie(self):
request = testing.DummyRequest()
session = self._makeOne(request)
self.assertEqual(dict(session), {})
def test_instance_conforms(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import ISession
request = testing.DummyRequest()
session = self._makeOne(request)
verifyObject(ISession, session)
def test_ctor_with_cookie_still_valid(self):
import time
request = testing.DummyRequest()
cookieval = self._serialize((time.time(), 0, {'state': 1}))
request.cookies['session'] = cookieval
session = self._makeOne(request)
self.assertEqual(dict(session), {'state': 1})
def test_ctor_with_cookie_expired(self):
request = testing.DummyRequest()
cookieval = self._serialize((0, 0, {'state': 1}))
request.cookies['session'] = cookieval
session = self._makeOne(request)
self.assertEqual(dict(session), {})
def test_ctor_with_bad_cookie_cannot_deserialize(self):
request = testing.DummyRequest()
request.cookies['session'] = 'abc'
session = self._makeOne(request)
self.assertEqual(dict(session), {})
def test_ctor_with_bad_cookie_not_tuple(self):
request = testing.DummyRequest()
cookieval = self._serialize('abc')
request.cookies['session'] = cookieval
session = self._makeOne(request)
self.assertEqual(dict(session), {})
def test_timeout(self):
import time
request = testing.DummyRequest()
cookieval = self._serialize((time.time() - 5, 0, {'state': 1}))
request.cookies['session'] = cookieval
session = self._makeOne(request, timeout=1)
self.assertEqual(dict(session), {})
def test_timeout_never(self):
import time
request = testing.DummyRequest()
LONG_TIME = 31536000
cookieval = self._serialize((time.time() + LONG_TIME, 0, {'state': 1}))
request.cookies['session'] = cookieval
session = self._makeOne(request, timeout=None)
self.assertEqual(dict(session), {'state': 1})
def test_timeout_str(self):
import time
request = testing.DummyRequest()
cookieval = self._serialize((time.time() - 5, 0, {'state': 1}))
request.cookies['session'] = cookieval
session = self._makeOne(request, timeout='1')
self.assertEqual(dict(session), {})
def test_timeout_invalid(self):
request = testing.DummyRequest()
self.assertRaises(
ValueError, self._makeOne, request, timeout='Invalid value'
)
def test_changed(self):
request = testing.DummyRequest()
session = self._makeOne(request)
self.assertEqual(session.changed(), None)
self.assertTrue(session._dirty)
def test_invalidate(self):
request = testing.DummyRequest()
session = self._makeOne(request)
session['a'] = 1
self.assertEqual(session.invalidate(), None)
self.assertFalse('a' in session)
def test_reissue_triggered(self):
import time
request = testing.DummyRequest()
cookieval = self._serialize((time.time() - 2, 0, {'state': 1}))
request.cookies['session'] = cookieval
session = self._makeOne(request)
self.assertEqual(session['state'], 1)
self.assertTrue(session._dirty)
def test__set_cookie_on_exception(self):
request = testing.DummyRequest()
request.exception = True
session = self._makeOne(request)
session._cookie_on_exception = False
response = DummyResponse()
self.assertEqual(session._set_cookie(response), False)
def test__set_cookie_on_exception_no_request_exception(self):
import webob
request = testing.DummyRequest()
request.exception = None
session = self._makeOne(request)
session._cookie_on_exception = False
response = webob.Response()
self.assertEqual(session._set_cookie(response), True)
self.assertEqual(response.headerlist[-1][0], 'Set-Cookie')
def test__set_cookie_cookieval_too_long(self):
request = testing.DummyRequest()
session = self._makeOne(request)
session['abc'] = 'x' * 100000
response = DummyResponse()
self.assertRaises(ValueError, session._set_cookie, response)
def test__set_cookie_real_webob_response(self):
import webob
request = testing.DummyRequest()
session = self._makeOne(request)
session['abc'] = 'x'
response = webob.Response()
self.assertEqual(session._set_cookie(response), True)
self.assertEqual(response.headerlist[-1][0], 'Set-Cookie')
def test__set_cookie_options(self):
from pyramid.response import Response
request = testing.DummyRequest()
request.exception = None
session = self._makeOne(
request,
cookie_name='abc',
path='/foo',
domain='localhost',
secure=True,
httponly=True,
)
session['abc'] = 'x'
response = Response()
self.assertEqual(session._set_cookie(response), True)
cookieval = response.headerlist[-1][1]
val, domain, path, secure, httponly, samesite = (
x.strip() for x in cookieval.split(';')
)
self.assertTrue(val.startswith('abc='))
self.assertEqual(domain, 'Domain=localhost')
self.assertEqual(path, 'Path=/foo')
self.assertEqual(secure, 'secure')
self.assertEqual(httponly, 'HttpOnly')
self.assertEqual(samesite, 'SameSite=Lax')
def test_flash_default(self):
request = testing.DummyRequest()
session = self._makeOne(request)
session.flash('msg1')
session.flash('msg2')
self.assertEqual(session['_f_'], ['msg1', 'msg2'])
def test_flash_allow_duplicate_false(self):
request = testing.DummyRequest()
session = self._makeOne(request)
session.flash('msg1')
session.flash('msg1', allow_duplicate=False)
self.assertEqual(session['_f_'], ['msg1'])
def test_flash_allow_duplicate_true_and_msg_not_in_storage(self):
request = testing.DummyRequest()
session = self._makeOne(request)
session.flash('msg1', allow_duplicate=True)
self.assertEqual(session['_f_'], ['msg1'])
def test_flash_allow_duplicate_false_and_msg_not_in_storage(self):
request = testing.DummyRequest()
session = self._makeOne(request)
session.flash('msg1', allow_duplicate=False)
self.assertEqual(session['_f_'], ['msg1'])
def test_flash_mixed(self):
request = testing.DummyRequest()
session = self._makeOne(request)
session.flash('warn1', 'warn')
session.flash('warn2', 'warn')
session.flash('err1', 'error')
session.flash('err2', 'error')
self.assertEqual(session['_f_warn'], ['warn1', 'warn2'])
def test_pop_flash_default_queue(self):
request = testing.DummyRequest()
session = self._makeOne(request)
queue = ['one', 'two']
session['_f_'] = queue
result = session.pop_flash()
self.assertEqual(result, queue)
self.assertEqual(session.get('_f_'), None)
def test_pop_flash_nodefault_queue(self):
request = testing.DummyRequest()
session = self._makeOne(request)
queue = ['one', 'two']
session['_f_error'] = queue
result = session.pop_flash('error')
self.assertEqual(result, queue)
self.assertEqual(session.get('_f_error'), None)
def test_peek_flash_default_queue(self):
request = testing.DummyRequest()
session = self._makeOne(request)
queue = ['one', 'two']
session['_f_'] = queue
result = session.peek_flash()
self.assertEqual(result, queue)
self.assertEqual(session.get('_f_'), queue)
def test_peek_flash_nodefault_queue(self):
request = testing.DummyRequest()
session = self._makeOne(request)
queue = ['one', 'two']
session['_f_error'] = queue
result = session.peek_flash('error')
self.assertEqual(result, queue)
self.assertEqual(session.get('_f_error'), queue)
def test_new_csrf_token(self):
request = testing.DummyRequest()
session = self._makeOne(request)
token = session.new_csrf_token()
self.assertEqual(token, session['_csrft_'])
def test_get_csrf_token(self):
request = testing.DummyRequest()
session = self._makeOne(request)
session['_csrft_'] = 'token'
token = session.get_csrf_token()
self.assertEqual(token, 'token')
self.assertTrue('_csrft_' in session)
def test_get_csrf_token_new(self):
request = testing.DummyRequest()
session = self._makeOne(request)
token = session.get_csrf_token()
self.assertTrue(token)
self.assertTrue('_csrft_' in session)
def test_no_set_cookie_with_exception(self):
import webob
request = testing.DummyRequest()
request.exception = True
session = self._makeOne(request, set_on_exception=False)
session['a'] = 1
callbacks = request.response_callbacks
self.assertEqual(len(callbacks), 1)
response = webob.Response()
result = callbacks[0](request, response)
self.assertEqual(result, None)
self.assertFalse('Set-Cookie' in dict(response.headerlist))
def test_set_cookie_with_exception(self):
import webob
request = testing.DummyRequest()
request.exception = True
session = self._makeOne(request)
session['a'] = 1
callbacks = request.response_callbacks
self.assertEqual(len(callbacks), 1)
response = webob.Response()
result = callbacks[0](request, response)
self.assertEqual(result, None)
self.assertTrue('Set-Cookie' in dict(response.headerlist))
def test_cookie_is_set(self):
import webob
request = testing.DummyRequest()
session = self._makeOne(request)
session['a'] = 1
callbacks = request.response_callbacks
self.assertEqual(len(callbacks), 1)
response = webob.Response()
result = callbacks[0](request, response)
self.assertEqual(result, None)
self.assertTrue('Set-Cookie' in dict(response.headerlist))
|
SharedCookieSessionTests
|
python
|
scipy__scipy
|
scipy/special/tests/test_basic.py
|
{
"start": 172229,
"end": 172350
}
|
class ____:
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
|
TestFresnelIntegral
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/pex_builder/deploy.py
|
{
"start": 404,
"end": 1301
}
|
class ____:
"""Inputs and outputs for each code location."""
location: parse_workspace.Location
# local dependencies are packaged with the source.pex along with the main code location
local_packages: deps.LocalPackages
# all other dependencies are packaged in the deps.pex
deps_requirements: deps.DepsRequirements
# One of deps_pex_path or published_deps_pex should be set
deps_pex_path: Optional[str] = None # locally build deps.pex
published_deps_pex: Optional[str] = None # already published deps.pex
# dagster_version should be always set for both cases, pre published and locally built deps
dagster_version: Optional[str] = None
source_pex_path: Optional[str] = None
pex_tag: Optional[str] = None # composite tag used to identify the set of pex files
code_location_update_error: Optional[Exception] = None
@dataclass
|
LocationBuild
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/channels.py
|
{
"start": 37265,
"end": 45361
}
|
class ____(DatumChannelMixin, core.FieldOrDatumDefWithConditionDatumDefnumber):
"""
AngleDatum schema wrapper.
Parameters
----------
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None
A constant value in data domain.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "angle"
@overload
def bandPosition(self, _: float, /) -> AngleDatum: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> AngleDatum: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> AngleDatum: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefnumberExprRef], /
) -> AngleDatum: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> AngleDatum: ...
@overload
def type(self, _: Type_T, /) -> AngleDatum: ...
def __init__(
self,
datum,
bandPosition: Optional[float] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
**kwds,
):
super().__init__(
datum=datum,
bandPosition=bandPosition,
condition=condition,
title=title,
type=type,
**kwds,
)
@with_property_setters
|
AngleDatum
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/sharded_variable.py
|
{
"start": 4186,
"end": 6423
}
|
class ____(Partitioner):
"""Partitioner that allocates a minimum size per shard.
This partitioner ensures each shard has at least `min_shard_bytes`, and tries
to allocate as many shards as possible, i.e., keeping shard size as small as
possible. The maximum number of such shards (upper bound) is given by
`max_shards`.
Examples:
>>> partitioner = MinSizePartitioner(min_shard_bytes=4, max_shards=2)
>>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32)
>>> [2, 1]
>>> partitioner = MinSizePartitioner(min_shard_bytes=4, max_shards=10)
>>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32)
>>> [6, 1]
>>>
>>> # use in ParameterServerStrategy
>>> # strategy = tf.distribute.experimental.ParameterServerStrategy(
>>> # cluster_resolver=cluster_resolver, variable_partitioner=partitioner)
"""
def __init__(
self, min_shard_bytes=256 << 10, max_shards=1, bytes_per_string=16
):
"""Creates a new `MinSizePartitioner`.
Args:
min_shard_bytes: Minimum bytes of each shard. Defaults to 256K.
max_shards: Upper bound on the number of shards. Defaults to 1.
bytes_per_string: If the partition value is of type string, this provides
an estimate of how large each string is.
"""
if min_shard_bytes < 1:
raise ValueError(
'Argument `min_shard_bytes` must be positive. '
f'Received: {min_shard_bytes}'
)
if max_shards < 1:
raise ValueError(
f'Argument `max_shards` must be positive. Received: {max_shards}'
)
if bytes_per_string < 1:
raise ValueError(
'Argument `bytes_per_string` must be positive. '
f'Received: {bytes_per_string}'
)
self._min_shard_bytes = min_shard_bytes
self._max_shards = max_shards
self._bytes_per_string = bytes_per_string
def __call__(self, shape, dtype, axis=0):
return partitioned_variables.min_max_variable_partitioner(
max_partitions=self._max_shards,
axis=axis,
min_slice_size=self._min_shard_bytes,
bytes_per_string_element=self._bytes_per_string,
)(shape, dtype)
@tf_export('distribute.experimental.partitioners.MaxSizePartitioner', v1=[])
|
MinSizePartitioner
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/c/_ast.py
|
{
"start": 39421,
"end": 41191
}
|
class ____(ASTDeclarator):
def __init__(
self, declId: ASTNestedName, arrayOps: list[ASTArray], param: ASTParameters
) -> None:
self.declId = declId
self.arrayOps = arrayOps
self.param = param
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTDeclaratorNameParam):
return NotImplemented
return (
self.declId == other.declId
and self.arrayOps == other.arrayOps
and self.param == other.param
)
def __hash__(self) -> int:
return hash((self.declId, self.arrayOps, self.param))
@property
def name(self) -> ASTNestedName:
return self.declId
@property
def function_params(self) -> list[ASTFunctionParameter]:
return self.param.function_params
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self) -> bool:
return self.declId is not None
def _stringify(self, transform: StringifyTransform) -> str:
res = []
if self.declId:
res.append(transform(self.declId))
res.extend(transform(op) for op in self.arrayOps)
if self.param:
res.append(transform(self.param))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
for op in self.arrayOps:
op.describe_signature(signode, mode, env, symbol)
if self.param:
self.param.describe_signature(signode, mode, env, symbol)
|
ASTDeclaratorNameParam
|
python
|
crytic__slither
|
slither/detectors/assembly/return_instead_of_leave.py
|
{
"start": 253,
"end": 1998
}
|
class ____(AbstractDetector):
"""
Check for cases where a return(a,b) is used in an assembly function that also returns two variables
"""
ARGUMENT = "return-leave"
HELP = "If a `return` is used instead of a `leave`."
IMPACT = DetectorClassification.HIGH
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#return-instead-of-leave-in-assembly"
WIKI_TITLE = "Return instead of leave in assembly"
WIKI_DESCRIPTION = "Detect if a `return` is used where a `leave` should be used."
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract C {
function f() internal returns (uint a, uint b) {
assembly {
return (5, 6)
}
}
}
```
The function will halt the execution, instead of returning a two uint."""
WIKI_RECOMMENDATION = "Use the `leave` statement."
def _check_function(self, f: Function) -> List[Output]:
results: List[Output] = []
for ir in f.solidity_calls:
if ir.function == SolidityFunction("return(uint256,uint256)"):
info: DETECTOR_INFO = [f, " contains an incorrect call to return: ", ir.node, "\n"]
json = self.generate_result(info)
results.append(json)
return results
def _detect(self) -> List[Output]:
results: List[Output] = []
for c in self.contracts:
for f in c.functions_declared:
if (
len(f.returns) == 2
and f.contains_assembly
and f.visibility not in ["public", "external"]
):
results += self._check_function(f)
return results
|
ReturnInsteadOfLeave
|
python
|
pyqtgraph__pyqtgraph
|
tests/test_stability.py
|
{
"start": 3340,
"end": 3698
}
|
class ____(Exception):
pass
def raiseException():
p('raise exception')
raise TstException("A test exception")
def addReference():
p('add reference')
global widgets
if len(widgets) < 1:
return
obj1 = randItem(widgets)
obj2 = randItem(widgets)
p(' %s -> %s' % (obj1, obj2))
obj1._testref = obj2
|
TstException
|
python
|
django__django
|
tests/db_functions/models.py
|
{
"start": 1057,
"end": 1542
}
|
class ____(models.Model):
name = models.CharField(max_length=32)
start_datetime = models.DateTimeField(null=True, blank=True)
end_datetime = models.DateTimeField(null=True, blank=True)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
start_time = models.TimeField(null=True, blank=True)
end_time = models.TimeField(null=True, blank=True)
duration = models.DurationField(null=True, blank=True)
|
DTModel
|
python
|
coleifer__peewee
|
examples/graph.py
|
{
"start": 575,
"end": 2131
}
|
class ____(Base):
src = ForeignKeyField(Node, backref='outgoing_edges')
dest = ForeignKeyField(Node, backref='incoming_edges')
weight = FloatField()
db.create_tables([Node, Edge])
nodes = [Node.create(name=c) for c in 'abcde']
g = (
('a', 'b', -1),
('a', 'c', 4),
('b', 'c', 3),
('b', 'd', 2),
('b', 'e', 2),
('d', 'b', 1),
('d', 'c', 5),
('e', 'd', -3))
for src, dest, wt in g:
src_n, dest_n = nodes[ord(src) - ord('a')], nodes[ord(dest) - ord('a')]
Edge.create(src=src_n, dest=dest_n, weight=wt)
def bellman_ford(s):
dist = {}
pred = {}
all_nodes = Node.select()
for node in all_nodes:
dist[node] = float('inf')
pred[node] = None
dist[s] = 0
for _ in range(len(all_nodes) - 1):
for u in all_nodes:
for v in u.outgoing():
potential = dist[u] + v.weight
if dist[v] > potential:
dist[v] = potential
pred[v] = u
# Verify no negative-weight cycles.
for u in all_nodes:
for v in u.outgoing():
assert dist[v] <= dist[u] + v.weight
return dist, pred
def print_path(s, e):
dist, pred = bellman_ford(s)
distance = dist[e]
route = [e]
while e != s:
route.append(pred[e])
e = pred[e]
print(' -> '.join(v.name for v in route[::-1]) + ' (%s)' % distance)
print_path(Node['a'], Node['c']) # a -> b -> c
print_path(Node['a'], Node['d']) # a -> b -> e -> d
print_path(Node['b'], Node['d']) # b -> e -> d
|
Edge
|
python
|
huggingface__transformers
|
src/transformers/convert_slow_tokenizer.py
|
{
"start": 46650,
"end": 48273
}
|
class ____(SpmConverter):
handle_byte_fallback = True
SpmExtractor = GemmaSentencePieceExtractor
# start and end of turn tokens must be marked as special
special_tokens = {"<start_of_turn>", "<end_of_turn>"}
""""
split_by_unicode_script: true
split_by_number: true
split_by_whitespace: true
treat_whitespace_as_suffix: false
allow_whitespace_only_pieces: true
split_digits: true
byte_fallback: true
"""
def normalizer(self, proto):
return normalizers.Replace(" ", "▁")
def vocab(self, proto):
vocab = [
(self.original_tokenizer.pad_token, 0.0),
(self.original_tokenizer.eos_token, 0.0),
(self.original_tokenizer.bos_token, 0.0),
]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
# Older gemma tokenizers had a missing tab token, so we fix that here
if not any(x[0] == "\t" for x in vocab):
override_index = next((i for i, x in enumerate(vocab) if x[0] == "<0x09>"), None)
if override_index is not None:
vocab[override_index] = ("\t", 0.0)
return vocab
def pre_tokenizer(self, replacement, add_prefix_space):
return pre_tokenizers.Split(" ", "merged_with_previous")
def unk_id(self, proto):
unk_id = 3
return unk_id
def decoder(self, replacement, add_prefix_space):
return decoders.Sequence(
[
decoders.Replace("▁", " "),
decoders.ByteFallback(),
decoders.Fuse(),
]
)
|
GemmaConverter
|
python
|
coleifer__peewee
|
tests/models.py
|
{
"start": 67988,
"end": 71009
}
|
class ____(ModelTestCase):
database = get_in_memory_db()
requires = [Sample]
def test_coerce(self):
for i in range(3):
Sample.create(counter=i, value=i)
counter_group = fn.GROUP_CONCAT(Sample.counter).coerce(False)
query = Sample.select(counter_group.alias('counter'))
self.assertEqual(query.get().counter, '0,1,2')
query = Sample.select(counter_group.alias('counter_group'))
self.assertEqual(query.get().counter_group, '0,1,2')
query = Sample.select(counter_group)
self.assertEqual(query.scalar(), '0,1,2')
def test_scalar(self):
for i in range(4):
Sample.create(counter=i, value=i)
query = Sample.select(fn.SUM(Sample.counter).alias('total'))
self.assertEqual(query.scalar(), 6)
self.assertEqual(query.scalar(as_tuple=True), (6,))
self.assertEqual(query.scalar(as_dict=True), {'total': 6})
Sample.delete().execute()
self.assertTrue(query.scalar() is None)
self.assertEqual(query.scalar(as_tuple=True), (None,))
self.assertEqual(query.scalar(as_dict=True), {'total': None})
def test_safe_python_value(self):
for i in range(3):
Sample.create(counter=i, value=i)
counter_group = fn.GROUP_CONCAT(Sample.counter)
query = Sample.select(counter_group.alias('counter'))
self.assertEqual(query.get().counter, '0,1,2')
self.assertEqual(query.scalar(), '0,1,2')
query = Sample.select(counter_group.alias('counter_group'))
self.assertEqual(query.get().counter_group, '0,1,2')
self.assertEqual(query.scalar(), '0,1,2')
def test_conv_using_python_value(self):
for i in range(3):
Sample.create(counter=i, value=i)
counter = (fn
.GROUP_CONCAT(Sample.counter)
.python_value(lambda x: [int(i) for i in x.split(',')]))
query = Sample.select(counter.alias('counter'))
self.assertEqual(query.get().counter, [0, 1, 2])
query = Sample.select(counter.alias('counter_group'))
self.assertEqual(query.get().counter_group, [0, 1, 2])
query = Sample.select(counter)
self.assertEqual(query.scalar(), [0, 1, 2])
@requires_models(Category, Sample)
def test_no_coerce_count_avg(self):
for i in range(10):
Category.create(name=str(i))
# COUNT() does not result in the value being coerced.
query = Category.select(fn.COUNT(Category.name))
self.assertEqual(query.scalar(), 10)
# Force the value to be coerced using the field's db_value().
query = Category.select(fn.COUNT(Category.name).coerce(True))
self.assertEqual(query.scalar(), '10')
# Ensure avg over an integer field is returned as a float.
Sample.insert_many([(1, 0), (2, 0)]).execute()
query = Sample.select(fn.AVG(Sample.counter).alias('a'))
self.assertEqual(query.get().a, 1.5)
|
TestFunctionCoerce
|
python
|
django__django
|
tests/servers/tests.py
|
{
"start": 5773,
"end": 6206
}
|
class ____(LiveServerBase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# put it in a list to prevent descriptor lookups in test
cls.live_server_url_test = [cls.live_server_url]
def test_live_server_url_is_class_property(self):
self.assertIsInstance(self.live_server_url_test[0], str)
self.assertEqual(self.live_server_url_test[0], self.live_server_url)
|
LiveServerAddress
|
python
|
pytorch__pytorch
|
test/inductor/test_ordered_set.py
|
{
"start": 45800,
"end": 46060
}
|
class ____(TestSubsets, TestCase):
left = OrderedSet()
right = OrderedSet([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
# ------------------------------------------------------------------------------
|
TestSubsetEmptyNonEmpty
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 212517,
"end": 213189
}
|
class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("BypassForcePushAllowanceEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("BypassForcePushAllowance"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
|
BypassForcePushAllowanceConnection
|
python
|
pypa__hatch
|
tests/config/test_model.py
|
{
"start": 22066,
"end": 33120
}
|
class ____:
def test_not_table(self, helpers):
config = RootConfig({"template": 9000})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template
must be a table"""
),
):
_ = config.template
def test_set_lazy_error(self, helpers):
config = RootConfig({})
config.template = 9000
assert config.raw_data == {"template": 9000}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template
must be a table"""
),
):
_ = config.template
def test_name(self):
config = RootConfig({"template": {"name": "foo"}})
assert config.template.name == config.template.name == "foo"
assert config.raw_data == {"template": {"name": "foo"}}
def test_name_default_env_var(self):
config = RootConfig({})
assert config.template.name == "Foo Bar"
assert config.raw_data == {"template": {"name": "Foo Bar"}}
def test_name_default_git(self, temp_dir):
config = RootConfig({})
with temp_dir.as_cwd(exclude=["GIT_AUTHOR_NAME"]):
subprocess.check_output(["git", "init"])
subprocess.check_output(["git", "config", "--local", "user.name", "test"])
assert config.template.name == "test"
assert config.raw_data == {"template": {"name": "test"}}
def test_name_default_no_git(self, temp_dir):
config = RootConfig({})
with temp_dir.as_cwd(exclude=["*"]):
assert config.template.name == "U.N. Owen"
assert config.raw_data == {"template": {"name": "U.N. Owen"}}
def test_name_not_string(self, helpers):
config = RootConfig({"template": {"name": 9000}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> name
must be a string"""
),
):
_ = config.template.name
def test_name_set_lazy_error(self, helpers):
config = RootConfig({})
config.template.name = 9000
assert config.raw_data == {"template": {"name": 9000}}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> name
must be a string"""
),
):
_ = config.template.name
def test_email(self):
config = RootConfig({"template": {"email": "foo"}})
assert config.template.email == config.template.email == "foo"
assert config.raw_data == {"template": {"email": "foo"}}
def test_email_default_env_var(self):
config = RootConfig({})
assert config.template.email == "foo@bar.baz"
assert config.raw_data == {"template": {"email": "foo@bar.baz"}}
def test_email_default_git(self, temp_dir):
config = RootConfig({})
with temp_dir.as_cwd(exclude=["GIT_AUTHOR_EMAIL"]):
subprocess.check_output(["git", "init"])
subprocess.check_output(["git", "config", "--local", "user.email", "test"])
assert config.template.email == "test"
assert config.raw_data == {"template": {"email": "test"}}
def test_email_default_no_git(self, temp_dir):
config = RootConfig({})
with temp_dir.as_cwd(exclude=["*"]):
assert config.template.email == "void@some.where"
assert config.raw_data == {"template": {"email": "void@some.where"}}
def test_email_not_string(self, helpers):
config = RootConfig({"template": {"email": 9000}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> email
must be a string"""
),
):
_ = config.template.email
def test_email_set_lazy_error(self, helpers):
config = RootConfig({})
config.template.email = 9000
assert config.raw_data == {"template": {"email": 9000}}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> email
must be a string"""
),
):
_ = config.template.email
def test_licenses_not_table(self, helpers):
config = RootConfig({"template": {"licenses": 9000}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> licenses
must be a table"""
),
):
_ = config.template.licenses
def test_licenses_set_lazy_error(self, helpers):
config = RootConfig({})
config.template.licenses = 9000
assert config.raw_data == {"template": {"licenses": 9000}}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> licenses
must be a table"""
),
):
_ = config.template.licenses
def test_licenses_headers(self):
config = RootConfig({"template": {"licenses": {"headers": False}}})
assert config.template.licenses.headers is config.template.licenses.headers is False
assert config.raw_data == {"template": {"licenses": {"headers": False}}}
def test_licenses_headers_default(self):
config = RootConfig({})
assert config.template.licenses.headers is config.template.licenses.headers is True
assert config.raw_data == {"template": {"licenses": {"headers": True}}}
def test_licenses_headers_not_boolean(self, helpers):
config = RootConfig({"template": {"licenses": {"headers": 9000}}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> licenses -> headers
must be a boolean"""
),
):
_ = config.template.licenses.headers
def test_licenses_headers_set_lazy_error(self, helpers):
config = RootConfig({})
config.template.licenses.headers = 9000
assert config.raw_data == {"template": {"licenses": {"headers": 9000}}}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> licenses -> headers
must be a boolean"""
),
):
_ = config.template.licenses.headers
def test_licenses_default(self):
config = RootConfig({"template": {"licenses": {"default": ["Apache-2.0", "MIT"]}}})
assert config.template.licenses.default == config.template.licenses.default == ["Apache-2.0", "MIT"]
assert config.raw_data == {"template": {"licenses": {"default": ["Apache-2.0", "MIT"]}}}
def test_licenses_default_default(self):
config = RootConfig({})
assert config.template.licenses.default == ["MIT"]
assert config.raw_data == {"template": {"licenses": {"default": ["MIT"]}}}
def test_licenses_default_not_array(self, helpers):
config = RootConfig({"template": {"licenses": {"default": 9000}}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> licenses -> default
must be an array"""
),
):
_ = config.template.licenses.default
def test_licenses_default_entry_not_string(self, helpers):
config = RootConfig({"template": {"licenses": {"default": [9000]}}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> licenses -> default -> 1
must be a string"""
),
):
_ = config.template.licenses.default
def test_licenses_default_set_lazy_error(self, helpers):
config = RootConfig({})
config.template.licenses.default = 9000
assert config.raw_data == {"template": {"licenses": {"default": 9000}}}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> licenses -> default
must be an array"""
),
):
_ = config.template.licenses.default
def test_plugins(self):
config = RootConfig({"template": {"plugins": {"foo": {"bar": "baz"}}}})
assert config.template.plugins == config.template.plugins == {"foo": {"bar": "baz"}}
assert config.raw_data == {"template": {"plugins": {"foo": {"bar": "baz"}}}}
def test_plugins_default(self):
config = RootConfig({})
assert config.template.plugins == {"default": {"ci": False, "src-layout": True, "tests": True}}
assert config.raw_data == {
"template": {"plugins": {"default": {"ci": False, "src-layout": True, "tests": True}}}
}
def test_plugins_not_table(self, helpers):
config = RootConfig({"template": {"plugins": 9000}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> plugins
must be a table"""
),
):
_ = config.template.plugins
def test_plugins_data_not_table(self, helpers):
config = RootConfig({"template": {"plugins": {"foo": 9000}}})
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> plugins -> foo
must be a table"""
),
):
_ = config.template.plugins
def test_plugins_set_lazy_error(self, helpers):
config = RootConfig({})
config.template.plugins = 9000
assert config.raw_data == {"template": {"plugins": 9000}}
with pytest.raises(
ConfigurationError,
match=helpers.dedent(
"""
Error parsing config:
template -> plugins
must be a table"""
),
):
_ = config.template.plugins
|
TestTemplate
|
python
|
pytorch__pytorch
|
test/dynamo/test_repros.py
|
{
"start": 6314,
"end": 7976
}
|
class ____:
# from detectron2 poolers.py
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
"""
device = (
tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
)
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()
self.tensor = tensor
def __len__(self) -> int:
return self.tensor.shape[0]
@property
def device(self):
return self.tensor.device
def convert_boxes_to_pooler_format(box_lists):
# from detectron2 structures.py
boxes = torch.cat([x.tensor for x in box_lists], dim=0)
# __len__ returns Tensor in tracing.
sizes = shapes_to_tensor([x.__len__() for x in box_lists], device=boxes.device)
indices = torch.repeat_interleave(
torch.arange(len(box_lists), dtype=boxes.dtype, device=boxes.device), sizes
)
return cat([indices[:, None], boxes], dim=1)
ReformerBackwardOutput = namedtuple(
"ReformerBackwardOutput",
["attn_output", "hidden_states", "grad_attn_output", "grad_hidden_states"],
)
ReformerEncoderOutput = namedtuple(
"ReformerEncoderOutput",
["hidden_states", "all_hidden_states", "all_attentions", "past_buckets_states"],
)
|
Boxes
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_hourly_reports.py
|
{
"start": 41147,
"end": 49580
}
|
class ____(HourlyReportsTestWithStateChangesAfterMigration):
stream_name = "campaign_impression_performance_report_hourly"
report_file = "campaign_impression_performance_report_hourly"
records_number = 24
state_file = "hourly_reports_state"
incremental_report_file = "campaign_impression_performance_report_hourly_incremental"
report_file_with_records_further_start_date = "campaign_impression_performance_report_hourly_with_records_further_config_start_date"
state_file_legacy = "hourly_reports_state_legacy"
state_file_after_migration = "hourly_reports_state_after_migration"
state_file_after_migration_with_cursor_further_config_start_date = (
"hourly_reports_state_after_migration_with_cursor_further_config_start_date"
)
incremental_report_file_with_records_further_cursor = (
"campaign_impression_performance_report_hourly_incremental_with_records_further_cursor"
)
def mock_report_apis(self):
self.mock_user_query_api(response_template="user_query")
self.mock_accounts_search_api(
response_template="accounts_search_for_report",
body=b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "CampaignPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "CampaignPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignStatus", "CampaignName", "CampaignId", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "Conversions", "ConversionRate", "CostPerConversion", "LowQualityClicks", "LowQualityClicksPercent", "LowQualityImpressions", "LowQualityImpressionsPercent", "LowQualityConversions", "LowQualityConversionRate", "DeviceType", "QualityScore", "ExpectedCtr", "AdRelevance", "LandingPageExperience", "PhoneImpressions", "PhoneCalls", "Ptr", "Network", "Assists", "Revenue", "ReturnOnAdSpend", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "TrackingTemplate", "CustomParameters", "AccountStatus", "LowQualityGeneralClicks", "LowQualitySophisticatedClicks", "CampaignLabels", "FinalUrlSuffix", "CampaignType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "BaseCampaignId", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "AverageCpm", "ConversionsQualified", "LowQualityConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue", "VideoViews", "ViewThroughRate", "AverageCPV", "VideoViewsAt25Percent", "VideoViewsAt50Percent", "VideoViewsAt75Percent", "CompletedVideoViews", "VideoCompletionRate", "TotalWatchTimeInMS", "AverageWatchTimePerVideoView", "AverageWatchTimePerImpression", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, 
"CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for second read
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "CampaignPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "CampaignPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignStatus", "CampaignName", "CampaignId", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "Conversions", "ConversionRate", "CostPerConversion", "LowQualityClicks", "LowQualityClicksPercent", "LowQualityImpressions", "LowQualityImpressionsPercent", "LowQualityConversions", "LowQualityConversionRate", "DeviceType", "QualityScore", "ExpectedCtr", "AdRelevance", "LandingPageExperience", "PhoneImpressions", "PhoneCalls", "Ptr", "Network", "Assists", "Revenue", "ReturnOnAdSpend", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "TrackingTemplate", "CustomParameters", "AccountStatus", "LowQualityGeneralClicks", "LowQualitySophisticatedClicks", "CampaignLabels", "FinalUrlSuffix", "CampaignType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "BaseCampaignId", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "AverageCpm", "ConversionsQualified", "LowQualityConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue", "VideoViews", "ViewThroughRate", "AverageCPV", "VideoViewsAt25Percent", "VideoViewsAt50Percent", "VideoViewsAt75Percent", "CompletedVideoViews", "VideoCompletionRate", "TotalWatchTimeInMS", "AverageWatchTimePerVideoView", "AverageWatchTimePerImpression", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, 
"CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for no config start date test
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "CampaignPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "CampaignPerformanceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignStatus", "CampaignName", "CampaignId", "CurrencyCode", "AdDistribution", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "AveragePosition", "Conversions", "ConversionRate", "CostPerConversion", "LowQualityClicks", "LowQualityClicksPercent", "LowQualityImpressions", "LowQualityImpressionsPercent", "LowQualityConversions", "LowQualityConversionRate", "DeviceType", "QualityScore", "ExpectedCtr", "AdRelevance", "LandingPageExperience", "PhoneImpressions", "PhoneCalls", "Ptr", "Network", "Assists", "Revenue", "ReturnOnAdSpend", "CostPerAssist", "RevenuePerConversion", "RevenuePerAssist", "TrackingTemplate", "CustomParameters", "AccountStatus", "LowQualityGeneralClicks", "LowQualitySophisticatedClicks", "CampaignLabels", "FinalUrlSuffix", "CampaignType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "BaseCampaignId", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllReturnOnAdSpend", "AllRevenuePerConversion", "ViewThroughConversions", "AverageCpm", "ConversionsQualified", "LowQualityConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue", "VideoViews", "ViewThroughRate", "AverageCPV", "VideoViewsAt25Percent", "VideoViewsAt50Percent", "VideoViewsAt75Percent", "CompletedVideoViews", "VideoCompletionRate", "TotalWatchTimeInMS", "AverageWatchTimePerVideoView", "AverageWatchTimePerImpression", "Sales", "CostPerSale", "RevenuePerSale", "Installs", "CostPerInstall", "RevenuePerInstall"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, 
"CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Poll", response_template="generate_report_poll", body=b'{"ReportRequestId": "thisisthereport_requestid"}'
)
|
TestCampaignImpressionPerformanceReportHourlyStream
|
python
|
gevent__gevent
|
src/greentest/3.10/test_socket.py
|
{
"start": 125368,
"end": 127974
}
|
class ____(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
|
CmsgMacroTests
|
python
|
huggingface__transformers
|
src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
{
"start": 2323,
"end": 8043
}
|
class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
# Ignore copy
def __init__(self, config: RecurrentGemmaConfig, device=None):
super().__init__()
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
raise ValueError(
f"RecurrentGemmaRotaryEmbedding does not support RoPE types other than `default` but got {self.rope_type}"
)
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
# Ignore copy
def compute_default_rope_parameters(
config: Optional[RecurrentGemmaConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
dim = int(head_dim * partial_rotary_factor)
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
RecurrentGemmaRotaryEmbedding
|
python
|
PyCQA__pylint
|
doc/data/messages/i/invalid-format-returned/bad.py
|
{
"start": 0,
"end": 148
}
|
class ____:
"""__format__ returns <type 'int'>"""
def __format__(self, format_spec): # [invalid-format-returned]
return 1
|
CustomFormat
|
python
|
dask__distributed
|
distributed/worker_state_machine.py
|
{
"start": 13048,
"end": 13127
}
|
class ____(Instruction):
__slots__ = ("key",)
key: Key
@dataclass
|
Execute
|
python
|
bokeh__bokeh
|
src/bokeh/io/notebook.py
|
{
"start": 2487,
"end": 4853
}
|
class ____:
'''
'''
_json: Any = {}
_cellno: int | None
_doc: Document
def __init__(self, comms: Comm, cell_doc: Document) -> None:
self._cellno = None
try:
from IPython import get_ipython
ip = get_ipython()
assert ip is not None
hm = ip.history_manager
assert hm is not None
p_prompt = next(iter(hm.get_tail(1, include_latest=True)))[1]
self._cellno = p_prompt
except Exception as e:
log.debug("Could not get Notebook cell number, reason: %s", e)
self._comms = comms
self._doc = cell_doc
# Our internal copy of the doc is in perpetual "hold". Events from the
# originating doc will be triggered and collected. Events are
# processed/cleared when push_notebook is called for this comms handle
self._doc.hold()
def _repr_html_(self) -> str:
if self._cellno is not None:
return f"<p><code><Bokeh Notebook handle for <strong>In[{self._cellno}]</strong>></code></p>"
else:
return "<p><code><Bokeh Notebook handle></code></p>"
@property
def comms(self) -> Comm:
return self._comms
@property
def doc(self) -> Document:
return self._doc
# Adding this method makes curdoc dispatch to this Comms to handle
# and Document model changed events. If we find that the event is
# for a model in our internal copy of the docs, then trigger the
# internal doc with the event so that it is collected (until a
# call to push_notebook processes and clear collected events)
def _document_model_changed(self, event: ModelChangedEvent) -> None:
if event.model.id in self.doc.models:
self.doc.callbacks.trigger_on_change(event)
def _column_data_changed(self, event: ColumnDataChangedEvent) -> None:
if event.model.id in self.doc.models:
self.doc.callbacks.trigger_on_change(event)
def _columns_streamed(self, event: ColumnsStreamedEvent) -> None:
if event.model.id in self.doc.models:
self.doc.callbacks.trigger_on_change(event)
def _columns_patched(self, event: ColumnsPatchedEvent) -> None:
if event.model.id in self.doc.models:
self.doc.callbacks.trigger_on_change(event)
|
CommsHandle
|
python
|
aio-libs__aiohttp
|
tests/test_client_response.py
|
{
"start": 540,
"end": 47547
}
|
class ____(mock.AsyncMock):
def done(self) -> bool:
return True
@pytest.fixture
def session() -> mock.Mock:
return mock.Mock()
async def test_http_processing_error(session: ClientSession) -> None:
loop = mock.Mock()
url = URL("http://del-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
loop.get_debug = mock.Mock()
loop.get_debug.return_value = True
connection = mock.Mock()
connection.protocol = aiohttp.DataQueue(loop)
connection.protocol.set_exception(http.HttpProcessingError())
with pytest.raises(aiohttp.ClientResponseError) as info:
await response.start(connection)
assert info.value.request_info.url is url
response.close()
def test_del(session: ClientSession) -> None:
loop = mock.Mock()
url = URL("http://del-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
loop.get_debug = mock.Mock()
loop.get_debug.return_value = True
connection = mock.Mock()
response._closed = False
response._connection = connection
loop.set_exception_handler(lambda loop, ctx: None)
with pytest.warns(ResourceWarning):
del response
gc.collect()
connection.release.assert_called_with()
def test_close(loop: asyncio.AbstractEventLoop, session: ClientSession) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
response._closed = False
response._connection = mock.Mock()
response.close()
assert response.connection is None
response.close()
response.close()
def test_wait_for_100_1(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://python.org")
response = ClientResponse(
"get",
url,
continue100=loop.create_future(),
writer=WriterMock(),
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
assert response._continue is not None
response.close()
def test_wait_for_100_2(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://python.org")
response = ClientResponse(
"get",
url,
continue100=None,
writer=WriterMock(),
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
assert response._continue is None
response.close()
def test_repr(loop: asyncio.AbstractEventLoop, session: ClientSession) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
response.status = 200
response.reason = "Ok"
assert "<ClientResponse(http://def-cl-resp.org) [200 Ok]>" in repr(response)
def test_repr_non_ascii_url() -> None:
url = URL("http://fake-host.org/\u03bb")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
assert "<ClientResponse(http://fake-host.org/%CE%BB) [None None]>" in repr(response)
def test_repr_non_ascii_reason() -> None:
url = URL("http://fake-host.org/path")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
response.reason = "\u03bb"
assert "<ClientResponse(http://fake-host.org/path) [None \\u03bb]>" in repr(
response
)
async def test_read_and_release_connection(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result(b"payload")
return fut
content = response.content = mock.Mock()
content.read.side_effect = side_effect
res = await response.read()
assert res == b"payload"
assert response._connection is None
async def test_read_and_release_connection_with_error(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
content = response.content = mock.Mock()
content.read.return_value = loop.create_future()
content.read.return_value.set_exception(ValueError)
with pytest.raises(ValueError):
await response.read()
assert response._closed
async def test_release(loop: asyncio.AbstractEventLoop, session: ClientSession) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
fut = loop.create_future()
fut.set_result(b"")
content = response.content = mock.Mock()
content.readany.return_value = fut
response.release()
assert response._connection is None
@pytest.mark.skipif(
sys.implementation.name != "cpython",
reason="Other implementations has different GC strategies",
)
async def test_release_on_del(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
connection = mock.Mock()
connection.protocol.upgraded = False
def run(conn: Connection) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
response._closed = False
response._connection = conn
run(connection)
assert connection.release.called
async def test_response_eof(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=None,
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
response._closed = False
conn = response._connection = mock.Mock()
conn.protocol.upgraded = False
response._response_eof()
assert conn.release.called
assert response._connection is None
async def test_response_eof_upgraded(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
conn = response._connection = mock.Mock()
conn.protocol.upgraded = True
response._response_eof()
assert not conn.release.called
assert response._connection is conn
async def test_response_eof_after_connection_detach(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=None,
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
response._closed = False
conn = response._connection = mock.Mock()
conn.protocol = None
response._response_eof()
assert conn.release.called
assert response._connection is None
async def test_text(loop: asyncio.AbstractEventLoop, session: ClientSession) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result('{"тест": "пройден"}'.encode("cp1251"))
return fut
h = {"Content-Type": "application/json;charset=cp1251"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = side_effect
res = await response.text()
assert res == '{"тест": "пройден"}'
assert response._connection is None
async def test_text_bad_encoding(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result('{"тестkey": "пройденvalue"}'.encode("cp1251"))
return fut
# lie about the encoding
h = {"Content-Type": "application/json;charset=utf-8"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = side_effect
with pytest.raises(UnicodeDecodeError):
await response.text()
# only the valid utf-8 characters will be returned
res = await response.text(errors="ignore")
assert res == '{"key": "value"}'
assert response._connection is None
async def test_text_badly_encoded_encoding_header(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
session._resolve_charset = lambda *_: "utf-8"
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result(b"foo")
return fut
h = {"Content-Type": "text/html; charset=\udc81gutf-8\udc81\udc8d"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = side_effect
await response.read()
encoding = response.get_encoding()
assert encoding == "utf-8"
async def test_text_custom_encoding(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result('{"тест": "пройден"}'.encode("cp1251"))
return fut
h = {"Content-Type": "application/json"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = side_effect
with mock.patch.object(response, "get_encoding") as m:
res = await response.text(encoding="cp1251")
assert res == '{"тест": "пройден"}'
assert response._connection is None
assert not m.called
@pytest.mark.parametrize("content_type", ("text/plain", "text/plain;charset=invalid"))
async def test_text_charset_resolver(
content_type: str, loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
session._resolve_charset = lambda r, b: "cp1251"
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result('{"тест": "пройден"}'.encode("cp1251"))
return fut
h = {"Content-Type": content_type}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = side_effect
await response.read()
res = await response.text()
assert res == '{"тест": "пройден"}'
assert response._connection is None
assert response.get_encoding() == "cp1251"
async def test_get_encoding_body_none(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
h = {"Content-Type": "text/html"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = AssertionError
with pytest.raises(
RuntimeError,
match="^Cannot compute fallback encoding of a not yet read body$",
):
response.get_encoding()
assert response.closed
async def test_text_after_read(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result('{"тест": "пройден"}'.encode("cp1251"))
return fut
h = {"Content-Type": "application/json;charset=cp1251"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = side_effect
res = await response.text()
assert res == '{"тест": "пройден"}'
assert response._connection is None
async def test_json(loop: asyncio.AbstractEventLoop, session: ClientSession) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result('{"тест": "пройден"}'.encode("cp1251"))
return fut
h = {"Content-Type": "application/json;charset=cp1251"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = side_effect
res = await response.json()
assert res == {"тест": "пройден"}
assert response._connection is None
async def test_json_extended_content_type(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result('{"тест": "пройден"}'.encode("cp1251"))
return fut
h = {"Content-Type": "application/this.is-1_content+subtype+json;charset=cp1251"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = side_effect
res = await response.json()
assert res == {"тест": "пройден"}
assert response._connection is None
async def test_json_custom_content_type(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result('{"тест": "пройден"}'.encode("cp1251"))
return fut
h = {"Content-Type": "custom/type;charset=cp1251"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = side_effect
res = await response.json(content_type="custom/type")
assert res == {"тест": "пройден"}
assert response._connection is None
async def test_json_custom_loader(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
h = {"Content-Type": "application/json;charset=cp1251"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
response._body = b"data"
def custom(content: str) -> str:
return content + "-custom"
res = await response.json(loads=custom)
assert res == "data-custom"
async def test_json_invalid_content_type(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
h = {"Content-Type": "data/octet-stream"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
response._body = b""
response.status = 500
with pytest.raises(aiohttp.ContentTypeError) as info:
await response.json()
assert info.value.request_info == response.request_info
assert info.value.status == 500
async def test_json_no_content(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
h = {"Content-Type": "application/json"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
response._body = b""
with pytest.raises(JSONDecodeError):
await response.json(content_type=None)
async def test_json_override_encoding(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result('{"тест": "пройден"}'.encode("cp1251"))
return fut
h = {"Content-Type": "application/json;charset=utf8"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = side_effect
with mock.patch.object(response, "get_encoding") as m:
res = await response.json(encoding="cp1251")
assert res == {"тест": "пройден"}
assert response._connection is None
assert not m.called
def test_get_encoding_unknown(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
h = {"Content-Type": "application/json"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
assert response.get_encoding() == "utf-8"
def test_raise_for_status_2xx() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
response.status = 200
response.reason = "OK"
response.raise_for_status() # should not raise
def test_raise_for_status_4xx() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
response.status = 409
response.reason = "CONFLICT"
with pytest.raises(aiohttp.ClientResponseError) as cm:
response.raise_for_status()
assert str(cm.value.status) == "409"
assert str(cm.value.message) == "CONFLICT"
assert response.closed
def test_raise_for_status_4xx_without_reason() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
response.status = 404
response.reason = ""
with pytest.raises(aiohttp.ClientResponseError) as cm:
response.raise_for_status()
assert str(cm.value.status) == "404"
assert str(cm.value.message) == ""
assert response.closed
def test_resp_host() -> None:
url = URL("http://del-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
assert "del-cl-resp.org" == response.host
def test_content_type() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
h = {"Content-Type": "application/json;charset=cp1251"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
assert "application/json" == response.content_type
def test_content_type_no_header() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
response._headers = CIMultiDictProxy(CIMultiDict({}))
assert "application/octet-stream" == response.content_type
def test_charset() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
h = {"Content-Type": "application/json;charset=cp1251"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
assert "cp1251" == response.charset
def test_charset_no_header() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
response._headers = CIMultiDictProxy(CIMultiDict({}))
assert response.charset is None
def test_charset_no_charset() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
h = {"Content-Type": "application/json"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
assert response.charset is None
def test_content_disposition_full() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
h = {"Content-Disposition": 'attachment; filename="archive.tar.gz"; foo=bar'}
response._headers = CIMultiDictProxy(CIMultiDict(h))
assert response.content_disposition is not None
assert "attachment" == response.content_disposition.type
assert "bar" == response.content_disposition.parameters["foo"]
assert "archive.tar.gz" == response.content_disposition.filename
with pytest.raises(TypeError):
response.content_disposition.parameters["foo"] = "baz" # type: ignore[index]
def test_content_disposition_no_parameters() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
h = {"Content-Disposition": "attachment"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
assert response.content_disposition is not None
assert "attachment" == response.content_disposition.type
assert response.content_disposition.filename is None
assert {} == response.content_disposition.parameters
@pytest.mark.parametrize(
"content_disposition",
(
'attachment; filename="archive.tar.gz";',
'attachment;; filename="archive.tar.gz"',
),
)
def test_content_disposition_empty_parts(content_disposition: str) -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
h = {"Content-Disposition": content_disposition}
response._headers = CIMultiDictProxy(CIMultiDict(h))
with pytest.warns(BadContentDispositionHeader):
assert response.content_disposition is not None
assert "attachment" == response.content_disposition.type
assert "archive.tar.gz" == response.content_disposition.filename
def test_content_disposition_no_header() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
response._headers = CIMultiDictProxy(CIMultiDict({}))
assert response.content_disposition is None
def test_default_encoding_is_utf8() -> None:
url = URL("http://def-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=None,
request_headers=CIMultiDict[str](),
original_url=url,
)
response._headers = CIMultiDictProxy(CIMultiDict({}))
response._body = b""
assert response.get_encoding() == "utf-8"
def test_response_request_info() -> None:
url = URL("http://def-cl-resp.org")
h = {"Content-Type": "application/json;charset=cp1251"}
headers = CIMultiDict(h)
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=headers,
original_url=url,
)
assert url == response.request_info.url
assert "get" == response.request_info.method
assert headers == response.request_info.headers
def test_request_info_in_exception() -> None:
url = URL("http://def-cl-resp.org")
h = {"Content-Type": "application/json;charset=cp1251"}
headers = CIMultiDict(h)
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=headers,
original_url=url,
)
response.status = 409
response.reason = "CONFLICT"
with pytest.raises(aiohttp.ClientResponseError) as cm:
response.raise_for_status()
assert cm.value.request_info == response.request_info
def test_no_redirect_history_in_exception() -> None:
url = URL("http://def-cl-resp.org")
h = {"Content-Type": "application/json;charset=cp1251"}
headers = CIMultiDict(h)
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=headers,
original_url=url,
)
response.status = 409
response.reason = "CONFLICT"
with pytest.raises(aiohttp.ClientResponseError) as cm:
response.raise_for_status()
assert () == cm.value.history
def test_redirect_history_in_exception() -> None:
hist_url = URL("http://def-cl-resp.org")
u = "http://def-cl-resp.org/index.htm"
url = URL(u)
hist_headers = {"Content-Type": "application/json;charset=cp1251", "Location": u}
h = {"Content-Type": "application/json;charset=cp1251"}
headers = CIMultiDict(h)
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=headers,
original_url=url,
)
response.status = 409
response.reason = "CONFLICT"
hist_response = ClientResponse(
"get",
hist_url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=headers,
original_url=hist_url,
)
hist_response._headers = CIMultiDictProxy(CIMultiDict(hist_headers))
hist_response.status = 301
hist_response.reason = "REDIRECT"
response._history = (hist_response,)
with pytest.raises(aiohttp.ClientResponseError) as cm:
response.raise_for_status()
assert (hist_response,) == cm.value.history
async def test_response_read_triggers_callback(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
trace = mock.Mock()
trace.send_response_chunk_received = mock.AsyncMock()
response_method = "get"
response_url = URL("http://def-cl-resp.org")
response_body = b"This is response"
response = ClientResponse(
response_method,
response_url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
loop=loop,
session=session,
traces=[trace],
request_headers=CIMultiDict[str](),
original_url=response_url,
)
def side_effect(*args: object, **kwargs: object) -> "asyncio.Future[bytes]":
fut = loop.create_future()
fut.set_result(response_body)
return fut
h = {"Content-Type": "application/json;charset=cp1251"}
response._headers = CIMultiDictProxy(CIMultiDict(h))
content = response.content = mock.Mock()
content.read.side_effect = side_effect
res = await response.read()
assert res == response_body
assert response._connection is None
assert trace.send_response_chunk_received.called
assert trace.send_response_chunk_received.call_args == mock.call(
response_method, response_url, response_body
)
def test_response_cookies(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://python.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
cookies = response.cookies
# Ensure the same cookies object is returned each time
assert response.cookies is cookies
def test_response_real_url(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org/#urlfragment")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
assert response.url == url.with_fragment(None)
assert response.real_url == url
def test_response_links_comma_separated(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org/")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
h = (
(
"Link",
(
"<http://example.com/page/1.html>; rel=next, "
"<http://example.com/>; rel=home"
),
),
)
response._headers = CIMultiDictProxy(CIMultiDict(h))
assert response.links == {
"next": {"url": URL("http://example.com/page/1.html"), "rel": "next"},
"home": {"url": URL("http://example.com/"), "rel": "home"},
}
def test_response_links_multiple_headers(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org/")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
h = (
("Link", "<http://example.com/page/1.html>; rel=next"),
("Link", "<http://example.com/>; rel=home"),
)
response._headers = CIMultiDictProxy(CIMultiDict(h))
assert response.links == {
"next": {"url": URL("http://example.com/page/1.html"), "rel": "next"},
"home": {"url": URL("http://example.com/"), "rel": "home"},
}
def test_response_links_no_rel(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org/")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
h = (("Link", "<http://example.com/>"),)
response._headers = CIMultiDictProxy(CIMultiDict(h))
assert response.links == {
"http://example.com/": {"url": URL("http://example.com/")}
}
def test_response_links_quoted(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org/")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
h = (("Link", '<http://example.com/>; rel="home-page"'),)
response._headers = CIMultiDictProxy(CIMultiDict(h))
assert response.links == {
"home-page": {"url": URL("http://example.com/"), "rel": "home-page"}
}
def test_response_links_relative(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org/")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
h = (("Link", "</relative/path>; rel=rel"),)
response._headers = CIMultiDictProxy(CIMultiDict(h))
assert response.links == {
"rel": {"url": URL("http://def-cl-resp.org/relative/path"), "rel": "rel"}
}
def test_response_links_empty(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
url = URL("http://def-cl-resp.org/")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
response._headers = CIMultiDictProxy(CIMultiDict())
assert response.links == {}
def test_response_not_closed_after_get_ok(mocker: MockerFixture) -> None:
url = URL("http://del-cl-resp.org")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=mock.Mock(),
request_headers=CIMultiDict[str](),
original_url=url,
)
response.status = 400
response.reason = "Bad Request"
response._closed = False
spy = mocker.spy(response, "raise_for_status")
assert not response.ok
assert not response.closed
assert spy.call_count == 0
def test_response_duplicate_cookie_names(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
"""
Test that response.cookies handles duplicate cookie names correctly.
Note: This behavior (losing cookies with same name but different domains/paths)
is arguably undesirable, but we promise to return a SimpleCookie object, and
SimpleCookie uses cookie name as the key. This is documented behavior.
To access all cookies including duplicates, users should use:
- response.headers.getall('Set-Cookie') for raw headers
- The session's cookie jar correctly stores all cookies
"""
url = URL("http://example.com")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
# Set headers with duplicate cookie names but different domains
headers = CIMultiDict(
[
(
"Set-Cookie",
"session-id=123-4567890; Domain=.example.com; Path=/; Secure",
),
("Set-Cookie", "session-id=098-7654321; Domain=.www.example.com; Path=/"),
("Set-Cookie", "user-pref=dark; Domain=.example.com; Path=/"),
("Set-Cookie", "user-pref=light; Domain=api.example.com; Path=/"),
]
)
response._headers = CIMultiDictProxy(headers)
# Set raw cookie headers as done in ClientResponse.start()
response._raw_cookie_headers = tuple(headers.getall("Set-Cookie", []))
# SimpleCookie only keeps the last cookie with each name
# This is expected behavior since SimpleCookie uses name as the key
assert len(response.cookies) == 2 # Only 'session-id' and 'user-pref'
assert response.cookies["session-id"].value == "098-7654321" # Last one wins
assert response.cookies["user-pref"].value == "light" # Last one wins
def test_response_raw_cookie_headers_preserved(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
"""Test that raw Set-Cookie headers are preserved in _raw_cookie_headers."""
url = URL("http://example.com")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
# Set headers with multiple cookies
cookie_headers = [
"session-id=123; Domain=.example.com; Path=/; Secure",
"session-id=456; Domain=.www.example.com; Path=/",
"tracking=xyz; Domain=.example.com; Path=/; HttpOnly",
]
headers: CIMultiDict[str] = CIMultiDict()
for cookie_hdr in cookie_headers:
headers.add("Set-Cookie", cookie_hdr)
response._headers = CIMultiDictProxy(headers)
# Set raw cookie headers as done in ClientResponse.start()
response._raw_cookie_headers = tuple(response.headers.getall(hdrs.SET_COOKIE, []))
# Verify raw headers are preserved
assert response._raw_cookie_headers == tuple(cookie_headers)
assert len(response._raw_cookie_headers) == 3
# But SimpleCookie only has unique names
assert len(response.cookies) == 2 # 'session-id' and 'tracking'
def test_response_cookies_setter_updates_raw_headers(
loop: asyncio.AbstractEventLoop, session: ClientSession
) -> None:
"""Test that setting cookies property updates _raw_cookie_headers."""
url = URL("http://example.com")
response = ClientResponse(
"get",
url,
writer=WriterMock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=loop,
session=session,
request_headers=CIMultiDict[str](),
original_url=url,
)
# Create a SimpleCookie with some cookies
cookies = SimpleCookie()
cookies["session-id"] = "123456"
cookies["session-id"]["domain"] = ".example.com"
cookies["session-id"]["path"] = "/"
cookies["session-id"]["secure"] = True
cookies["tracking"] = "xyz789"
cookies["tracking"]["domain"] = ".example.com"
cookies["tracking"]["httponly"] = True
# Set the cookies property
response.cookies = cookies
# Verify _raw_cookie_headers was updated
assert response._raw_cookie_headers is not None
assert len(response._raw_cookie_headers) == 2
assert isinstance(response._raw_cookie_headers, tuple)
# Check the raw headers contain the expected cookie strings
raw_headers = list(response._raw_cookie_headers)
assert any("session-id=123456" in h for h in raw_headers)
assert any("tracking=xyz789" in h for h in raw_headers)
assert any("Secure" in h for h in raw_headers)
assert any("HttpOnly" in h for h in raw_headers)
# Verify cookies property returns the same object
assert response.cookies is cookies
# Test setting empty cookies
empty_cookies = SimpleCookie()
response.cookies = empty_cookies
# Should not set _raw_cookie_headers for empty cookies
assert response._raw_cookie_headers is None
|
WriterMock
|
python
|
apache__airflow
|
providers/google/tests/unit/google/suite/hooks/test_calendar.py
|
{
"start": 1542,
"end": 3678
}
|
class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = GoogleCalendarHook(api_version=API_VERSION, gcp_conn_id=GCP_CONN_ID)
@mock.patch("airflow.providers.google.suite.hooks.calendar.GoogleCalendarHook.get_conn")
def test_get_events(self, get_conn):
get_method = get_conn.return_value.events.return_value.list
execute_method = get_method.return_value.execute
execute_method.return_value = {"kind": "calendar#events", "nextPageToken": None, "items": [EVENT]}
result = self.hook.get_events(calendar_id=CALENDAR_ID)
assert result == [EVENT]
execute_method.assert_called_once_with(num_retries=NUM_RETRIES)
get_method.assert_called_once_with(
calendarId=CALENDAR_ID,
iCalUID=None,
maxAttendees=None,
maxResults=None,
orderBy=None,
pageToken=None,
privateExtendedProperty=None,
q=None,
sharedExtendedProperty=None,
showDeleted=False,
showHiddenInvitations=False,
singleEvents=False,
syncToken=None,
timeMax=None,
timeMin=None,
timeZone=None,
updatedMin=None,
)
@mock.patch("airflow.providers.google.suite.hooks.calendar.GoogleCalendarHook.get_conn")
def test_create_event(self, mock_get_conn):
create_mock = mock_get_conn.return_value.events.return_value.insert
create_mock.return_value.execute.return_value = API_RESPONSE
result = self.hook.create_event(calendar_id=CALENDAR_ID, event=EVENT)
create_mock.assert_called_once_with(
body=EVENT,
calendarId=CALENDAR_ID,
conferenceDataVersion=0,
maxAttendees=None,
sendNotifications=False,
sendUpdates="false",
supportsAttachments=False,
)
assert result == API_RESPONSE
|
TestGoogleCalendarHook
|
python
|
eth-brownie__brownie
|
brownie/typing.py
|
{
"start": 3494,
"end": 3614
}
|
class ____(TypedDict):
version: NotRequired[Optional[str]]
evm_version: NotRequired[EvmVersion]
@final
|
VyperConfig
|
python
|
coleifer__peewee
|
tests/schema.py
|
{
"start": 453,
"end": 516
}
|
class ____(TestModel):
data = TextField(unique=True)
|
TMUnique
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/multi/test_indexing.py
|
{
"start": 348,
"end": 5065
}
|
class ____:
def test_slice_locs_partial(self, idx):
sorted_idx, _ = idx.sortlevel(0)
result = sorted_idx.slice_locs(("foo", "two"), ("qux", "one"))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ("qux", "one"))
assert result == (0, 5)
result = sorted_idx.slice_locs(("foo", "two"), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs("bar", "baz")
assert result == (2, 4)
def test_slice_locs(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((50, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=50, freq="B"),
)
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(
*idx.slice_locs(
df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30),
)
)
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
stacked = df.stack()
idx = stacked.index
with pytest.raises(TypeError, match="^Level type mismatch"):
idx.slice_locs((1, 3))
with pytest.raises(TypeError, match="^Level type mismatch"):
idx.slice_locs(df.index[5] + timedelta(seconds=30), (5, 2))
df = DataFrame(
np.ones((5, 5)),
index=Index([f"i-{i}" for i in range(5)], name="a"),
columns=Index([f"i-{i}" for i in range(5)], name="a"),
)
stacked = df.stack()
idx = stacked.index
with pytest.raises(TypeError, match="^Level type mismatch"):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with pytest.raises(TypeError, match="^Level type mismatch"):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(
levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
)
msg = "[Kk]ey length.*greater than MultiIndex lexsort depth"
with pytest.raises(KeyError, match=msg):
index.slice_locs((1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(
levels=[[0, 2, 4, 6], [0, 2, 4]],
codes=[[0, 0, 0, 1, 1, 2, 3, 3, 3], [0, 1, 2, 1, 2, 2, 0, 1, 2]],
)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
@pytest.mark.parametrize(
"index_arr,expected,start_idx,end_idx",
[
([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, None),
([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, "b"),
([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, ("b", "e")),
([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), None),
([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), "c"),
([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), ("c", "e")),
],
)
def test_slice_locs_with_missing_value(
self, index_arr, expected, start_idx, end_idx
):
# issue 19132
idx = MultiIndex.from_arrays(index_arr)
result = idx.slice_locs(start=start_idx, end=end_idx)
assert result == expected
|
TestSliceLocs
|
python
|
pytorch__pytorch
|
test/distributed/fsdp/test_fsdp_ignored_modules.py
|
{
"start": 2222,
"end": 2708
}
|
class ____(Model):
"""Adds a variable number of :class:`IgnoredModule` to ``self.layer1``."""
def __init__(self, num_ignored: int) -> None:
assert num_ignored >= 0
super().__init__()
layer1_modules = (
[torch.nn.Linear(5, 4), torch.nn.Linear(4, 4)]
+ [IgnoredModule(4, 4) for _ in range(num_ignored)]
+ [torch.nn.Linear(4, 4)]
)
self.layer1 = torch.nn.Sequential(*layer1_modules)
|
ModelWithIgnoredModules
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-qianfan/llama_index/llms/qianfan/base.py
|
{
"start": 1453,
"end": 3942
}
|
class ____(BaseModel):
"""
Chat response model.
"""
result: str
def build_chat_request(
stream: bool, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatRequest:
"""
Construct a ChatRequest.
:param messages: The chat message list.
:param stream: Indicate whether to respond in stream or not.
:return: The ChatResponse object.
"""
request = ChatRequest(messages=[], stream=stream)
for message in messages:
if message.role == MessageRole.USER:
msg = ChatMsg(role="user", content=message.content)
request.messages.append(msg)
elif message.role == MessageRole.ASSISTANT:
msg = ChatMsg(role="assistant", content=message.content)
request.messages.append(msg)
elif message.role == MessageRole.SYSTEM:
request.system = message.content
else:
raise NotImplementedError(
f"The message role {message.role} is not supported."
)
return request
def parse_chat_response(resp_dict: Dict) -> ChatResponse:
"""
Parse chat response.
:param resp_dict: Response body in dict form.
:return: The ChatResponse object.
"""
resp = ChatResp(**resp_dict)
return ChatResponse(
message=ChatMessage(role=MessageRole.ASSISTANT, content=resp.result)
)
def parse_stream_chat_response(
resp_dict_iter: Iterable[Dict],
) -> Iterable[ChatResponse]:
"""
Parse streaming chat response.
:param resp_dict_iter: Iterator of the response body in dict form.
:return: Iterator of the ChatResponse object.
"""
content = ""
for resp_dict in resp_dict_iter:
resp = ChatResp(**resp_dict)
content += resp.result
yield ChatResponse(
message=ChatMessage(role=MessageRole.ASSISTANT, content=content),
delta=resp.result,
)
async def aparse_stream_chat_response(
    resp_dict_iter: AsyncIterable[Dict],
) -> AsyncIterable[ChatResponse]:
    """
    Async variant of parse_stream_chat_response.

    :param resp_dict_iter: Async iterator over chunk bodies in dict form.
    :return: Async iterator of ChatResponse objects whose content grows per chunk.
    """
    pieces = []
    async for chunk_dict in resp_dict_iter:
        chunk = ChatResp(**chunk_dict)
        pieces.append(chunk.result)
        yield ChatResponse(
            message=ChatMessage(role=MessageRole.ASSISTANT, content="".join(pieces)),
            delta=chunk.result,
        )
|
ChatResp
|
python
|
walkccc__LeetCode
|
solutions/360. Sort Transformed Array/360.py
|
{
"start": 0,
"end": 858
}
|
class ____:
    def sortTransformedArray(
        self,
        nums: list[int],
        a: int,
        b: int,
        c: int,
    ) -> list[int]:
        """Apply f(x) = a*x^2 + b*x + c to each element of sorted `nums`; return sorted results.

        Two-pointer merge: because f is a parabola, its extreme values over a
        sorted input always sit at the two ends of the remaining window.
        """

        def transform(x: int) -> int:
            return (a * x + b) * x + c

        values = [transform(x) for x in nums]
        size = len(values)
        out = [0] * size
        lo, hi = 0, size - 1
        if a > 0:
            # Opens upward: the ends hold the largest values; fill from the back.
            for pos in range(size - 1, -1, -1):
                if values[lo] > values[hi]:
                    out[pos] = values[lo]
                    lo += 1
                else:
                    out[pos] = values[hi]
                    hi -= 1
        else:
            # Opens downward (or linear when a == 0): the ends hold the
            # smallest values; fill from the front.
            for pos in range(size):
                if values[lo] < values[hi]:
                    out[pos] = values[lo]
                    lo += 1
                else:
                    out[pos] = values[hi]
                    hi -= 1
        return out
|
Solution
|
python
|
viewflow__viewflow
|
viewflow/forms/renderers.py
|
{
"start": 25283,
"end": 27498
}
|
class ____(Span):
    """Render stacked inline."""

    def __init__(
        self,
        formset_field_name: str,
        card_desktop: int = 12,
        card_tablet: int = 8,
        card_mobile: int = 4,
        **kwargs,
    ):
        """
        :param formset_field_name: Name of the form field holding the formset.
        :param card_desktop: Card width in desktop grid columns (1..12).
        :param card_tablet: Card width in tablet grid columns (1..8).
        :param card_mobile: Card width in mobile grid columns (1..4).
        """
        # Validate each card size against its grid's column count, then store it.
        for attr, value, columns in (
            ("card_desktop", card_desktop, 12),
            ("card_tablet", card_tablet, 8),
            ("card_mobile", card_mobile, 4),
        ):
            assert 1 <= value <= columns
            setattr(self, attr, value)
        super().__init__(formset_field_name, **kwargs)
def _convert_to_children(elements: List[Union[LayoutNode, str]]):
    """Normalize a mixed list of layout nodes and field names into layout nodes."""

    def _to_node(element):
        # Field names given as plain strings are wrapped in a default Span.
        if isinstance(element, LayoutNode):
            return element
        if isinstance(element, str):
            return Span(element)
        raise ValueError(f"Unknown element {element} type {type(element)}")

    return [_to_node(element) for element in elements]
def _children_sizes(spans, grid_size=12, grid_name="desktop", keep_in_row=True):
    """Resolve AUTO spans into concrete column counts for one grid row."""
    fixed_total = 0
    auto_count = 0
    for span in spans:
        if span == AUTO:
            auto_count += 1
        else:
            fixed_total += span
    if fixed_total == 0 and not keep_in_row:
        # All children are AUTO-sized: give each child its own full-width row.
        return [grid_size] * len(spans)
    remaining = grid_size - fixed_total
    # NOTE(review): divisibility is checked against grid_size, not `remaining`;
    # preserved as-is — confirm whether mixed fixed/AUTO rows are meant to
    # under-fill the grid when remaining % auto_count != 0.
    if remaining < 0 or (auto_count != 0 and grid_size % auto_count) != 0:
        raise ValueError(
            f"Can't equally spread {spans} over {grid_size} columns on a {grid_name} grid"
        )
    return [remaining // auto_count if child == AUTO else child for child in spans]
# Maps a Django form widget class to the renderer used to draw it.
# NOTE(review): appears to be looked up by widget type; confirm at the dispatch
# site whether subclasses of these widgets fall back to a default renderer.
WIDGET_RENDERERS = {
    forms.CheckboxInput: CheckboxRenderer,
    forms.CheckboxSelectMultiple: CheckboxSelectMultipleRenderer,
    forms.DateInput: DateInputRenderer,
    forms.DateTimeInput: DateTimeInputRenderer,
    forms.FileInput: FileInputRenderer,
    forms.HiddenInput: HiddenInputRenderer,
    forms.MultipleHiddenInput: MultipleHiddenInputRenderer,
    forms.PasswordInput: PasswordInputRenderer,
    forms.RadioSelect: RadioSelectRenderer,
    forms.Select: SelectRenderer,
    forms.SelectMultiple: SelectMultipleRenderer,
    forms.Textarea: TextareaRenderer,
    forms.TimeInput: TimeInputRenderer,
}
|
FormSet
|
python
|
getsentry__sentry
|
src/sentry/replays/usecases/query/conditions/selector.py
|
{
"start": 3243,
"end": 4637
}
|
class ____(GenericBase):
    """Click array condition class.

    Click array conditions can only be applied to click rows otherwise certain types of
    conditional checks will match against non-click rows and return incorrect data. For example,
    this query would return incorrect results without checking if the condition was applied to a
    click row: `?query=click.label=*`
    """

    @staticmethod
    def visit_eq(expression: Expression, value: str) -> Condition:
        # Build the plain string-array condition, then scope it to click rows.
        inner = StringArray.visit_eq(expression, value)
        return and_is_click_row(inner)

    @staticmethod
    def visit_neq(expression: Expression, value: str) -> Condition:
        inner = StringArray.visit_neq(expression, value)
        return and_is_click_row(inner)

    @staticmethod
    def visit_match(expression: Expression, value: str) -> Condition:
        inner = StringArray.visit_match(expression, value)
        return and_is_click_row(inner)

    @staticmethod
    def visit_not_match(expression: Expression, value: str) -> Condition:
        inner = StringArray.visit_not_match(expression, value)
        return and_is_click_row(inner)

    @staticmethod
    def visit_in(expression: Expression, value: list[str]) -> Condition:
        inner = StringArray.visit_in(expression, value)
        return and_is_click_row(inner)

    @staticmethod
    def visit_not_in(expression: Expression, value: list[str]) -> Condition:
        inner = StringArray.visit_not_in(expression, value)
        return and_is_click_row(inner)
|
ClickArray
|
python
|
great-expectations__great_expectations
|
great_expectations/datasource/fluent/sqlite_datasource.py
|
{
"start": 1844,
"end": 3406
}
|
class ____(_PartitionerOneColumnOneParam):
    """A partitioner that can be used for sql engines that represents datetimes as strings.

    The SQL engine that this currently supports is SQLite since it stores its datetimes as
    strings.

    The DatetimePartitioner will also work for SQLite and may be more intuitive.
    """

    # date_format_strings syntax is documented here:
    # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
    # It allows for arbitrary strings so can't be validated until conversion time.
    date_format_string: str
    column_name: str
    method_name: Literal["partition_on_converted_datetime"] = "partition_on_converted_datetime"

    @property
    @override
    def param_names(self) -> List[str]:
        # Single batch parameter: a datetime string rendered in the format
        # given by self.date_format_string.
        return ["datetime"]

    @override
    def partitioner_method_kwargs(self) -> Dict[str, Any]:
        # Keyword arguments forwarded to the partition_on_converted_datetime method.
        return dict(
            column_name=self.column_name,
            date_format_string=self.date_format_string,
        )

    @override
    def batch_parameters_to_batch_spec_kwarg_identifiers(
        self, options: BatchParameters
    ) -> Dict[str, Any]:
        # The "datetime" option selects which converted-datetime batch to build.
        if "datetime" not in options:
            raise ValueError(  # noqa: TRY003 # FIXME CoP
                "'datetime' must be specified in the batch parameters to create a batch identifier"
            )
        return {self.column_name: options["datetime"]}
|
PartitionerConvertedDateTime
|
python
|
python-pillow__Pillow
|
Tests/test_file_jpeg.py
|
{
"start": 813,
"end": 43266
}
|
class ____:
def roundtrip_with_bytes(
self, im: Image.Image, **options: Any
) -> tuple[JpegImagePlugin.JpegImageFile, int]:
out = BytesIO()
im.save(out, "JPEG", **options)
test_bytes = out.tell()
out.seek(0)
reloaded = cast(JpegImagePlugin.JpegImageFile, Image.open(out))
return reloaded, test_bytes
def roundtrip(
self, im: Image.Image, **options: Any
) -> JpegImagePlugin.JpegImageFile:
return self.roundtrip_with_bytes(im, **options)[0]
def gen_random_image(self, size: tuple[int, int], mode: str = "RGB") -> Image.Image:
"""Generates a very hard to compress file
:param size: tuple
:param mode: optional image mode
"""
return Image.frombytes(mode, size, os.urandom(size[0] * size[1] * len(mode)))
def test_sanity(self) -> None:
# internal version number
version = features.version_codec("jpg")
assert version is not None
assert re.search(r"\d+\.\d+$", version)
with Image.open(TEST_FILE) as im:
im.load()
assert im.mode == "RGB"
assert im.size == (128, 128)
assert im.format == "JPEG"
assert im.get_format_mimetype() == "image/jpeg"
@pytest.mark.parametrize("size", ((1, 0), (0, 1), (0, 0)))
def test_zero(self, size: tuple[int, int], tmp_path: Path) -> None:
f = tmp_path / "temp.jpg"
im = Image.new("RGB", size)
with pytest.raises(ValueError):
im.save(f)
def test_app(self) -> None:
# Test APP/COM reader (@PIL135)
with Image.open(TEST_FILE) as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
assert im.applist[0] == ("APP0", b"JFIF\x00\x01\x01\x01\x00`\x00`\x00\x00")
assert im.applist[1] == (
"COM",
b"File written by Adobe Photoshop\xa8 4.0\x00",
)
assert len(im.applist) == 2
assert im.info["comment"] == b"File written by Adobe Photoshop\xa8 4.0\x00"
assert im.app["COM"] == im.info["comment"]
def test_comment_write(self) -> None:
with Image.open(TEST_FILE) as im:
assert im.info["comment"] == b"File written by Adobe Photoshop\xa8 4.0\x00"
# Test that existing comment is saved by default
out = BytesIO()
im.save(out, format="JPEG")
with Image.open(out) as reloaded:
assert im.info["comment"] == reloaded.info["comment"]
# Ensure that a blank comment causes any existing comment to be removed
for comment in ("", b"", None):
out = BytesIO()
im.save(out, format="JPEG", comment=comment)
with Image.open(out) as reloaded:
assert "comment" not in reloaded.info
# Test that a comment argument overrides the default comment
for comment in ("Test comment text", b"Test comment text"):
out = BytesIO()
im.save(out, format="JPEG", comment=comment)
with Image.open(out) as reloaded:
assert reloaded.info["comment"] == b"Test comment text"
def test_cmyk(self) -> None:
# Test CMYK handling. Thanks to Tim and Charlie for test data,
# Michael for getting me to look one more time.
def check(im: ImageFile.ImageFile) -> None:
cmyk = im.getpixel((0, 0))
assert isinstance(cmyk, tuple)
c, m, y, k = (x / 255.0 for x in cmyk)
assert c == 0.0
assert m > 0.8
assert y > 0.8
assert k == 0.0
# the opposite corner is black
cmyk = im.getpixel((im.size[0] - 1, im.size[1] - 1))
assert isinstance(cmyk, tuple)
k = cmyk[3] / 255.0
assert k > 0.9
with Image.open("Tests/images/pil_sample_cmyk.jpg") as im:
# the source image has red pixels in the upper left corner.
check(im)
# roundtrip, and check again
check(self.roundtrip(im))
def test_rgb(self) -> None:
def getchannels(im: JpegImagePlugin.JpegImageFile) -> tuple[int, ...]:
return tuple(v[0] for v in im.layer)
im = hopper()
im_ycbcr = self.roundtrip(im)
assert getchannels(im_ycbcr) == (1, 2, 3)
assert_image_similar(im, im_ycbcr, 17)
im_rgb = self.roundtrip(im, keep_rgb=True)
assert getchannels(im_rgb) == (ord("R"), ord("G"), ord("B"))
assert_image_similar(im, im_rgb, 12)
@pytest.mark.parametrize(
"test_image_path",
[TEST_FILE, "Tests/images/pil_sample_cmyk.jpg"],
)
def test_dpi(self, test_image_path: str) -> None:
def test(xdpi: int, ydpi: int | None = None) -> tuple[int, int] | None:
with Image.open(test_image_path) as im:
im = self.roundtrip(im, dpi=(xdpi, ydpi or xdpi))
return im.info.get("dpi")
assert test(72) == (72, 72)
assert test(300) == (300, 300)
assert test(100, 200) == (100, 200)
assert test(0) is None # square pixels
def test_dpi_jfif_cm(self) -> None:
with Image.open("Tests/images/jfif_unit_cm.jpg") as im:
assert im.info["dpi"] == (2.54, 5.08)
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
def test_icc(self, tmp_path: Path) -> None:
# Test ICC support
with Image.open("Tests/images/rgb.jpg") as im1:
icc_profile = im1.info["icc_profile"]
assert len(icc_profile) == 3144
# Roundtrip via physical file.
f = tmp_path / "temp.jpg"
im1.save(f, icc_profile=icc_profile)
with Image.open(f) as im2:
assert im2.info.get("icc_profile") == icc_profile
# Roundtrip via memory buffer.
im1 = self.roundtrip(hopper())
im2 = self.roundtrip(hopper(), icc_profile=icc_profile)
assert_image_equal(im1, im2)
assert not im1.info.get("icc_profile")
assert im2.info.get("icc_profile")
@pytest.mark.parametrize(
"n",
(
0,
1,
3,
4,
5,
65533 - 14, # full JPEG marker block
65533 - 14 + 1, # full block plus one byte
ImageFile.MAXBLOCK, # full buffer block
ImageFile.MAXBLOCK + 1, # full buffer block plus one byte
ImageFile.MAXBLOCK * 4 + 3, # large block
),
)
def test_icc_big(self, n: int) -> None:
# Make sure that the "extra" support handles large blocks
# The ICC APP marker can store 65519 bytes per marker, so
# using a 4-byte test code should allow us to detect out of
# order issues.
icc_profile = (b"Test" * int(n / 4 + 1))[:n]
assert len(icc_profile) == n # sanity
im1 = self.roundtrip(hopper(), icc_profile=icc_profile)
assert im1.info.get("icc_profile") == (icc_profile or None)
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
def test_large_icc_meta(self, tmp_path: Path) -> None:
# https://github.com/python-pillow/Pillow/issues/148
# Sometimes the meta data on the icc_profile block is bigger than
# Image.MAXBLOCK or the image size.
with Image.open("Tests/images/icc_profile_big.jpg") as im:
f = tmp_path / "temp.jpg"
icc_profile = im.info["icc_profile"]
# Should not raise OSError for image with icc larger than image size.
im.save(
f,
progressive=True,
quality=95,
icc_profile=icc_profile,
optimize=True,
)
with Image.open("Tests/images/flower2.jpg") as im:
f = tmp_path / "temp2.jpg"
im.save(f, progressive=True, quality=94, icc_profile=b" " * 53955)
with Image.open("Tests/images/flower2.jpg") as im:
f = tmp_path / "temp3.jpg"
im.save(f, progressive=True, quality=94, exif=b" " * 43668)
def test_optimize(self) -> None:
im1, im1_bytes = self.roundtrip_with_bytes(hopper())
im2, im2_bytes = self.roundtrip_with_bytes(hopper(), optimize=0)
im3, im3_bytes = self.roundtrip_with_bytes(hopper(), optimize=1)
assert_image_equal(im1, im2)
assert_image_equal(im1, im3)
assert im1_bytes >= im2_bytes
assert im1_bytes >= im3_bytes
def test_optimize_large_buffer(self, tmp_path: Path) -> None:
# https://github.com/python-pillow/Pillow/issues/148
f = tmp_path / "temp.jpg"
# this requires ~ 1.5x Image.MAXBLOCK
im = Image.new("RGB", (4096, 4096), 0xFF3333)
im.save(f, format="JPEG", optimize=True)
def test_progressive(self) -> None:
im1, im1_bytes = self.roundtrip_with_bytes(hopper())
im2 = self.roundtrip(hopper(), progressive=False)
im3, im3_bytes = self.roundtrip_with_bytes(hopper(), progressive=True)
assert not im1.info.get("progressive")
assert not im2.info.get("progressive")
assert im3.info.get("progressive")
if features.check_feature("mozjpeg"):
assert_image_similar(im1, im3, 9.39)
else:
assert_image_equal(im1, im3)
assert im1_bytes >= im3_bytes
def test_progressive_large_buffer(self, tmp_path: Path) -> None:
f = tmp_path / "temp.jpg"
# this requires ~ 1.5x Image.MAXBLOCK
im = Image.new("RGB", (4096, 4096), 0xFF3333)
im.save(f, format="JPEG", progressive=True)
def test_progressive_large_buffer_highest_quality(self, tmp_path: Path) -> None:
f = tmp_path / "temp.jpg"
im = self.gen_random_image((255, 255))
# this requires more bytes than pixels in the image
im.save(f, format="JPEG", progressive=True, quality=100)
def test_progressive_cmyk_buffer(self) -> None:
# Issue 2272, quality 90 cmyk image is tripping the large buffer bug.
f = BytesIO()
im = self.gen_random_image((256, 256), "CMYK")
im.save(f, format="JPEG", progressive=True, quality=94)
def test_large_exif(self, tmp_path: Path) -> None:
# https://github.com/python-pillow/Pillow/issues/148
f = tmp_path / "temp.jpg"
im = hopper()
im.save(f, "JPEG", quality=90, exif=b"1" * 65533)
with pytest.raises(ValueError):
im.save(f, "JPEG", quality=90, exif=b"1" * 65534)
def test_exif_typeerror(self) -> None:
with Image.open("Tests/images/exif_typeerror.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
# Should not raise a TypeError
im._getexif()
def test_exif_gps(self, tmp_path: Path) -> None:
expected_exif_gps = {
0: b"\x00\x00\x00\x01",
2: 4294967295,
5: b"\x01",
30: 65535,
29: "1999:99:99 99:99:99",
}
gps_index = 34853
# Reading
with Image.open("Tests/images/exif_gps.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
exif_data = im._getexif()
assert exif_data is not None
assert exif_data[gps_index] == expected_exif_gps
# Writing
f = tmp_path / "temp.jpg"
exif = Image.Exif()
exif[gps_index] = expected_exif_gps
hopper().save(f, exif=exif)
with Image.open(f) as reloaded:
assert isinstance(reloaded, JpegImagePlugin.JpegImageFile)
exif_data = reloaded._getexif()
assert exif_data is not None
assert exif_data[gps_index] == expected_exif_gps
def test_empty_exif_gps(self) -> None:
with Image.open("Tests/images/empty_gps_ifd.jpg") as im:
exif = im.getexif()
del exif[0x8769]
# Assert that it needs to be transposed
assert exif[0x0112] == Image.Transpose.TRANSVERSE
# Assert that the GPS IFD is present and empty
assert exif.get_ifd(0x8825) == {}
transposed = ImageOps.exif_transpose(im)
exif = transposed.getexif()
assert exif.get_ifd(0x8825) == {}
# Assert that it was transposed
assert 0x0112 not in exif
def test_exif_equality(self) -> None:
# In 7.2.0, Exif rationals were changed to be read as
# TiffImagePlugin.IFDRational. This class had a bug in __eq__,
# breaking the self-equality of Exif data
exifs = []
for i in range(2):
with Image.open("Tests/images/exif-200dpcm.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
exifs.append(im._getexif())
assert exifs[0] == exifs[1]
def test_exif_rollback(self) -> None:
# rolling back exif support in 3.1 to pre-3.0 formatting.
# expected from 2.9, with b/u qualifiers switched for 3.2 compatibility
# this test passes on 2.9 and 3.1, but not 3.0
expected_exif = {
34867: 4294967295,
258: (24, 24, 24),
36867: "2099:09:29 10:10:10",
34853: {
0: b"\x00\x00\x00\x01",
2: 4294967295,
5: b"\x01",
30: 65535,
29: "1999:99:99 99:99:99",
},
296: 65535,
34665: 185,
41994: 65535,
514: 4294967295,
271: "Make",
272: "XXX-XXX",
305: "PIL",
42034: (1, 1, 1, 1),
42035: "LensMake",
34856: b"\xaa\xaa\xaa\xaa\xaa\xaa",
282: 4294967295,
33434: 4294967295,
}
with Image.open("Tests/images/exif_gps.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
exif = im._getexif()
assert exif is not None
for tag, value in expected_exif.items():
assert value == exif[tag]
def test_exif_gps_typeerror(self) -> None:
with Image.open("Tests/images/exif_gps_typeerror.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
# Should not raise a TypeError
im._getexif()
def test_progressive_compat(self) -> None:
im1 = self.roundtrip(hopper())
assert not im1.info.get("progressive")
assert not im1.info.get("progression")
im2 = self.roundtrip(hopper(), progressive=0)
im3 = self.roundtrip(hopper(), progression=0) # compatibility
assert not im2.info.get("progressive")
assert not im2.info.get("progression")
assert not im3.info.get("progressive")
assert not im3.info.get("progression")
im2 = self.roundtrip(hopper(), progressive=1)
im3 = self.roundtrip(hopper(), progression=1) # compatibility
if features.check_feature("mozjpeg"):
assert_image_similar(im1, im2, 9.39)
assert_image_similar(im1, im3, 9.39)
else:
assert_image_equal(im1, im2)
assert_image_equal(im1, im3)
assert im2.info.get("progressive")
assert im2.info.get("progression")
assert im3.info.get("progressive")
assert im3.info.get("progression")
def test_quality(self) -> None:
im1, im1_bytes = self.roundtrip_with_bytes(hopper())
im2, im2_bytes = self.roundtrip_with_bytes(hopper(), quality=50)
assert_image(im1, im2.mode, im2.size)
assert im1_bytes >= im2_bytes
im3, im3_bytes = self.roundtrip_with_bytes(hopper(), quality=0)
assert_image(im1, im3.mode, im3.size)
assert im2_bytes > im3_bytes
def test_smooth(self) -> None:
im1 = self.roundtrip(hopper())
im2 = self.roundtrip(hopper(), smooth=100)
assert_image(im1, im2.mode, im2.size)
def test_subsampling(self) -> None:
def getsampling(
im: JpegImagePlugin.JpegImageFile,
) -> tuple[int, int, int, int, int, int]:
layer = im.layer
return layer[0][1:3] + layer[1][1:3] + layer[2][1:3]
# experimental API
for subsampling in (-1, 3): # (default, invalid)
im = self.roundtrip(hopper(), subsampling=subsampling)
assert getsampling(im) == (2, 2, 1, 1, 1, 1)
for subsampling1 in (0, "4:4:4"):
im = self.roundtrip(hopper(), subsampling=subsampling1)
assert getsampling(im) == (1, 1, 1, 1, 1, 1)
for subsampling1 in (1, "4:2:2"):
im = self.roundtrip(hopper(), subsampling=subsampling1)
assert getsampling(im) == (2, 1, 1, 1, 1, 1)
for subsampling1 in (2, "4:2:0", "4:1:1"):
im = self.roundtrip(hopper(), subsampling=subsampling1)
assert getsampling(im) == (2, 2, 1, 1, 1, 1)
# RGB colorspace
for subsampling1 in (-1, 0, "4:4:4"):
# "4:4:4" doesn't really make sense for RGB, but the conversion
# to an integer happens at a higher level
im = self.roundtrip(hopper(), keep_rgb=True, subsampling=subsampling1)
assert getsampling(im) == (1, 1, 1, 1, 1, 1)
for subsampling1 in (1, "4:2:2", 2, "4:2:0", 3):
with pytest.raises(OSError):
self.roundtrip(hopper(), keep_rgb=True, subsampling=subsampling1)
with pytest.raises(TypeError):
self.roundtrip(hopper(), subsampling="1:1:1")
def test_exif(self) -> None:
with Image.open("Tests/images/pil_sample_rgb.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
info = im._getexif()
assert info is not None
assert info[305] == "Adobe Photoshop CS Macintosh"
def test_get_child_images(self) -> None:
with Image.open("Tests/images/flower.jpg") as im:
ims = im.get_child_images()
assert len(ims) == 1
assert_image_similar_tofile(ims[0], "Tests/images/flower_thumbnail.png", 2.1)
def test_mp(self) -> None:
with Image.open("Tests/images/pil_sample_rgb.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
assert im._getmp() is None
def test_quality_keep(self, tmp_path: Path) -> None:
# RGB
with Image.open("Tests/images/hopper.jpg") as im:
f = tmp_path / "temp.jpg"
im.save(f, quality="keep")
# Grayscale
with Image.open("Tests/images/hopper_gray.jpg") as im:
f = tmp_path / "temp.jpg"
im.save(f, quality="keep")
# CMYK
with Image.open("Tests/images/pil_sample_cmyk.jpg") as im:
f = tmp_path / "temp.jpg"
im.save(f, quality="keep")
def test_junk_jpeg_header(self) -> None:
# https://github.com/python-pillow/Pillow/issues/630
filename = "Tests/images/junk_jpeg_header.jpg"
with Image.open(filename):
pass
def test_ff00_jpeg_header(self) -> None:
filename = "Tests/images/jpeg_ff00_header.jpg"
with Image.open(filename):
pass
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
def test_truncated_jpeg_should_read_all_the_data(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
filename = "Tests/images/truncated_jpeg.jpg"
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
with Image.open(filename) as im:
im.load()
assert im.getbbox() is not None
def test_truncated_jpeg_throws_oserror(self) -> None:
filename = "Tests/images/truncated_jpeg.jpg"
with Image.open(filename) as im:
with pytest.raises(OSError):
im.load()
# Test that the error is raised if loaded a second time
with pytest.raises(OSError):
im.load()
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
def test_qtables(self) -> None:
def _n_qtables_helper(n: int, test_file: str) -> None:
b = BytesIO()
with Image.open(test_file) as im:
im.save(b, "JPEG", qtables=[[n] * 64] * n)
with Image.open(b) as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
assert len(im.quantization) == n
reloaded = self.roundtrip(im, qtables="keep")
assert im.quantization == reloaded.quantization
assert max(reloaded.quantization[0]) <= 255
with Image.open("Tests/images/hopper.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
qtables = im.quantization
reloaded = self.roundtrip(im, qtables=qtables, subsampling=0)
assert im.quantization == reloaded.quantization
assert_image_similar(im, self.roundtrip(im, qtables="web_low"), 30)
assert_image_similar(im, self.roundtrip(im, qtables="web_high"), 30)
assert_image_similar(im, self.roundtrip(im, qtables="keep"), 30)
# valid bounds for baseline qtable
bounds_qtable = [int(s) for s in ("255 1 " * 32).split(None)]
im2 = self.roundtrip(im, qtables=[bounds_qtable])
assert im2.quantization == {0: bounds_qtable}
# values from wizard.txt in jpeg9-a src package.
standard_l_qtable = [
int(s)
for s in """
16 11 10 16 24 40 51 61
12 12 14 19 26 58 60 55
14 13 16 24 40 57 69 56
14 17 22 29 51 87 80 62
18 22 37 56 68 109 103 77
24 35 55 64 81 104 113 92
49 64 78 87 103 121 120 101
72 92 95 98 112 100 103 99
""".split(
None
)
]
standard_chrominance_qtable = [
int(s)
for s in """
17 18 24 47 99 99 99 99
18 21 26 66 99 99 99 99
24 26 56 99 99 99 99 99
47 66 99 99 99 99 99 99
99 99 99 99 99 99 99 99
99 99 99 99 99 99 99 99
99 99 99 99 99 99 99 99
99 99 99 99 99 99 99 99
""".split(
None
)
]
for quality in range(101):
qtable_from_qtable_quality = self.roundtrip(
im,
qtables={0: standard_l_qtable, 1: standard_chrominance_qtable},
quality=quality,
).quantization
qtable_from_quality = self.roundtrip(im, quality=quality).quantization
if features.check_feature("libjpeg_turbo"):
assert qtable_from_qtable_quality == qtable_from_quality
else:
assert qtable_from_qtable_quality[0] == qtable_from_quality[0]
assert (
qtable_from_qtable_quality[1][1:] == qtable_from_quality[1][1:]
)
# list of qtable lists
assert_image_similar(
im,
self.roundtrip(
im, qtables=[standard_l_qtable, standard_chrominance_qtable]
),
30,
)
# tuple of qtable lists
assert_image_similar(
im,
self.roundtrip(
im, qtables=(standard_l_qtable, standard_chrominance_qtable)
),
30,
)
# dict of qtable lists
assert_image_similar(
im,
self.roundtrip(
im, qtables={0: standard_l_qtable, 1: standard_chrominance_qtable}
),
30,
)
_n_qtables_helper(1, "Tests/images/hopper_gray.jpg")
_n_qtables_helper(1, "Tests/images/pil_sample_rgb.jpg")
_n_qtables_helper(2, "Tests/images/pil_sample_rgb.jpg")
_n_qtables_helper(3, "Tests/images/pil_sample_rgb.jpg")
_n_qtables_helper(1, "Tests/images/pil_sample_cmyk.jpg")
_n_qtables_helper(2, "Tests/images/pil_sample_cmyk.jpg")
_n_qtables_helper(3, "Tests/images/pil_sample_cmyk.jpg")
_n_qtables_helper(4, "Tests/images/pil_sample_cmyk.jpg")
# not a sequence
with pytest.raises(ValueError):
self.roundtrip(im, qtables="a")
# sequence wrong length
with pytest.raises(ValueError):
self.roundtrip(im, qtables=[])
# sequence wrong length
with pytest.raises(ValueError):
self.roundtrip(im, qtables=[1, 2, 3, 4, 5])
# qtable entry not a sequence
with pytest.raises(ValueError):
self.roundtrip(im, qtables=[1])
# qtable entry has wrong number of items
with pytest.raises(ValueError):
self.roundtrip(im, qtables=[[1, 2, 3, 4]])
def test_load_16bit_qtables(self) -> None:
with Image.open("Tests/images/hopper_16bit_qtables.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
assert len(im.quantization) == 2
assert len(im.quantization[0]) == 64
assert max(im.quantization[0]) > 255
def test_save_multiple_16bit_qtables(self) -> None:
with Image.open("Tests/images/hopper_16bit_qtables.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
im2 = self.roundtrip(im, qtables="keep")
assert im.quantization == im2.quantization
def test_save_single_16bit_qtable(self) -> None:
with Image.open("Tests/images/hopper_16bit_qtables.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
im2 = self.roundtrip(im, qtables={0: im.quantization[0]})
assert len(im2.quantization) == 1
assert im2.quantization[0] == im.quantization[0]
def test_save_low_quality_baseline_qtables(self) -> None:
with Image.open(TEST_FILE) as im:
im2 = self.roundtrip(im, quality=10)
assert len(im2.quantization) == 2
assert max(im2.quantization[0]) <= 255
assert max(im2.quantization[1]) <= 255
@pytest.mark.parametrize(
"blocks, rows, markers",
((0, 0, 0), (1, 0, 15), (3, 0, 5), (8, 0, 1), (0, 1, 3), (0, 2, 1)),
)
def test_restart_markers(self, blocks: int, rows: int, markers: int) -> None:
im = Image.new("RGB", (32, 32)) # 16 MCUs
out = BytesIO()
im.save(
out,
format="JPEG",
restart_marker_blocks=blocks,
restart_marker_rows=rows,
# force 8x8 pixel MCUs
subsampling=0,
)
assert len(re.findall(b"\xff[\xd0-\xd7]", out.getvalue())) == markers
@pytest.mark.skipif(not djpeg_available(), reason="djpeg not available")
def test_load_djpeg(self) -> None:
with Image.open(TEST_FILE) as img:
assert isinstance(img, JpegImagePlugin.JpegImageFile)
img.load_djpeg()
assert_image_similar_tofile(img, TEST_FILE, 5)
def test_no_duplicate_0x1001_tag(self) -> None:
# Arrange
tag_ids = {v: k for k, v in ExifTags.TAGS.items()}
# Assert
assert tag_ids["RelatedImageWidth"] == 0x1001
assert tag_ids["RelatedImageLength"] == 0x1002
def test_MAXBLOCK_scaling(self, tmp_path: Path) -> None:
im = self.gen_random_image((512, 512))
f = tmp_path / "temp.jpeg"
im.save(f, quality=100, optimize=True)
with Image.open(f) as reloaded:
# none of these should crash
reloaded.save(f, quality="keep")
reloaded.save(f, quality="keep", progressive=True)
reloaded.save(f, quality="keep", optimize=True)
def test_bad_mpo_header(self) -> None:
"""Treat unknown MPO as JPEG"""
# Arrange
# Act
# Shouldn't raise error
with pytest.warns(UserWarning, match="malformed MPO file"):
im = Image.open("Tests/images/sugarshack_bad_mpo_header.jpg")
# Assert
assert im.format == "JPEG"
im.close()
@pytest.mark.parametrize("mode", ("1", "L", "RGB", "RGBX", "CMYK", "YCbCr"))
def test_save_correct_modes(self, mode: str) -> None:
out = BytesIO()
img = Image.new(mode, (20, 20))
img.save(out, "JPEG")
@pytest.mark.parametrize("mode", ("LA", "La", "RGBA", "RGBa", "P"))
def test_save_wrong_modes(self, mode: str) -> None:
# ref https://github.com/python-pillow/Pillow/issues/2005
out = BytesIO()
img = Image.new(mode, (20, 20))
with pytest.raises(OSError):
img.save(out, "JPEG")
def test_save_tiff_with_dpi(self, tmp_path: Path) -> None:
# Arrange
outfile = tmp_path / "temp.tif"
with Image.open("Tests/images/hopper.tif") as im:
# Act
im.save(outfile, "JPEG", dpi=im.info["dpi"])
# Assert
with Image.open(outfile) as reloaded:
reloaded.load()
assert im.info["dpi"] == reloaded.info["dpi"]
def test_save_dpi_rounding(self, tmp_path: Path) -> None:
outfile = tmp_path / "temp.jpg"
with Image.open("Tests/images/hopper.jpg") as im:
im.save(outfile, dpi=(72.2, 72.2))
with Image.open(outfile) as reloaded:
assert reloaded.info["dpi"] == (72, 72)
im.save(outfile, dpi=(72.8, 72.8))
with Image.open(outfile) as reloaded:
assert reloaded.info["dpi"] == (73, 73)
def test_dpi_tuple_from_exif(self) -> None:
# Arrange
# This Photoshop CC 2017 image has DPI in EXIF not metadata
# EXIF XResolution is (2000000, 10000)
with Image.open("Tests/images/photoshop-200dpi.jpg") as im:
# Act / Assert
assert im.info.get("dpi") == (200, 200)
def test_dpi_int_from_exif(self) -> None:
# Arrange
# This image has DPI in EXIF not metadata
# EXIF XResolution is 72
with Image.open("Tests/images/exif-72dpi-int.jpg") as im:
# Act / Assert
assert im.info.get("dpi") == (72, 72)
def test_dpi_from_dpcm_exif(self) -> None:
# Arrange
# This is photoshop-200dpi.jpg with EXIF resolution unit set to cm:
# exiftool -exif:ResolutionUnit=cm photoshop-200dpi.jpg
with Image.open("Tests/images/exif-200dpcm.jpg") as im:
# Act / Assert
assert im.info.get("dpi") == (508, 508)
def test_dpi_exif_zero_division(self) -> None:
# Arrange
# This is photoshop-200dpi.jpg with EXIF resolution set to 0/0:
# exiftool -XResolution=0/0 -YResolution=0/0 photoshop-200dpi.jpg
with Image.open("Tests/images/exif-dpi-zerodivision.jpg") as im:
# Act / Assert
# This should return the default, and not raise a ZeroDivisionError
assert im.info.get("dpi") == (72, 72)
def test_dpi_exif_string(self) -> None:
# Arrange
# 0x011A tag in this exif contains string '300300\x02'
with Image.open("Tests/images/broken_exif_dpi.jpg") as im:
# Act / Assert
# This should return the default
assert im.info.get("dpi") == (72, 72)
def test_dpi_exif_truncated(self) -> None:
# Arrange
with Image.open("Tests/images/truncated_exif_dpi.jpg") as im:
# Act / Assert
# This should return the default
assert im.info.get("dpi") == (72, 72)
def test_no_dpi_in_exif(self) -> None:
# Arrange
# This is photoshop-200dpi.jpg with resolution removed from EXIF:
# exiftool "-*resolution*"= photoshop-200dpi.jpg
with Image.open("Tests/images/no-dpi-in-exif.jpg") as im:
# Act / Assert
# "When the image resolution is unknown, 72 [dpi] is designated."
# https://exiv2.org/tags.html
assert im.info.get("dpi") == (72, 72)
def test_invalid_exif(self) -> None:
# This is no-dpi-in-exif with the tiff header of the exif block
# hexedited from MM * to FF FF FF FF
with Image.open("Tests/images/invalid-exif.jpg") as im:
# This should return the default, and not a SyntaxError or
# OSError for unidentified image.
assert im.info.get("dpi") == (72, 72)
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
def test_exif_x_resolution(self, tmp_path: Path) -> None:
with Image.open("Tests/images/flower.jpg") as im:
exif = im.getexif()
assert exif[282] == 180
out = tmp_path / "out.jpg"
with warnings.catch_warnings():
warnings.simplefilter("error")
im.save(out, exif=exif)
with Image.open(out) as reloaded:
assert reloaded.getexif()[282] == 180
def test_invalid_exif_x_resolution(self) -> None:
# When no x or y resolution is defined in EXIF
with Image.open("Tests/images/invalid-exif-without-x-resolution.jpg") as im:
# This should return the default, and not a ValueError or
# OSError for an unidentified image.
assert im.info.get("dpi") == (72, 72)
def test_ifd_offset_exif(self) -> None:
# Arrange
# This image has been manually hexedited to have an IFD offset of 10,
# in contrast to normal 8
with Image.open("Tests/images/exif-ifd-offset.jpg") as im:
# Act / Assert
assert isinstance(im, JpegImagePlugin.JpegImageFile)
exif = im._getexif()
assert exif is not None
assert exif[306] == "2017:03:13 23:03:09"
def test_multiple_exif(self) -> None:
with Image.open("Tests/images/multiple_exif.jpg") as im:
assert im.getexif()[270] == "firstsecond"
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
def test_photoshop(self) -> None:
with Image.open("Tests/images/photoshop-200dpi.jpg") as im:
assert im.info["photoshop"][0x03ED] == {
"XResolution": 200.0,
"DisplayedUnitsX": 1,
"YResolution": 200.0,
"DisplayedUnitsY": 1,
}
# Test that the image can still load, even with broken Photoshop data
# This image had the APP13 length hexedited to be smaller
assert_image_equal_tofile(im, "Tests/images/photoshop-200dpi-broken.jpg")
# This image does not contain a Photoshop header string
with Image.open("Tests/images/app13.jpg") as im:
assert "photoshop" not in im.info
def test_photoshop_malformed_and_multiple(self) -> None:
with Image.open("Tests/images/app13-multiple.jpg") as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
assert "photoshop" in im.info
assert 24 == len(im.info["photoshop"])
apps_13_lengths = [len(v) for k, v in im.applist if k == "APP13"]
assert [65504, 24] == apps_13_lengths
def test_adobe_transform(self) -> None:
with Image.open("Tests/images/pil_sample_rgb.jpg") as im:
assert im.info["adobe_transform"] == 1
with Image.open("Tests/images/pil_sample_cmyk.jpg") as im:
assert im.info["adobe_transform"] == 2
# This image has been manually hexedited
# so that the APP14 reports its length to be 11,
# leaving no room for "adobe_transform"
with Image.open("Tests/images/truncated_app14.jpg") as im:
assert "adobe" in im.info
assert "adobe_transform" not in im.info
def test_icc_after_SOF(self) -> None:
with Image.open("Tests/images/icc-after-SOF.jpg") as im:
assert im.info["icc_profile"] == b"profile"
def test_jpeg_magic_number(self, monkeypatch: pytest.MonkeyPatch) -> None:
size = 4097
buffer = BytesIO(b"\xff" * size) # Many xff bytes
max_pos = 0
orig_read = buffer.read
def read(n: int | None = -1) -> bytes:
nonlocal max_pos
res = orig_read(n)
max_pos = max(max_pos, buffer.tell())
return res
monkeypatch.setattr(buffer, "read", read)
with pytest.raises(UnidentifiedImageError):
with Image.open(buffer):
pass
# Assert the entire file has not been read
assert 0 < max_pos < size
def test_getxmp(self) -> None:
with Image.open("Tests/images/xmp_test.jpg") as im:
if ElementTree is None:
with pytest.warns(
UserWarning,
match="XMP data cannot be read without defusedxml dependency",
):
assert im.getxmp() == {}
else:
assert "xmp" in im.info
xmp = im.getxmp()
description = xmp["xmpmeta"]["RDF"]["Description"]
assert description["DerivedFrom"] == {
"documentID": "8367D410E636EA95B7DE7EBA1C43A412",
"originalDocumentID": "8367D410E636EA95B7DE7EBA1C43A412",
}
assert description["Look"]["Description"]["Group"]["Alt"]["li"] == {
"lang": "x-default",
"text": "Profiles",
}
assert description["ToneCurve"]["Seq"]["li"] == ["0, 0", "255, 255"]
# Attribute
assert description["Version"] == "10.4"
if ElementTree is not None:
with Image.open("Tests/images/hopper.jpg") as im:
assert im.getxmp() == {}
def test_getxmp_no_prefix(self) -> None:
with Image.open("Tests/images/xmp_no_prefix.jpg") as im:
if ElementTree is None:
with pytest.warns(
UserWarning,
match="XMP data cannot be read without defusedxml dependency",
):
assert im.getxmp() == {}
else:
assert im.getxmp() == {"xmpmeta": {"key": "value"}}
def test_getxmp_padded(self) -> None:
with Image.open("Tests/images/xmp_padded.jpg") as im:
if ElementTree is None:
with pytest.warns(
UserWarning,
match="XMP data cannot be read without defusedxml dependency",
):
assert im.getxmp() == {}
else:
assert im.getxmp() == {"xmpmeta": None}
def test_save_xmp(self, tmp_path: Path) -> None:
f = tmp_path / "temp.jpg"
im = hopper()
im.save(f, xmp=b"XMP test")
with Image.open(f) as reloaded:
assert reloaded.info["xmp"] == b"XMP test"
# Check that XMP is not saved from image info
reloaded.save(f)
with Image.open(f) as reloaded:
assert "xmp" not in reloaded.info
im.save(f, xmp=b"1" * 65504)
with Image.open(f) as reloaded:
assert reloaded.info["xmp"] == b"1" * 65504
with pytest.raises(ValueError):
im.save(f, xmp=b"1" * 65505)
@timeout_unless_slower_valgrind(1)
def test_eof(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Even though this decoder never says that it is finished
# the image should still end when there is no new data
class InfiniteMockPyDecoder(ImageFile.PyDecoder):
def decode(
self, buffer: bytes | Image.SupportsArrayInterface
) -> tuple[int, int]:
return 0, 0
Image.register_decoder("INFINITE", InfiniteMockPyDecoder)
with Image.open(TEST_FILE) as im:
im.tile = [
ImageFile._Tile("INFINITE", (0, 0, 128, 128), 0, ("RGB", 0, 1)),
]
monkeypatch.setattr(ImageFile, "LOAD_TRUNCATED_IMAGES", True)
im.load()
def test_separate_tables(self) -> None:
im = hopper()
data = [] # [interchange, tables-only, image-only]
for streamtype in range(3):
out = BytesIO()
im.save(out, format="JPEG", streamtype=streamtype)
data.append(out.getvalue())
# SOI, EOI
for marker in b"\xff\xd8", b"\xff\xd9":
assert marker in data[1]
assert marker in data[2]
# DQT
markers = [b"\xff\xdb"]
if features.check_feature("libjpeg_turbo"):
# DHT
markers.append(b"\xff\xc4")
for marker in markers:
assert marker in data[1]
assert marker not in data[2]
# SOF0, SOS, APP0 (JFIF header)
for marker in b"\xff\xc0", b"\xff\xda", b"\xff\xe0":
assert marker not in data[1]
assert marker in data[2]
with Image.open(BytesIO(data[0])) as interchange_im:
with Image.open(BytesIO(data[1] + data[2])) as combined_im:
assert_image_equal(interchange_im, combined_im)
def test_repr_jpeg(self) -> None:
im = hopper()
b = im._repr_jpeg_()
assert b is not None
with Image.open(BytesIO(b)) as repr_jpeg:
assert repr_jpeg.format == "JPEG"
assert_image_similar(im, repr_jpeg, 17)
def test_repr_jpeg_error_returns_none(self) -> None:
im = hopper("F")
assert im._repr_jpeg_() is None
@pytest.mark.skipif(not is_win32(), reason="Windows only")
@skip_unless_feature("jpg")
|
TestFileJpeg
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/code_pointer.py
|
{
"start": 12034,
"end": 12424
}
|
class ____(CodePointer):
module: str
working_directory: Optional[str]
def load_target(self) -> object:
from dagster.components.core.load_defs import load_defs
module = load_python_module(self.module, self.working_directory)
return load_defs(module)
def describe(self) -> str:
return f"autoload from {self.module}"
|
AutoloadDefsModuleCodePointer
|
python
|
pytorch__pytorch
|
test/export/random_dag.py
|
{
"start": 6995,
"end": 8618
}
|
class ____(Unflatten):
"""
Generates test that unflattens a model with several nn.Modules that call
each other and access and mutate buffers. The modules are generated by
calling the nn_module_generator() method.
"""
def nn_module_generator(self):
class GenNNModule(NNModuleGenerator):
def __init__(self, n):
super().__init__()
self.n = n
self.accesses = random_dag(self.n)
self.mutations = random_dag(self.n)
self.calls = random_dag(self.n)
def gen_init_body(self, i: int):
code = Block()
code.new_line("super().__init__()")
code.new_line("self.buf = torch.nn.Buffer(torch.ones(1))")
if i < self.n - 1:
code.new_line(f"self.n{i + 1} = N{i + 1}()")
return code
def gen_forward_body(self, i: int):
def path(i, j):
if i + 1 == j:
return f"n{j}"
else:
return f"n{i + 1}.{path(i + 1, j)}"
code = Block()
for j in self.accesses[i]:
code.new_line(f"x = x + self.{path(i, j)}.buf")
for j in self.calls[i]:
code.new_line(f"x = self.{path(i, j)}(x + 1)")
for j in self.mutations[i]:
code.new_line(f"self.{path(i, j)}.buf.add_(1)")
code.new_line("return x + 1")
return code
return GenNNModule(self.n)
|
BufferUnflatten
|
python
|
cython__cython
|
tests/run/test_subclassinit.py
|
{
"start": 120,
"end": 9480
}
|
class ____(unittest.TestCase):
if not hasattr(unittest.TestCase, 'assertRegex'):
def assertRegex(self, value, regex):
self.assertTrue(re.search(regex, str(value)),
"'%s' did not match '%s'" % (value, regex))
if not hasattr(unittest.TestCase, 'assertCountEqual'):
def assertCountEqual(self, first, second):
self.assertEqual(set(first), set(second))
self.assertEqual(len(first), len(second))
def test_init_subclass(self):
class A:
initialized = False
def __init_subclass__(cls):
super().__init_subclass__()
cls.initialized = True
class B(A):
pass
self.assertFalse(A.initialized)
self.assertTrue(B.initialized)
def test_init_subclass_dict(self):
class A(dict):
initialized = False
def __init_subclass__(cls):
super().__init_subclass__()
cls.initialized = True
class B(A):
pass
self.assertFalse(A.initialized)
self.assertTrue(B.initialized)
def test_init_subclass_kwargs(self):
class A:
def __init_subclass__(cls, **kwargs):
cls.kwargs = kwargs
class B(A, x=3):
pass
self.assertEqual(B.kwargs, dict(x=3))
def test_init_subclass_error(self):
class A:
def __init_subclass__(cls):
raise RuntimeError
with self.assertRaises(RuntimeError):
class B(A):
pass
def test_init_subclass_wrong(self):
class A:
def __init_subclass__(cls, whatever):
pass
with self.assertRaises(TypeError):
class B(A):
pass
def test_init_subclass_skipped(self):
class BaseWithInit:
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.initialized = cls
class BaseWithoutInit(BaseWithInit):
pass
class A(BaseWithoutInit):
pass
self.assertIs(A.initialized, A)
self.assertIs(BaseWithoutInit.initialized, BaseWithoutInit)
def test_init_subclass_diamond(self):
class Base:
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.calls = []
class Left(Base):
pass
class Middle:
def __init_subclass__(cls, middle, **kwargs):
super().__init_subclass__(**kwargs)
cls.calls += [middle]
class Right(Base):
def __init_subclass__(cls, right="right", **kwargs):
super().__init_subclass__(**kwargs)
cls.calls += [right]
class A(Left, Middle, Right, middle="middle"):
pass
self.assertEqual(A.calls, ["right", "middle"])
self.assertEqual(Left.calls, [])
self.assertEqual(Right.calls, [])
def test_set_name(self):
class Descriptor:
def __set_name__(self, owner, name):
self.owner = owner
self.name = name
class A:
d = Descriptor()
self.assertEqual(A.d.name, "d")
self.assertIs(A.d.owner, A)
def test_set_name_metaclass(self):
class Meta(type):
def __new__(cls, name, bases, ns):
ret = super().__new__(cls, name, bases, ns)
self.assertEqual(ret.d.name, "d")
self.assertIs(ret.d.owner, ret)
return 0
class Descriptor:
def __set_name__(self, owner, name):
self.owner = owner
self.name = name
class A(metaclass=Meta):
d = Descriptor()
self.assertEqual(A, 0)
def test_set_name_error(self):
class Descriptor:
def __set_name__(self, owner, name):
1 / ZERO
with self.assertRaises((RuntimeError, ZeroDivisionError)) as cm:
class NotGoingToWork:
attr = Descriptor()
if sys.version_info >= (3, 12):
notes = cm.exception.__notes__
self.assertRegex(str(notes), r'\bNotGoingToWork\b')
self.assertRegex(str(notes), r'\battr\b')
self.assertRegex(str(notes), r'\bDescriptor\b')
else:
exc = cm.exception
self.assertRegex(str(exc), r'\bNotGoingToWork\b')
self.assertRegex(str(exc), r'\battr\b')
self.assertRegex(str(exc), r'\bDescriptor\b')
self.assertIsInstance(exc.__cause__, ZeroDivisionError)
def test_set_name_wrong(self):
class Descriptor:
def __set_name__(self):
pass
with self.assertRaises((RuntimeError, TypeError)) as cm:
class NotGoingToWork:
attr = Descriptor()
if sys.version_info >= (3, 12):
notes = cm.exception.__notes__
self.assertRegex(str(notes), r'\bNotGoingToWork\b')
self.assertRegex(str(notes), r'\battr\b')
self.assertRegex(str(notes), r'\bDescriptor\b')
else:
exc = cm.exception
self.assertRegex(str(exc), r'\bNotGoingToWork\b')
self.assertRegex(str(exc), r'\battr\b')
self.assertRegex(str(exc), r'\bDescriptor\b')
self.assertIsInstance(exc.__cause__, TypeError)
def test_set_name_lookup(self):
resolved = []
class NonDescriptor:
def __getattr__(self, name):
resolved.append(name)
class A:
d = NonDescriptor()
self.assertNotIn('__set_name__', resolved,
'__set_name__ is looked up in instance dict')
def test_set_name_init_subclass(self):
class Descriptor:
def __set_name__(self, owner, name):
self.owner = owner
self.name = name
class Meta(type):
def __new__(cls, name, bases, ns):
self = super().__new__(cls, name, bases, ns)
self.meta_owner = self.owner
self.meta_name = self.name
return self
class A:
def __init_subclass__(cls):
cls.owner = cls.d.owner
cls.name = cls.d.name
class B(A, metaclass=Meta):
d = Descriptor()
self.assertIs(B.owner, B)
self.assertEqual(B.name, 'd')
self.assertIs(B.meta_owner, B)
self.assertEqual(B.name, 'd')
def test_set_name_modifying_dict(self):
notified = []
class Descriptor:
def __set_name__(self, owner, name):
setattr(owner, name + 'x', None)
notified.append(name)
class A:
a = Descriptor()
b = Descriptor()
c = Descriptor()
d = Descriptor()
e = Descriptor()
self.assertCountEqual(notified, ['a', 'b', 'c', 'd', 'e'])
def test_errors(self):
class MyMeta(type):
pass
with self.assertRaises(TypeError):
class MyClass(metaclass=MyMeta, otherarg=1):
pass
with self.assertRaises(TypeError):
types.new_class("MyClass", (object,),
dict(metaclass=MyMeta, otherarg=1))
types.prepare_class("MyClass", (object,),
dict(metaclass=MyMeta, otherarg=1))
class MyMeta(type):
def __init__(self, name, bases, namespace, otherarg):
super().__init__(name, bases, namespace)
with self.assertRaises(TypeError):
class MyClass(metaclass=MyMeta, otherarg=1):
pass
class MyMeta(type):
def __new__(cls, name, bases, namespace, otherarg):
return super().__new__(cls, name, bases, namespace)
def __init__(self, name, bases, namespace, otherarg):
super().__init__(name, bases, namespace)
self.otherarg = otherarg
class MyClass(metaclass=MyMeta, otherarg=1):
pass
self.assertEqual(MyClass.otherarg, 1)
def test_errors_changed_pep487(self):
# These tests failed before Python 3.6, PEP 487
class MyMeta(type):
def __new__(cls, name, bases, namespace):
return super().__new__(cls, name=name, bases=bases,
dict=namespace)
with self.assertRaises(TypeError):
class MyClass(metaclass=MyMeta):
pass
class MyMeta(type):
def __new__(cls, name, bases, namespace, otherarg):
self = super().__new__(cls, name, bases, namespace)
self.otherarg = otherarg
return self
class MyClass(metaclass=MyMeta, otherarg=1):
pass
self.assertEqual(MyClass.otherarg, 1)
def test_type(self):
t = type('NewClass', (object,), {})
self.assertIsInstance(t, type)
self.assertEqual(t.__name__, 'NewClass')
with self.assertRaises(TypeError):
type(name='NewClass', bases=(object,), dict={})
if __name__ == "__main__":
unittest.main()
|
Test
|
python
|
kamyu104__LeetCode-Solutions
|
Python/evaluate-division.py
|
{
"start": 1635,
"end": 2483
}
|
class ____(object):
def __init__(self):
self.set = {}
def find_set(self, x):
xp, xr = self.set.setdefault(x, (x, 1.0))
if x != xp:
pp, pr = self.find_set(xp) # path compression.
self.set[x] = (pp, xr*pr) # x/pp = xr*pr
return self.set[x]
def union_set(self, x, y, r):
(xp, xr), (yp, yr) = map(self.find_set, (x, y))
if xp == yp:
return False
# to make x/yp = r*yr and merge xp into yp
# => since x/xp = xr, we can merge with xp/yp = r*yr/xr
self.set[xp] = (yp, r*yr/xr)
return True
def query_set(self, x, y):
if x not in self.set or y not in self.set:
return -1.0
(xp, xr), (yp, yr) = map(self.find_set, (x, y))
return xr/yr if xp == yp else -1.0
|
UnionFindPathCompressionOnly
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/remote_representation/handle.py
|
{
"start": 5014,
"end": 5762
}
|
class ____:
"""Compound ID object for the two id schemes that state is recorded in the database against."""
remote_origin_id: str
selector_id: str
def to_string(self) -> str:
return f"{self.remote_origin_id}{_DELIMITER}{self.selector_id}"
@staticmethod
def from_string(serialized: str):
parts = serialized.split(_DELIMITER)
if len(parts) != 2:
raise DagsterInvariantViolationError(f"Invalid serialized InstigatorID: {serialized}")
return CompoundID(
remote_origin_id=parts[0],
selector_id=parts[1],
)
@staticmethod
def is_valid_string(serialized: str):
parts = serialized.split(_DELIMITER)
return len(parts) == 2
|
CompoundID
|
python
|
facebook__pyre-check
|
api/tests/connection_test.py
|
{
"start": 389,
"end": 6042
}
|
class ____(unittest.TestCase):
# pyre-ignore[56]
@patch.object(
PyreConnection,
"_validate_query_response",
side_effect=lambda response: response,
)
@patch("subprocess.run")
def test_query_server(
self, run: MagicMock, _validate_query_response: MagicMock
) -> None:
run_result = MagicMock()
run_result.returncode = 0
run.return_value = run_result
# We always start a server once when querying.
pyre_connection = PyreConnection(Path("/tmp"))
pyre_connection.server_initialized = False
pyre_connection.query_server("hi")
self.assertEqual(
run.call_args_list,
[
call(
["pyre", "--noninteractive", "incremental"],
check=False,
cwd="/tmp",
stdout=subprocess.PIPE,
),
call(
["pyre", "--noninteractive", "query", "hi"],
check=False,
cwd="/tmp",
stdout=subprocess.PIPE,
),
],
)
run.reset_mock()
pyre_connection = PyreConnection(
Path("/tmp"),
skip_initial_type_check=True,
)
pyre_connection.query_server("hi")
self.assertEqual(
run.call_args_list,
[
call(
["pyre", "--noninteractive", "start", "--skip-initial-type-check"],
check=False,
cwd="/tmp",
stdout=subprocess.PIPE,
),
call(
["pyre", "--noninteractive", "query", "hi"],
check=False,
cwd="/tmp",
stdout=subprocess.PIPE,
),
],
)
run.reset_mock()
pyre_connection = PyreConnection(
Path("/tmp"),
skip_initial_type_check=True,
wait_on_initialization=True,
)
pyre_connection.query_server("hi")
self.assertEqual(
run.call_args_list,
[
call(
[
"pyre",
"--noninteractive",
"start",
"--skip-initial-type-check",
"--wait-on-initialization",
],
check=False,
cwd="/tmp",
stdout=subprocess.PIPE,
),
call(
["pyre", "--noninteractive", "query", "hi"],
check=False,
cwd="/tmp",
stdout=subprocess.PIPE,
),
],
)
run.reset_mock()
pyre_connection = PyreConnection(Path("/tmp"))
pyre_connection.query_server("hi")
pyre_connection.query_server("bye")
self.assertEqual(
run.call_args_list,
[
call(
["pyre", "--noninteractive", "incremental"],
check=False,
cwd="/tmp",
stdout=subprocess.PIPE,
),
call(
["pyre", "--noninteractive", "query", "hi"],
check=False,
cwd="/tmp",
stdout=subprocess.PIPE,
),
call(
["pyre", "--noninteractive", "query", "bye"],
check=False,
cwd="/tmp",
stdout=subprocess.PIPE,
),
],
)
run.reset_mock()
with PyreConnection(Path("/tmp")) as pyre_connection:
pyre_connection.query_server("hi")
self.assertEqual(
run.call_args_list,
[
call(
["pyre", "--noninteractive", "incremental"],
check=False,
cwd="/tmp",
stdout=subprocess.PIPE,
),
call(
["pyre", "--noninteractive", "query", "hi"],
check=False,
cwd="/tmp",
stdout=subprocess.PIPE,
),
call(
["pyre", "--noninteractive", "stop"],
check=True,
cwd="/tmp",
stdout=subprocess.PIPE,
),
],
)
def test_validate_query_response(self) -> None:
with self.assertRaisesRegex(PyreQueryError, "Foo"):
PyreConnection._validate_query_response('{"error": "Foo"}')
with self.assertRaisesRegex(PyreQueryUnexpectedError, "is not valid JSON."):
PyreConnection._validate_query_response("asdf")
with self.assertRaisesRegex(
PyreQueryUnexpectedError, "The server response is invalid."
):
PyreConnection._validate_query_response("{}")
self.assertEqual(
PyreConnection._validate_query_response('{"response": "Foo"}'),
{"response": "Foo"},
)
def test_context_manager(self) -> None:
with patch.object(PyreConnection, "start_server") as start_server, patch.object(
PyreConnection, "stop_server"
) as stop_server:
with PyreConnection():
pass
start_server.assert_called_once_with()
stop_server.assert_called_once_with()
|
ConnectionApiTest
|
python
|
pennersr__django-allauth
|
allauth/headless/spec/views.py
|
{
"start": 383,
"end": 802
}
|
class ____(View):
def get(self, request):
import yaml
spec = get_schema()
content = yaml.dump(spec, Dumper=yaml.Dumper)
return HttpResponse(
content,
content_type="application/vnd.oai.openapi",
headers={"Content-Disposition": "inline; filename=allauth-openapi.yaml"},
)
@method_decorator(login_not_required, name="dispatch")
|
OpenAPIYAMLView
|
python
|
urllib3__urllib3
|
test/with_dummyserver/test_https.py
|
{
"start": 1814,
"end": 46384
}
|
class ____(HTTPSHypercornDummyServerTestCase):
tls_protocol_name: str | None = None
def tls_protocol_not_default(self) -> bool:
return self.tls_protocol_name in {"TLSv1", "TLSv1.1"}
def tls_version(self) -> ssl.TLSVersion:
if self.tls_protocol_name is None:
return pytest.skip("Skipping base test class")
try:
from ssl import TLSVersion
except ImportError:
return pytest.skip("ssl.TLSVersion isn't available")
return TLSVersion[self.tls_protocol_name.replace(".", "_")]
def ssl_version(self) -> int:
if self.tls_protocol_name is None:
return pytest.skip("Skipping base test class")
if self.tls_protocol_name == "TLSv1.3" and ssl.HAS_TLSv1_3:
return ssl.PROTOCOL_TLS_CLIENT
if self.tls_protocol_name == "TLSv1.2" and ssl.HAS_TLSv1_2:
return ssl.PROTOCOL_TLSv1_2
if self.tls_protocol_name == "TLSv1.1" and ssl.HAS_TLSv1_1:
return ssl.PROTOCOL_TLSv1_1
if self.tls_protocol_name == "TLSv1" and ssl.HAS_TLSv1:
return ssl.PROTOCOL_TLSv1
else:
return pytest.skip(f"{self.tls_protocol_name} isn't available")
@classmethod
def setup_class(cls) -> None:
super().setup_class()
cls.certs_dir = tempfile.mkdtemp()
# Start from existing root CA as we don't want to change the server certificate yet
with open(DEFAULT_CA, "rb") as crt, open(DEFAULT_CA_KEY, "rb") as key:
root_ca = trustme.CA.from_pem(crt.read(), key.read())
# Generate another CA to test verification failure
bad_ca = trustme.CA()
cls.bad_ca_path = os.path.join(cls.certs_dir, "ca_bad.pem")
bad_ca.cert_pem.write_to_path(cls.bad_ca_path)
# client cert chain
intermediate_ca = root_ca.create_child_ca()
cert = intermediate_ca.issue_cert("example.com")
encrypted_key = encrypt_key_pem(cert.private_key_pem, b"letmein")
cert.private_key_pem.write_to_path(
os.path.join(cls.certs_dir, CLIENT_INTERMEDIATE_KEY)
)
encrypted_key.write_to_path(
os.path.join(cls.certs_dir, PASSWORD_CLIENT_KEYFILE)
)
# Write the client cert and the intermediate CA
client_cert = os.path.join(cls.certs_dir, CLIENT_INTERMEDIATE_PEM)
cert.cert_chain_pems[0].write_to_path(client_cert)
cert.cert_chain_pems[1].write_to_path(client_cert, append=True)
# Write only the client cert
cert.cert_chain_pems[0].write_to_path(
os.path.join(cls.certs_dir, CLIENT_NO_INTERMEDIATE_PEM)
)
@classmethod
def teardown_class(cls) -> None:
super().teardown_class()
shutil.rmtree(cls.certs_dir)
def test_simple(self, http_version: str) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200, r.data
assert r.headers["server"] == f"hypercorn-{http_version}"
assert r.data == b"Dummy server!"
def test_default_port(self) -> None:
conn = HTTPSConnection(self.host, port=None)
assert conn.port == 443
@resolvesLocalhostFQDN()
def test_dotted_fqdn(self) -> None:
with HTTPSConnectionPool(
self.host + ".",
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as pool:
r = pool.request("GET", "/")
assert r.status == 200, r.data
def test_client_intermediate(self) -> None:
"""Check that certificate chains work well with client certs
We generate an intermediate CA from the root CA, and issue a client certificate
from that intermediate CA. Since the server only knows about the root CA, we
need to send it the certificate *and* the intermediate CA, so that it can check
the whole chain.
"""
with HTTPSConnectionPool(
self.host,
self.port,
key_file=os.path.join(self.certs_dir, CLIENT_INTERMEDIATE_KEY),
cert_file=os.path.join(self.certs_dir, CLIENT_INTERMEDIATE_PEM),
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/certificate")
subject = r.json()
assert subject["organizationalUnitName"].startswith("Testing cert")
def test_client_no_intermediate(self) -> None:
"""Check that missing links in certificate chains indeed break
The only difference with test_client_intermediate is that we don't send the
intermediate CA to the server, only the client cert.
"""
with HTTPSConnectionPool(
self.host,
self.port,
cert_file=os.path.join(self.certs_dir, CLIENT_NO_INTERMEDIATE_PEM),
key_file=os.path.join(self.certs_dir, CLIENT_INTERMEDIATE_KEY),
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises((SSLError, ProtocolError)):
https_pool.request("GET", "/certificate", retries=False)
def test_client_key_password(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
key_file=os.path.join(self.certs_dir, PASSWORD_CLIENT_KEYFILE),
cert_file=os.path.join(self.certs_dir, CLIENT_CERT),
key_password="letmein",
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/certificate")
subject = r.json()
assert subject["organizationalUnitName"].startswith("Testing cert")
def test_client_encrypted_key_requires_password(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
key_file=os.path.join(self.certs_dir, PASSWORD_CLIENT_KEYFILE),
cert_file=os.path.join(self.certs_dir, CLIENT_CERT),
key_password=None,
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises(MaxRetryError, match="password is required") as e:
https_pool.request("GET", "/certificate")
assert type(e.value.reason) is SSLError
def test_verified(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
with contextlib.closing(https_pool._new_conn()) as conn:
assert conn.__class__ == VerifiedHTTPSConnection
with warnings.catch_warnings(record=True) as w:
r = https_pool.request("GET", "/")
assert r.status == 200
assert [str(wm) for wm in w] == []
def test_verified_with_context(self) -> None:
ctx = util.ssl_.create_urllib3_context(
cert_reqs=ssl.CERT_REQUIRED, ssl_minimum_version=self.tls_version()
)
ctx.load_verify_locations(cafile=DEFAULT_CA)
with HTTPSConnectionPool(self.host, self.port, ssl_context=ctx) as https_pool:
with contextlib.closing(https_pool._new_conn()) as conn:
assert conn.__class__ == VerifiedHTTPSConnection
with mock.patch("warnings.warn") as warn:
r = https_pool.request("GET", "/")
assert r.status == 200
assert not warn.called, warn.call_args_list
def test_context_combines_with_ca_certs(self) -> None:
ctx = util.ssl_.create_urllib3_context(
cert_reqs=ssl.CERT_REQUIRED, ssl_minimum_version=self.tls_version()
)
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA, ssl_context=ctx
) as https_pool:
with contextlib.closing(https_pool._new_conn()) as conn:
assert conn.__class__ == VerifiedHTTPSConnection
with mock.patch("warnings.warn") as warn:
r = https_pool.request("GET", "/")
assert r.status == 200
assert not warn.called, warn.call_args_list
def test_ca_dir_verified(self, tmp_path: Path) -> None:
# OpenSSL looks up certificates by the hash for their name, see c_rehash
# TODO infer the bytes using `cryptography.x509.Name.public_bytes`.
# https://github.com/pyca/cryptography/pull/3236
shutil.copyfile(DEFAULT_CA, str(tmp_path / "81deb5f7.0"))
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_cert_dir=str(tmp_path),
ssl_minimum_version=self.tls_version(),
) as https_pool:
with contextlib.closing(https_pool._new_conn()) as conn:
assert conn.__class__ == VerifiedHTTPSConnection
with warnings.catch_warnings(record=True) as w:
r = https_pool.request("GET", "/")
assert r.status == 200
assert [str(wm) for wm in w] == []
def test_invalid_common_name(self) -> None:
with HTTPSConnectionPool(
"127.0.0.1",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/", retries=0)
assert type(e.value.reason) is SSLError
assert "doesn't match" in str(
e.value.reason
) or "certificate verify failed" in str(e.value.reason)
def test_verified_with_bad_ca_certs(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=self.bad_ca_path,
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/")
assert type(e.value.reason) is SSLError
assert (
"certificate verify failed" in str(e.value.reason)
# PyPy is more specific
or "self signed certificate in certificate chain" in str(e.value.reason)
), f"Expected 'certificate verify failed', instead got: {e.value.reason!r}"
def test_wrap_socket_failure_resource_leak(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=self.bad_ca_path,
ssl_minimum_version=self.tls_version(),
) as https_pool:
with contextlib.closing(https_pool._get_conn()) as conn:
with pytest.raises(ssl.SSLError):
conn.connect()
assert conn.sock is not None # type: ignore[attr-defined]
def test_verified_without_ca_certs(self) -> None:
# default is cert_reqs=None which is ssl.CERT_NONE
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/")
assert type(e.value.reason) is SSLError
# there is a different error message depending on whether or
# not pyopenssl is injected
assert (
"No root certificates specified" in str(e.value.reason)
# PyPy is more specific
or "self signed certificate in certificate chain" in str(e.value.reason)
# PyPy sometimes uses all-caps here
or "certificate verify failed" in str(e.value.reason).lower()
or "invalid certificate chain" in str(e.value.reason)
), (
"Expected 'No root certificates specified', "
"'certificate verify failed', or "
"'invalid certificate chain', "
"instead got: %r" % e.value.reason
)
def test_no_ssl(self) -> None:
with HTTPSConnectionPool(self.host, self.port) as pool:
pool.ConnectionCls = None # type: ignore[assignment]
with pytest.raises(ImportError):
pool._new_conn()
with pytest.raises(ImportError):
pool.request("GET", "/", retries=0)
def test_unverified_ssl(self) -> None:
"""Test that bare HTTPSConnection can connect, make requests"""
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs=ssl.CERT_NONE,
ssl_minimum_version=self.tls_version(),
) as pool:
with mock.patch("warnings.warn") as warn:
r = pool.request("GET", "/")
assert r.status == 200
assert warn.called
# Modern versions of Python, or systems using PyOpenSSL, only emit
# the unverified warning. Older systems may also emit other
# warnings, which we want to ignore here.
calls = warn.call_args_list
assert InsecureRequestWarning in [x[0][1] for x in calls]
def test_ssl_unverified_with_ca_certs(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_NONE",
ca_certs=self.bad_ca_path,
ssl_minimum_version=self.tls_version(),
) as pool:
with mock.patch("warnings.warn") as warn:
r = pool.request("GET", "/")
assert r.status == 200
assert warn.called
# Modern versions of Python, or systems using PyOpenSSL, only emit
# the unverified warning. Older systems may also emit other
# warnings, which we want to ignore here.
calls = warn.call_args_list
category = calls[0][0][1]
assert category == InsecureRequestWarning
def test_assert_hostname_false(self) -> None:
with HTTPSConnectionPool(
"localhost",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_hostname = False
https_pool.request("GET", "/")
def test_assert_specific_hostname(self) -> None:
with HTTPSConnectionPool(
"localhost",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_hostname = "localhost"
https_pool.request("GET", "/")
def test_server_hostname(self) -> None:
with HTTPSConnectionPool(
"127.0.0.1",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
server_hostname="localhost",
ssl_minimum_version=self.tls_version(),
) as https_pool:
conn = https_pool._new_conn()
conn.request("GET", "/")
# Assert the wrapping socket is using the passed-through SNI name.
# pyopenssl doesn't let you pull the server_hostname back off the
# socket, so only add this assertion if the attribute is there (i.e.
# the python ssl module).
if hasattr(conn.sock, "server_hostname"): # type: ignore[attr-defined]
assert conn.sock.server_hostname == "localhost" # type: ignore[attr-defined]
conn.getresponse().close()
conn.close()
def test_assert_fingerprint_md5(self) -> None:
with HTTPSConnectionPool(
"localhost",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
assert_fingerprint=("55:39:BF:70:05:12:43:FA:1F:D1:BF:4E:E8:1B:07:1D"),
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.request("GET", "/")
def test_assert_fingerprint_sha1(self) -> None:
with HTTPSConnectionPool(
"localhost",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
assert_fingerprint=(
"72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
),
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.request("GET", "/")
def test_assert_fingerprint_sha256(self) -> None:
with HTTPSConnectionPool(
"localhost",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
assert_fingerprint=(
"E3:59:8E:69:FF:C5:9F:C7:88:87:44:58:22:7F:90:8D:D9:BC:12:C4:90:79:D5:"
"DC:A8:5D:4F:60:40:1E:A6:D2"
),
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.request("GET", "/")
def test_assert_invalid_fingerprint(self) -> None:
def _test_request(pool: HTTPSConnectionPool) -> SSLError:
with pytest.raises(MaxRetryError) as cm:
pool.request("GET", "/", retries=0)
assert type(cm.value.reason) is SSLError
return cm.value.reason
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_fingerprint = (
"AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA"
)
e = _test_request(https_pool)
expected = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
got = "728b554c9afc1e88a11cad1bb2e7cc3edbc8f98a"
assert (
str(e)
== f'Fingerprints did not match. Expected "{expected}", got "{got}"'
)
# Uneven length
https_pool.assert_fingerprint = "AA:A"
e = _test_request(https_pool)
assert "Fingerprint of invalid length:" in str(e)
# Invalid length
https_pool.assert_fingerprint = "AA"
e = _test_request(https_pool)
assert "Fingerprint of invalid length:" in str(e)
def test_verify_none_and_bad_fingerprint(self) -> None:
with HTTPSConnectionPool(
"127.0.0.1",
self.port,
cert_reqs="CERT_NONE",
assert_hostname=False,
assert_fingerprint=(
"AA:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
),
) as https_pool:
with pytest.raises(MaxRetryError) as cm:
https_pool.request("GET", "/", retries=0)
assert type(cm.value.reason) is SSLError
def test_verify_none_and_good_fingerprint(self) -> None:
with HTTPSConnectionPool(
"127.0.0.1",
self.port,
cert_reqs="CERT_NONE",
assert_hostname=False,
assert_fingerprint=(
"72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
),
) as https_pool:
https_pool.request("GET", "/")
def test_good_fingerprint_and_hostname_mismatch(self) -> None:
with HTTPSConnectionPool(
"127.0.0.1",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
assert_fingerprint=(
"72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
),
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.request("GET", "/")
@requires_network()
def test_https_timeout(self) -> None:
timeout = Timeout(total=None, connect=SHORT_TIMEOUT)
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=timeout,
retries=False,
cert_reqs="CERT_REQUIRED",
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/")
timeout = Timeout(read=0.01)
with HTTPSConnectionPool(
self.host,
self.port,
timeout=timeout,
retries=False,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
assert_fingerprint=(
"72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
),
ssl_minimum_version=self.tls_version(),
) as https_pool:
# TODO This was removed in https://github.com/urllib3/urllib3/pull/703/files
# We need to put something back or remove this block.
pass
timeout = Timeout(total=None)
with HTTPSConnectionPool(
self.host,
self.port,
timeout=timeout,
cert_reqs="CERT_NONE",
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.warns(InsecureRequestWarning):
https_pool.request("GET", "/")
def test_tunnel(self, http_version: str) -> None:
"""test the _tunnel behavior"""
timeout = Timeout(total=None)
with HTTPSConnectionPool(
self.host,
self.port,
timeout=timeout,
cert_reqs="CERT_NONE",
ssl_minimum_version=self.tls_version(),
) as https_pool:
with contextlib.closing(https_pool._new_conn()) as conn:
if http_version == "h2":
with pytest.raises(NotImplementedError) as e:
conn.set_tunnel(self.host, self.port)
assert (
str(e.value)
== "HTTP/2 does not support setting up a tunnel through a proxy"
)
else:
conn.set_tunnel(self.host, self.port)
with mock.patch.object(
conn, "_tunnel", create=True, return_value=None
) as conn_tunnel:
with pytest.warns(InsecureRequestWarning):
https_pool._make_request(conn, "GET", "/")
conn_tunnel.assert_called_once_with()
@requires_network()
def test_enhanced_timeout(self) -> None:
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(connect=SHORT_TIMEOUT),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
with contextlib.closing(https_pool._new_conn()) as conn:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/")
with pytest.raises(ConnectTimeoutError):
https_pool._make_request(conn, "GET", "/")
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(connect=LONG_TIMEOUT),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/", timeout=Timeout(connect=SHORT_TIMEOUT))
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(total=None),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
with contextlib.closing(https_pool._new_conn()) as conn:
with pytest.raises(ConnectTimeoutError):
https_pool.request(
"GET", "/", timeout=Timeout(total=None, connect=SHORT_TIMEOUT)
)
def test_enhanced_ssl_connection(self) -> None:
fingerprint = "72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
assert_fingerprint=fingerprint,
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200
def test_ssl_correct_system_time(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.cert_reqs = "CERT_REQUIRED"
https_pool.ca_certs = DEFAULT_CA
w = self._request_without_resource_warnings("GET", "/")
assert [] == w
def test_ssl_wrong_system_time(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.cert_reqs = "CERT_REQUIRED"
https_pool.ca_certs = DEFAULT_CA
with mock.patch("urllib3.connection.datetime") as mock_date:
mock_date.date.today.return_value = datetime.date(1970, 1, 1)
w = self._request_without_resource_warnings("GET", "/")
assert len(w) == 1
warning = w[0]
assert SystemTimeWarning == warning.category
assert isinstance(warning.message, Warning)
assert str(RECENT_DATE) in warning.message.args[0]
def _request_without_resource_warnings(
self, method: str, url: str
) -> list[warnings.WarningMessage]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.request(method, url)
w = [x for x in w if not isinstance(x.message, ResourceWarning)]
return w
def test_set_ssl_version_to_tls_version(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA
) as https_pool:
https_pool.ssl_version = ssl_version = self.certs["ssl_version"]
if ssl_version is getattr(ssl, "PROTOCOL_TLS", object()):
cmgr: contextlib.AbstractContextManager[object] = (
contextlib.nullcontext()
)
else:
cmgr = pytest.warns(
DeprecationWarning,
match=r"'ssl_version' option is deprecated and will be removed "
r"in urllib3 v2\.6\.0\. Instead use 'ssl_minimum_version'",
)
with cmgr:
r = https_pool.request("GET", "/")
assert r.status == 200, r.data
def test_set_cert_default_cert_required(self) -> None:
conn = VerifiedHTTPSConnection(self.host, self.port)
with pytest.warns(DeprecationWarning) as w:
conn.set_cert()
assert conn.cert_reqs == ssl.CERT_REQUIRED
assert len(w) == 1 and str(w[0].message) == (
"HTTPSConnection.set_cert() is deprecated and will be removed in urllib3 v2.1.0. "
"Instead provide the parameters to the HTTPSConnection constructor."
)
@pytest.mark.parametrize("verify_mode", [ssl.CERT_NONE, ssl.CERT_REQUIRED])
def test_set_cert_inherits_cert_reqs_from_ssl_context(
self, verify_mode: int
) -> None:
ssl_context = urllib3.util.ssl_.create_urllib3_context(cert_reqs=verify_mode)
assert ssl_context.verify_mode == verify_mode
conn = HTTPSConnection(self.host, self.port, ssl_context=ssl_context)
with pytest.warns(DeprecationWarning) as w:
conn.set_cert()
assert conn.cert_reqs == verify_mode
assert (
conn.ssl_context is not None and conn.ssl_context.verify_mode == verify_mode
)
assert len(w) == 1 and str(w[0].message) == (
"HTTPSConnection.set_cert() is deprecated and will be removed in urllib3 v2.1.0. "
"Instead provide the parameters to the HTTPSConnection constructor."
)
def test_tls_protocol_name_of_socket(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
ssl_maximum_version=self.tls_version(),
) as https_pool:
with contextlib.closing(https_pool._get_conn()) as conn:
conn.connect()
if not hasattr(conn.sock, "version"): # type: ignore[attr-defined]
pytest.skip("SSLSocket.version() not available")
assert conn.sock.version() == self.tls_protocol_name # type: ignore[attr-defined]
def test_ssl_version_is_deprecated(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
if self.ssl_version() == ssl.PROTOCOL_TLS_CLIENT:
pytest.skip(
"Skipping because ssl_version=ssl.PROTOCOL_TLS_CLIENT is not deprecated"
)
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA, ssl_version=self.ssl_version()
) as https_pool:
with contextlib.closing(https_pool._get_conn()) as conn:
with pytest.warns(DeprecationWarning) as w:
conn.connect()
assert len(w) >= 1
assert any(x.category == DeprecationWarning for x in w)
assert any(
str(x.message)
== (
"'ssl_version' option is deprecated and will be removed in "
"urllib3 v2.6.0. Instead use 'ssl_minimum_version'"
)
for x in w
)
@pytest.mark.parametrize(
"ssl_version", [None, ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_CLIENT]
)
def test_ssl_version_with_protocol_tls_or_client_not_deprecated(
self, ssl_version: int | None
) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
if self.tls_protocol_not_default():
pytest.skip(
f"Skipping because '{self.tls_protocol_name}' isn't set by default"
)
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA, ssl_version=ssl_version
) as https_pool:
with contextlib.closing(https_pool._get_conn()) as conn:
with warnings.catch_warnings(record=True) as w:
conn.connect()
assert [str(wm) for wm in w if wm.category != ResourceWarning] == []
def test_no_tls_version_deprecation_with_ssl_context(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
ctx = util.ssl_.create_urllib3_context(ssl_minimum_version=self.tls_version())
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_context=ctx,
) as https_pool:
with contextlib.closing(https_pool._get_conn()) as conn:
with warnings.catch_warnings(record=True) as w:
conn.connect()
assert [str(wm) for wm in w if wm.category != ResourceWarning] == []
def test_tls_version_maximum_and_minimum(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
from ssl import TLSVersion
min_max_versions = [
(self.tls_version(), self.tls_version()),
(TLSVersion.MINIMUM_SUPPORTED, self.tls_version()),
(TLSVersion.MINIMUM_SUPPORTED, TLSVersion.MAXIMUM_SUPPORTED),
]
for minimum_version, maximum_version in min_max_versions:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=minimum_version,
ssl_maximum_version=maximum_version,
) as https_pool:
conn = https_pool._get_conn()
try:
conn.connect()
if maximum_version == TLSVersion.MAXIMUM_SUPPORTED:
# A higher protocol than tls_protocol_name could be negotiated
assert conn.sock.version() >= self.tls_protocol_name # type: ignore[attr-defined]
else:
assert conn.sock.version() == self.tls_protocol_name # type: ignore[attr-defined]
finally:
conn.close()
@pytest.mark.parametrize("use_env_var_expansion", [True, False])
def test_sslkeylogfile(
self,
tmp_path: Path,
monkeypatch: pytest.MonkeyPatch,
use_env_var_expansion: bool,
) -> None:
if not hasattr(util.SSLContext, "keylog_filename"):
pytest.skip("requires OpenSSL 1.1.1+")
keylog_file = tmp_path / "keylogfile.txt"
if use_env_var_expansion:
monkeypatch.setenv("FILEPATH", str(keylog_file))
if sys.platform == "win32":
monkeypatch.setenv("SSLKEYLOGFILE", "%FILEPATH%")
else:
monkeypatch.setenv("SSLKEYLOGFILE", "${FILEPATH}")
else:
monkeypatch.setenv("SSLKEYLOGFILE", str(keylog_file))
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200, r.data
assert keylog_file.is_file(), "keylogfile '%s' should exist" % str(
keylog_file
)
assert keylog_file.read_text().startswith(
"# TLS secrets log file"
), "keylogfile '%s' should start with '# TLS secrets log file'" % str(
keylog_file
)
@pytest.mark.parametrize("sslkeylogfile", [None, ""])
def test_sslkeylogfile_empty(
self, monkeypatch: pytest.MonkeyPatch, sslkeylogfile: str | None
) -> None:
# Assert that an HTTPS connection doesn't error out when given
# no SSLKEYLOGFILE or an empty value (ie 'SSLKEYLOGFILE=')
if sslkeylogfile is not None:
monkeypatch.setenv("SSLKEYLOGFILE", sslkeylogfile)
else:
monkeypatch.delenv("SSLKEYLOGFILE", raising=False)
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as pool:
r = pool.request("GET", "/")
assert r.status == 200, r.data
def test_alpn_default(self, http_version: str) -> None:
"""Default ALPN protocols are sent by default."""
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as pool:
r = pool.request("GET", "/alpn_protocol", retries=0)
assert r.status == 200
assert r.data.decode("utf-8") == util.ALPN_PROTOCOLS[0]
assert (
r.data.decode("utf-8") == {"h11": "http/1.1", "h2": "h2"}[http_version]
)
def test_http2_probe_result_is_cached(self, http_version: str) -> None:
assert http2_probe._values() == {}
for i in range(2): # Do this twice to exercise the cache path
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
) as pool:
r = pool.request("GET", "/alpn_protocol", retries=0)
assert r.status == 200
if http_version == "h2":
# This means the probe was successful.
assert http2_probe._values() == {(self.host, self.port): True}
else:
# This means the probe wasn't attempted, otherwise would have a value.
assert http_version == "h11"
assert http2_probe._values() == {}
@pytest.mark.xfail(reason="Hypercorn always supports both HTTP/2 and HTTP/1.1")
def test_http2_probe_result_failed(self, http_version: str) -> None:
if http_version == "h2":
pytest.skip("Test must have server in HTTP/1.1 mode")
assert http2_probe._values() == {}
urllib3.http2.inject_into_urllib3()
try:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
) as pool:
r = pool.request("GET", "/", retries=0)
assert r.status == 200
# The probe was a failure because Hypercorn didn't support HTTP/2.
assert http2_probe._values() == {(self.host, self.port): False}
finally:
urllib3.http2.extract_from_urllib3()
def test_http2_probe_no_result_in_connect_error(self) -> None:
assert http2_probe._values() == {}
urllib3.http2.inject_into_urllib3()
try:
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
ca_certs=DEFAULT_CA,
timeout=SHORT_TIMEOUT,
) as pool:
with pytest.raises(ConnectTimeoutError):
pool.request("GET", "/", retries=False)
# The probe was inconclusive since an error occurred during connection.
assert http2_probe._values() == {(TARPIT_HOST, self.port): None}
finally:
urllib3.http2.extract_from_urllib3()
def test_http2_probe_no_result_in_ssl_error(self) -> None:
urllib3.http2.inject_into_urllib3()
try:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=None,
timeout=LONG_TIMEOUT,
) as pool:
with pytest.raises(SSLError):
pool.request("GET", "/", retries=False)
# The probe was inconclusive since an error occurred during connection.
assert http2_probe._values() == {(self.host, self.port): None}
finally:
urllib3.http2.extract_from_urllib3()
def test_http2_probe_blocked_per_thread(self) -> None:
state, current_thread, last_action = None, None, time.perf_counter()
def connect_callback(label: str, thread_id: int, **kwargs: typing.Any) -> None:
nonlocal state, current_thread, last_action
if label in ("before connect", "after connect failure"):
# We don't know if the target supports HTTP/2 as connections fail
assert kwargs["target_supports_http2"] is None
# Since we're trying to connect to TARPIT_HOST, all connections will
# fail, but they should be tried one after the other
now = time.perf_counter()
assert now >= last_action
last_action = now
if label == "before connect":
assert state is None
state = "connect"
assert current_thread != thread_id
current_thread = thread_id
elif label == "after connect failure":
assert state == "connect"
assert current_thread == thread_id
state = None
assert http2_probe._values() == {}
connect_timeout = LONG_TIMEOUT
total_threads = 3
urllib3.http2.inject_into_urllib3()
try:
def try_connect(_: typing.Any) -> tuple[float, float]:
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
ca_certs=DEFAULT_CA,
timeout=connect_timeout,
) as pool:
start_time = time.time()
conn = pool._get_conn()
assert isinstance(conn, HTTPSConnection)
conn._connect_callback = connect_callback
with pytest.raises(ConnectTimeoutError):
conn.connect()
end_time = time.time()
return start_time, end_time
threadpool = concurrent.futures.ThreadPoolExecutor(total_threads)
list(threadpool.map(try_connect, range(total_threads)))
# The probe was inconclusive since an error occurred during connection.
assert http2_probe._values() == {(TARPIT_HOST, self.port): None}
finally:
urllib3.http2.extract_from_urllib3()
def test_default_ssl_context_ssl_min_max_versions(self) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context()
assert ctx.minimum_version == ssl.TLSVersion.TLSv1_2
# urllib3 sets a default maximum version only when it is
# injected with PyOpenSSL SSL-support.
# Otherwise, the default maximum version is set by Python's
# `ssl.SSLContext`. The value respects OpenSSL configuration and
# can be different from `ssl.TLSVersion.MAXIMUM_SUPPORTED`.
# https://github.com/urllib3/urllib3/issues/2477#issuecomment-1151452150
if util.IS_PYOPENSSL:
expected_maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
else:
expected_maximum_version = ssl.SSLContext(
ssl.PROTOCOL_TLS_CLIENT
).maximum_version
assert ctx.maximum_version == expected_maximum_version
def test_ssl_context_ssl_version_uses_ssl_min_max_versions(self) -> None:
if self.ssl_version() == ssl.PROTOCOL_TLS_CLIENT:
pytest.skip(
"Skipping because ssl_version=ssl.PROTOCOL_TLS_CLIENT is not deprecated"
)
with pytest.warns(
DeprecationWarning,
match=r"'ssl_version' option is deprecated and will be removed in "
r"urllib3 v2\.6\.0\. Instead use 'ssl_minimum_version'",
):
ctx = urllib3.util.ssl_.create_urllib3_context(
ssl_version=self.ssl_version()
)
assert ctx.minimum_version == self.tls_version()
assert ctx.maximum_version == self.tls_version()
def test_default_ssl_context_verify_flags(self) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context()
ssl_ctx = ssl.create_default_context()
assert ctx.verify_flags == ssl_ctx.verify_flags
def test_assert_missing_hashfunc(self, monkeypatch: pytest.MonkeyPatch) -> None:
fingerprint = "55:39:BF:70:05:12:43:FA:1F:D1:BF:4E:E8:1B:07:1D"
with HTTPSConnectionPool(
"localhost",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
assert_fingerprint=(fingerprint),
ssl_minimum_version=self.tls_version(),
) as https_pool:
digest_length = len(fingerprint.replace(":", "").lower())
monkeypatch.setitem(urllib3.util.ssl_.HASHFUNC_MAP, digest_length, None)
with pytest.raises(MaxRetryError) as cm:
https_pool.request("GET", "/", retries=0)
assert type(cm.value.reason) is SSLError
assert (
f"Hash function implementation unavailable for fingerprint length: {digest_length}"
== str(cm.value.reason)
)
@pytest.mark.usefixtures("requires_tlsv1")
|
BaseTestHTTPS
|
python
|
python-pillow__Pillow
|
src/PIL/IcnsImagePlugin.py
|
{
"start": 7902,
"end": 12405
}
|
class ____(ImageFile.ImageFile):
"""
PIL image support for Mac OS .icns files.
Chooses the best resolution, but will possibly load
a different size image if you mutate the size attribute
before calling 'load'.
The info dictionary has a key 'sizes' that is a list
of sizes that the icns file has.
"""
format = "ICNS"
format_description = "Mac OS icns resource"
def _open(self) -> None:
self.icns = IcnsFile(self.fp)
self._mode = "RGBA"
self.info["sizes"] = self.icns.itersizes()
self.best_size = self.icns.bestsize()
self.size = (
self.best_size[0] * self.best_size[2],
self.best_size[1] * self.best_size[2],
)
@property
def size(self) -> tuple[int, int]:
return self._size
@size.setter
def size(self, value: tuple[int, int]) -> None:
# Check that a matching size exists,
# or that there is a scale that would create a size that matches
for size in self.info["sizes"]:
simple_size = size[0] * size[2], size[1] * size[2]
scale = simple_size[0] // value[0]
if simple_size[1] / value[1] == scale:
self._size = value
return
msg = "This is not one of the allowed sizes of this image"
raise ValueError(msg)
def load(self, scale: int | None = None) -> Image.core.PixelAccess | None:
if scale is not None:
width, height = self.size[:2]
self.size = width * scale, height * scale
self.best_size = width, height, scale
px = Image.Image.load(self)
if self._im is not None and self.im.size == self.size:
# Already loaded
return px
self.load_prepare()
# This is likely NOT the best way to do it, but whatever.
im = self.icns.getimage(self.best_size)
# If this is a PNG or JPEG 2000, it won't be loaded yet
px = im.load()
self.im = im.im
self._mode = im.mode
self.size = im.size
return px
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
"""
Saves the image as a series of PNG files,
that are then combined into a .icns file.
"""
if hasattr(fp, "flush"):
fp.flush()
sizes = {
b"ic07": 128,
b"ic08": 256,
b"ic09": 512,
b"ic10": 1024,
b"ic11": 32,
b"ic12": 64,
b"ic13": 256,
b"ic14": 512,
}
provided_images = {im.width: im for im in im.encoderinfo.get("append_images", [])}
size_streams = {}
for size in set(sizes.values()):
image = (
provided_images[size]
if size in provided_images
else im.resize((size, size))
)
temp = io.BytesIO()
image.save(temp, "png")
size_streams[size] = temp.getvalue()
entries = []
for type, size in sizes.items():
stream = size_streams[size]
entries.append((type, HEADERSIZE + len(stream), stream))
# Header
fp.write(MAGIC)
file_length = HEADERSIZE # Header
file_length += HEADERSIZE + 8 * len(entries) # TOC
file_length += sum(entry[1] for entry in entries)
fp.write(struct.pack(">i", file_length))
# TOC
fp.write(b"TOC ")
fp.write(struct.pack(">i", HEADERSIZE + len(entries) * HEADERSIZE))
for entry in entries:
fp.write(entry[0])
fp.write(struct.pack(">i", entry[1]))
# Data
for entry in entries:
fp.write(entry[0])
fp.write(struct.pack(">i", entry[1]))
fp.write(entry[2])
if hasattr(fp, "flush"):
fp.flush()
def _accept(prefix: bytes) -> bool:
return prefix.startswith(MAGIC)
Image.register_open(IcnsImageFile.format, IcnsImageFile, _accept)
Image.register_extension(IcnsImageFile.format, ".icns")
Image.register_save(IcnsImageFile.format, _save)
Image.register_mime(IcnsImageFile.format, "image/icns")
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Syntax: python3 IcnsImagePlugin.py [file]")
sys.exit()
with open(sys.argv[1], "rb") as fp:
imf = IcnsImageFile(fp)
for size in imf.info["sizes"]:
width, height, scale = imf.size = size
imf.save(f"out-{width}-{height}-{scale}.png")
with Image.open(sys.argv[1]) as im:
im.save("out.png")
if sys.platform == "windows":
os.startfile("out.png")
|
IcnsImageFile
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/input.py
|
{
"start": 16565,
"end": 68139
}
|
class ____:
"""Store information about the Tensor: Is it sparse?, map_op, and rank."""
def __init__(self, sparse, map_op, rank):
"""Create the metadata.
Args:
sparse: Python boolean.
map_op: The `Operation` that created the `SparseTensorsMap` in question.
This Op contains information about the underlying Map object and the
dtype of the original data.
rank: The statically known rank of the `SparseTensor`.
"""
self._sparse = sparse
self._map_op = map_op
self._rank = tensor_shape.as_dimension(rank)
def __eq__(self, other):
if self.sparse != other.sparse:
return False
if not self.sparse:
return True
# If map_ops are not the same, the data source is not the same.
if (self.map_op is not None) != (other.map_op is not None):
return False
if self.map_op != other.map_op:
return False
if not self.rank.is_compatible_with(other.rank):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "[SparseMetaData(%s, %s, %s)]" % (self.sparse, self.map_op.name,
self.rank)
def merge_with(self, other):
if self != other:
raise ValueError("SparseMetaData objects are incompatible: %s vs. %s"
% (self, other))
if self.sparse:
self.rank.merge_with(other.rank)
return self
@property
def map_op(self):
return self._map_op
@property
def sparse(self):
return self._sparse
@property
def rank(self):
return self._rank
def _as_tensor_list(tensors):
if isinstance(tensors, dict):
return [tensors[k] for k in sorted(tensors, key=str)]
else:
return tensors
def _as_tensor_list_list(tensors_list):
if not tensors_list:
raise ValueError("Expected at least one set of tensors")
if isinstance(tensors_list[0], dict):
expected_keys = set(tensors_list[0].keys())
for tensors in tensors_list[1:]:
if set(tensors.keys()) != expected_keys:
raise ValueError("All dictionaries in tensors_list must have "
"the same keys")
return [_as_tensor_list(tensors) for tensors in tensors_list]
else:
return tensors_list
def _as_original_type(original_tensors, tensor_list):
if isinstance(original_tensors, dict):
if len(original_tensors) == 1:
# tensor_list is bogusly returned as a single tensor if only one tensor
# was enqueued. Make it a list again. See b/28117485.
tensor_list = [tensor_list]
return {k: tensor_list[i]
for i, k in enumerate(sorted(original_tensors, key=str))}
else:
return tensor_list
def _store_sparse_tensors(tensor_list, enqueue_many, keep_input,
shared_map_ops=None):
"""Store SparseTensors for feeding into batch, etc.
If `shared_map_ops` is provided, the underlying `SparseTensorsMap` objects
are reused (shared). This argument is useful for, e.g., `batch_join`
where multiple enqueue operations write to the same Queue component,
and another (dequeue) thread reads from that same location and must then
restore the associated `SparseTensor` objects. In this case, the sparse
restore must have a single `SparseTensorMap` from which to read out the
handles; so a single `SparseTensorMap` must be shared for storing
across the multiple enqueue operations. This sharing is performed by
calling `_store_sparse_tensors` the first time with `shared_map_ops=None`,
and then in subsequent times with this value set to the list of `Operation`
objects created in the first call.
Args:
tensor_list: List of `Tensor` and `SparseTensor` objects.
enqueue_many: Python `Boolean`.
keep_input: Must be a scalar bool Tensor (not a Python bool). If False,
don't store.
shared_map_ops: (optional) List of `Operation` objects from a previous
call to `_store_sparse_tensors`. If not `None`, the op types should be
one of `AddSparseToTensorsMap` or `AddManySparseToTensorsMap` in the
locations corresponding to `SparseTensors` in `tensor_list`.
Returns:
A tuple `(stored_list, sparse_info_list)` where `stored_list` is a list
of `Tensor` objects (same length as `tensor_list`) and `sparse_info_list`
is a list of the same length of `_SparseMetaData` objects.
"""
maybe_shared_map_ops = shared_map_ops or [None] * len(tensor_list)
def _sparse_meta_data(t, storing_op, map_op):
if not isinstance(t, sparse_tensor.SparseTensor):
return _SparseMetaData(False, None, None)
rank = t.dense_shape.shape.with_rank(1).dims[0]
if enqueue_many:
rank -= 1
# If a shared map_op was provided, use that. Otherwise use the name of
# the operation used to store the SparseTensor.
return _SparseMetaData(
sparse=True, map_op=map_op or storing_op, rank=rank)
def _maybe_store(t, shared_map_op):
"""Store Sparse tensor, if necessary."""
if not isinstance(t, sparse_tensor.SparseTensor):
return t
map_op_name = shared_map_op.name if shared_map_op else None
def _maybe_store_sparse(t, map_op_name, keep_input):
"""Conditionally store a single sparse Tensor."""
return utils.smart_cond(
keep_input,
lambda: _store_sparse(t, shared_name=map_op_name),
lambda: constant_op.constant(-1, dtypes.int64))
def _maybe_store_many_sparse(t, map_op_name, keep_input):
"""Conditionally store multiple sparse Tensors."""
out_tensor = utils.smart_cond(
keep_input,
lambda: _store_many_sparse(t, shared_name=map_op_name),
lambda: -1 * array_ops.ones(array_ops.shape(t)[0:1], dtypes.int64))
out_tensor.set_shape([None]) # necessary when t.ndims is unknown
return out_tensor
def _sparse_values_to_keep(t, keep_input):
"""Convert a per-row `keep_input` vector to a per-value one."""
# Get the rows of every value in the sparse Tensor.
row_values = t.indices[:, 0]
# The value should be kept iff the row should be kept.
return array_ops.gather(keep_input, row_values)
if keep_input.shape.ndims == 1:
t = sparse_ops.sparse_retain(t, _sparse_values_to_keep(t, keep_input))
store_f = lambda t, name, _: _store_many_sparse(t, shared_name=name)
elif enqueue_many:
store_f = _maybe_store_many_sparse
else:
store_f = _maybe_store_sparse
return store_f(t, map_op_name, keep_input)
stored_list = [
_maybe_store(t, shared_map_op) for t, shared_map_op
in zip(tensor_list, maybe_shared_map_ops)]
# Since the output of `_store{_many}_sparse is wrapped in a tf.cond `Merge`,
# we can't just get the Op of the resulting tensor.
def _sparse_op(stored):
for input_tensor in stored.op.inputs:
if input_tensor.op.type in ("AddSparseToTensorsMap",
"AddManySparseToTensorsMap"):
return input_tensor.op
# If there was no sparse input, then the original stored Tensor wasn't
# sparse and we can just return the original Tensor's Op.
return stored.op
sparse_info_list = [
_sparse_meta_data(t, _sparse_op(stored), shared_map_op)
for t, stored, shared_map_op
in zip(tensor_list, stored_list, maybe_shared_map_ops)]
# Expand dims of stored tensors by 1 for proper enqueue shape
stored_list = [
array_ops.expand_dims(s, [-1]) if s_info.sparse else s
for s, s_info in zip(stored_list, sparse_info_list)]
return stored_list, sparse_info_list
def _store_sparse_tensors_join(tensor_list_list, enqueue_many, keep_input):
  """Store SparseTensors for feeding into batch_join, etc.

  The first inner list establishes the shared SparseTensor map ops; every
  subsequent list must store into the same maps and produce matching
  sparse metadata, otherwise the inputs are inconsistent.
  """
  (first_stored, sparse_info_list) = _store_sparse_tensors(
      tensor_list_list[0], enqueue_many, keep_input)
  stored_list_list = [first_stored]
  for other_list in tensor_list_list[1:]:
    stored, candidate_info = _store_sparse_tensors(
        other_list, enqueue_many, keep_input,
        [info.map_op for info in sparse_info_list])
    if sparse_info_list != candidate_info:
      raise ValueError("Inconsistent SparseTensors list: %s vs. %s"
                       % (tensor_list_list[0], other_list))
    # Merge metadata so rank information is refined across lists.
    sparse_info_list = [
        info.merge_with(candidate)
        for (info, candidate) in zip(sparse_info_list, candidate_info)]
    stored_list_list.append(stored)
  return (stored_list_list, sparse_info_list)
def _restore_sparse_tensors(stored_list, sparse_info_list):
  """Restore SparseTensors after dequeue in batch, batch_join, etc.

  Entries flagged as sparse in `sparse_info_list` were replaced by int64
  handles (with an extra trailing dim) before enqueue; here the handles
  are looked back up in the shared tensors map. Dense entries pass
  through unchanged.

  Args:
    stored_list: A dequeued tensor, or a sequence of dequeued tensors.
    sparse_info_list: `_SparseMetaData` entries aligned with `stored_list`.

  Returns:
    The restored tensor(s), in the same single-vs-sequence form received.
  """
  # Callers may pass a single tensor instead of a sequence; normalize to a
  # tuple and remember which form to hand back.
  received_sequence = isinstance(stored_list, collections_abc.Sequence)
  if not received_sequence:
    stored_list = (stored_list,)
  # Squeeze away the dim added before enqueue and restore from the map;
  # `info.rank + 1` re-adds the batch dimension stripped at store time.
  tensors = [
      _restore_sparse(sparse_map_op=info.map_op,
                      sparse_handles=array_ops.squeeze(s, [1]),
                      rank=tensor_shape.dimension_value(info.rank + 1))
      if info.sparse else s
      for (s, info) in zip(stored_list, sparse_info_list)]
  has_st = any(isinstance(x, sparse_tensor.SparseTensor) for x in tensors)
  if has_st:
    # Add control dependencies on every restored `values` tensor so that
    # all take-from-map (restore) ops run even if a consumer only uses a
    # subset of the outputs.
    t_values = [
        x.values if isinstance(x, sparse_tensor.SparseTensor)
        else x
        for x in tensors]
    with_deps = lambda x: control_flow_ops.with_dependencies(t_values, x)
    ensure_restore_tensors = [
        sparse_tensor.SparseTensor(indices=with_deps(x.indices),
                                   values=with_deps(x.values),
                                   dense_shape=with_deps(x.dense_shape))
        if isinstance(x, sparse_tensor.SparseTensor)
        else with_deps(x)
        for x in tensors]
  else:
    ensure_restore_tensors = tensors
  # Single-tensor case returns the unwrapped tensor: with one output the
  # dependency wrapping is self-referential, so it is safely skipped.
  return ensure_restore_tensors if received_sequence else tensors[0]
def _validate(tensor_list):
  """Convert inputs to tensors/IndexedSlices and require a non-empty list."""
  converted = indexed_slices.convert_n_to_tensor_or_indexed_slices(
      tensor_list)
  if not converted:
    raise ValueError("Expected at least one tensor in batch().")
  return converted
def _validate_join(tensor_list_list):
  """Convert each inner list to tensors and require a non-empty outer list."""
  converted = [
      indexed_slices.convert_n_to_tensor_or_indexed_slices(inner)
      for inner in tensor_list_list
  ]
  if not converted:
    raise ValueError("Expected at least one input in batch_join().")
  return converted
def _validate_keep_input(keep_input, enqueue_many):
  """Validate `keep_input` argument to conditional batching functions."""
  keep_input = ops.convert_to_tensor(keep_input)
  ndims = keep_input.shape.ndims
  # Rank must be statically known to pick the scalar vs. vector code path.
  if ndims is None:
    raise ValueError(
        "`keep_input` dimensions must be known at graph construction.")
  # A per-example vector only makes sense when inputs are batches.
  if ndims == 1 and not enqueue_many:
    raise ValueError(
        "`keep_input` cannot be a vector when `enqueue_many=False`.")
  if ndims > 1:
    raise ValueError("`keep_input` must be 0 or 1 dimensions.")
  return keep_input
def _dtypes(tensor_list_list):
  """Return the per-position dtypes, verifying every inner list agrees."""
  expected = [t.dtype for t in tensor_list_list[0]]
  for tl in tensor_list_list[1:]:
    actual = [t.dtype for t in tl]
    if actual != expected:
      raise TypeError("Expected types to be consistent: %s vs. %s." %
                      (", ".join(x.name for x in expected),
                       ", ".join(x.name for x in actual)))
  return expected
def _merge_shapes(shape_list, enqueue_many):
  """Merge the shapes in `shape_list` into a single compatible shape.

  Args:
    shape_list: List of shapes (anything `tensor_shape.as_shape` accepts).
    enqueue_many: If `True`, each shape carries a leading batch dimension
      that is stripped before merging.

  Returns:
    The merged shape as a list, with each dimension as well-defined as the
    inputs allow.

  Raises:
    ValueError: If the shapes are incompatible, or a shape lacks the rank
      required to strip the batch dimension when `enqueue_many` is `True`.
  """
  shape_list = [tensor_shape.as_shape(s) for s in shape_list]
  if enqueue_many:
    # We want the shapes without the leading batch dimension.
    shape_list = [s.with_rank_at_least(1)[1:] for s in shape_list]
  merged_shape = shape_list[0]
  for s in shape_list[1:]:
    # `TensorShape.merge_with` is non-mutating: keep its return value so
    # refinements from later shapes (e.g. None merged with a known size)
    # are not silently dropped.
    merged_shape = merged_shape.merge_with(s)
  return merged_shape.as_list()
def _shapes(tensor_list_list, shapes, enqueue_many):
  """Calculate and merge the shapes of incoming tensors.

  Args:
    tensor_list_list: List of tensor lists.
    shapes: List of shape tuples for the tensors within the lists, or
      `None` to infer them.
    enqueue_many: Boolean describing whether shapes will be enqueued as
      batches or individual entries.

  Returns:
    `shapes` unchanged when given; otherwise a list of shapes merged
    across the inner lists of `tensor_list_list`.

  Raises:
    ValueError: If any inferred shape in `tensor_list_list` lacks a
      well-defined rank.
  """
  if shapes is not None:
    return shapes
  num_tensors = len(tensor_list_list[0])
  # Every tensor needs a known rank before shapes can be merged.
  for tl in tensor_list_list:
    for i in range(num_tensors):
      if tl[i].shape.ndims is None:
        raise ValueError("Cannot infer Tensor's rank: %s" % tl[i])
  return [
      _merge_shapes(
          [tl[i].shape.as_list() for tl in tensor_list_list], enqueue_many)
      for i in range(num_tensors)
  ]
def _select_which_to_enqueue(tensor_list, keep_input):
  """Select which examples to enqueue based on vector `keep_input`."""
  # Partition each tensor's rows into (dropped, kept) and keep partition 1.
  partitions = math_ops.cast(keep_input, dtypes.int32)
  return [
      data_flow_ops.dynamic_partition(t, partitions, num_partitions=2)[1]
      for t in tensor_list
  ]
def _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input):
  """Enqueue `tensor_list_list` in `queue`.

  One enqueue op is built per inner list, and a `QueueRunner` driving all
  of them is registered in the QUEUE_RUNNER collection.

  Args:
    queue: The queue to enqueue into.
    tensor_list_list: List of tensor lists, one per enqueuing thread.
    enqueue_many: Whether each tensor holds a batch of examples.
    keep_input: Scalar or vector bool tensor gating what is enqueued.
  """
  if enqueue_many:
    enqueue_fn = queue.enqueue_many
  else:
    enqueue_fn = queue.enqueue
  if keep_input.shape.ndims == 1:
    # Vector `keep_input`: drop filtered rows before enqueuing.
    enqueue_ops = [enqueue_fn(_select_which_to_enqueue(x, keep_input))
                   for x in tensor_list_list]
  else:
    # Scalar `keep_input`: conditionally enqueue the whole list. Bind `tl`
    # as a default argument so the lambda captures the current value
    # instead of late-binding the loop variable.
    enqueue_ops = [utils.smart_cond(
        keep_input,
        lambda tl=tl: enqueue_fn(tl),
        control_flow_ops.no_op) for tl in tensor_list_list]
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
def _enqueue(queue, tensor_list, threads, enqueue_many, keep_input):
  """Enqueue `tensor_list` in `queue` from `threads` parallel threads."""
  enqueue_fn = queue.enqueue_many if enqueue_many else queue.enqueue
  if keep_input.shape.ndims == 1:
    # Vector `keep_input`: drop filtered rows before enqueuing.
    op = enqueue_fn(_select_which_to_enqueue(tensor_list, keep_input))
  else:
    # Scalar `keep_input`: enqueue everything or nothing.
    op = utils.smart_cond(
        keep_input, lambda: enqueue_fn(tensor_list), control_flow_ops.no_op)
  # The same op is replicated so the QueueRunner starts `threads` threads.
  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(queue, [op] * threads))
def _which_queue(dynamic_pad):
  """Return the queue class matching the requested padding behavior."""
  if dynamic_pad:
    return data_flow_ops.PaddingFIFOQueue
  return data_flow_ops.FIFOQueue
def _batch(tensors, batch_size, keep_input, num_threads=1, capacity=32,
           enqueue_many=False, shapes=None, dynamic_pad=False,
           allow_smaller_final_batch=False, shared_name=None,
           name=None):
  """Helper function for `batch` and `maybe_batch`."""
  if context.executing_eagerly():
    raise ValueError(
        "Input pipelines based on Queues are not supported when eager execution"
        " is enabled. Please use tf.data to ingest data into your model"
        " instead.")
  tensor_list = _as_tensor_list(tensors)
  with ops.name_scope(name, "batch", list(tensor_list) + [keep_input]) as name:
    tensor_list = _validate(tensor_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    # SparseTensors are swapped for int64 handles into a shared map so they
    # can pass through the (dense-only) queue; they are restored below.
    (tensor_list, sparse_info) = _store_sparse_tensors(
        tensor_list, enqueue_many, keep_input)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many)
    # TODO(josh11b,mrry): Switch to BatchQueue once it is written.
    queue = _which_queue(dynamic_pad)(
        capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue(queue, tensor_list, num_threads, enqueue_many, keep_input)
    summary.scalar(
        "fraction_of_%d_full" % capacity,
        math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
    dequeue_fn = (queue.dequeue_up_to if allow_smaller_final_batch
                  else queue.dequeue_many)
    dequeued = dequeue_fn(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    return _as_original_type(tensors, dequeued)
# TODO(josh11b): Add a thread_multiplier or num_threads (that has to be
# a multiple of len(tensor_list_list)?) parameter, to address the use
# case where you want more parallelism than you can support different
# readers (either because you don't have that many files or can't
# read that many files in parallel due to the number of seeks required).
# Once this is done, batch() can be written as a call to batch_join().
def _batch_join(tensors_list, batch_size, keep_input, capacity=32,
                enqueue_many=False, shapes=None, dynamic_pad=False,
                allow_smaller_final_batch=False, shared_name=None, name=None):
  """Helper function for `batch_join` and `maybe_batch_join`."""
  if context.executing_eagerly():
    raise ValueError(
        "Input pipelines based on Queues are not supported when eager execution"
        " is enabled. Please use tf.data to ingest data into your model"
        " instead.")
  tensor_list_list = _as_tensor_list_list(tensors_list)
  with ops.name_scope(name, "batch_join",
                      _flatten(tensor_list_list) + [keep_input]) as name:
    tensor_list_list = _validate_join(tensor_list_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    # SparseTensors are swapped for int64 handles into shared maps so they
    # can pass through the (dense-only) queue; they are restored below.
    tensor_list_list, sparse_info = _store_sparse_tensors_join(
        tensor_list_list, enqueue_many, keep_input)
    types = _dtypes(tensor_list_list)
    shapes = _shapes(tensor_list_list, shapes, enqueue_many)
    # TODO(josh11b,mrry): Switch to BatchQueue once it is written.
    queue = _which_queue(dynamic_pad)(
        capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
    summary.scalar(
        "fraction_of_%d_full" % capacity,
        math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
    dequeue_fn = (queue.dequeue_up_to if allow_smaller_final_batch
                  else queue.dequeue_many)
    dequeued = dequeue_fn(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    # `tensors_list` was validated to be non-empty.
    return _as_original_type(tensors_list[0], dequeued)
def _shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                   keep_input, num_threads=1, seed=None, enqueue_many=False,
                   shapes=None, allow_smaller_final_batch=False,
                   shared_name=None, name=None):
  """Helper function for `shuffle_batch` and `maybe_shuffle_batch`.

  Builds a `RandomShuffleQueue`, registers a `QueueRunner` feeding it from
  `tensors` (optionally filtered by `keep_input`), and returns a dequeue
  of `batch_size` elements restored to the original input structure.
  """
  if context.executing_eagerly():
    raise ValueError(
        "Input pipelines based on Queues are not supported when eager execution"
        " is enabled. Please use tf.data to ingest data into your model"
        " instead.")
  tensor_list = _as_tensor_list(tensors)
  with ops.name_scope(name, "shuffle_batch",
                      list(tensor_list) + [keep_input]) as name:
    # The queue can only shuffle among elements above the floor, so the
    # floor must leave headroom below capacity.
    if capacity <= min_after_dequeue:
      raise ValueError("capacity %d must be bigger than min_after_dequeue %d."
                       % (capacity, min_after_dequeue))
    tensor_list = _validate(tensor_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    # SparseTensors are swapped for int64 handles into a shared map so they
    # can pass through the (dense-only) queue; they are restored below.
    tensor_list, sparse_info = _store_sparse_tensors(
        tensor_list, enqueue_many, keep_input)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue(queue, tensor_list, num_threads, enqueue_many, keep_input)
    # Fraction of the mixing headroom (capacity - min_after_dequeue)
    # currently occupied above the shuffle floor, for the summary below.
    full = (math_ops.cast(
        math_ops.maximum(0, queue.size() - min_after_dequeue), dtypes.float32) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "fraction_over_%d_of_%d_full" %
        (min_after_dequeue, capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    return _as_original_type(tensors, dequeued)
def _shuffle_batch_join(tensors_list, batch_size, capacity,
                        min_after_dequeue, keep_input, seed=None,
                        enqueue_many=False, shapes=None,
                        allow_smaller_final_batch=False, shared_name=None,
                        name=None):
  """Helper function for `shuffle_batch_join` and `maybe_shuffle_batch_join`.

  Builds a `RandomShuffleQueue` fed by one enqueue op per inner list of
  `tensors_list` (one thread each), and returns a dequeue of `batch_size`
  elements restored to the structure of `tensors_list[0]`.
  """
  if context.executing_eagerly():
    raise ValueError(
        "Input pipelines based on Queues are not supported when eager execution"
        " is enabled. Please use tf.data to ingest data into your model"
        " instead.")
  tensor_list_list = _as_tensor_list_list(tensors_list)
  with ops.name_scope(name, "shuffle_batch_join",
                      _flatten(tensor_list_list) + [keep_input]) as name:
    tensor_list_list = _validate_join(tensor_list_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    # SparseTensors are swapped for int64 handles into shared maps so they
    # can pass through the (dense-only) queue; they are restored below.
    tensor_list_list, sparse_info = _store_sparse_tensors_join(
        tensor_list_list, enqueue_many, keep_input)
    types = _dtypes(tensor_list_list)
    shapes = _shapes(tensor_list_list, shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
    # Fraction of the mixing headroom (capacity - min_after_dequeue)
    # currently occupied above the shuffle floor, for the summary below.
    full = (math_ops.cast(
        math_ops.maximum(0, queue.size() - min_after_dequeue), dtypes.float32) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "fraction_over_%d_of_%d_full" %
        (min_after_dequeue, capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    # tensors_list was validated to not be empty.
    return _as_original_type(tensors_list[0], dequeued)
# Batching functions ----------------------------------------------------------
@tf_export(v1=["train.batch"])
@deprecation.deprecated(
    None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
    "`tf.data.Dataset.batch(batch_size)` (or `padded_batch(...)` if "
    "`dynamic_pad=True`).")
def batch(tensors, batch_size, num_threads=1, capacity=32,
          enqueue_many=False, shapes=None, dynamic_pad=False,
          allow_smaller_final_batch=False, shared_name=None, name=None):
  """Creates batches of tensors in `tensors`.

  `tensors` may be a list or a dictionary of tensors; the return value has
  the same type. Batching is implemented with a queue, and a `QueueRunner`
  for that queue is added to the current `Graph`'s `QUEUE_RUNNER`
  collection.

  With `enqueue_many=False`, `tensors` represents a single example: an
  input of shape `[x, y, z]` comes out with shape `[batch_size, x, y, z]`.
  With `enqueue_many=True`, `tensors` represents a batch of examples
  indexed along the first dimension (all members must agree on that size):
  an input of shape `[*, x, y, z]` comes out with shape
  `[batch_size, x, y, z]`. `capacity` bounds how far ahead prefetching may
  grow the queue.

  The result is a dequeue operation that raises
  `tf.errors.OutOfRangeError` once the input queue is exhausted. If it
  feeds another input queue, that queue's runner catches the exception;
  otherwise you are responsible for catching it yourself.

  *N.B.:* Unless `dynamic_pad` is `True`, you must either pass `shapes` or
  ensure every tensor in `tensors` has a fully-defined shape; `ValueError`
  is raised otherwise. With `dynamic_pad=True` only the rank must be
  known: dimensions of value `None` may vary per enqueue, and dequeued
  tensors are padded on the right up to the largest shape in the
  minibatch (0 for numbers, the empty string for strings; see
  `PaddingFIFOQueue` for more info).

  With `allow_smaller_final_batch=True`, a batch smaller than `batch_size`
  is returned when the queue is closed without enough elements to fill it;
  the outputs' static first dimension is then `None`, so operations that
  depend on a fixed batch_size would fail. Otherwise pending elements are
  discarded.

  Args:
    tensors: The list or dictionary of tensors to enqueue.
    batch_size: The new batch size pulled from the queue.
    num_threads: The number of threads enqueuing `tensors`. Batching is
      nondeterministic if `num_threads > 1`.
    capacity: An integer. The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensors` is a single example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes,
      padded upon dequeue so tensors within a batch share a shape.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the
      final batch to be smaller if there are insufficient items left in
      the queue.
    shared_name: (Optional). If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same types as `tensors`
    (except a single-element list input returns a tensor, not a list).

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be inferred
      from the elements of `tensors`.

  @compatibility(eager)
  Input pipelines based on Queues are not supported when eager execution
  is enabled. Please use the `tf.data` API to ingest data under eager
  execution.
  @end_compatibility
  """
  return _batch(tensors, batch_size, keep_input=True,
                num_threads=num_threads, capacity=capacity,
                enqueue_many=enqueue_many, shapes=shapes,
                dynamic_pad=dynamic_pad,
                allow_smaller_final_batch=allow_smaller_final_batch,
                shared_name=shared_name, name=name)
@tf_export(v1=["train.maybe_batch"])
@deprecation.deprecated(
    None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
    "`tf.data.Dataset.filter(...).batch(batch_size)` (or `padded_batch(...)`"
    " if `dynamic_pad=True`).")
def maybe_batch(tensors, keep_input, batch_size, num_threads=1, capacity=32,
                enqueue_many=False, shapes=None, dynamic_pad=False,
                allow_smaller_final_batch=False, shared_name=None, name=None):
  """Conditionally creates batches of tensors based on `keep_input`.

  Identical to `batch` except that `keep_input` filters which inputs are
  enqueued; see the docstring in `batch` for full details.

  Args:
    tensors: The list or dictionary of tensors to enqueue.
    keep_input: A `bool` Tensor gating enqueueing. A scalar that evaluates
      `True` enqueues all of `tensors`; with `enqueue_many=True`, a vector
      enqueues only those examples whose corresponding entry is `True`.
      This tensor essentially acts as a filtering mechanism.
    batch_size: The new batch size pulled from the queue.
    num_threads: The number of threads enqueuing `tensors`. Batching is
      nondeterministic if `num_threads > 1`.
    capacity: An integer. The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensors` is a single example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes,
      padded upon dequeue so tensors within a batch share a shape.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the
      final batch to be smaller if there are insufficient items left in
      the queue.
    shared_name: (Optional). If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same types as `tensors`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be inferred
      from the elements of `tensors`.
  """
  return _batch(tensors, batch_size, keep_input,
                num_threads=num_threads, capacity=capacity,
                enqueue_many=enqueue_many, shapes=shapes,
                dynamic_pad=dynamic_pad,
                allow_smaller_final_batch=allow_smaller_final_batch,
                shared_name=shared_name, name=name)
@tf_export(v1=["train.batch_join"])
@deprecation.deprecated(
    None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
    "`tf.data.Dataset.interleave(...).batch(batch_size)` (or "
    "`padded_batch(...)` if `dynamic_pad=True`).")
def batch_join(tensors_list, batch_size, capacity=32, enqueue_many=False,
               shapes=None, dynamic_pad=False, allow_smaller_final_batch=False,
               shared_name=None, name=None):
  """Runs a list of tensors to fill a queue to create batches of examples.

  `tensors_list` is a list of tuples or dictionaries of tensors; each
  element is treated like the `tensors` argument of
  `tf.compat.v1.train.batch()`. Implemented with a queue whose
  `QueueRunner` is added to the current `Graph`'s `QUEUE_RUNNER`
  collection.

  WARNING: This function is nondeterministic: `len(tensors_list)` enqueue
  threads are started, with thread `i` enqueuing from `tensors_list[i]`.
  `tensors_list[i1][j]` must match `tensors_list[i2][j]` in type and
  shape, except in the first dimension when `enqueue_many` is true.

  With `enqueue_many=False`, each `tensors_list[i]` represents a single
  example: an input tensor `x` comes out with shape
  `[batch_size] + x.shape`. With `enqueue_many=True`, each
  `tensors_list[i]` represents a batch indexed along the first dimension
  (all members agreeing on that size); slices of an input `x` are treated
  as examples and outputs have shape `[batch_size] + x.shape[1:]`.
  `capacity` bounds how far ahead prefetching may grow the queue.

  The result is a dequeue operation that raises
  `tf.errors.OutOfRangeError` once the input queue is exhausted. If it
  feeds another input queue, that queue's runner catches the exception;
  otherwise you are responsible for catching it yourself.

  *N.B.:* Unless `dynamic_pad` is `True`, you must either pass `shapes` or
  ensure every tensor in `tensors_list` has a fully-defined shape;
  `ValueError` is raised otherwise. With `dynamic_pad=True` only the rank
  must be known: dimensions of value `None` may vary per enqueue, and
  dequeued tensors are padded on the right up to the largest shape in the
  minibatch (0 for numbers, the empty string for strings; see
  `PaddingFIFOQueue` for more info).

  With `allow_smaller_final_batch=True`, a batch smaller than `batch_size`
  is returned when the queue is closed without enough elements to fill it;
  the outputs' static first dimension is then `None`, so operations that
  depend on a fixed batch_size would fail. Otherwise pending elements are
  discarded.

  Args:
    tensors_list: A list of tuples or dictionaries of tensors to enqueue.
    batch_size: An integer. The new batch size pulled from the queue.
    capacity: An integer. The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensor_list_list` is a single
      example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensor_list_list[i]`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes,
      padded upon dequeue so tensors within a batch share a shape.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the
      final batch to be smaller if there are insufficient items left in
      the queue.
    shared_name: (Optional) If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same number and types as
    `tensors_list[i]`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be inferred
      from the elements of `tensor_list_list`.

  @compatibility(eager)
  Input pipelines based on Queues are not supported when eager execution
  is enabled. Please use the `tf.data` API to ingest data under eager
  execution.
  @end_compatibility
  """
  return _batch_join(tensors_list, batch_size, keep_input=True,
                     capacity=capacity, enqueue_many=enqueue_many,
                     shapes=shapes, dynamic_pad=dynamic_pad,
                     allow_smaller_final_batch=allow_smaller_final_batch,
                     shared_name=shared_name, name=name)
@tf_export(v1=["train.maybe_batch_join"])
@deprecation.deprecated(
    None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
    "`tf.data.Dataset.interleave(...).filter(...).batch(batch_size)` (or "
    "`padded_batch(...)` if `dynamic_pad=True`).")
def maybe_batch_join(tensors_list, keep_input, batch_size, capacity=32,
                     enqueue_many=False, shapes=None, dynamic_pad=False,
                     allow_smaller_final_batch=False, shared_name=None,
                     name=None):
  """Runs a list of tensors to conditionally fill a queue to create batches.

  Identical to `batch_join` except that `keep_input` filters which inputs
  are enqueued; see the docstring in `batch_join` for full details.

  Args:
    tensors_list: A list of tuples or dictionaries of tensors to enqueue.
    keep_input: A `bool` Tensor gating enqueueing. A scalar that evaluates
      `True` enqueues all of `tensors`; with `enqueue_many=True`, a vector
      enqueues only those examples whose corresponding entry is `True`.
      This tensor essentially acts as a filtering mechanism.
    batch_size: An integer. The new batch size pulled from the queue.
    capacity: An integer. The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensor_list_list` is a single
      example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensor_list_list[i]`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes,
      padded upon dequeue so tensors within a batch share a shape.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the
      final batch to be smaller if there are insufficient items left in
      the queue.
    shared_name: (Optional) If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same number and types as
    `tensors_list[i]`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be inferred
      from the elements of `tensor_list_list`.
  """
  return _batch_join(tensors_list, batch_size, keep_input,
                     capacity=capacity, enqueue_many=enqueue_many,
                     shapes=shapes, dynamic_pad=dynamic_pad,
                     allow_smaller_final_batch=allow_smaller_final_batch,
                     shared_name=shared_name, name=name)
@tf_export(v1=["train.shuffle_batch"])
@deprecation.deprecated(
    None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
    "`tf.data.Dataset.shuffle(min_after_dequeue).batch(batch_size)`.")
def shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                  num_threads=1, seed=None, enqueue_many=False, shapes=None,
                  allow_smaller_final_batch=False, shared_name=None, name=None):
  """Creates batches by randomly shuffling tensors.

  This function adds to the current `Graph`: a shuffling queue into which
  tensors from `tensors` are enqueued; a `dequeue_many` operation to
  create batches from the queue; and a `QueueRunner` in the
  `QUEUE_RUNNER` collection to enqueue the tensors from `tensors`.

  With `enqueue_many=False`, `tensors` represents a single example: an
  input of shape `[x, y, z]` comes out with shape `[batch_size, x, y, z]`.
  With `enqueue_many=True`, `tensors` represents a batch of examples
  indexed along the first dimension (all members must agree on that size):
  an input of shape `[*, x, y, z]` comes out with shape
  `[batch_size, x, y, z]`. `capacity` bounds how far ahead prefetching may
  grow the queue.

  The result is a dequeue operation that raises
  `tf.errors.OutOfRangeError` once the input queue is exhausted. If it
  feeds another input queue, that queue's runner catches the exception;
  otherwise you are responsible for catching it yourself.

  For example:

  ```python
  # Creates batches of 32 images and 32 labels.
  image_batch, label_batch = tf.compat.v1.train.shuffle_batch(
        [single_image, single_label],
        batch_size=32,
        num_threads=4,
        capacity=50000,
        min_after_dequeue=10000)
  ```

  *N.B.:* You must either pass `shapes` or ensure every tensor in
  `tensors` has a fully-defined shape; `ValueError` is raised otherwise.

  With `allow_smaller_final_batch=True`, a batch smaller than `batch_size`
  is returned when the queue is closed without enough elements to fill it;
  the outputs' static first dimension is then `None`, so operations that
  depend on a fixed batch_size would fail. Otherwise pending elements are
  discarded.

  Args:
    tensors: The list or dictionary of tensors to enqueue.
    batch_size: The new batch size pulled from the queue.
    capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number elements in the queue after a
      dequeue, used to ensure a level of mixing of elements.
    num_threads: The number of threads enqueuing `tensor_list`.
    seed: Seed for the random shuffling within the queue.
    enqueue_many: Whether each tensor in `tensor_list` is a single example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensor_list`.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the
      final batch to be smaller if there are insufficient items left in
      the queue.
    shared_name: (Optional) If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the types as `tensors`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be inferred
      from the elements of `tensors`.

  @compatibility(eager)
  Input pipelines based on Queues are not supported when eager execution
  is enabled. Please use the `tf.data` API to ingest data under eager
  execution.
  @end_compatibility
  """
  return _shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                        keep_input=True, num_threads=num_threads, seed=seed,
                        enqueue_many=enqueue_many, shapes=shapes,
                        allow_smaller_final_batch=allow_smaller_final_batch,
                        shared_name=shared_name, name=name)
@tf_export(v1=["train.maybe_shuffle_batch"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.filter(...).shuffle(min_after_dequeue).batch(batch_size)`"
".")
def maybe_shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
keep_input, num_threads=1, seed=None,
enqueue_many=False, shapes=None,
allow_smaller_final_batch=False, shared_name=None,
name=None):
"""Creates batches by randomly shuffling conditionally-enqueued tensors.
See docstring in `shuffle_batch` for more details.
Args:
tensors: The list or dictionary of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
min_after_dequeue: Minimum number elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
keep_input: A `bool` Tensor. This tensor controls whether the input is
added to the queue or not. If it is a scalar and evaluates `True`, then
`tensors` are all added to the queue. If it is a vector and `enqueue_many`
is `True`, then each example is added to the queue only if the
corresponding value in `keep_input` is `True`. This tensor essentially
acts as a filtering mechanism.
num_threads: The number of threads enqueuing `tensor_list`.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the types as `tensors`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors`.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
return _shuffle_batch(
tensors,
batch_size,
capacity,
min_after_dequeue,
keep_input,
num_threads=num_threads,
seed=seed,
enqueue_many=enqueue_many,
shapes=shapes,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name)
@tf_export(v1=["train.shuffle_batch_join"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.interleave(...).shuffle(min_after_dequeue).batch"
"(batch_size)`.")
def shuffle_batch_join(tensors_list, batch_size, capacity,
min_after_dequeue, seed=None, enqueue_many=False,
shapes=None, allow_smaller_final_batch=False,
shared_name=None, name=None):
"""Create batches by randomly shuffling tensors.
The `tensors_list` argument is a list of tuples of tensors, or a list of
dictionaries of tensors. Each element in the list is treated similarly
to the `tensors` argument of `tf.compat.v1.train.shuffle_batch()`.
This version enqueues a different list of tensors in different threads.
It adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensors_list` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensors_list`.
`len(tensors_list)` threads will be started, with thread `i` enqueuing
the tensors from `tensors_list[i]`. `tensors_list[i1][j]` must match
`tensors_list[i2][j]` in type and shape, except in the first dimension if
`enqueue_many` is true.
If `enqueue_many` is `False`, each `tensors_list[i]` is assumed
to represent a single example. An input tensor with shape `[x, y, z]`
will be output as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensors_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed
by example, and all members of `tensors_list[i]` should have the
same size in the first dimension. If an input tensor has shape `[*, x,
y, z]`, the output will have shape `[batch_size, x, y, z]`.
The `capacity` argument controls the how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queue is closed and there are not enough
elements to fill the batch, otherwise the pending elements are discarded.
In addition, all output tensors' static shapes, as accessed via the
`shape` property will have a first `Dimension` value of `None`, and
operations that depend on fixed batch_size would fail.
Args:
tensors_list: A list of tuples or dictionaries of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
min_after_dequeue: Minimum number elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors_list[i]`.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the same number and types as
`tensors_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors_list`.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
return _shuffle_batch_join(
tensors_list,
batch_size,
capacity,
min_after_dequeue,
keep_input=True,
seed=seed,
enqueue_many=enqueue_many,
shapes=shapes,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name)
@tf_export(v1=["train.maybe_shuffle_batch_join"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.interleave(...).filter(...).shuffle(min_after_dequeue)"
".batch(batch_size)`.")
def maybe_shuffle_batch_join(tensors_list, batch_size, capacity,
min_after_dequeue, keep_input, seed=None,
enqueue_many=False, shapes=None,
allow_smaller_final_batch=False, shared_name=None,
name=None):
"""Create batches by randomly shuffling conditionally-enqueued tensors.
See docstring in `shuffle_batch_join` for more details.
Args:
tensors_list: A list of tuples or dictionaries of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
min_after_dequeue: Minimum number elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
keep_input: A `bool` Tensor. This tensor controls whether the input is
added to the queue or not. If it is a scalar and evaluates `True`, then
`tensors` are all added to the queue. If it is a vector and `enqueue_many`
is `True`, then each example is added to the queue only if the
corresponding value in `keep_input` is `True`. This tensor essentially
acts as a filtering mechanism.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors_list[i]`.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the same number and types as
`tensors_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors_list`.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
return _shuffle_batch_join(
tensors_list,
batch_size,
capacity,
min_after_dequeue,
keep_input,
seed=seed,
enqueue_many=enqueue_many,
shapes=shapes,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name)
|
_SparseMetaData
|
python
|
joke2k__faker
|
faker/providers/ssn/ko_KR/__init__.py
|
{
"start": 41,
"end": 252
}
|
class ____(SsnProvider):
ssn_formats = (
"##0#0#-1######",
"##0#1#-1######",
"##0#2#-1######",
"##0#0#-2######",
"##0#1#-2######",
"##0#2#-2######",
)
|
Provider
|
python
|
simplejson__simplejson
|
simplejson/tests/test_speedups.py
|
{
"start": 740,
"end": 1313
}
|
class ____(TestCase):
@skip_if_speedups_missing
def test_make_scanner(self):
self.assertRaises(AttributeError, scanner.c_make_scanner, 1)
@skip_if_speedups_missing
def test_bad_bool_args(self):
def test(value):
decoder.JSONDecoder(strict=BadBool()).decode(value)
self.assertRaises(ZeroDivisionError, test, '""')
self.assertRaises(ZeroDivisionError, test, '{}')
if not PY3:
self.assertRaises(ZeroDivisionError, test, u'""')
self.assertRaises(ZeroDivisionError, test, u'{}')
|
TestDecode
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_hash.py
|
{
"start": 40,
"end": 123
}
|
class ____:
def __hash__(self):
return True # [invalid-hash-return]
|
Bool
|
python
|
hynek__structlog
|
tests/test_contextvars.py
|
{
"start": 7128,
"end": 9087
}
|
class ____:
def test_cleanup(self):
"""
Bindings are cleaned up
"""
with bound_contextvars(x=42, y="foo"):
assert {"x": 42, "y": "foo"} == get_contextvars()
assert {} == get_contextvars()
def test_cleanup_conflict(self):
"""
Overwritten keys are restored after the clean up
"""
bind_contextvars(x="original", z="unrelated")
with bound_contextvars(x=42, y="foo"):
assert {"x": 42, "y": "foo", "z": "unrelated"} == get_contextvars()
assert {"x": "original", "z": "unrelated"} == get_contextvars()
def test_preserve_independent_bind(self):
"""
New bindings inside bound_contextvars are preserved after the clean up
"""
with bound_contextvars(x=42):
bind_contextvars(y="foo")
assert {"x": 42, "y": "foo"} == get_contextvars()
assert {"y": "foo"} == get_contextvars()
def test_nesting_works(self):
"""
bound_contextvars binds and unbinds even when nested
"""
with bound_contextvars(l1=1):
assert {"l1": 1} == get_contextvars()
with bound_contextvars(l2=2):
assert {"l1": 1, "l2": 2} == get_contextvars()
assert {"l1": 1} == get_contextvars()
assert {} == get_contextvars()
def test_as_decorator(self):
"""
bound_contextvars can be used as a decorator and it preserves the
name, signature and documentation of the wrapped function.
"""
@bound_contextvars(x=42)
def wrapped(arg1):
"""Wrapped documentation"""
bind_contextvars(y=arg1)
assert {"x": 42, "y": arg1} == get_contextvars()
wrapped(23)
assert "wrapped" == wrapped.__name__
assert "(arg1)" == str(inspect.signature(wrapped))
assert "Wrapped documentation" == wrapped.__doc__
|
TestBoundContextvars
|
python
|
pytorch__pytorch
|
test/mobile/model_test/tensor_ops.py
|
{
"start": 15,
"end": 3036
}
|
class ____(torch.nn.Module):
def forward(self):
return self.tensor_general_ops()
def tensor_general_ops(self):
a = torch.randn(4)
b = torch.tensor([1.5])
x = torch.ones((2,))
c = torch.randn(4, dtype=torch.cfloat)
w = torch.rand(4, 4, 4, 4)
v = torch.rand(4, 4, 4, 4)
return len(
# torch.is_tensor(a),
# torch.is_storage(a),
torch.is_complex(a),
torch.is_conj(a),
torch.is_floating_point(a),
torch.is_nonzero(b),
# torch.set_default_dtype(torch.float32),
# torch.get_default_dtype(),
# torch.set_default_tensor_type(torch.DoubleTensor),
torch.numel(a),
# torch.set_printoptions(),
# torch.set_flush_denormal(False),
# https://pytorch.org/docs/stable/tensors.html#tensor-class-reference
# x.new_tensor([[0, 1], [2, 3]]),
x.new_full((3, 4), 3.141592),
x.new_empty((2, 3)),
x.new_ones((2, 3)),
x.new_zeros((2, 3)),
x.is_cuda,
x.is_quantized,
x.is_meta,
x.device,
x.dim(),
c.real,
c.imag,
# x.backward(),
x.clone(),
w.contiguous(),
w.contiguous(memory_format=torch.channels_last),
w.copy_(v),
w.copy_(1),
w.copy_(0.5),
x.cpu(),
# x.cuda(),
# x.data_ptr(),
x.dense_dim(),
w.fill_diagonal_(0),
w.element_size(),
w.exponential_(),
w.fill_(0),
w.geometric_(0.5),
a.index_fill(0, torch.tensor([0, 2]), 1),
a.index_put_([torch.argmax(a)], torch.tensor(1.0)),
a.index_put([torch.argmax(a)], torch.tensor(1.0)),
w.is_contiguous(),
c.is_complex(),
w.is_conj(),
w.is_floating_point(),
w.is_leaf,
w.is_pinned(),
w.is_set_to(w),
# w.is_shared,
w.is_coalesced(),
w.coalesce(),
w.is_signed(),
w.is_sparse,
torch.tensor([1]).item(),
x.log_normal_(),
# x.masked_scatter_(),
# x.masked_scatter(),
# w.normal(),
w.numel(),
# w.pin_memory(),
# w.put_(0, torch.tensor([0, 1], w)),
x.repeat(4, 2),
a.clamp_(0),
a.clamp(0),
a.clamp_min(0),
a.hardsigmoid_(),
a.hardsigmoid(),
a.hardswish_(),
a.hardswish(),
a.hardtanh_(),
a.hardtanh(),
a.leaky_relu_(),
a.leaky_relu(),
a.relu_(),
a.relu(),
a.resize_as_(a),
a.type_as(a),
a._shape_as_tensor(),
a.requires_grad_(False),
)
|
TensorOpsModule
|
python
|
scipy__scipy
|
benchmarks/benchmarks/test_functions.py
|
{
"start": 2827,
"end": 3283
}
|
class ____:
target_E = 0.
solution = np.array([1., 3.])
xmin = np.array([-10., -10.])
xmax = np.array([10., 10.])
def fun(self, coords):
x, y = coords
return (x + 2. * y - 7.)**2 + (2. * x + y - 5.)**2
def der(self, coords):
x, y = coords
dfdx = 2. * (x + 2. * y - 7.) + 4. * (2. * x + y - 5.)
dfdy = 4. * (x + 2. * y - 7.) + 2. * (2. * x + y - 5.)
return np.array([dfdx, dfdy])
|
Booth
|
python
|
django__django
|
django/contrib/gis/db/backends/oracle/models.py
|
{
"start": 1421,
"end": 2080
}
|
class ____(models.Model, SpatialRefSysMixin):
"Maps to the Oracle MDSYS.CS_SRS table."
cs_name = models.CharField(max_length=68)
srid = models.IntegerField(primary_key=True)
auth_srid = models.IntegerField()
auth_name = models.CharField(max_length=256)
wktext = models.CharField(max_length=2046)
# Optional geometry representing the bounds of this coordinate
# system. By default, all are NULL in the table.
cs_bounds = models.PolygonField(null=True)
class Meta:
app_label = "gis"
db_table = "CS_SRS"
managed = False
@property
def wkt(self):
return self.wktext
|
OracleSpatialRefSys
|
python
|
walkccc__LeetCode
|
solutions/2454. Next Greater Element IV/2454.py
|
{
"start": 0,
"end": 890
}
|
class ____:
def secondGreaterElement(self, nums: list[int]) -> list[int]:
ans = [-1] * len(nums)
# a decreasing stack that stores indices that met the first greater number.
prevStack = []
# a decreasing stack that stores indices.
currStack = []
for i, num in enumerate(nums):
# Indices in prevStack meet the second greater num.
while prevStack and nums[prevStack[-1]] < num:
ans[prevStack.pop()] = num
# Push indices that meet the first greater number from `currStack` to
# `prevStack`. We need a temporary array to make the indices in the
# `prevStack` increasing.
decreasingIndices = []
while currStack and nums[currStack[-1]] < num:
decreasingIndices.append(currStack.pop())
while decreasingIndices:
prevStack.append(decreasingIndices.pop())
currStack.append(i)
return ans
|
Solution
|
python
|
pytorch__pytorch
|
.ci/lumen_cli/tests/test_docker_helper.py
|
{
"start": 183,
"end": 2973
}
|
class ____(unittest.TestCase):
def setUp(self):
# Reset the singleton in the target module
patcher = mock.patch("cli.lib.common.docker_helper._docker_client", None)
self.addCleanup(patcher.stop)
patcher.start()
def test_local_image_exists_true(self):
# Mock a docker client whose images.get returns an object (no exception)
mock_client = MagicMock()
mock_client.images.get.return_value = object()
ok = local_image_exists("repo:tag", client=mock_client)
self.assertTrue(ok)
def test_local_image_exists_not_found_false(self):
mock_client = MagicMock()
# Raise docker.errors.NotFound
mock_client.images.get.side_effect = derr.NotFound("nope")
ok = local_image_exists("missing:latest", client=mock_client)
self.assertFalse(ok)
def test_local_image_exists_api_error_false(self):
mock_client = MagicMock()
mock_client.images.get.side_effect = derr.APIError("boom", None)
ok = local_image_exists("broken:tag", client=mock_client)
self.assertFalse(ok)
def test_local_image_exists_uses_lazy_singleton(self):
# Patch docker.from_env used by _get_client()
with mock.patch(
"cli.lib.common.docker_helper.docker.from_env"
) as mock_from_env:
mock_docker_client = MagicMock()
mock_from_env.return_value = mock_docker_client
# First call should create and cache the client
c1 = _get_client()
self.assertIs(c1, mock_docker_client)
mock_from_env.assert_called_once()
# Second call should reuse cached client (no extra from_env calls)
c2 = _get_client()
self.assertIs(c2, mock_docker_client)
mock_from_env.assert_called_once() # still once
def test_local_image_exists_without_client_param_calls_get_client_once(self):
# Ensure _get_client is called and cached; local_image_exists should reuse it
with mock.patch("cli.lib.common.docker_helper._get_client") as mock_get_client:
mock_client = MagicMock()
mock_get_client.return_value = mock_client
# 1st call
local_image_exists("repo:tag")
# 2nd call
local_image_exists("repo:tag2")
# local_image_exists should call _get_client each time,
# but your _get_client itself caches docker.from_env.
self.assertEqual(mock_get_client.call_count, 2)
self.assertEqual(mock_client.images.get.call_count, 2)
mock_client.images.get.assert_any_call("repo:tag")
mock_client.images.get.assert_any_call("repo:tag2")
if __name__ == "__main__":
unittest.main()
|
TestDockerImageHelpers
|
python
|
huggingface__transformers
|
src/transformers/models/lfm2/modeling_lfm2.py
|
{
"start": 15871,
"end": 19452
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Lfm2Config, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.rotary_fn = apply_rotary_pos_emb
self.out_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
self.q_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps)
self.k_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Lfm2HybridConvCache] = None,
cache_position: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_layernorm(self.q_proj(hidden_states).view(*hidden_shape)).transpose(1, 2)
key_states = self.k_layernorm(self.k_proj(hidden_states).view(*hidden_shape)).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(*hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
output = self.out_proj(attn_output)
return output, attn_weights
def apply_mask_to_padding_states(hidden_states, attention_mask):
"""
Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
"""
# NOTE: attention mask is a 2D boolean tensor
if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
return hidden_states
kernel_modules = (causal_conv1d_fn, causal_conv1d_update)
is_fast_path_available = all(kernel_modules)
|
Lfm2Attention
|
python
|
pytorch__pytorch
|
test/test_fake_tensor.py
|
{
"start": 44162,
"end": 44849
}
|
class ____(TestCase):
@ops(custom_op_db, dtypes=OpDTypes.any_one)
def test_fake(self, device, dtype, op):
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
for sample_input in sample_inputs_itr:
args = (sample_input.input,) + sample_input.args
kwargs = sample_input.kwargs
optests.fake_check(op, args, kwargs)
make_propagate_real_tensors_cls(FakeTensorOpInfoTest)
instantiate_device_type_tests(FakeTensorOpInfoTest, globals(), only_for=("cpu", "cuda"))
instantiate_device_type_tests(
PropagateRealTensorsFakeTensorOpInfoTest, # noqa: F821
globals(),
only_for=("cpu",),
)
|
FakeTensorOpInfoTest
|
python
|
simonw__sqlite-utils
|
sqlite_utils/db.py
|
{
"start": 5839,
"end": 5897
}
|
class ____(Exception):
"Error altering table"
|
AlterError
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 671041,
"end": 672446
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of
GrantEnterpriseOrganizationsMigratorRole
"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "organizations")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
organizations = sgqlc.types.Field(
"OrganizationConnection",
graphql_name="organizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""The organizations that had the migrator role applied to for the
given user.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
|
GrantEnterpriseOrganizationsMigratorRolePayload
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeVarDefaultClass2.py
|
{
"start": 3985,
"end": 4363
}
|
class ____(Generic[T1, *Ts2]): ...
tc1 = ClassTC()
reveal_type(tc1, expected_text="ClassTC[str, *tuple[str, ...]]")
tc2 = ClassTC[int]()
reveal_type(tc2, expected_text="ClassTC[int, *tuple[int, ...]]")
tc3 = ClassTC[int, *tuple[()]]()
reveal_type(tc3, expected_text="ClassTC[int]")
tc4 = ClassTC[int, *tuple[None]]()
reveal_type(tc4, expected_text="ClassTC[int, None]")
|
ClassTC
|
python
|
imageio__imageio
|
imageio/core/util.py
|
{
"start": 3647,
"end": 6142
}
|
class ____(np.ndarray):
"""Array(array, meta=None)
A subclass of np.ndarray that has a meta attribute. Get the dictionary
that contains the meta data using ``im.meta``. Convert to a plain numpy
array using ``np.asarray(im)``.
"""
def __new__(cls, array, meta=None):
# Check
if not isinstance(array, np.ndarray):
raise ValueError("Array expects a numpy array.")
if not (meta is None or isinstance(meta, dict)):
raise ValueError("Array expects meta data to be a dict.")
# Convert and return
meta = meta if meta is not None else getattr(array, "meta", {})
try:
ob = array.view(cls)
except AttributeError: # pragma: no cover
# Just return the original; no metadata on the array in Pypy!
return array
ob._copy_meta(meta)
return ob
def _copy_meta(self, meta):
"""Make a 2-level deep copy of the meta dictionary."""
self._meta = Dict()
for key, val in meta.items():
if isinstance(val, dict):
val = Dict(val) # Copy this level
self._meta[key] = val
@property
def meta(self):
"""The dict with the meta data of this image."""
return self._meta
def __array_finalize__(self, ob):
"""So the meta info is maintained when doing calculations with
the array.
"""
if isinstance(ob, Array):
self._copy_meta(ob.meta)
else:
self._copy_meta({})
def __array_wrap__(self, out, context=None):
"""So that we return a native numpy array (or scalar) when a
reducting ufunc is applied (such as sum(), std(), etc.)
"""
if not out.shape:
return out.dtype.type(out) # Scalar
elif out.shape != self.shape:
return out.view(type=np.ndarray)
elif not isinstance(out, Array):
return Array(out, self.meta)
else:
return out # Type Array
Image = Array # Alias for backwards compatibility
def asarray(a):
"""Pypy-safe version of np.asarray. Pypy's np.asarray consumes a
*lot* of memory if the given array is an ndarray subclass. This
function does not.
"""
if isinstance(a, np.ndarray):
if IS_PYPY: # pragma: no cover
a = a.copy() # pypy has issues with base views
plain = a.view(type=np.ndarray)
return plain
return np.asarray(a)
|
Array
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/projects.py
|
{
"start": 56227,
"end": 63402
}
|
class ____(Response):
"""
Response of projects.get_all endpoint.
:param projects: Projects list
:type projects: Sequence[ProjectsGetAllResponseSingle]
"""
_service = "projects"
_action = "get_all"
_version = "2.13"
_schema = {
"definitions": {
"projects_get_all_response_single": {
"properties": {
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Creation time",
"format": "date-time",
"type": ["string", "null"],
},
"default_output_destination": {
"description": "The default output destination URL for new tasks under this project",
"type": ["string", "null"],
},
"description": {
"description": "Project description",
"type": ["string", "null"],
},
"id": {"description": "Project id", "type": ["string", "null"]},
"name": {"description": "Project name", "type": ["string", "null"]},
"stats": {
"description": "Additional project stats",
"oneOf": [{"$ref": "#/definitions/stats"}, {"type": "null"}],
},
"sub_projects": {
"description": "The list of sub projects",
"items": {
"properties": {
"id": {
"description": "Subproject ID",
"type": "string",
},
"name": {
"description": "Subproject name",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
},
"stats": {
"properties": {
"active": {
"description": "Stats for active tasks",
"oneOf": [
{"$ref": "#/definitions/stats_status_count"},
{"type": "null"},
],
},
"archived": {
"description": "Stats for archived tasks",
"oneOf": [
{"$ref": "#/definitions/stats_status_count"},
{"type": "null"},
],
},
},
"type": "object",
},
"stats_status_count": {
"properties": {
"status_count": {
"description": "Status counts",
"properties": {
"closed": {
"description": "Number of 'closed' tasks in project",
"type": "integer",
},
"created": {
"description": "Number of 'created' tasks in project",
"type": "integer",
},
"failed": {
"description": "Number of 'failed' tasks in project",
"type": "integer",
},
"in_progress": {
"description": "Number of 'in_progress' tasks in project",
"type": "integer",
},
"published": {
"description": "Number of 'published' tasks in project",
"type": "integer",
},
"queued": {
"description": "Number of 'queued' tasks in project",
"type": "integer",
},
"stopped": {
"description": "Number of 'stopped' tasks in project",
"type": "integer",
},
"unknown": {
"description": "Number of 'unknown' tasks in project",
"type": "integer",
},
},
"type": ["object", "null"],
},
"total_runtime": {
"description": "Total run time of all tasks in project (in seconds)",
"type": ["integer", "null"],
},
},
"type": "object",
},
},
"properties": {
"projects": {
"description": "Projects list",
"items": {"$ref": "#/definitions/projects_get_all_response_single"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, projects: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetAllResponse, self).__init__(**kwargs)
self.projects = projects
@schema_property("projects")
def projects(self) -> Optional[List[Any]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [ProjectsGetAllResponseSingle.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "projects", ProjectsGetAllResponseSingle, is_array=True)
self._property_projects = value
|
GetAllResponse
|
python
|
numpy__numpy
|
numpy/_core/tests/test_umath.py
|
{
"start": 15949,
"end": 26879
}
|
class ____:
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
if 5 / 10 == 0.5:
assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
-0.05, -0.1, -0.9, -1, -1.2])
else:
assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.parametrize("dtype,ex_val", itertools.product(
sctypes['int'] + sctypes['uint'], (
(
# dividend
"np.array(range(fo.max-lsize, fo.max)).astype(dtype),"
# divisors
"np.arange(lsize).astype(dtype),"
# scalar divisors
"range(15)"
),
(
# dividend
"np.arange(fo.min, fo.min+lsize).astype(dtype),"
# divisors
"np.arange(lsize//-2, lsize//2).astype(dtype),"
# scalar divisors
"range(fo.min, fo.min + 15)"
), (
# dividend
"np.array(range(fo.max-lsize, fo.max)).astype(dtype),"
# divisors
"np.arange(lsize).astype(dtype),"
# scalar divisors
"[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]"
)
)
))
def test_division_int_boundary(self, dtype, ex_val):
fo = np.iinfo(dtype)
neg = -1 if fo.min < 0 else 1
# Large enough to test SIMD loops and remainder elements
lsize = 512 + 7
a, b, divisors = eval(ex_val)
a_lst, b_lst = a.tolist(), b.tolist()
c_div = lambda n, d: (
0 if d == 0 else (
fo.min if (n and n == fo.min and d == -1) else n // d
)
)
with np.errstate(divide='ignore'):
ac = a.copy()
ac //= b
div_ab = a // b
div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)]
msg = "Integer arrays floor division check (//)"
assert all(div_ab == div_lst), msg
msg_eq = "Integer arrays floor division check (//=)"
assert all(ac == div_lst), msg_eq
for divisor in divisors:
ac = a.copy()
with np.errstate(divide='ignore', over='ignore'):
div_a = a // divisor
ac //= divisor
div_lst = [c_div(i, divisor) for i in a_lst]
assert all(div_a == div_lst), msg
assert all(ac == div_lst), msg_eq
with np.errstate(divide='raise', over='raise'):
if 0 in b:
# Verify overflow case
with pytest.raises(FloatingPointError,
match="divide by zero encountered in floor_divide"):
a // b
else:
a // b
if fo.min and fo.min in a:
with pytest.raises(FloatingPointError,
match='overflow encountered in floor_divide'):
a // -1
elif fo.min:
a // -1
with pytest.raises(FloatingPointError,
match="divide by zero encountered in floor_divide"):
a // 0
with pytest.raises(FloatingPointError,
match="divide by zero encountered in floor_divide"):
ac = a.copy()
ac //= 0
np.array([], dtype=dtype) // 0
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.parametrize("dtype,ex_val", itertools.product(
    sctypes['int'] + sctypes['uint'], (
        "np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)",
        "np.array([fo.min, 1, -2, 1, 1, 2, -3]).astype(dtype)",
        "np.arange(fo.min, fo.min+(100*10), 10, dtype=dtype)",
        "np.array(range(fo.max-(100*7), fo.max, 7)).astype(dtype)",
    )
))
def test_division_int_reduce(self, dtype, ex_val):
    """``np.floor_divide.reduce`` against a ``functools.reduce`` of the same
    reference division, plus the divide-by-zero / overflow FPE paths."""
    fo = np.iinfo(dtype)
    # ex_val is a parametrized source string referencing fo/dtype above.
    a = eval(ex_val)
    lst = a.tolist()
    # Reference: both n // 0 and fo.min // -1 flush to 0 under reduce.
    c_div = lambda n, d: (
        0 if d == 0 or (n and n == fo.min and d == -1) else n // d
    )
    with np.errstate(divide='ignore'):
        div_a = np.floor_divide.reduce(a)
        div_lst = reduce(c_div, lst)
        msg = "Reduce floor integer division check"
        assert div_a == div_lst, msg

    with np.errstate(divide='raise', over='raise'):
        # range(-100, 100) contains 0, so the reduce must trip divide-by-zero.
        with pytest.raises(FloatingPointError,
                match="divide by zero encountered in reduce"):
            np.floor_divide.reduce(np.arange(-100, 100).astype(dtype))
        if fo.min:
            # Signed types only: fo.min // 1 // -1 overflows.
            with pytest.raises(FloatingPointError,
                    match='overflow encountered in reduce'):
                np.floor_divide.reduce(
                    np.array([fo.min, 1, -1], dtype=dtype)
                )
@pytest.mark.parametrize(
    "dividend,divisor,quotient",
    [(np.timedelta64(2, 'Y'), np.timedelta64(2, 'M'), 12),
     (np.timedelta64(2, 'Y'), np.timedelta64(-2, 'M'), -12),
     (np.timedelta64(-2, 'Y'), np.timedelta64(2, 'M'), -12),
     (np.timedelta64(-2, 'Y'), np.timedelta64(-2, 'M'), 12),
     (np.timedelta64(2, 'M'), np.timedelta64(-2, 'Y'), -1),
     (np.timedelta64(2, 'Y'), np.timedelta64(0, 'M'), 0),
     (np.timedelta64(2, 'Y'), 2, np.timedelta64(1, 'Y')),
     (np.timedelta64(2, 'Y'), -2, np.timedelta64(-1, 'Y')),
     (np.timedelta64(-2, 'Y'), 2, np.timedelta64(-1, 'Y')),
     (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')),
     (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')),
     (np.timedelta64(-2, 'Y'), -3, np.timedelta64(0, 'Y')),
     (np.timedelta64(-2, 'Y'), 0, np.timedelta64('Nat', 'Y')),
     ])
def test_division_int_timedelta(self, dividend, divisor, quotient):
    """timedelta64 floor division — scalar and array forms.

    Division-by-zero rows (zero divisor or NaT quotient) must raise a
    FloatingPointError under ``errstate(divide='raise', invalid='raise')``.
    """
    # If either divisor is 0 or quotient is Nat, check for division by 0
    if divisor and (isinstance(quotient, int) or not np.isnat(quotient)):
        msg = "Timedelta floor division check"
        assert dividend // divisor == quotient, msg

        # Test for arrays as well
        msg = "Timedelta arrays floor division check"
        dividend_array = np.array([dividend] * 5)
        quotient_array = np.array([quotient] * 5)
        assert all(dividend_array // divisor == quotient_array), msg
    else:
        if IS_WASM:
            pytest.skip("fp errors don't work in wasm")
        with np.errstate(divide='raise', invalid='raise'):
            with pytest.raises(FloatingPointError):
                dividend // divisor
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1. * 1j, 1. + .5 * 1j, 1. + 2. * 1j], dtype=np.complex128)
assert_almost_equal(x**2 / x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2 / x
assert_almost_equal(y / x, [1, 1], err_msg=msg)
def test_zero_division_complex(self):
with np.errstate(invalid="ignore", divide="ignore"):
x = np.array([0.0], dtype=np.complex128)
y = 1.0 / x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.nan) / x
assert_(np.isinf(y)[0])
y = complex(np.nan, np.inf) / x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.inf) / x
assert_(np.isinf(y)[0])
y = 0.0 / x
assert_(np.isnan(y)[0])
def test_floor_division_complex(self):
    """Floor division, divmod and remainder are undefined for complex
    arrays and must raise TypeError."""
    # check that floor division, divmod and remainder raises type errors
    x = np.array([.9 + 1j, -.1 + 1j, .9 + .5 * 1j, .9 + 2. * 1j], dtype=np.complex128)
    with pytest.raises(TypeError):
        x // 7
    with pytest.raises(TypeError):
        np.divmod(x, 7)
    with pytest.raises(TypeError):
        np.remainder(x, 7)
def test_floor_division_signed_zero(self):
# Check that the sign bit is correctly set when dividing positive and
# negative zero by one.
x = np.zeros(10)
assert_equal(np.signbit(x // 1), 0)
assert_equal(np.signbit((-x) // 1), 1)
@pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"),
                    reason="gh-22982")
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
def test_floor_division_errors(self, dtype):
    """FPE behaviour of float floor division: 1/0 raises only under
    ``divide='raise'``; operands already containing NaN never raise."""
    fnan = np.array(np.nan, dtype=dtype)
    fone = np.array(1.0, dtype=dtype)
    fzer = np.array(0.0, dtype=dtype)
    finf = np.array(np.inf, dtype=dtype)  # NOTE(review): unused here
    # divide by zero error check
    with np.errstate(divide='raise', invalid='ignore'):
        assert_raises(FloatingPointError, np.floor_divide, fone, fzer)
    with np.errstate(divide='ignore', invalid='raise'):
        np.floor_divide(fone, fzer)

    # The following already contain a NaN and should not warn
    with np.errstate(all='raise'):
        np.floor_divide(fnan, fone)
        np.floor_divide(fone, fnan)
        np.floor_divide(fnan, fzer)
        np.floor_divide(fzer, fnan)
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
def test_floor_division_corner_cases(self, dtype):
    """Return values (rather than FPEs) for the corner cases: anything
    involving NaN stays NaN, and 1.0 // 0.0 is +inf."""
    # test corner cases like 1.0//0.0 for errors and return vals
    x = np.zeros(10, dtype=dtype)
    y = np.ones(10, dtype=dtype)
    fnan = np.array(np.nan, dtype=dtype)
    fone = np.array(1.0, dtype=dtype)
    fzer = np.array(0.0, dtype=dtype)
    finf = np.array(np.inf, dtype=dtype)  # NOTE(review): unused here
    with warnings.catch_warnings():
        # NaN operands may legitimately warn; silence just that warning.
        warnings.filterwarnings('ignore', "invalid value encountered in floor_divide", RuntimeWarning)
        div = np.floor_divide(fnan, fone)
        assert np.isnan(div), f"div: {div}"
        div = np.floor_divide(fone, fnan)
        assert np.isnan(div), f"div: {div}"
        div = np.floor_divide(fnan, fzer)
        assert np.isnan(div), f"div: {div}"
    # verify 1.0//0.0 computations return inf
    with np.errstate(divide='ignore'):
        z = np.floor_divide(y, x)
        assert_(np.isinf(z).all())
def floor_divide_and_remainder(x, y):
    """Return ``(x // y, x % y)`` computed via the numpy ufuncs."""
    quotient = np.floor_divide(x, y)
    remainder = np.remainder(x, y)
    return (quotient, remainder)
def _signs(dt):
if dt in np.typecodes['UnsignedInteger']:
return (+1,)
else:
return (+1, -1)
|
TestDivision
|
python
|
astropy__astropy
|
astropy/cosmology/_src/tests/io/base.py
|
{
"start": 915,
"end": 1873
}
|
class ____(IOTestBase):
    """Tests for a Cosmology[To/From]Format with some ``format``.

    This class will not be directly called by :mod:`pytest` since its name does
    not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
    ``cosmo`` that returns/yields an instance of a |Cosmology|.
    See ``TestCosmology`` for an example.
    """

    @pytest.fixture(scope="class")
    def from_format(self):
        """Convert to Cosmology using ``Cosmology.from_format()``."""
        return Cosmology.from_format

    @pytest.fixture(scope="class")
    def to_format(self, cosmo):
        """Convert Cosmology instance using ``.to_format()``."""
        return cosmo.to_format

    def can_autodentify(self, format):
        """Check whether a format can auto-identify."""
        # NOTE(review): name reads like a typo for "can_autoidentify", but
        # renaming would break subclasses — left unchanged.
        # Relies on the registry's private _identifiers mapping.
        return format in Cosmology.from_format.registry._identifiers
|
ToFromTestMixinBase
|
python
|
doocs__leetcode
|
lcof/面试题29. 顺时针打印矩阵/Solution.py
|
{
"start": 0,
"end": 639
}
|
class ____:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
if not matrix or not matrix[0]:
return []
dirs = (0, 1, 0, -1, 0)
m, n = len(matrix), len(matrix[0])
vis = [[False] * n for _ in range(m)]
ans = []
i = j = k = 0
for _ in range(m * n):
ans.append(matrix[i][j])
vis[i][j] = True
x, y = i + dirs[k], j + dirs[k + 1]
if x < 0 or y < 0 or x >= m or y >= n or vis[x][y]:
k = (k + 1) % 4
x, y = i + dirs[k], j + dirs[k + 1]
i, j = x, y
return ans
|
Solution
|
python
|
pytorch__pytorch
|
benchmarks/transformer/sdp.py
|
{
"start": 1462,
"end": 2281
}
|
class ____:
nn_mha_time: float
compiled_nn_mha_time: Optional[float]
composite_mha_time: float
compiled_composite_mha_time: Optional[float]
def get_entries(self) -> list:
return [
f"{self.nn_mha_time:2f}",
f"{self.compiled_nn_mha_time:2f}" if self.compiled_nn_mha_time else None,
f"{self.composite_mha_time:2f}",
f"{self.compiled_composite_mha_time:2f}"
if self.compiled_composite_mha_time
else None,
]
@classmethod
def get_entry_names(cls) -> list[str]:
return [
"nn_mha_time (\u00b5s)",
"compiled_nn_mha_time (\u00b5s)",
"composite_mha_time (\u00b5s)",
"compiled_composite_mha_time (\u00b5s)",
]
@dataclass(frozen=True)
|
ExperimentResults
|
python
|
google__jax
|
jax/_src/linear_util.py
|
{
"start": 9972,
"end": 10050
}
|
class ____:
    """Empty marker class; instances carry no state of their own."""
    pass

# Module-level singleton.
# NOTE(review): `InitialResultPaths` is the (unmasked) name of the class
# defined just above — confirm against the original file.
initial_result_paths = InitialResultPaths()
|
InitialResultPaths
|
python
|
getsentry__sentry
|
src/sentry/issues/endpoints/organization_group_search_view_details.py
|
{
"start": 1029,
"end": 1373
}
|
class ____(TypedDict):
    """Serialized shape of a group search view in validator responses.

    Only ``id`` is required; every other key is optional (``NotRequired``).
    """

    id: str
    name: NotRequired[str]
    query: NotRequired[str]
    querySort: NotRequired[SORT_LITERALS]  # project-defined sort literal union
    projects: NotRequired[list[int]]  # project IDs
    isAllProjects: NotRequired[bool]
    environments: NotRequired[list[str]]
    timeFilters: NotRequired[dict[str, Any]]
@region_silo_endpoint
|
GroupSearchViewValidatorResponse
|
python
|
numba__numba
|
numba/cuda/tests/cudapy/test_fastmath.py
|
{
"start": 429,
"end": 1117
}
|
class ____:
    """Expected/forbidden substrings in fastmath vs. precise PTX output.

    Presumably decorated with ``@dataclass`` above this view (fields use
    ``field(default_factory=...)``) — confirm.
    """

    fast_expected: List[str] = field(default_factory=list)    # must appear in fastmath PTX
    fast_unexpected: List[str] = field(default_factory=list)  # must NOT appear in fastmath PTX
    prec_expected: List[str] = field(default_factory=list)    # must appear in precise PTX
    prec_unexpected: List[str] = field(default_factory=list)  # must NOT appear in precise PTX

    def check(self, test: CUDATestCase, fast: str, prec: str):
        """Assert every criterion against the two PTX dumps ``fast``/``prec``."""
        test.assertTrue(all(i in fast for i in self.fast_expected))
        test.assertTrue(all(i not in fast for i in self.fast_unexpected))
        test.assertTrue(all(i in prec for i in self.prec_expected))
        test.assertTrue(all(i not in prec for i in self.prec_unexpected))
@skip_on_cudasim('Fastmath and PTX inspection not available on cudasim')
|
FastMathCriterion
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py
|
{
"start": 38852,
"end": 39920
}
|
class ____(ColumnConstraint):
    """A column constraint that validates the pandas column dtypes based on the expected set of dtypes.

    Args:
        expected_dtype_set (Set[str]): The set of pandas dtypes that the pandas column dtypes must match.
    """

    def __init__(self, expected_dtype_set):
        self.expected_dtype_set = check.set_param(expected_dtype_set, "expected_dtype_set")
        # Same text serves both the error message and the markdown rendering.
        description = f"Column dtype must be in the following set {self.expected_dtype_set}."
        super().__init__(error_description=description, markdown_description=description)

    def validate(self, dataframe, column_name):
        """Raise ColumnConstraintViolationException unless the column's dtype
        (compared via its string form) is in the expected set."""
        received_dtypes = dataframe[column_name].dtype
        if str(received_dtypes) not in self.expected_dtype_set:
            raise ColumnConstraintViolationException(
                constraint_name=self.name,
                constraint_description=(
                    f"{self.error_description}. DTypes received: {received_dtypes}"
                ),
                column_name=column_name,
            )
|
ColumnDTypeInSetConstraint
|
python
|
pydata__xarray
|
xarray/core/_typed_ops.py
|
{
"start": 17896,
"end": 30491
}
|
class ____:
    """Operator-overload mixin (auto-generated style).

    Every dunder delegates to ``_binary_op`` / ``_inplace_binary_op`` /
    ``_unary_op``, which subclasses implement. The ``@overload`` stacks
    encode promotion: operating against Dataset/DataTree yields that type,
    anything else yields ``Self``.
    """

    __slots__ = ()

    def _binary_op(
        self, other: DaCompatible, f: Callable, reflexive: bool = False
    ) -> Self:
        # Subclasses provide the broadcasting/apply machinery.
        raise NotImplementedError

    # --- forward binary arithmetic / bitwise operators ---
    @overload
    def __add__(self, other: Dataset) -> Dataset: ...
    @overload
    def __add__(self, other: DataTree) -> DataTree: ...
    @overload
    def __add__(self, other: DaCompatible) -> Self: ...
    def __add__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.add)

    @overload
    def __sub__(self, other: Dataset) -> Dataset: ...
    @overload
    def __sub__(self, other: DataTree) -> DataTree: ...
    @overload
    def __sub__(self, other: DaCompatible) -> Self: ...
    def __sub__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.sub)

    @overload
    def __mul__(self, other: Dataset) -> Dataset: ...
    @overload
    def __mul__(self, other: DataTree) -> DataTree: ...
    @overload
    def __mul__(self, other: DaCompatible) -> Self: ...
    def __mul__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.mul)

    @overload
    def __pow__(self, other: Dataset) -> Dataset: ...
    @overload
    def __pow__(self, other: DataTree) -> DataTree: ...
    @overload
    def __pow__(self, other: DaCompatible) -> Self: ...
    def __pow__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.pow)

    @overload
    def __truediv__(self, other: Dataset) -> Dataset: ...
    @overload
    def __truediv__(self, other: DataTree) -> DataTree: ...
    @overload
    def __truediv__(self, other: DaCompatible) -> Self: ...
    def __truediv__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.truediv)

    @overload
    def __floordiv__(self, other: Dataset) -> Dataset: ...
    @overload
    def __floordiv__(self, other: DataTree) -> DataTree: ...
    @overload
    def __floordiv__(self, other: DaCompatible) -> Self: ...
    def __floordiv__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.floordiv)

    @overload
    def __mod__(self, other: Dataset) -> Dataset: ...
    @overload
    def __mod__(self, other: DataTree) -> DataTree: ...
    @overload
    def __mod__(self, other: DaCompatible) -> Self: ...
    def __mod__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.mod)

    @overload
    def __and__(self, other: Dataset) -> Dataset: ...
    @overload
    def __and__(self, other: DataTree) -> DataTree: ...
    @overload
    def __and__(self, other: DaCompatible) -> Self: ...
    def __and__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.and_)

    @overload
    def __xor__(self, other: Dataset) -> Dataset: ...
    @overload
    def __xor__(self, other: DataTree) -> DataTree: ...
    @overload
    def __xor__(self, other: DaCompatible) -> Self: ...
    def __xor__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.xor)

    @overload
    def __or__(self, other: Dataset) -> Dataset: ...
    @overload
    def __or__(self, other: DataTree) -> DataTree: ...
    @overload
    def __or__(self, other: DaCompatible) -> Self: ...
    def __or__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.or_)

    @overload
    def __lshift__(self, other: Dataset) -> Dataset: ...
    @overload
    def __lshift__(self, other: DataTree) -> DataTree: ...
    @overload
    def __lshift__(self, other: DaCompatible) -> Self: ...
    def __lshift__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.lshift)

    @overload
    def __rshift__(self, other: Dataset) -> Dataset: ...
    @overload
    def __rshift__(self, other: DataTree) -> DataTree: ...
    @overload
    def __rshift__(self, other: DaCompatible) -> Self: ...
    def __rshift__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.rshift)

    # --- comparisons (element-wise, not total ordering) ---
    @overload
    def __lt__(self, other: Dataset) -> Dataset: ...
    @overload
    def __lt__(self, other: DataTree) -> DataTree: ...
    @overload
    def __lt__(self, other: DaCompatible) -> Self: ...
    def __lt__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.lt)

    @overload
    def __le__(self, other: Dataset) -> Dataset: ...
    @overload
    def __le__(self, other: DataTree) -> DataTree: ...
    @overload
    def __le__(self, other: DaCompatible) -> Self: ...
    def __le__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.le)

    @overload
    def __gt__(self, other: Dataset) -> Dataset: ...
    @overload
    def __gt__(self, other: DataTree) -> DataTree: ...
    @overload
    def __gt__(self, other: DaCompatible) -> Self: ...
    def __gt__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.gt)

    @overload
    def __ge__(self, other: Dataset) -> Dataset: ...
    @overload
    def __ge__(self, other: DataTree) -> DataTree: ...
    @overload
    def __ge__(self, other: DaCompatible) -> Self: ...
    def __ge__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, operator.ge)

    # __eq__/__ne__ use the nan-aware array helpers, not operator.eq/ne.
    @overload  # type:ignore[override]
    def __eq__(self, other: Dataset) -> Dataset: ...
    @overload
    def __eq__(self, other: DataTree) -> DataTree: ...
    @overload
    def __eq__(self, other: DaCompatible) -> Self: ...
    def __eq__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, nputils.array_eq)

    @overload  # type:ignore[override]
    def __ne__(self, other: Dataset) -> Dataset: ...
    @overload
    def __ne__(self, other: DataTree) -> DataTree: ...
    @overload
    def __ne__(self, other: DaCompatible) -> Self: ...
    def __ne__(self, other: DaCompatible) -> Self | Dataset | DataTree:
        return self._binary_op(other, nputils.array_ne)

    # When __eq__ is defined but __hash__ is not, then an object is unhashable,
    # and it should be declared as follows:
    __hash__: None  # type:ignore[assignment]

    # --- reflexive binary operators ---
    def __radd__(self, other: DaCompatible) -> Self:
        return self._binary_op(other, operator.add, reflexive=True)

    def __rsub__(self, other: DaCompatible) -> Self:
        return self._binary_op(other, operator.sub, reflexive=True)

    def __rmul__(self, other: DaCompatible) -> Self:
        return self._binary_op(other, operator.mul, reflexive=True)

    def __rpow__(self, other: DaCompatible) -> Self:
        return self._binary_op(other, operator.pow, reflexive=True)

    def __rtruediv__(self, other: DaCompatible) -> Self:
        return self._binary_op(other, operator.truediv, reflexive=True)

    def __rfloordiv__(self, other: DaCompatible) -> Self:
        return self._binary_op(other, operator.floordiv, reflexive=True)

    def __rmod__(self, other: DaCompatible) -> Self:
        return self._binary_op(other, operator.mod, reflexive=True)

    def __rand__(self, other: DaCompatible) -> Self:
        return self._binary_op(other, operator.and_, reflexive=True)

    def __rxor__(self, other: DaCompatible) -> Self:
        return self._binary_op(other, operator.xor, reflexive=True)

    def __ror__(self, other: DaCompatible) -> Self:
        return self._binary_op(other, operator.or_, reflexive=True)

    def _inplace_binary_op(self, other: DaCompatible, f: Callable) -> Self:
        # Subclasses provide the in-place apply machinery.
        raise NotImplementedError

    # --- in-place operators ---
    def __iadd__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.iadd)

    def __isub__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.isub)

    def __imul__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.imul)

    def __ipow__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.ipow)

    def __itruediv__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.itruediv)

    def __ifloordiv__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.ifloordiv)

    def __imod__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.imod)

    def __iand__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.iand)

    def __ixor__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.ixor)

    def __ior__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.ior)

    def __ilshift__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.ilshift)

    def __irshift__(self, other: DaCompatible) -> Self:  # type:ignore[misc]
        return self._inplace_binary_op(other, operator.irshift)

    def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self:
        # Subclasses provide the unary apply machinery.
        raise NotImplementedError

    # --- unary operators and array-style helpers ---
    def __neg__(self) -> Self:
        return self._unary_op(operator.neg)

    def __pos__(self) -> Self:
        return self._unary_op(operator.pos)

    def __abs__(self) -> Self:
        return self._unary_op(operator.abs)

    def __invert__(self) -> Self:
        return self._unary_op(operator.invert)

    def round(self, *args: Any, **kwargs: Any) -> Self:
        return self._unary_op(ops.round_, *args, **kwargs)

    def argsort(self, *args: Any, **kwargs: Any) -> Self:
        return self._unary_op(ops.argsort, *args, **kwargs)

    def conj(self, *args: Any, **kwargs: Any) -> Self:
        return self._unary_op(ops.conj, *args, **kwargs)

    def conjugate(self, *args: Any, **kwargs: Any) -> Self:
        return self._unary_op(ops.conjugate, *args, **kwargs)

    # Borrow docstrings from the operators each method wraps.
    __add__.__doc__ = operator.add.__doc__
    __sub__.__doc__ = operator.sub.__doc__
    __mul__.__doc__ = operator.mul.__doc__
    __pow__.__doc__ = operator.pow.__doc__
    __truediv__.__doc__ = operator.truediv.__doc__
    __floordiv__.__doc__ = operator.floordiv.__doc__
    __mod__.__doc__ = operator.mod.__doc__
    __and__.__doc__ = operator.and_.__doc__
    __xor__.__doc__ = operator.xor.__doc__
    __or__.__doc__ = operator.or_.__doc__
    __lshift__.__doc__ = operator.lshift.__doc__
    __rshift__.__doc__ = operator.rshift.__doc__
    __lt__.__doc__ = operator.lt.__doc__
    __le__.__doc__ = operator.le.__doc__
    __gt__.__doc__ = operator.gt.__doc__
    __ge__.__doc__ = operator.ge.__doc__
    __eq__.__doc__ = nputils.array_eq.__doc__
    __ne__.__doc__ = nputils.array_ne.__doc__
    __radd__.__doc__ = operator.add.__doc__
    __rsub__.__doc__ = operator.sub.__doc__
    __rmul__.__doc__ = operator.mul.__doc__
    __rpow__.__doc__ = operator.pow.__doc__
    __rtruediv__.__doc__ = operator.truediv.__doc__
    __rfloordiv__.__doc__ = operator.floordiv.__doc__
    __rmod__.__doc__ = operator.mod.__doc__
    __rand__.__doc__ = operator.and_.__doc__
    __rxor__.__doc__ = operator.xor.__doc__
    __ror__.__doc__ = operator.or_.__doc__
    __iadd__.__doc__ = operator.iadd.__doc__
    __isub__.__doc__ = operator.isub.__doc__
    __imul__.__doc__ = operator.imul.__doc__
    __ipow__.__doc__ = operator.ipow.__doc__
    __itruediv__.__doc__ = operator.itruediv.__doc__
    __ifloordiv__.__doc__ = operator.ifloordiv.__doc__
    __imod__.__doc__ = operator.imod.__doc__
    __iand__.__doc__ = operator.iand.__doc__
    __ixor__.__doc__ = operator.ixor.__doc__
    __ior__.__doc__ = operator.ior.__doc__
    __ilshift__.__doc__ = operator.ilshift.__doc__
    __irshift__.__doc__ = operator.irshift.__doc__
    __neg__.__doc__ = operator.neg.__doc__
    __pos__.__doc__ = operator.pos.__doc__
    __abs__.__doc__ = operator.abs.__doc__
    __invert__.__doc__ = operator.invert.__doc__
    round.__doc__ = ops.round_.__doc__
    argsort.__doc__ = ops.argsort.__doc__
    conj.__doc__ = ops.conj.__doc__
    conjugate.__doc__ = ops.conjugate.__doc__
|
DataArrayOpsMixin
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/embed/test_bundle.py
|
{
"start": 8048,
"end": 9051
}
|
class ____:
    """beb._use_tables must be True iff the object graph contains a table."""

    def test_without_tables(self) -> None:
        # No mix of plots/widgets without a table needs table resources.
        assert beb._use_tables(beb._all_objs([plot()])) is False
        assert beb._use_tables(beb._all_objs([plot(), glplot()])) is False
        assert beb._use_tables(beb._all_objs([plot(), widget()])) is False

        # Same holds when the objects are roots of a Document.
        d = Document()
        d.add_root(plot())
        d.add_root(glplot())
        d.add_root(widget())
        assert beb._use_tables(beb._all_objs([d])) is False

    def test_with_tables(self) -> None:
        # A single table anywhere in the mix flips the result to True.
        assert beb._use_tables(beb._all_objs([table()])) is True
        assert beb._use_tables(beb._all_objs([table(), plot()])) is True
        assert beb._use_tables(beb._all_objs([table(), plot(), glplot()])) is True
        assert beb._use_tables(beb._all_objs([table(), widget(), table(), glplot()])) is True

        # Same holds when the objects are roots of a Document.
        d = Document()
        d.add_root(plot())
        d.add_root(table())
        d.add_root(widget())
        d.add_root(glplot())
        assert beb._use_tables(beb._all_objs([d])) is True
|
Test__use_tables
|
python
|
spack__spack
|
lib/spack/spack/cmd/create.py
|
{
"start": 9006,
"end": 9453
}
|
class ____(PackageTemplate):
    """Provides appropriate overrides for SCons-based packages"""

    # Name of the build-system base class the generated package derives from.
    base_class_name = "SConsPackage"

    # Import line emitted at the top of the generated package.py.
    package_class_import = "from spack_repo.builtin.build_systems.scons import SConsPackage"

    # Stub method body written verbatim into the generated package.py,
    # so the literal's content (including its FIXMEs) must stay exact.
    body_def = """\
    def build_args(self, spec, prefix):
        # FIXME: Add arguments to pass to build.
        # FIXME: If not needed delete this function
        args = []
        return args"""
|
SconsPackageTemplate
|
python
|
pytorch__pytorch
|
test/test_autograd.py
|
{
"start": 488099,
"end": 505424
}
|
class ____(TestCase):
    def get_default_streams(self, num_devices=1):
        """Return a tuple of each device's default stream (device-module API)."""
        out = []
        for i in range(num_devices):
            with _set_device_index(i):
                acc = torch.accelerator.current_accelerator()
                out.append(torch.get_device_module(acc).default_stream())
        return tuple(out)
    def synchronize_all_devices(self, num_devices=1):
        """Block until all pending work on the first ``num_devices`` devices finishes."""
        for i in range(num_devices):
            torch.accelerator.synchronize(i)
    def assert_all_streams_default(self, num_devices=1):
        """Assert every device's current stream is its default stream."""
        # Sanity check
        default_streams = self.get_default_streams(num_devices)
        for i in range(num_devices):
            with _set_device_index(i):
                acc = torch.accelerator.current_accelerator()
                # Do this instead of using torch.accelerator.current_stream(i)
                # Otherwise, e.g. in the case of cuda, we'd be trying to compare
                # torch.cuda.Stream with torch.Stream
                self.assertEqual(
                    torch.get_device_module(acc).current_stream(), default_streams[i]
                )
    # AttributeError: module 'torch.mps' has no attribute 'default_stream'
    @expectedFailureMPS
    @skipCUDANonDefaultStreamIf(True)
    def test_consumer_to_single_producer_case_2_correctness(self, device):
        """Consumer on stream s0 and producer/gradient on s1 (same device);
        a missing engine-inserted sync would surface as a wrong a.grad."""
        if device == "cpu":
            self.skipTest("requires accelerator")
        # Device    Stream
        # Consumer (MulBackward): cuda:0    s0
        # Producer              : cuda:0    s1
        # Gradient              : cuda:0    s1
        class Producer(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x.clone()

            @staticmethod
            def backward(ctx, gO):
                out = gO.clone()
                # Long stall so an absent stream sync shows up as a race.
                _sleep_if_cuda(NUM_GPU_CYCLES_IN_ONE_SEC // 2)
                out.add_(1)
                return out

        def test():
            self.synchronize_all_devices()
            self.assert_all_streams_default()
            with torch.Stream(0) as s0:
                a = torch.ones(256, 256, requires_grad=True, device=_get_device_name(0))
                b = a * 2
            with torch.Stream(0) as s1:
                s1.wait_stream(s0)
                out = Producer.apply(b)
            with torch.autograd.grad_mode.set_multithreading_enabled(False):
                out.sum().backward()
            self.synchronize_all_devices()
            # Expected result: a.grad = (grad_out + 1) * 2 = 4
            self.assertEqual(a.grad, torch.full_like(a, 4))

        # Run an extra time to warm up
        for _ in range(2):
            test()
    def _test_consumer_to_single_producer_case_3_correctness(
        self, non_default_ambient_stream
    ):
        """Case 3 helper: producer on a second device (cuda:1 default) with
        the gradient delivered on cuda:0 default; consumer runs on s0."""
        # Device    Stream
        # Consumer (MulBackward): cuda:0    s0
        # Producer              : cuda:1    cuda:1 default
        # Gradient              : cuda:0    cuda:0 default
        class Producer(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                # The node's canonical stream is the current stream
                # of the device of the first output.
                ctx.node_stream = torch.accelerator.current_stream(1)
                return x.to(_get_device_name(1))

            @staticmethod
            def backward(ctx, gO):
                out = gO.to(_get_device_name(0))
                with _set_device_index(0):
                    _sleep_if_cuda(NUM_GPU_CYCLES_IN_ONE_SEC // 2)
                    # It's the node's responsibility to sync back to its canonical stream.
                    out.add_(1)
                ctx.node_stream.wait_stream(torch.accelerator.current_stream(0))
                return out

        def test():
            self.synchronize_all_devices(2)
            self.assert_all_streams_default(2)
            (default_stream_0,) = self.get_default_streams()
            # Ensure consumer node happens on non-default stream so that
            # when FuncBackward produces a gradient on a default stream
            # a sync is necessary.
            with torch.Stream(0) as s0:
                a = torch.ones(256, 256, requires_grad=True, device="cuda")
                b = a * 2
            default_stream_0.wait_stream(s0)
            out = Producer.apply(b)

            def call_backward(x):
                with torch.autograd.grad_mode.set_multithreading_enabled(False):
                    x.sum().backward()

            if non_default_ambient_stream:
                with torch.Stream(0) as s1:
                    s1.wait_stream(default_stream_0)
                    call_backward(out)
            else:
                call_backward(out)
            self.synchronize_all_devices(2)
            # Expected result: a.grad = (grad_out + 1) * 2 = 4
            self.assertEqual(a.grad, torch.full_like(a, 4))

        # Run an extra time to warm up
        for _ in range(2):
            test()
    # AttributeError: module 'torch.mps' has no attribute 'default_stream'
    @expectedFailureMPS
    @skipCUDANonDefaultStreamIf(True)
    @unittest.skipIf(
        torch.accelerator.device_count() < 2, "accelerator count is less than 2"
    )
    def test_consumer_to_single_producer_case_3_correctness_non_default_ambient_stream(
        self, device
    ):
        """Case 3 with backward launched from a non-default ambient stream."""
        if device == "cpu":
            self.skipTest("requires accelerator")
        self._test_consumer_to_single_producer_case_3_correctness(
            non_default_ambient_stream=True
        )
    # AttributeError: module 'torch.mps' has no attribute 'default_stream'
    @expectedFailureMPS
    @skipCUDANonDefaultStreamIf(True)
    @unittest.skipIf(
        torch.accelerator.device_count() < 2, "accelerator count is less than 2"
    )
    def test_consumer_to_single_producer_case_3_correctness(self, device):
        """Case 3 with backward launched from the default ambient stream."""
        if device == "cpu":
            self.skipTest("requires accelerator")
        self._test_consumer_to_single_producer_case_3_correctness(
            non_default_ambient_stream=False
        )
    # AttributeError: module 'torch.mps' has no attribute 'default_stream'
    @expectedFailureMPS
    @skipCUDANonDefaultStreamIf(True)
    @unittest.skipIf(
        torch.accelerator.device_count() < 2, "accelerator count is less than 2"
    )
    def test_consumer_to_single_producer_case_4_correctness(self, device):
        """Case 4: multi-output consumer on cuda:0 default; producer and its
        gradient on cuda:1 stream s1."""
        if device == "cpu":
            self.skipTest("requires accelerator")
        # Device    Stream
        # Consumer: cuda:0    cuda:0 default
        # Producer: cuda:1    s1
        # Gradient: cuda:1    s1
        class Producer(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x.clone()

            @staticmethod
            def backward(ctx, gO):
                out = gO.clone()
                # Stall so missing syncs surface as wrong results.
                _sleep_if_cuda(NUM_GPU_CYCLES_IN_ONE_SEC // 2)
                return out.add_(1)

        class Consumer(torch.autograd.Function):
            # In the multi-output case, the node's canonical device and stream correspond to
            # that of its first output. This is required to induce cases 4/5.
            @staticmethod
            def forward(ctx, x):
                return x.clone(), x.to(_get_device_name(1))

            @staticmethod
            def backward(ctx, gO_0, gO_1):
                # gO_1 is on s1, but we're currently doing compute in cuda:1 default
                # It's the user's responsibility to sync to consumer (.to() should do this
                # already.)
                # Things would work out if the engine sync'd s1 with consumer.
                # Ignore grad wrt first arg because we don't use it.
                return gO_1.to(_get_device_name(0))

        def test():
            self.synchronize_all_devices(2)
            self.assert_all_streams_default(2)
            _, default_stream_1 = self.get_default_streams(2)
            a = torch.ones(256, 256, requires_grad=True, device=_get_device_name(0))
            _unused, b = Consumer.apply(a)
            with torch.Stream(1) as s1:
                s1.wait_stream(default_stream_1)
                out = Producer.apply(b)
            with torch.autograd.grad_mode.set_multithreading_enabled(False):
                out.sum().backward()
            self.synchronize_all_devices(2)
            # Expected result: a.grad = grad_out + 1 = 2
            self.assertEqual(a.grad, torch.full_like(a, 2))

        # Run an extra time to warm up
        for _ in range(2):
            test()
    # AttributeError: module 'torch.mps' has no attribute 'default_stream'
    @expectedFailureMPS
    @skipCUDANonDefaultStreamIf(True)
    @unittest.skipIf(
        torch.accelerator.device_count() < 2, "accelerator count is less than 2"
    )
    def test_consumer_to_multi_producer_case_4_correctness(self, device):
        """Case 4 with two producers on different cuda:1 streams feeding one
        accumulated gradient; checks the accumulation-stream ordering."""
        if device == "cpu":
            self.skipTest("requires accelerator")
        # Device    Stream
        # Consumer  : cuda:0    cuda:0 default
        #
        # Producer 1: cuda:1    s1
        # Gradient 1: cuda:1    s1
        #
        # Producer 2: cuda:1    s2
        # Gradient 2: cuda:1    s2
        #
        # Accumulation stream: s2 since it is scheduled first
        class ProducerFast(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x.clone()

            @staticmethod
            def backward(ctx, gO):
                out = gO.clone()
                return out * 2

        class ProducerSlow(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x.clone()

            @staticmethod
            def backward(ctx, gO):
                out = gO.clone()
                # Stall so the in-place mul lands late without proper syncs.
                _sleep_if_cuda(NUM_GPU_CYCLES_IN_ONE_SEC // 2)
                return out.mul_(2)

        class Consumer(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                ctx.node_stream = torch.accelerator.current_stream(x.device)
                return x.clone(), x.to(_get_device_name(1))

            @staticmethod
            def backward(ctx, gO_0, gO_1):
                torch.accelerator.current_stream(gO_1.device).wait_stream(
                    ctx.node_stream
                )
                return (gO_1 * 2).to(_get_device_name(0))

        def test():
            self.synchronize_all_devices(2)
            self.assert_all_streams_default(2)
            default_stream_0, default_stream_1 = self.get_default_streams(2)
            a = torch.ones(256, 256, requires_grad=True, device=_get_device_name(0))
            _unused, b = Consumer.apply(a)
            with torch.Stream(1) as s1:
                s1.wait_stream(default_stream_1)
                out1 = ProducerFast.apply(b)
            with torch.Stream(1) as s2:
                s2.wait_stream(default_stream_1)
                out2 = ProducerSlow.apply(b)
            default_stream_1.wait_stream(s1)
            default_stream_1.wait_stream(s2)
            with torch.autograd.grad_mode.set_multithreading_enabled(False):
                (out1 + out2).sum().backward()
            self.synchronize_all_devices(2)
            # If the accumulation stream does not wait for the slow producer stream
            # the in-place mul-by-2 is performed on the accumulated buffer AFTER
            # ProducerFast has already accumulated!
            #
            # Correct:   (1.mul_(2) + 2) * 2 = 8
            # Incorrect: (1 + 2).mul_(2) * 2 = 12
            self.assertEqual(a.grad, torch.full_like(a, 8))

        # Run an extra time to warm up
        for _ in range(2):
            test()
# This test may spuriously fail on non-cuda accelerators (since we won't
# be calling sleep)
@onlyCUDA
@skipCUDANonDefaultStreamIf(True)
def test_side_stream_backward_overlap(self, device):
    # In case 2/3, we would designate the consumer as the accumulation
    # stream and naively, one might have the consumer wait for the producer
    # as soon as we've added to the InputBuffer the first time.
    #
    # However, in the case where the stream of the consumer also happens to
    # be the stream of the producer, this is suboptimal because it would
    # prevent the computation of the two producers from being overlapped.
    # what you really want to do is to have that sync between the producer
    # and consumer to be delayed until right before the accumulation.
    # Note that this doesn't address N=3, but the side-stream N=2 case is
    # the common case.
    # Timing events recorded during backward; filled in by the custom
    # Functions below and inspected by check_ordering().
    events = {
        "main_backward_start": None,
        "side_backward_start": None,
        "side_backward_end": None,
    }

    class Main(torch.autograd.Function):
        # Pass-through Function running on the default stream.
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, gO):
            # Record when main backward starts
            evt = torch.Event(enable_timing=True)
            evt.record()
            events["main_backward_start"] = evt
            return gO

    class Side(torch.autograd.Function):
        # Pass-through Function whose backward is artificially slow so
        # that overlap with Main's backward is observable via the events.
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, gO):
            evt = torch.Event(enable_timing=True)
            evt.record()
            events["side_backward_start"] = evt
            _sleep_if_cuda(NUM_GPU_CYCLES_IN_ONE_SEC // 2)
            result = gO.clone()
            evt = torch.Event(enable_timing=True)
            evt.record()
            events["side_backward_end"] = evt
            return result

    def populate_events():
        # Run one forward/backward pass, filling the `events` dict.
        self.synchronize_all_devices()
        self.assert_all_streams_default()
        (default_stream_0,) = self.get_default_streams()
        a = torch.ones(256, 256, requires_grad=True, device=_get_device_name(0))
        b = a.clone()  # not a leaf, does it matter?
        evt = torch.Event()
        evt.record()
        # Overlap during forward
        c_main = Main.apply(b)
        with torch.Stream(0) as s0:
            s0.wait_event(evt)
            c_side = Side.apply(b)
        default_stream_0.wait_stream(s0)
        with torch.autograd.grad_mode.set_multithreading_enabled(False):
            (c_main + c_side).sum().backward()
        self.synchronize_all_devices()

    def check_ordering():
        # Sanity check: side backward's end happens after its start
        self.assertTrue(
            events["side_backward_start"].elapsed_time(events["side_backward_end"])
            > 0
        )
        # Overlap check: main's backward starts before side's backward
        # ends, i.e. the two backwards actually overlapped.
        self.assertTrue(
            events["main_backward_start"].elapsed_time(events["side_backward_end"])
            > 0
        )

    # Warmup
    for _ in range(2):
        populate_events()
    # Reset events (not really necessary but OK)
    events["side_backward_start"] = None
    events["side_backward_end"] = None
    events["main_backward_start"] = None
    # Test
    populate_events()
    check_ordering()
@expectedFailureMPS
def test_warn_on_accumulate_grad_stream_mismatch_flag(self, device):
    # Verifies the stream-mismatch warning during gradient accumulation
    # fires exactly when (a) it is not suppressed via
    # set_warn_on_accumulate_grad_stream_mismatch(False) and (b) the
    # AccumulateGrad node is kept alive (via `b`) after being created on
    # a different stream than the one backward runs on.
    if device == "cpu":
        self.skipTest("requires accelerator")

    def do_test(suppress_warn, keep_grad_acc):
        def _test():
            with warnings.catch_warnings(record=True) as warns:
                warnings.simplefilter("always")
                with torch.Stream(0) as s0:
                    a = torch.ones(8, 8, device=device, requires_grad=True)
                    if keep_grad_acc:
                        # create grad_acc under s0 and keep alive with b
                        b = a.clone()
                with torch.Stream(0) as s1:
                    s1.wait_stream(s0)
                    c = a.sum()
                    c.backward()
                filter_str = "set_warn_on_accumulate_grad_stream_mismatch"
                # True iff at least one captured warning mentions the flag.
                return sum([filter_str in str(w.message) for w in warns]) > 0

        if suppress_warn:
            try:
                torch.autograd.graph.set_warn_on_accumulate_grad_stream_mismatch(
                    False
                )
                actual_warn = _test()
            finally:
                # Always restore the global default so later tests are
                # unaffected, even if _test() raises.
                torch.autograd.graph.set_warn_on_accumulate_grad_stream_mismatch(
                    True
                )
        else:
            actual_warn = _test()
        expect_warn = not suppress_warn and keep_grad_acc
        self.assertEqual(actual_warn, expect_warn)

    # Warn by default
    self.assertTrue(torch._C._warn_on_accumulate_grad_stream_mismatch())
    for suppress_warn in (True, False):
        for keep_grad_acc in (True, False):
            do_test(suppress_warn=suppress_warn, keep_grad_acc=keep_grad_acc)
|
TestAutogradStreamSynchronization
|
python
|
keras-team__keras
|
keras/src/saving/saving_lib_test.py
|
{
"start": 3191,
"end": 3569
}
|
class ____(keras.Model):
    """Single-dense-layer model whose compile() is overridden as a pure
    pass-through to the base implementation.

    NOTE(review): relies on ``MyDense``, defined elsewhere in this file;
    the class name is masked as ``____`` in this snippet.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Single trainable layer; all calls delegate to it.
        self.dense1 = MyDense(1)

    def compile(self, *args, **kwargs):
        # Intentionally adds no behavior: the mere presence of the
        # override is what the surrounding code exercises.
        super().compile(*args, **kwargs)

    def call(self, inputs):
        return self.dense1(inputs)
@keras.saving.register_keras_serializable(package="my_custom_package")
|
CompileOverridingModel
|
python
|
huggingface__transformers
|
src/transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py
|
{
"start": 4368,
"end": 4572
}
|
class ____(LlamaDecoderLayer):
    """HunYuan dense decoder layer: identical to ``LlamaDecoderLayer``
    except that it additionally stores its layer index on the instance."""

    def __init__(self, config: HunYuanDenseV1Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # Kept on the instance explicitly; presumably consumed by other
        # HunYuan components — confirm against callers.
        self.layer_idx = layer_idx
|
HunYuanDenseV1DecoderLayer
|
python
|
graphql-python__graphene
|
examples/context_example.py
|
{
"start": 105,
"end": 698
}
|
class ____(graphene.ObjectType):
    """Root query type exposing the current user taken from the
    execution context.

    NOTE(review): the class name is masked as ``____`` in this snippet;
    ``graphene.Schema(query=Query)`` below expects it to be ``Query``.
    """

    me = graphene.Field(User)

    def resolve_me(root, info):
        # The user object is supplied via the `context` argument of
        # schema.execute() and surfaced here as info.context.
        return info.context["user"]


schema = graphene.Schema(query=Query)

# GraphQL document requesting the id/name of the context-provided user.
query = """
query something{
me {
id
name
}
}
"""


def test_query():
    """Executing with a context dict makes it available as info.context."""
    result = schema.execute(query, context={"user": User(id="1", name="Syrus")})
    assert not result.errors
    assert result.data == {"me": {"id": "1", "name": "Syrus"}}


if __name__ == "__main__":
    # Demo entry point: same query, different context user.
    result = schema.execute(query, context={"user": User(id="X", name="Console")})
    print(result.data["me"])
|
Query
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/util/concurrency.py
|
{
"start": 9240,
"end": 10314
}
|
class ____:
    """Asyncio util for test suite/ util only"""

    def __init__(self) -> None:
        # The runner is lazy, so constructing it eagerly here is cheap.
        self.runner = _Runner()

    def run(
        self,
        fn: Callable[..., Coroutine[Any, Any, _T]],
        *args: Any,
        **kwargs: Any,
    ) -> _T:
        """Execute the coroutine produced by ``fn`` on the managed loop."""
        coro = fn(*args, **kwargs)
        return self.runner.run(coro)

    def run_in_greenlet(
        self, fn: Callable[..., _T], *args: Any, **kwargs: Any
    ) -> _T:
        """Run a sync function in a greenlet; nested calls are supported."""
        _concurrency_shim._initialize(raise_=False)
        if not _concurrency_shim._has_greenlet:
            # greenlet is unavailable: fall back to a plain direct call.
            return fn(*args, **kwargs)
        if self.runner.get_loop().is_running():
            # Nested invocation from a wrapped test function: we must
            # already be inside a greenlet, so just call through.
            assert in_greenlet()
            return fn(*args, **kwargs)
        return self.runner.run(greenlet_spawn(fn, *args, **kwargs))

    def close(self) -> None:
        """Dispose of the underlying runner."""
        self.runner.close()
|
_AsyncUtil
|
python
|
ray-project__ray
|
rllib/utils/replay_buffers/replay_buffer.py
|
{
"start": 822,
"end": 2068
}
|
class ____(Enum):
    """Specifies how batches are structured in a ReplayBuffer.

    timesteps: One buffer slot per timestep.
    sequences: One buffer slot per sequence.
    episodes: One buffer slot per episode.
    fragments: One buffer slot per incoming batch.
    """

    TIMESTEPS = "timesteps"
    SEQUENCES = "sequences"
    EPISODES = "episodes"
    FRAGMENTS = "fragments"
@DeveloperAPI
def warn_replay_capacity(*, item: SampleBatchType, num_items: int) -> None:
    """Warn if the configured replay buffer capacity is too large.

    Args:
        item: A representative batch; its byte size is extrapolated to
            estimate the buffer's total memory footprint.
        num_items: Number of such items the buffer can hold.

    Raises:
        ValueError: If the estimated memory exceeds total system memory.
    """
    # NOTE(review): log_once gates the *entire* check, so the ValueError
    # can only fire the first time this key is seen per process.
    if log_once("replay_capacity"):
        item_size = item.size_bytes()
        psutil_mem = psutil.virtual_memory()
        total_gb = psutil_mem.total / 1e9  # SI gigabytes
        mem_size = num_items * item_size / 1e9  # estimated buffer footprint
        msg = (
            "Estimated max memory usage for replay buffer is {} GB "
            "({} batches of size {}, {} bytes each), "
            "available system memory is {} GB".format(
                mem_size, num_items, item.count, item_size, total_gb
            )
        )
        if mem_size > total_gb:
            raise ValueError(msg)
        elif mem_size > 0.2 * total_gb:
            # Fits in memory but would use >20% of it: loud warning.
            logger.warning(msg)
        else:
            logger.info(msg)
@DeveloperAPI
|
StorageUnit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.