language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py | {
"start": 12561,
"end": 13492
} | class ____(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
return hidden_state * self.lambda1
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
| Dinov2WithRegistersLayerScale |
python | pytorch__pytorch | torch/_inductor/codegen/cpp_wrapper_gpu.py | {
"start": 16865,
"end": 36612
} | class ____(CppWrapperCpu):
"""
Generates cpp wrapper for running on GPU and calls CUDA kernels
"""
def __init__(self) -> None:
self.device = get_gpu_type()
self.device_codegen = get_device_op_overrides(self.device)
super().__init__()
self.grid_id = count()
self._kernel_name_to_body: dict[str, str] = {}
self._triton_call_wrappers: dict[str, DeferredTritonCallWrapper] = {}
self.autotune_input_prefix = "_REAL_AUTOTUNE_INPUT"
@staticmethod
def create(
is_subgraph: bool,
subgraph_name: Optional[str],
parent_wrapper: Optional[PythonWrapperCodegen],
partition_signatures: Optional[GraphPartitionSignature] = None,
):
# TODO - support subgraph codegen by lifting functions. Check the
# comment at CppWrapperCpu `codegen_subgraph` function.
return CppWrapperGpu()
def write_header(self):
if V.graph.is_const_graph:
# We do not write header for constant graph, it will be written by main module.
return
super().write_header()
self.header.splice(
maybe_hipify_code_wrapper(self.device_codegen.kernel_driver())
)
@cache_on_self
def write_tma_descriptor_helpers_once(self):
self.header.splice(self.device_codegen.tma_descriptor_helpers())
def write_get_raw_stream(self, device_idx: int, graph_name: str) -> str:
name = f"stream{device_idx}"
self.writeline(
maybe_hipify_code_wrapper(
f"{self.device_codegen.cpp_stream_type()} {name};"
)
)
self.writeline(
f"AOTI_TORCH_ERROR_CODE_CHECK({self.device_codegen.aoti_get_stream()}({device_idx}, (void**)&{name}));"
)
return name
def get_autotuning_input_name(self, idx):
return f"{self.autotune_input_prefix}_{idx}"
def codegen_inputs(self):
# See Note: [Input Alignment handling in Inductor]
#
# JIT Inductor does not guard on input alignment. It relies on copy_misaligned_inputs to
# copy misaligned inputs to aligned buffers. For AOTInductor, we need to do the same in cpp.
if config.is_fbcode():
# TODO: This is added because FC. Remove this once the newly added shim symbols,
# e.g. aoti_torch_clone_preserve_strides, have landed
return super().codegen_inputs()
if V.graph.aot_mode and V.graph.inputs_to_check:
for idx in V.graph.inputs_to_check:
input_name = V.graph.graph_input_names[idx]
assert input_name in V.graph.graph_inputs, (
f"{input_name} not found in graph inputs"
)
value = V.graph.graph_inputs[input_name]
assert isinstance(value, TensorBox), (
f"{input_name} is expected to be tensor but found as {type(value)}"
)
warn_msg = (
f"Input {idx} was compiled as {GPU_ALIGN_BYTES}-bytes aligned, "
"but it is not aligned at run time. Copying to an aligned tensor "
"to guarantee correctness, but expect a performance hit."
)
self.prefix.splice(
f"""
if ((reinterpret_cast<std::uintptr_t>({input_name}.data_ptr()) & ({GPU_ALIGN_BYTES} -1)) != 0) {{
AOTI_TORCH_WARN("{warn_msg}");
AtenTensorHandle {input_name}_aligned;
aoti_torch_clone_preserve_strides({input_name}, &{input_name}_aligned);
{input_name} = std::move(RAIIAtenTensorHandle({input_name}_aligned));
}}
"""
)
super().codegen_inputs()
def _define_kernel_helper(
self,
kernel_name: str,
kernel_body: str,
metadata: Optional[str] = None,
gpu: bool = True,
cpp_definition: Optional[str] = None,
):
if gpu:
self._kernel_name_to_body[kernel_name] = kernel_body
if config.triton.autotune_at_compile_time:
# Call PythonWrapperCodegen to create the autotune code block
PythonWrapperCodegen._define_kernel_helper(
self, kernel_name, kernel_body, metadata, gpu, cpp_definition
)
else:
return CppWrapperCpu._define_kernel_helper(
self, kernel_name, kernel_body, metadata, gpu, cpp_definition
)
def generate(self, is_inference):
with dynamo_timed("CppWrapperGpu.generate", log_pt2_compile_event=True):
return super().generate(is_inference)
def finalize_prefix(self):
"""Define the triton kernels now that autotuning is finished"""
old_prefix = self.prefix # new content should go at start of prefix
# Generating triton kernel callers can modify the prefix (cached dtypes),
# so do this before running finalize_prefix(), but put the generated code
# after the finalize_prefix() code.
self.prefix = IndentedBuffer()
for kernel in self._triton_call_wrappers.values():
self.prefix.writeline("\n")
kernel.generate(self)
triton_prefix = self.prefix
self.prefix = IndentedBuffer()
super().finalize_prefix()
self.prefix.splice(triton_prefix)
self.prefix.writeline("\n")
self.prefix.splice(old_prefix)
def generate_tma_descriptor(self, desc):
self.write_tma_descriptor_helpers_once()
if isinstance(desc, TMADescriptorExperimental):
self._generate_experimental_tma_descriptor(desc)
else:
assert isinstance(desc, TMADescriptorStable)
self._generate_stable_tma_descriptor(desc)
def _generate_experimental_tma_descriptor(self, desc):
# generate data pointer for the source tensor
source = self.generate_args_decl(
code=self,
call_args=[self.val_to_arg_str(desc.tensor)],
arg_types=[desc.tensor.get_dtype()],
arg_signatures=[None],
# these args are passed to initNDTMADescriptor, which is NOT a triton kernel
is_triton_kernel=False,
)
desc_name = desc.name
self.writeline(f"alignas(64) CUtensorMap {desc_name};")
# `source` is in the form of `&var_x`, where `var_x` is the data pointer
# (CUdeviceptr); we dereference `source` and cast to `void*` to pass to
# the data pointer of the source tensor to the helper function
# `init{1,2}DTMADescriptor`
ptr = f"reinterpret_cast<void*>(*({source}))"
dims = ", ".join(self.val_to_arg_str(dim) for dim in desc.dims)
block_dims = ", ".join(self.val_to_arg_str(dim) for dim in desc.block_dims)
element_size = self.val_to_arg_str(desc.element_size)
fn = f"init{desc.rank}DTMADescriptor"
args = f"&{desc_name}, {ptr}, {dims}, {block_dims}, {element_size}"
self.writeline(f"{fn}({args});")
def _generate_stable_tma_descriptor(self, desc):
source = self.generate_args_decl(
code=self,
call_args=[self.val_to_arg_str(desc.tensor)],
arg_types=[desc.tensor.get_dtype()],
arg_signatures=[None],
# these args are passed to initNDTMADescriptor, which is NOT a triton kernel
is_triton_kernel=False,
)
desc_name = desc.name
# Pack the relevant information into a StableTMADescriptor struct.
# See [Note: AOTI TMA Stable handling] for more details.
self.writeline(f"alignas(64) StableTMADescriptor {desc_name};")
def fill_array(name, values):
for i, val in enumerate(values):
self.writeline(f"{name}[{i}] = {val};")
ptr = f"reinterpret_cast<void*>(*({source}))"
rank = len(desc.tensor.get_size())
fill_array(f"{desc_name}.block_shape", desc.block_shape)
fill_array(f"{desc_name}.global_shape", desc.tensor.get_size())
fill_array(f"{desc_name}.strides", desc.tensor.get_stride())
element_size = self.val_to_arg_str(desc.tensor.get_dtype().itemsize)
fn = "initTMADescriptor"
args = ", ".join(
str(x)
for x in [
f"&{desc_name}.m",
ptr,
element_size,
rank,
f"{desc_name}.block_shape",
f"{desc_name}.global_shape",
f"{desc_name}.strides",
]
)
self.writeline(f"{fn}({args});")
def generate_args_decl(
self,
code: Union[IndentedBuffer, Self],
call_args,
arg_types,
arg_signatures,
is_triton_kernel=True,
scratch_spaces: Optional[dict[str, int]] = None,
):
"""
Generates any declarations of args to pass into a kernel call, and then returns the arg names.
In more detail:
* declarations: e.g. this function has a side effect of generating lines like `auto var_0 = ...;`
* returns: a string with the list of args, e.g. "var_0, var_1"
call_args: list of call arguments
arg_types: list of argument types
arg_signatures: list with signatures of all the args
is_triton_kernel: whether these are passed into a triton kernel or not. In particular,
calls to triton kernels will have an additional global scratch space
arg injected at the front of the arg list.
"""
new_args: list[str] = []
# Add more cases for other types as needed
signature2dtype = {
"i32": "int32_t",
"i64": "int64_t",
"fp32": "float",
}
def signature_is_tma_desc(sig):
if not sig:
return False
if sig == "nvTmaDesc":
return True
if sig.startswith("tensordesc<"):
return True
return False
def process_tma_stable_arg(arg, arg_type, arg_signature, var_name):
# [Note: AOTI TMA Stable handling]
# For most args, a single arg passed to the python triton interface
# maps to a single arg in the cubin interface. However, for host-side
# TMA descriptors, a single python arg turns into 1 + 2 * N args in the
# cubin interface (where N is the rank).
#
# To do this: at TMA codegen time (for aoti), we generate a struct
# (StableTMADescriptor) containing the necessary information; and then
# when we call the function (i.e. here), we unpack the struct members.
code.writeline(f"auto {var_name} = {cexpr(arg)};")
result = []
result.append(f"&{var_name}.m")
# from https://github.com/triton-lang/triton/blob/16961b79bdac1b774b42d44e52fd55a266ec2866/third_party/nvidia/backend/driver.py#L111 # noqa: B950
match = re.match("tensordesc<([^[>]*)\\[([^]]*)\\]", arg_signature)
assert match is not None
shape = match.group(2)
ndim = shape.count(",") + 1
for i in range(ndim):
result.append(f"&{var_name}.block_shape[{i}]")
for i in range(ndim):
result.append(f"&{var_name}.strides[{i}]")
return result
def process_args(arg, arg_type, arg_signature=None):
var_name = f"var_{next(self.arg_var_id)}"
# ignore tma descriptors, as host-side TMA descriptors need
# to be passed to the compiled Triton kernel by value
if isinstance(arg_type, UnwrapUnspecArg) and not signature_is_tma_desc(
arg_signature
):
self.codegen_tensor_item(
arg_type.dtype,
arg,
var_name,
indented_buffer=code,
)
new_args.append(f"&{var_name}")
elif isinstance(arg_type, torch_dtype) and not signature_is_tma_desc(
arg_signature
):
device_ptr_type = self.device_codegen.cpp_device_ptr()
code.writeline(
maybe_hipify_code_wrapper(
f"{device_ptr_type} {var_name} = reinterpret_cast<{device_ptr_type}>({arg}.data_ptr());"
)
)
new_args.append(f"&{var_name}")
elif arg_type in (sympy.Integer, int):
code.writeline(f"int {var_name} = {cexpr(arg)};")
new_args.append(f"&{var_name}")
elif arg_type in (sympy.Float, float):
code.writeline(f"float {var_name} = {cexpr(arg)};")
new_args.append(f"&{var_name}")
# For symbolic call arguments, examine the arg signatures from triton meta
# to explicitly cast to the right type
# Reason: `auto` can infer unexpected type against kernel input signature.
elif (
isinstance(arg_type, type(SymbolicCallArg))
and arg_signature is not None
and arg_signature in signature2dtype
):
code.writeline(
f"{signature2dtype[arg_signature]} {var_name} = {cexpr(arg)};"
)
new_args.append(f"&{var_name}")
elif arg_signature and arg_signature.startswith("tensordesc<"):
new_args.extend(
process_tma_stable_arg(arg, arg_type, arg_signature, var_name)
)
else:
code.writeline(f"auto {var_name} = {cexpr(arg)};")
new_args.append(f"&{var_name}")
for arg, arg_type, arg_signature in zip_longest(
call_args, arg_types, arg_signatures
):
process_args(arg, arg_type, arg_signature)
for scratch_name, workspace_size in (scratch_spaces or {}).items():
if (
is_triton_kernel
and (
scratch := self.device_codegen.cpp_scratch(
next(self.arg_var_id),
workspace=TritonScratchWorkspace(
size=workspace_size,
generate_dtype_str=(
lambda: self.codegen_dtype(torch.uint8)
),
),
prefix=scratch_name,
)
)
is not None
):
scratch_def, scratch_var = scratch
code.writelines([maybe_hipify_code_wrapper(x) for x in scratch_def])
new_args.append(f"&{scratch_var}")
return ", ".join(new_args)
def _generate_kernel_call_helper(
self,
kernel_name: str,
call_args,
*,
device=None,
triton=True,
arg_types=None,
raw_keys=None,
raw_args=None,
triton_meta=None,
graph_name="",
original_fxnode_name=None,
):
"""
Override the default value of argument 'gpu' to True here.
generate_kernel_call can still be called with gpu=False because of
a mix of cpu kernels and gpu kernels.
"""
device = device or V.graph.get_current_device_or_throw()
if device.type == "cpu":
# Even in CppWrapperGpu, we may see cpp kernels
return CppWrapperCpu._generate_kernel_call_helper(
self,
kernel_name,
call_args,
device=device,
triton=triton,
arg_types=arg_types,
raw_keys=raw_keys,
raw_args=raw_args,
triton_meta=triton_meta,
)
if (
triton
and config.triton.autotune_at_compile_time
and kernel_name not in self.kernel_autotune_names
):
# Call PythonWrapperCodegen to create the autotune code block
PythonWrapperCodegen._generate_kernel_call_helper(
self,
kernel_name,
call_args,
device=device,
triton=triton,
arg_types=arg_types,
raw_keys=raw_keys,
raw_args=raw_args,
triton_meta=triton_meta,
original_fxnode_name=original_fxnode_name,
)
stream = (
"stream"
if V.graph.aot_mode
else self.write_get_raw_stream(device.index, graph_name)
)
if triton:
call_args, arg_types = self.prepare_triton_wrapper_args(
call_args,
# pyrefly: ignore [bad-argument-type]
arg_types,
)
wrapper_name = f"call_{kernel_name}"
if wrapper_name not in self._triton_call_wrappers:
self._triton_call_wrappers[wrapper_name] = DeferredTritonCallWrapper(
wrapper_name,
kernel_name,
self._kernel_name_to_body,
arg_types,
)
device_idx = "this->device_idx_" if V.graph.aot_mode else str(device.index)
call_args.append(device_idx)
call_args.append(stream)
if V.graph.aot_mode:
call_args.append("kernels")
call_args.append("this->cubin_dir_")
debug_printer_manager = V.graph.wrapper_code.debug_printer
debug_printer_manager.set_printer_args(
call_args[: len(arg_types)], kernel_name, arg_types, None
)
with debug_printer_manager:
self.writeline(f"{wrapper_name}({', '.join(call_args)});")
else:
casted = []
# pyrefly: ignore [no-matching-overload]
for arg_type, arg in zip(arg_types, call_args):
new_arg = arg
if arg_type.endswith("*") and arg != "nullptr":
new_arg = f"{arg}.data_ptr()"
# pyrefly: ignore [bad-argument-type]
casted.append(f"({arg_type}){cexpr(new_arg)}")
call_args_str = ", ".join(casted)
self.writeline(f"kernels.{kernel_name}({call_args_str}, {stream});")
@staticmethod
def prepare_triton_wrapper_args(
call_args: list[Any], arg_types: list[Any]
) -> tuple[list[Any], list[Any]]:
assert len(call_args) == len(arg_types), (call_args, arg_types)
new_args = []
new_args_types = []
for arg, arg_type in zip(call_args, arg_types):
if isinstance(arg, str):
if isinstance(arg_type, torch_dtype) and should_unwrap_unspec_arg(arg):
# dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar
arg_type = UnwrapUnspecArg(dtype=arg_type)
new_args.append(arg)
elif isinstance(arg, bool):
new_args.append(str(arg).lower())
elif isinstance(arg, (int, float, SymbolicCallArg)):
new_args.append(str(arg))
else:
new_args.append(cexpr(V.graph.sizevars.simplify(arg)))
new_args_types.append(arg_type)
return new_args, new_args_types
def make_zero_buffer(self, name):
return f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_zero_({name}.get()));"
@dataclasses.dataclass
| CppWrapperGpu |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_xml_parseable.py | {
"start": 1035,
"end": 1823
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.xml_parseable"
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
def is_xml(val):
try:
etree.fromstring(val)
return True
except Exception:
return False
return column.map(is_xml)
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, **kwargs):
def is_xml(val):
try:
etree.fromstring(val)
return True
except Exception:
return False
is_xml_udf = F.udf(is_xml, pyspark.types.BooleanType())
return is_xml_udf(column)
| ColumnValuesXmlParseable |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-koda-retriever/llama_index/packs/koda_retriever/matrix.py | {
"start": 73,
"end": 3904
} | class ____(BaseModel):
"""
This class is not necessary to understand to use a KodaRetriever - as it will be automatically instantiated if a dictionary is provided.
Pydantic class to enforce the required fields for a KodaRetriever
Its best to just instantiate this using a dictionary, don't both trying to instantiate by declaring any AlphaCategory objects.
Example:
>>> data = {
"normal query": { # examples is not required if you aren't using few-shot auto-routing
"alpha": .5
, "description": "This is a normal query" # desc is not required if you aren't using few-shot auto-routing
, "examples": ["This is a normal query", "Another normal query"]
}
}
>>> matrix = AlphaMatrix(data=data) # arg must be named matrix for the retriever to use it
"""
class AlphaCategory(BaseModel):
"""
Subclass to enforce the required fields for a category in the AlphaMatrix - necessary for nesting in the AlphaMatrix class
You should not have to really touch this, as it is only used for type checking and validation.
"""
alpha: float
description: Optional[str] = (
None # optional if providing a custom LLM, its presumed this was part of your training data for the custom model
)
examples: Optional[List[str]] = (
None # if not providing a custom model, this is required
)
data: Dict[str, AlphaCategory]
def get_alpha(self, category: str) -> float:
"""Simple helper function to get the alpha value for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).alpha # type: ignore
def get_examples(self, category: str) -> List[str]:
"""Simple helper function to get the examples for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).examples # type: ignore
def get_description(self, category: str) -> str:
"""Simple helper function to get the description for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).description # type: ignore
def get_categories(self) -> list:
"""Simple helper function to get the categories for a given category."""
return list(self.data.keys())
def format_category(self, category: str) -> str:
"""Simple helper function to format the category information for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
description = self.get_description(category)
examples = self.get_examples(category)
category_info = f"""
- {category}:
description: {description}
""".strip()
if examples:
examples = "; ".join(examples)
example_info = f"""
examples:
{examples}
"""
category_info = f"{category_info}\n{example_info}"
return category_info
def get_all_category_info(self) -> str:
"""Simple helper function to get the category information for all categories."""
categories = []
for category in self.get_categories():
category_info = self.format_category(category)
categories.append(category_info)
return "\n".join(categories)
| AlphaMatrix |
python | pytorch__pytorch | torch/cuda/memory.py | {
"start": 1238,
"end": 1451
} | class ____(TypedDict):
"""Memory segment information."""
address: int
total_size: int
stream: int
segment_type: str
allocated_size: int
active_size: int
blocks: list[_Block]
| _Segment |
python | pytorch__pytorch | test/jit/test_data_parallel.py | {
"start": 444,
"end": 5633
} | class ____(JitTestCase):
class Mpy(torch.nn.Module):
def __init__(self) -> None:
super(TestDataParallel.Mpy, self).__init__()
self.m = nn.Sequential(
nn.Linear(2, 2), nn.BatchNorm1d(2), nn.ReLU(), nn.Linear(2, 2)
)
@torch.jit.ignore
def forward(self, input):
return self.m(input)
class Mpy1(torch.nn.Module):
def __init__(self, block):
super(TestDataParallel.Mpy1, self).__init__()
self.m = block
@torch.jit.ignore
def forward(self, input):
return self.m.forward(input)
class Mpy2(torch.nn.Module):
def __init__(self, block1, block2):
super(TestDataParallel.Mpy2, self).__init__()
self.m1 = block1
self.m2 = block2
@torch.jit.ignore
def forward(self, input):
x = self.m1.forward(input)
return self.m2(x)
class Msm(torch.jit.ScriptModule):
__constants__ = ["m"]
def __init__(self) -> None:
super(TestDataParallel.Msm, self).__init__()
self.m = nn.Sequential(
nn.Linear(2, 2), nn.BatchNorm1d(2), nn.ReLU(), nn.Linear(2, 2)
)
@torch.jit.script_method
def forward(self, input):
return self.m(input)
class Msm1(torch.jit.ScriptModule):
def __init__(self, block):
super(TestDataParallel.Msm1, self).__init__()
self.block = block
@torch.jit.script_method
def forward(self, input):
x = self.block(input)
return x
def check_replicas(self, module, replicas, input_shape=(2, 2)):
input = torch.randn(input_shape).cuda()
expected_output = module(input).data
for i, replica in enumerate(replicas):
for p in replica.parameters():
self.assertEqual(p.get_device(), i)
for b in replica.buffers():
self.assertEqual(b.get_device(), i)
replica_input = input.cuda(i)
self.assertEqual(replica(replica_input).data, expected_output)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "multi-GPU not supported")
def test_python_submodule_script(self):
module = self.Mpy1(self.Msm()).cuda()
replicas = dp.replicate(module, {0, 1})
self.check_replicas(module, replicas)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "multi-GPU not supported")
def test_shared_module(self):
s = self.Msm()
p1 = self.Mpy1(s)
module = self.Mpy2(p1, s).cuda()
replicas = dp.replicate(module, {0, 1})
self.check_replicas(module, replicas)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "multi-GPU not supported")
def test_traced_module(self):
module = torch.jit.trace(self.Mpy1(self.Mpy()), torch.ones(2, 2)).cuda()
replicas = dp.replicate(module, {0, 1})
self.check_replicas(module, replicas)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "multi-GPU not supported")
def test_tensor_sharing(self):
module = self.Msm1(self.Msm()).cuda()
replica = dp.replicate(module, {0, 1})
def assert_share_data(t1, t2):
# Only checks that they point to the same memory on the same device.
return (
t1.device == t2.device
and t1.storage().data_ptr() == t2.storage().data_ptr()
)
for p1, p2 in zip(module.parameters(), replica[0].parameters()):
self.assertTrue(assert_share_data(p1, p2))
for p1, p2 in zip(module.buffers(), replica[0].buffers()):
self.assertTrue(assert_share_data(p1, p2))
for p1, p2 in zip(module.parameters(), replica[1].parameters()):
self.assertFalse(assert_share_data(p1, p2))
for p1, p2 in zip(module.buffers(), replica[1].buffers()):
self.assertFalse(assert_share_data(p1, p2))
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "multi-GPU not supported")
def test_tensor_sharing_with_forward(self):
module = self.Msm1(self.Msm()).cuda()
replica = dp.replicate(module, {0, 1})
x = torch.ones(2, 2, requires_grad=True).cuda()
first_forward = module(x)
first_forward.sum().backward()
with torch.no_grad():
for p in module.parameters():
# Use .data here to avoid version counter bump.
# The graph created by the following forward will be wrong but
# we never backward through them so it's fine
p.data -= 1.0 * p.grad
second_forward = module(x)
# replica which is on the same GPU has a shallow copy of the original
# params and buffers
r0_forward = replica[0](x)
self.assertEqual(second_forward, r0_forward)
# replica which is on a different GPU has a deep copy of the original
# params and buffers
x1 = torch.ones(2, 2, requires_grad=True).cuda(device=1)
r1_forward = replica[1](x1)
self.assertEqual(first_forward, r1_forward)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestDataParallel |
python | tensorflow__tensorflow | tensorflow/python/training/saver_test.py | {
"start": 70147,
"end": 72786
} | class ____(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def assertCheckpointState(self, model_checkpoint_path,
all_model_checkpoint_paths, save_dir):
checkpoint_state = checkpoint_management.get_checkpoint_state(save_dir)
self.assertEqual(checkpoint_state.model_checkpoint_path,
model_checkpoint_path)
self.assertEqual(checkpoint_state.all_model_checkpoint_paths,
all_model_checkpoint_paths)
def test_recover_last_checkpoints(self):
with context.eager_mode():
save_dir = self._get_test_dir("recover_last_checkpoints")
v = variable_v1.VariableV1(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=10)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([], save.last_checkpoints)
s1 = save.save(None, os.path.join(save_dir, "ckpt-1"))
s2 = save.save(None, os.path.join(save_dir, "ckpt-2"))
s3 = save.save(None, os.path.join(save_dir, "ckpt-3"))
self.assertEqual([s1, s2, s3], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s1, s2, s3],
save_dir=save_dir)
# Create another saver and recover last checkpoints.
save2 = saver_module.Saver({"v": v}, max_to_keep=10)
self.assertEqual([], save2.last_checkpoints)
save2.recover_last_checkpoints([s1, s2, s3])
self.assertEqual([s1, s2, s3], save2.last_checkpoints)
# Remove a checkpoint and check that last checkpoints are
# restored correctly.
for fname in gfile.Glob("{}*".format(s1)):
gfile.Remove(fname)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
# Create another saver and recover last checkpoints. The removed
# checkpoint would be correctly omitted.
save3 = saver_module.Saver({"v": v}, max_to_keep=10)
self.assertEqual([], save3.last_checkpoints)
save3.recover_last_checkpoints([s1, s2, s3])
self.assertEqual([s2, s3], save3.last_checkpoints)
s4 = save3.save(None, os.path.join(save_dir, "ckpt-4"))
self.assertCheckpointState(
model_checkpoint_path=s4,
all_model_checkpoint_paths=[s2, s3, s4],
save_dir=save_dir)
| RecoverLastCheckpointsTest |
python | walkccc__LeetCode | solutions/1958. Check if Move is Legal/1958.py | {
"start": 0,
"end": 703
} | class ____:
def checkMove(
self,
board: list[list[str]],
rMove: int,
cMove: int,
color: str,
) -> bool:
DIRS = ((-1, -1), (-1, 0), (-1, 1), (0, -1),
(0, 1), (1, -1), (1, 0), (1, 1))
for dx, dy in DIRS:
cellsCount = 2
i = rMove + dx
j = cMove + dy
while 0 <= i < 8 and 0 <= j < 8:
# There are no free cells in between.
if board[i][j] == '.':
break
# Need >= 3 cells.
if cellsCount == 2 and board[i][j] == color:
break
# >= 3 cells.
if board[i][j] == color:
return True
i += dx
j += dy
cellsCount += 1
return False
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/decl_api.py | {
"start": 7523,
"end": 9850
} | class ____:
def __init__(
self,
fn: Callable[..., Any],
cascading: bool = False,
quiet: bool = False,
):
# suppport
# @declared_attr
# @classmethod
# def foo(cls) -> Mapped[thing]:
# ...
# which seems to help typing tools interpret the fn as a classmethod
# for situations where needed
if isinstance(fn, classmethod):
fn = fn.__func__
self.fget = fn
self._cascading = cascading
self._quiet = quiet
self.__doc__ = fn.__doc__
def _collect_return_annotation(self) -> Optional[Type[Any]]:
return util.get_annotations(self.fget).get("return")
def __get__(self, instance: Optional[object], owner: Any) -> Any:
# the declared_attr needs to make use of a cache that exists
# for the span of the declarative scan_attributes() phase.
# to achieve this we look at the class manager that's configured.
# note this method should not be called outside of the declarative
# setup phase
cls = owner
manager = attributes.opt_manager_of_class(cls)
if manager is None:
if not re.match(r"^__.+__$", self.fget.__name__):
# if there is no manager at all, then this class hasn't been
# run through declarative or mapper() at all, emit a warning.
util.warn(
"Unmanaged access of declarative attribute %s from "
"non-mapped class %s" % (self.fget.__name__, cls.__name__)
)
return self.fget(cls)
elif manager.is_mapped:
# the class is mapped, which means we're outside of the declarative
# scan setup, just run the function.
return self.fget(cls)
# here, we are inside of the declarative scan. use the registry
# that is tracking the values of these attributes.
declarative_scan = manager.declarative_scan()
# assert that we are in fact in the declarative scan
assert declarative_scan is not None
reg = declarative_scan.declared_attr_reg
if self in reg:
return reg[self]
else:
reg[self] = obj = self.fget(cls)
return obj
| _declared_attr_common |
python | walkccc__LeetCode | solutions/1998. GCD Sort of an Array/1998.py | {
"start": 0,
"end": 536
} | class ____:
def __init__(self, n: int):
self.id = list(range(n))
self.rank = [0] * n
def unionByRank(self, u: int, v: int) -> None:
i = self.find(u)
j = self.find(v)
if i == j:
return False
if self.rank[i] < self.rank[j]:
self.id[i] = j
elif self.rank[i] > self.rank[j]:
self.id[j] = i
else:
self.id[i] = j
self.rank[j] += 1
return True
def find(self, u: int) -> int:
if self.id[u] != u:
self.id[u] = self.find(self.id[u])
return self.id[u]
| UnionFind |
python | ansible__ansible | lib/ansible/errors/__init__.py | {
"start": 9635,
"end": 9895
} | class ____(AnsibleTemplateError):
"""A broken conditional with non-boolean result was used."""
_default_help_text = 'Broken conditionals can be temporarily allowed with the `ALLOW_BROKEN_CONDITIONALS` configuration option.'
| AnsibleBrokenConditionalError |
python | pandas-dev__pandas | pandas/tests/io/formats/test_ipython_compat.py | {
"start": 112,
"end": 3162
} | class ____:
def test_publishes(self, ip):
ipython = ip.instance(config=ip.config)
df = DataFrame({"A": [1, 2]})
objects = [df["A"], df] # dataframe / series
expected_keys = [
{"text/plain", "application/vnd.dataresource+json"},
{"text/plain", "text/html", "application/vnd.dataresource+json"},
]
opt = cf.option_context("display.html.table_schema", True)
last_obj = None
for obj, expected in zip(objects, expected_keys):
last_obj = obj
with cf.option_context("display.html.table_schema", True):
# Can't reuse opt on all systems GH#58055
formatted = ipython.display_formatter.format(obj)
assert set(formatted[0].keys()) == expected
with_latex = cf.option_context("styler.render.repr", "latex")
with opt, with_latex:
formatted = ipython.display_formatter.format(last_obj)
expected = {
"text/plain",
"text/html",
"text/latex",
"application/vnd.dataresource+json",
}
assert set(formatted[0].keys()) == expected
def test_publishes_not_implemented(self, ip):
# column MultiIndex
# GH#15996
midx = MultiIndex.from_product([["A", "B"], ["a", "b", "c"]])
df = DataFrame(
np.random.default_rng(2).standard_normal((5, len(midx))), columns=midx
)
opt = cf.option_context("display.html.table_schema", True)
with opt:
formatted = ip.instance(config=ip.config).display_formatter.format(df)
expected = {"text/plain", "text/html"}
assert set(formatted[0].keys()) == expected
def test_config_on(self):
df = DataFrame({"A": [1, 2]})
with cf.option_context("display.html.table_schema", True):
result = df._repr_data_resource_()
assert result is not None
def test_config_default_off(self):
df = DataFrame({"A": [1, 2]})
with cf.option_context("display.html.table_schema", False):
result = df._repr_data_resource_()
assert result is None
def test_enable_data_resource_formatter(self, ip):
# GH#10491
formatters = ip.instance(config=ip.config).display_formatter.formatters
mimetype = "application/vnd.dataresource+json"
with cf.option_context("display.html.table_schema", True):
assert "application/vnd.dataresource+json" in formatters
assert formatters[mimetype].enabled
# still there, just disabled
assert "application/vnd.dataresource+json" in formatters
assert not formatters[mimetype].enabled
# able to re-set
with cf.option_context("display.html.table_schema", True):
assert "application/vnd.dataresource+json" in formatters
assert formatters[mimetype].enabled
# smoke test that it works
ip.instance(config=ip.config).display_formatter.format(cf)
| TestTableSchemaRepr |
python | django__django | tests/check_framework/test_4_0_compatibility.py | {
"start": 209,
"end": 1007
} | class ____(SimpleTestCase):
@override_settings(CSRF_TRUSTED_ORIGINS=["example.com"])
def test_invalid_url(self):
self.assertEqual(
check_csrf_trusted_origins(None),
[
Error(
"As of Django 4.0, the values in the CSRF_TRUSTED_ORIGINS "
"setting must start with a scheme (usually http:// or "
"https://) but found example.com. See the release notes for "
"details.",
id="4_0.E001",
)
],
)
@override_settings(
CSRF_TRUSTED_ORIGINS=["http://example.com", "https://example.com"],
)
def test_valid_urls(self):
self.assertEqual(check_csrf_trusted_origins(None), [])
| CheckCSRFTrustedOrigins |
python | PyCQA__pylint | tests/functional/u/unsubscriptable_value.py | {
"start": 1800,
"end": 2158
} | class ____(metaclass=MetaSubscriptable):
pass
SubscriptableClass[0]
SubscriptableClass()[0] # [unsubscriptable-object]
# functions are not subscriptable
def test(*args, **kwargs):
return args, kwargs
test()[0]
test[0] # [unsubscriptable-object]
# deque
from collections import deque
deq = deque(maxlen=10)
deq.append(42)
deq[0]
| SubscriptableClass |
python | ray-project__ray | doc/source/ray-core/doc_code/cgraph_nccl.py | {
"start": 1052,
"end": 1171
} | class ____:
def send(self, shape):
return torch.zeros(shape, device="cuda")
@ray.remote(num_gpus=1)
| GPUSender |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_ecs.py | {
"start": 4046,
"end": 5815
} | class ____(EcsBaseTestCase):
"""Test Base ECS Operator."""
@pytest.mark.parametrize("aws_conn_id", [None, NOTSET, "aws_test_conn"])
@pytest.mark.parametrize("region_name", [None, NOTSET, "ca-central-1"])
def test_initialise_operator(self, aws_conn_id, region_name):
"""Test initialize operator."""
op_kw = {"aws_conn_id": aws_conn_id, "region_name": region_name}
op_kw = {k: v for k, v in op_kw.items() if v is not NOTSET}
op = EcsBaseOperator(task_id="test_ecs_base", **op_kw)
assert op.aws_conn_id == (aws_conn_id if aws_conn_id is not NOTSET else "aws_default")
assert op.region_name == (region_name if region_name is not NOTSET else None)
@pytest.mark.parametrize("aws_conn_id", [None, NOTSET, "aws_test_conn"])
@pytest.mark.parametrize("region_name", [None, NOTSET, "ca-central-1"])
def test_initialise_operator_hook(self, aws_conn_id, region_name):
"""Test initialize operator."""
op_kw = {"aws_conn_id": aws_conn_id, "region_name": region_name}
op_kw = {k: v for k, v in op_kw.items() if v is not NOTSET}
op = EcsBaseOperator(task_id="test_ecs_base", **op_kw)
assert op.hook.aws_conn_id == (aws_conn_id if aws_conn_id is not NOTSET else "aws_default")
assert op.hook.region_name == (region_name if region_name is not NOTSET else None)
with mock.patch.object(EcsBaseOperator, "hook", new_callable=mock.PropertyMock) as m:
mocked_hook = mock.MagicMock(name="MockHook")
mocked_client = mock.MagicMock(name="Mocklient")
mocked_hook.conn = mocked_client
m.return_value = mocked_hook
assert op.client == mocked_client
m.assert_called_once()
| TestEcsBaseOperator |
python | django__django | tests/postgres_tests/fields.py | {
"start": 1769,
"end": 1897
} | class ____(models.IntegerField):
def get_placeholder(self, value, compiler, connection):
return "(%s + 1)"
| OffByOneField |
python | PrefectHQ__prefect | tests/server/orchestration/test_core_policy.py | {
"start": 53129,
"end": 55021
} | class ____:
"""Ensure that only scheduled flow runs are marked late"""
@pytest.mark.parametrize(
"intended_transition",
[
(StateType.RUNNING, StateType.SCHEDULED),
(StateType.PENDING, StateType.SCHEDULED),
(StateType.COMPLETED, StateType.SCHEDULED),
(StateType.FAILED, StateType.SCHEDULED),
(StateType.CANCELLING, StateType.SCHEDULED),
],
ids=transition_names,
)
async def test_reject_marking_states_other_than_scheduled_as_late(
self,
session,
run_type,
initialize_orchestration,
intended_transition,
):
ctx = await initialize_orchestration(
session, run_type, *intended_transition, proposed_state_name="Late"
)
state_protection = EnsureOnlyScheduledFlowsMarkedLate(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.REJECT
@pytest.mark.parametrize(
"intended_transition",
[
(StateType.SCHEDULED, StateType.SCHEDULED),
],
ids=transition_names,
)
async def test_scheduled_to_late_transition_is_accepted(
self,
session,
run_type,
initialize_orchestration,
intended_transition,
):
ctx = await initialize_orchestration(
session,
run_type,
*intended_transition,
proposed_state_name="Late",
)
state_protection = EnsureOnlyScheduledFlowsMarkedLate(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
@pytest.mark.parametrize("run_type", ["flow"])
| TestEnsureOnlyScheduledFlowMarkedLate |
python | getsentry__sentry | src/sentry/hybridcloud/outbox/base.py | {
"start": 2937,
"end": 5797
} | class ____(BaseManager[_RM]):
"""
Provides bulk update and delete methods that respect outbox creation.
"""
def bulk_create(self, objs: Iterable[_RM], *args: Any, **kwds: Any) -> list[_RM]:
from sentry.hybridcloud.models.outbox import outbox_context
tuple_of_objs: tuple[_RM, ...] = tuple(objs)
if not tuple_of_objs:
return super().bulk_create(tuple_of_objs, *args, **kwds)
model: type[_RM] = type(tuple_of_objs[0])
using = router.db_for_write(model)
assert not uses_snowflake_id(model), "bulk_create cannot work for snowflake models!"
with outbox_context(transaction.atomic(using=using), flush=False):
with connections[using].cursor() as cursor:
cursor.execute(
"SELECT nextval(%s) FROM generate_series(1,%s);",
[f"{model._meta.db_table}_id_seq", len(tuple_of_objs)],
)
ids = [i for i, in cursor.fetchall()]
outboxes: list[RegionOutboxBase] = []
for row_id, obj in zip(ids, tuple_of_objs):
obj.id = row_id
outboxes.append(obj.outbox_for_update())
type(outboxes[0]).objects.bulk_create(outboxes)
return super().bulk_create(tuple_of_objs, *args, **kwds)
def bulk_update(
self, objs: Iterable[_RM], fields: Iterable[str], *args: Any, **kwds: Any
) -> Any:
from sentry.hybridcloud.models.outbox import outbox_context
tuple_of_objs: tuple[_RM, ...] = tuple(objs)
if not tuple_of_objs:
return super().bulk_update(tuple_of_objs, fields, *args, **kwds)
model: type[_RM] = type(tuple_of_objs[0])
using = router.db_for_write(model)
with outbox_context(transaction.atomic(using=using), flush=False):
outboxes: list[RegionOutboxBase] = []
for obj in tuple_of_objs:
outboxes.append(obj.outbox_for_update())
type(outboxes[0]).objects.bulk_create(outboxes)
return super().bulk_update(tuple_of_objs, fields, *args, **kwds)
def bulk_delete(self, objs: Iterable[_RM]) -> tuple[int, Mapping[str, int]]:
from sentry.hybridcloud.models.outbox import outbox_context
tuple_of_objs: tuple[_RM, ...] = tuple(objs)
if not tuple_of_objs:
return 0, {}
model: type[_RM] = type(tuple_of_objs[0])
using = router.db_for_write(model)
with outbox_context(transaction.atomic(using=using), flush=False):
outboxes: list[RegionOutboxBase] = []
for obj in tuple_of_objs:
outboxes.append(obj.outbox_for_update())
type(outboxes[0]).objects.bulk_create(outboxes)
return self.filter(id__in={o.id for o in tuple_of_objs}).delete()
| RegionOutboxProducingManager |
python | fluentpython__example-code-2e | 23-descriptor/bulkfood/bulkfood_v3.py | {
"start": 1279,
"end": 1612
} | class ____:
weight = Quantity('weight') # <1>
price = Quantity('price') # <2>
def __init__(self, description, weight, price): # <3>
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# end::LINEITEM_V3[]
| LineItem |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 7440,
"end": 7883
} | class ____(_Permission[RolesAction]):
role: str
scope: Optional[str] = None
def _to_weaviate(self) -> List[WeaviatePermission]:
roles: PermissionRoles = {"role": self.role}
if self.scope is not None:
roles["scope"] = self.scope
return [
{
"action": action,
"roles": roles,
}
for action in self.actions
]
| _RolesPermission |
python | encode__django-rest-framework | tests/test_permissions.py | {
"start": 19540,
"end": 19642
} | class ____(PermissionInstanceView):
permission_classes = (BasicPermWithDetail,)
| DeniedViewWithDetail |
python | doocs__leetcode | solution/0100-0199/0109.Convert Sorted List to Binary Search Tree/Solution.py | {
"start": 343,
"end": 810
} | class ____:
def sortedListToBST(self, head: Optional[ListNode]) -> Optional[TreeNode]:
def dfs(i: int, j: int) -> Optional[TreeNode]:
if i > j:
return None
mid = (i + j) >> 1
l, r = dfs(i, mid - 1), dfs(mid + 1, j)
return TreeNode(nums[mid], l, r)
nums = []
while head:
nums.append(head.val)
head = head.next
return dfs(0, len(nums) - 1)
| Solution |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/test_backend_name.py | {
"start": 778,
"end": 1223
} | class ____(enum.Enum):
"""DTensor backend the test is being run on."""
UNSPECIFIED = 'unspecified'
CPU = 'cpu'
GPU = 'gpu'
GPU_2DEVS_BACKEND = '2gpus'
TPU = 'tpu'
TPU_STREAM_EXECUTOR = 'tpu_se'
TPU_V3_DONUT_BACKEND = 'tpu_v3_2x2'
TPU_V4_DONUT_BACKEND = 'tpu_v4_2x2'
DTENSOR_TEST_UTIL_BACKEND = DTensorTestUtilBackend(
os.getenv('DTENSOR_TEST_UTIL_BACKEND', default='unspecified')
)
# LINT.ThenChange()
| DTensorTestUtilBackend |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fragments.py | {
"start": 1122,
"end": 1226
} | class ____(GQLResult):
edges: List[ArtifactCollectionFragmentTagsEdges]
| ArtifactCollectionFragmentTags |
python | dagster-io__dagster | examples/assets_pandas_type_metadata/assets_pandas_type_metadata/lib.py | {
"start": 1561,
"end": 6046
} | class ____(pa.DataFrameModel):
"""Anomalous price events, defined by a day on which a stock's closing price strayed above or
below its Bollinger bands.
"""
date: Series[pd.Timestamp] = pa.Field(description="Date of price event")
name: Series[str] = pa.Field(description="Ticker symbol of stock")
event: Series[pd.CategoricalDtype] = pa.Field(description="Type of event: 'high' or low'")
AnomalousEventsDgType = pandera_schema_to_dagster_type(AnomalousEvents)
# ****************************************************************************
# ***** FUNCTIONS ************************************************************
DATA_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data"))
SP500_CSV_URL = "https://raw.githubusercontent.com/plotly/datasets/master/all_stocks_5yr.csv"
def normalize_path(path: str) -> str:
return path if path[0] == "/" else os.path.join(DATA_ROOT, path)
def download_file(url: str, path: str):
"""Download a file from a URL to a local path. If relative path, will be resolved relative to `DATA_ROOT`."""
path = normalize_path(path)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as f:
f.write(requests.get(url).content)
def load_prices_csv(path: str) -> pd.DataFrame:
"""Load a CSV file containing stock prices. CSV should conform to the schema in the
`StockPrices` pandera schema above. If relative path, will be resolved relative to
`DATA_ROOT`.
"""
path = normalize_path(path)
df = cast("pd.DataFrame", pd.read_csv(path, parse_dates=["date"]))
df = df.rename(columns={"Name": "name"})
df = df.dropna()
return df
def load_sp500_prices(download: bool = True) -> pd.DataFrame:
path = normalize_path("all_stocks_5yr.csv")
if not os.path.exists(path):
if download:
download_file(SP500_CSV_URL, path)
else:
raise FileNotFoundError(f"{path} not found")
return load_prices_csv(path)
def compute_bollinger_bands(
df: pd.DataFrame, rate: int = 30, sigma: float = 2.0, dropna=True
) -> pd.DataFrame:
"""Compute Bollinger bands for a single stock over time. The dataframe passed in here should be
represent a single timeseries.
"""
price = df["close"]
rma = price.rolling(window=rate).mean()
rstd = price.rolling(window=rate).std()
upper = rma + sigma * rstd
lower = rma - sigma * rstd
odf = pd.DataFrame({"name": df["name"], "date": df["date"], "upper": upper, "lower": lower})
if dropna:
odf = odf.dropna()
return odf
def compute_bollinger_bands_multi(
df: pd.DataFrame, dropna: bool = True, rate: int = 30, sigma: float = 2.0
):
"""Compute Bollinger bands for a set of stocks over time. The input dataframe can contain
multiple timeseries grouped by the `name` column.
"""
odf = df.groupby("name", group_keys=False).apply(
lambda idf: compute_bollinger_bands(idf, dropna=False, rate=rate, sigma=sigma),
)
return odf.dropna().reset_index() if dropna else odf
EVENT_TYPE = pd.CategoricalDtype(["high", "low"], ordered=False)
def compute_anomalous_events(df_prices: pd.DataFrame, df_bollinger: pd.DataFrame):
"""Compute anomalous (high or low) price events for a set of stocks over time."""
df = pd.concat([df_prices, df_bollinger.add_prefix("bol_")], axis=1)
df["event"] = pd.Series(None, index=df.index, dtype=EVENT_TYPE)
df["event"][df["close"] > df["bol_upper"]] = "high"
df["event"][df["close"] < df["bol_lower"]] = "low"
return df[df["event"].notna()][["name", "date", "event"]].reset_index()
# ****************************************************************************
# ***** VISUALIZATIONS *******************************************************
def plot_sample_bollinger_bands(df_prices, df_bollinger):
df = pd.concat([df_prices, df_bollinger.add_prefix("bol_")], axis=1)
plt.figure(figsize=(16, 30))
for i, n in enumerate(df["name"].unique()[:5]):
bdf = df[df["name"] == n]
plt.subplot(5, 1, i + 1)
plt.title(n)
plt.plot(bdf["close"])
plt.plot(bdf["bol_upper"])
plt.plot(bdf["bol_lower"])
def plot_sample_anonymous_events(df_anom):
top_20 = df_anom.groupby("name").size().sort_values(ascending=False)[:20].index.to_list()
plt.figure(figsize=(16, 6))
df = df_anom[df_anom.name.isin(top_20)]
sns.stripplot(x="name", y="date", data=df, hue="event")
| AnomalousEvents |
python | ansible__ansible | lib/ansible/_internal/_datatag/_wrappers.py | {
"start": 149,
"end": 1137
} | class ____(ObjectProxy):
"""
Janky proxy around IOBase to allow streams to carry tags and support basic interrogation by the tagging API.
Most tagging operations will have undefined behavior for this type.
"""
_self__ansible_tags_mapping: _datatag._AnsibleTagsMapping
def __init__(self, stream: io.IOBase, tags: _datatag.AnsibleDatatagBase | _t.Iterable[_datatag.AnsibleDatatagBase]) -> None:
super().__init__(stream)
tag_list: list[_datatag.AnsibleDatatagBase]
# noinspection PyProtectedMember
if type(tags) in _datatag._known_tag_types:
tag_list = [tags] # type: ignore[list-item]
else:
tag_list = list(tags) # type: ignore[arg-type]
self._self__ansible_tags_mapping = _datatag._AnsibleTagsMapping((type(tag), tag) for tag in tag_list)
@property
def _ansible_tags_mapping(self) -> _datatag._AnsibleTagsMapping:
return self._self__ansible_tags_mapping
| TaggedStreamWrapper |
python | paramiko__paramiko | paramiko/ssh_exception.py | {
"start": 4001,
"end": 4475
} | class ____(SSHException):
"""
A disagreement arose regarding an algorithm required for key exchange.
.. versionadded:: 2.9
"""
# TODO 4.0: consider making this annotate w/ 1..N 'missing' algorithms,
# either just the first one that would halt kex, or even updating the
# Transport logic so we record /all/ that /could/ halt kex.
# TODO: update docstrings where this may end up raised so they are more
# specific.
pass
| IncompatiblePeer |
python | encode__django-rest-framework | tests/test_viewsets.py | {
"start": 6301,
"end": 8264
} | class ____(TestCase):
def test_extra_actions(self):
view = ActionViewSet()
actual = [action.__name__ for action in view.get_extra_actions()]
expected = [
'custom_detail_action',
'custom_list_action',
'detail_action',
'list_action',
'unresolvable_detail_action',
'wrapped_detail_action',
'wrapped_list_action',
]
self.assertEqual(actual, expected)
def test_should_only_return_decorated_methods(self):
view = ActionViewSetWithMapping()
actual = [action.__name__ for action in view.get_extra_actions()]
expected = [
'custom_detail_action',
'custom_list_action',
'detail_action',
'list_action',
'unresolvable_detail_action',
'wrapped_detail_action',
'wrapped_list_action',
]
self.assertEqual(actual, expected)
def test_attr_name_check(self):
def decorate(fn):
def wrapper(self, request, *args, **kwargs):
return fn(self, request, *args, **kwargs)
return wrapper
class ActionViewSet(GenericViewSet):
queryset = Action.objects.all()
@action(detail=False)
@decorate
def wrapped_list_action(self, request, *args, **kwargs):
raise NotImplementedError
view = ActionViewSet()
with pytest.raises(AssertionError) as excinfo:
view.get_extra_actions()
assert str(excinfo.value) == (
'Expected function (`wrapper`) to match its attribute name '
'(`wrapped_list_action`). If using a decorator, ensure the inner '
'function is decorated with `functools.wraps`, or that '
'`wrapper.__name__` is otherwise set to `wrapped_list_action`.')
@override_settings(ROOT_URLCONF='tests.test_viewsets')
| GetExtraActionsTests |
python | getsentry__responses | responses/tests/test_responses.py | {
"start": 77314,
"end": 80034
} | class ____:
"""Validate that teardown raises if not all requests were executed.
Similar to ``TestUnitTestPatchSetup``.
"""
def setup_method(self):
self.r_mock = responses.RequestsMock()
self.r_mock.start()
self.r_mock.get("https://example.com", status=505)
self.r_mock.put("https://example.com", status=506)
def teardown_method(self):
with pytest.raises(AssertionError) as exc:
self.r_mock.stop()
self.r_mock.reset()
assert "[('PUT', 'https://example.com/')]" in str(exc.value)
assert_reset()
def test_function(self):
resp = requests.get("https://example.com")
assert resp.status_code == 505
def test_reset_in_the_middle():
@responses.activate
def run():
with responses.RequestsMock() as rsps2:
rsps2.reset()
responses.add(responses.GET, "https://example.invalid", status=200)
resp = requests.request("GET", "https://example.invalid")
assert resp.status_code == 200
run()
assert_reset()
def test_redirect():
@responses.activate
def run():
# create multiple Response objects where first two contain redirect headers
rsp1 = responses.Response(
responses.GET,
"http://example.com/1",
status=301,
headers={"Location": "http://example.com/2"},
)
rsp2 = responses.Response(
responses.GET,
"http://example.com/2",
status=301,
headers={"Location": "http://example.com/3"},
)
rsp3 = responses.Response(responses.GET, "http://example.com/3", status=200)
# register above generated Responses in `response` module
responses.add(rsp1)
responses.add(rsp2)
responses.add(rsp3)
# do the first request in order to generate genuine `requests` response
# this object will contain genuine attributes of the response, like `history`
rsp = requests.get("http://example.com/1")
responses.calls.reset()
# customize exception with `response` attribute
my_error = requests.ConnectionError("custom error")
my_error.response = rsp
# update body of the 3rd response with Exception, this will be raised during execution
rsp3.body = my_error
with pytest.raises(requests.ConnectionError) as exc_info:
requests.get("http://example.com/1")
assert exc_info.value.args[0] == "custom error"
assert rsp1.url in exc_info.value.response.history[0].url
assert rsp2.url in exc_info.value.response.history[1].url
run()
assert_reset()
| TestUnitTestPatchSetupRaises |
python | geekcomputers__Python | Assembler/assembler.py | {
"start": 557,
"end": 39047
} | class ____:
def __init__(self, token, t):
self.token = token
self.t = t
# def initRegister():
# global register
# for i in range(9):
# register.append(0)
def loadFile(fileName):
"""
loadFile: This function loads the file and reads its lines.
"""
global lines
fo = open(fileName)
for line in fo:
lines.append(line)
fo.close()
def scanner(string):
"""
scanner: This function builds the tokens by the content of the file.
The tokens will be saved in list 'tokens'
"""
global tokens
token = ""
state = 0 # init state
for ch in string:
match state:
case 0:
match ch:
case "m": # catch mov-command
state = 1
token += "m"
case "e": # catch register
state = 4
token += "e"
case "1": # catch a number
if ch <= "9" or ch == "-":
state = 6
token += ch
case "0": # catch a number or hex-code
state = 17
token += ch
case "a": # catch add-command
state = 7
token += ch
case "s": # catch sub command
state = 10
token += ch
case "i": # capture int command
state = 14
token += ch
case "p": # capture push or pop command
state = 19
token += ch
case "l": # capture label
state = 25
token += ch
case "j": # capture jmp command
state = 26
token += ch
case "c": # catch cmp-command
state = 29
token += ch
case ";": # capture comment
state = 33
case '"': # catch a string
state = 34
# without "
case ch.isupper(): # capture identifier
state = 35
token += ch
case "d": # capture db keyword
state = 36
token += ch
case "$": # catch variable with prefix $
state = 38
# not catching $
case "_": # catch label for subprogram
state = 40
# not catches the character _
case "r": # catch ret-command
state = 44
token += ch
case _: # other characters like space-characters etc
state = 0
token = ""
case 1: # state 1
match ch:
case "o":
state = 2
token += ch
case "u":
state = 47
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 2: # state 2
match ch:
case "v":
state = 3
token += "v"
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 3: # state 3
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 4: # state 4
if ch >= "a" and ch <= "d":
state = 5
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 5: # state 5
match ch:
case "x":
state = 13
token += ch
case _:
state = 0
token = ""
raise InvalidSyntax()
case 6: # state 6
if ch.isdigit():
state = 6
token += ch
elif ch.isspace():
state = 0
tokens.append(Token(token, "value"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 7: # state 7
match ch:
case "d":
state = 8
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 8: # state 8
match ch:
case "d":
state = 9
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 9: # state 9
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 10: # state 10
match ch:
case "u":
state = 11
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 11: # state 11
match ch:
case "b":
state = 12
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 12: # state 12
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 13: # state 13
if ch == "," or ch.isspace():
state = 0
tokens.append(Token(token, "register"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 14: # state 14
if ch == "n":
state = 15
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 15: # state 15
if ch == "t":
state = 16
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 16: # state 16
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 17: # state 17
if ch == "x":
state = 18
token += ch
elif ch.isspace():
state = 0
tokens.append(Token(token, "value"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 18: # state 18
if ch.isdigit() or (ch >= "a" and ch <= "f"):
state = 18
token += ch
elif ch.isspace():
state = 0
tokens.append(Token(token, "value"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 19: # state 19
if ch == "u":
state = 20
token += ch
elif ch == "o":
state = 23
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 20: # state 20
if ch == "s":
state = 21
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 21: # state 21
if ch == "h":
state = 22
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 22: # state 22
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 23: # state 23
if ch == "p":
state = 24
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 24: # state 24
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 25: # state 25
if ch.isdigit():
state = 25
token += ch
elif ch == ":" or ch.isspace():
state = 0
tokens.append(Token(token, "label"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 26: # state 26
if ch == "m":
state = 27
token += ch
elif ch == "e": # catch je command
state = 32
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 27: # state 27
if ch == "p":
state = 28
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 28: # state 28
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 29: # state 29
match ch:
case "m":
state = 30
token += ch
case "a": # catch call-command
state = 41
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 30: # state 30
if ch == "p":
state = 31
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 31: # state 31
token = ""
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
else: # error case
state = 0
raise InvalidSyntax()
case 32: # state 32
token = ""
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
else: # error case
state = 0
raise InvalidSyntax()
case 33: # state 33
if (
ch.isdigit()
or ch.isalpha()
or (ch.isspace() and ch != "\n")
or ch == '"'
):
state = 33
elif ch == "\n":
state = 0
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 34: # state 34
if ch.isdigit() or ch.isalpha() or ch.isspace():
state = 34
token += ch
elif ch == '"':
state = 0
tokens.append(Token(token, "string"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 35: # state 35
if ch.isdigit() or ch.isupper():
state = 35
token += ch
elif ch == " " or ch == "\n":
state = 0
tokens.append(Token(token, "identifier"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 36: # state 36
if ch == "b":
state = 37
token += ch
elif ch == "i":
state = 49
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 37: # state 37
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 38: # state 38
if ch.isalpha():
state = 39
token += ch
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 39: # state 39
if ch.isalpha() or ch.isdigit():
state = 39
token += ch
elif ch.isspace():
state = 0
tokens.append(Token(token, "identifier"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 40: # state 40
if (
(ch >= "a" and ch <= "z")
or (ch >= "A" and ch <= "Z")
or (ch >= "0" and ch <= "9")
):
state = 40
token += ch
elif ch == ":" or ch.isspace():
state = 0
tokens.append(Token(token, "subprogram"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 41: # state 41
match ch:
case "l":
state = 42
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 42: # state 42
match ch:
case "l":
state = 43
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 43: # state 43
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 44: # state 44
match ch:
case "e":
state = 45
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 45: # state 45
match ch:
case "t":
state = 46
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 46: # state 46
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 47: # state 47
match ch:
case "l":
state = 48
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 48: # state 48
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
case 49: # state 49
match ch:
case "v":
state = 50
token += ch
case _: # error case
state = 0
token = ""
raise InvalidSyntax()
case 50: # state 50
if ch.isspace():
state = 0
tokens.append(Token(token, "command"))
token = ""
else: # error case
state = 0
token = ""
raise InvalidSyntax()
def scan():
"""
scan: applies function scanner() to each line of the source code.
"""
global lines
assert len(lines) > 0, "no lines"
for line in lines:
try:
scanner(line)
except InvalidSyntax:
print("line=", line)
def parser():
"""
parser: parses the tokens of the list 'tokens'
"""
global tokens
global eax, ebx, ecx, edx
assert len(tokens) > 0, "no tokens"
pointer = 0 # pointer for tokens
token = Token("", "")
tmpToken = Token("", "")
while pointer < len(tokens):
token = tokens[pointer]
if token.token == "mov": # mov commando
# it must follow a register
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer]
else:
print("Error: Not found argument!")
return
# TODO use token.t for this stuff
if token.t == "register":
tmpToken = token
# it must follow a value / string / register / variable
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer]
else:
print("Error: Not found argument!")
return
# converts the token into float, if token contains only digits.
# TODO response of float
if token.t == "identifier": # for variables
# check of exists of variable
if token.token in variables:
token.token = variables[token.token]
else:
print(f"Error: Undefined variable {token.token}")
return
elif token.t == "string":
token.token = str(token.token)
elif isinstance(token.token, float):
pass
elif token.token.isdigit():
token.token = float(token.token)
elif token.token[0] == "-" and token.token[1:].isdigit():
token.token = float(token.token[1:])
token.token *= -1
elif token.t == "register": # loads out of register
match token.token:
case "eax":
token.token = eax
case "ebx":
token.token = ebx
case "ecx":
token.token = ecx
case "edx":
token.token = edx
match tmpToken.token:
case "eax":
eax = token.token
case "ebx":
ebx = token.token
case "ecx":
ecx = token.token
case "edx":
edx = token.token
else:
print("Error: No found register!")
return
elif token.token == "add": # add commando
pointer += 1
token = tokens[pointer]
if token.t == "register":
tmpToken = token
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer]
else:
print("Error: Not found number!")
return
# converts the token into float, if token contains only digits.
if token.t == "register":
# for the case that token is register
match token.token:
case "eax":
token.token = eax
case "ebx":
token.token = ebx
case "ecx":
token.token = ecx
case "edx":
token.token = edx
elif token.token.isdigit():
token.token = float(token.token)
elif token.token[0] == "-" and token.token[1:].isdigit():
token.token = float(token.token[1:])
token.token *= -1
else:
print("Error: ", token, " is not a number!")
return
match tmpToken.token:
case "eax":
eax += token.token
# update zero flag
zeroFlag = False
if eax == 0:
zeroFlag = True
case "ebx":
ebx += token.token
# update zero flag
zeroFlag = False
if ebx == 0:
zeroFlag = True
case "ecx":
ecx += token.token
# update zero flag
zeroFlag = False
if ecx == 0:
zeroFlag = True
case "edx":
edx += token.token
# update zero flag
zeroFlag = False
if edx == 0:
zeroFlag = True
else:
print("Error: Not found register!")
return
elif token.token == "sub": # sub commando
pointer += 1
token = tokens[pointer]
if token.t == "register":
tmpToken = token
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer]
else:
print("Error: Not found number!")
return
# converts the token into float, if token contains only digits.
if token.t == "register":
# for the case that token is register
if token.token == "eax":
token.token = eax
elif token.token == "ebx":
token.token = ebx
elif token.token == "ecx":
token.token = ecx
elif token.token == "edx":
token.token = edx
elif isinstance(token.token, float):
pass
elif token.token.isdigit():
token.token = float(token.token)
elif token.token[0] == "-" and token.token[1:].isdigit():
token.token = float(token.token[1:])
token.token *= -1
else:
print("Error: ", token.token, " is not a number!")
return
if tmpToken.token == "eax":
eax -= token.token
# updated zero flag
if eax == 0:
zeroFlag = True
else:
zeroFlag = False
elif tmpToken.token == "ebx":
ebx -= token.token
# update zero flag
if ebx == 0:
zeroFlag = True
else:
zeroFlag = False
elif tmpToken.token == "ecx":
ecx -= token.token
# update zero flag
if ecx == 0:
zeroFlag = True
else:
zeroFlag = False
elif tmpToken.token == "edx":
edx -= token.token
# update zero flag
if edx == 0:
zeroFlag = True
else:
zeroFlag = False
else:
print("Error: No found register!")
return
elif token.token == "int": # int commando
tmpToken = token
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer]
else:
print("Error: Not found argument!")
return
if token.token == "0x80": # system interrupt 0x80
if eax == 1: # exit program
if ebx == 0:
print("END PROGRAM")
return
else:
print("END PROGRAM WITH ERRORS")
return
elif eax == 3:
ecx = float(input(">> "))
elif eax == 4: # output information
print(ecx)
elif token.token == "push": # push commando
tmpToken = token
# it must follow a register
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer]
else:
print("Error: Not found register!")
return
# pushing register on the stack
stack.append(token.token)
elif token.token == "pop": # pop commando
tmpToken = token
# it must follow a register
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer]
else:
print("Error: Not found register!")
return
# pop register from stack
match token.token:
case "eax":
if len(stack) == 0:
print("Error: Stack Underflow")
return
eax = stack.pop()
case "ebx":
ebx = stack.pop()
case "ecx":
ecx = stack.pop()
case "edx":
edx = stack.pop()
elif token.t == "label": # capture label
jumps[token.token] = pointer
elif token.token == "jmp": # capture jmp command
# it must follow a label
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer]
else:
print("Error: Not found label!")
return
if token.t == "label":
pointer = jumps[token.token]
else:
print("Error: expected a label!")
elif token.token == "cmp":
# TODO
# it must follow a register
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer]
else:
print("Error: Not found argument!")
return
if token.t == "register":
# it must follow a register
if pointer + 1 < len(tokens):
pointer += 1
tmpToken = tokens[pointer] # next register
else:
print("Error: Not found register!")
return
# actual comparing
zeroFlag = setZeroFlag(token.token, tmpToken.token)
elif token.token == "je":
# it must follow a label
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer] # next register
else:
print("Error: Not found argument")
return
# check of label
if token.t == "label":
# actual jump
if zeroFlag:
pointer = jumps[token.token]
else:
print("Error: Not found label")
return
elif token.t == "identifier":
# check whether identifier is in variables-table
if token.token not in variables:
# it must follow a command
if pointer + 1 < len(tokens):
pointer += 1
tmpToken = tokens[pointer] # next register
else:
print("Error: Not found argument")
return
if tmpToken.t == "command" and tmpToken.token == "db":
# it must follow a value (string)
if pointer + 1 < len(tokens):
pointer += 1
tmpToken = tokens[pointer] # next register
else:
print("Error: Not found argument")
return
if tmpToken.t == "value" or tmpToken.t == "string":
if tmpToken.t == "value":
variables[token.token] = float(tmpToken.token)
elif tmpToken.t == "string":
variables[token.token] = tmpToken.token
else:
print("Error: Not found db-keyword")
return
elif token.token == "call": # catch the call-command
# it must follow a subprogram label
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer] # next register
else:
print("Error: Not found subprogram label")
return
if token.t == "subprogram":
if token.token in jumps:
# save the current pointer
returnStack.append(pointer) # eventuell pointer + 1
# jump to the subprogram
pointer = jumps[token.token]
else: # error case
print("Error: Unknown subprogram!")
return
else: # error case
print("Error: Not found subprogram")
return
elif token.token == "ret": # catch the ret-command
if len(returnStack) >= 1:
pointer = returnStack.pop()
else: # error case
print("Error: No return address on stack")
return
elif token.t == "subprogram":
pass
elif token.token == "mul": # catch mul-command
# it must follow a register
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer] # next register
else:
print("Error: Not found argument")
return
if token.t == "register":
if token.token == "eax":
eax *= eax
elif token.token == "ebx":
eax *= ebx
elif token.token == "ecx":
eax *= ecx
elif token.token == "edx":
eax *= edx
else:
print("Error: Not found register")
return
elif token.token == "div":
# it must follow a register
if pointer + 1 < len(tokens):
pointer += 1
token = tokens[pointer] # next register
else:
print("Error: Not found argument")
return
if token.t == "register":
match token.token:
case "eax":
eax /= eax
case "ebx":
if ebx == 0:
print("Error: Division by Zero")
return
eax /= ebx
case "ecx":
eax /= ecx
case "edx":
eax /= edx
else:
print("Error: Not found register")
return
# increment pointer for fetching next token.
pointer += 1
def setZeroFlag(token, tmpToken):
    """Return the zero-flag value produced by comparing two registers.

    Args:
        token: name of the first register ("eax"/"ebx"/"ecx"/"edx").
        tmpToken: name of the second register.

    Returns:
        True when both register names resolve to equal values, False
        when they differ, and None (after printing an error) when
        tmpToken is not a known register name.
    """
    global eax, ebx, ecx, edx
    # Snapshot of the current register contents, keyed by name.
    registers = {
        "eax": eax,
        "ebx": ebx,
        "ecx": ecx,
        "edx": edx,
    }
    # Fix: the original match statement had four byte-identical case
    # bodies; a membership test expresses the same validation once.
    if tmpToken not in registers:
        print("Error: Not found register!")
        return
    # An unknown first register resolves to None and compares unequal,
    # matching the original behavior.
    return registers.get(token) == registers.get(tmpToken)
def registerLabels():
    """
    Record the token index of every label and subprogram label in 'jumps'.
    """
    for index, tok in enumerate(tokens):
        if tok.t in ("label", "subprogram"):
            jumps[tok.token] = index
def resetInterpreter():
    """
    Reset every piece of interpreter state to its initial value:
    registers, zero flag, value stack, jump table, variable table,
    source lines, token list and return stack.
    """
    global eax, ebx, ecx, edx, zeroFlag, stack
    global variables, jumps, lines, tokens, returnStack
    eax = ebx = ecx = edx = 0
    zeroFlag = False
    stack = []
    jumps = {}
    variables = {}
    lines = []
    tokens = []
    returnStack = []
# DEBUG FUNCTION
# def printTokens():
# for token in tokens:
# print(token.token, " --> ", token.t)
# main program
def main():
    """
    Read the source files named on the command line and interpret
    each one in turn with a freshly reset interpreter.
    """
    # argv[0] is the interpreter program itself, so start at argv[1].
    for path in sys.argv[1:]:
        resetInterpreter()  # wipe all state left over from the previous file
        try:
            loadFile(path)
            scan()
            registerLabels()
            parser()
        except Exception as err:
            print(f"Error: {err}")


if __name__ == "__main__":
    main()
| Token |
python | ray-project__ray | python/ray/air/_internal/filelock.py | {
"start": 132,
"end": 1430
} | class ____:
"""FileLock wrapper that uses temporary file locks.
The temporary directory that these locks are saved to can be configured via
the `RAY_TMPDIR` environment variable.
Args:
path: The file path that this temporary file lock is used for.
This will be used to generate the lockfile filename.
Ex: For concurrent writes to a file, this is the common filepath
that multiple processes are writing to.
**kwargs: Additional keyword arguments to pass to the underlying `FileLock`.
"""
def __init__(self, path: str, **kwargs):
self.path = path
temp_dir = Path(ray._common.utils.get_user_temp_dir()).resolve()
self._lock_dir = temp_dir / RAY_LOCKFILE_DIR
self._path_hash = hashlib.sha1(
str(Path(self.path).resolve()).encode("utf-8")
).hexdigest()
self._lock_path = self._lock_dir / f"{self._path_hash}.lock"
os.makedirs(str(self._lock_dir), exist_ok=True)
self._lock = FileLock(self._lock_path, **kwargs)
def __enter__(self):
self._lock.acquire()
return self
def __exit__(self, type, value, traceback):
self._lock.release()
def __getattr__(self, name):
return getattr(self._lock, name)
| TempFileLock |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/mixedversions/package.py | {
"start": 217,
"end": 510
} | class ____(Package):
url = "http://www.fake-mixedversions.org/downloads/mixedversions-1.0.tar.gz"
version("2.0.1", md5="0000000000000000000000000000000c")
version("2.0", md5="0000000000000000000000000000000b")
version("1.0.1", md5="0000000000000000000000000000000a")
| Mixedversions |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/autoVariance1.py | {
"start": 476,
"end": 937
} | class ____[T](Sequence[T]):
def __len__(self) -> int: ...
@overload
def __getitem__(self, index: int) -> T: ...
@overload
def __getitem__(self, index: slice) -> Sequence[T]: ...
def __getitem__(self, index: int | slice) -> T | Sequence[T]: ...
vco2_1: ShouldBeCovariant2[float] = ShouldBeCovariant2[int]()
# This should generate an error based on variance.
vco2_2: ShouldBeCovariant2[int] = ShouldBeCovariant2[float]()
| ShouldBeCovariant2 |
python | getsentry__sentry | src/sentry/dynamic_sampling/models/common.py | {
"start": 231,
"end": 774
} | class ____:
id: ProjectId | TransactionName
count: float
new_sample_rate: float = 0.0
def sum_classes_counts(classes: list[RebalancedItem]) -> float:
ret_val = 0.0
for elm in classes:
ret_val += elm.count
return ret_val
def guarded_run(model: Model[Any, Any], model_input: ModelInput) -> Any | None:
try:
return model.run(model_input)
except Exception as e:
# We want to track the error when running the model.
sentry_sdk.capture_exception(e)
return None
| RebalancedItem |
python | scipy__scipy | benchmarks/benchmarks/special.py | {
"start": 336,
"end": 470
} | class ____(Benchmark):
def time_ai_zeros(self):
ai_zeros(100000)
def time_bi_zeros(self):
bi_zeros(100000)
| Airy |
python | pydata__xarray | xarray/tests/test_sparse.py | {
"start": 8149,
"end": 17913
} | class ____:
@pytest.fixture(autouse=True)
def setUp(self):
self.data = sparse.random((4, 6), random_state=0, density=0.5)
self.var = xr.Variable(("x", "y"), self.data)
def test_nbytes(self):
assert self.var.nbytes == self.data.nbytes
def test_unary_op(self):
assert_sparse_equal(-self.var.data, -self.data)
assert_sparse_equal(abs(self.var).data, abs(self.data))
assert_sparse_equal(self.var.round().data, self.data.round())
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_univariate_ufunc(self):
assert_sparse_equal(np.sin(self.data), np.sin(self.var).data)
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_bivariate_ufunc(self):
assert_sparse_equal(np.maximum(self.data, 0), np.maximum(self.var, 0).data)
assert_sparse_equal(np.maximum(self.data, 0), np.maximum(0, self.var).data)
def test_univariate_xufunc(self):
assert_sparse_equal(xu.sin(self.var).data, np.sin(self.data))
def test_bivariate_xufunc(self):
assert_sparse_equal(xu.multiply(self.var, 0).data, np.multiply(self.data, 0))
assert_sparse_equal(xu.multiply(0, self.var).data, np.multiply(0, self.data))
def test_repr(self):
expected = dedent(
"""\
<xarray.Variable (x: 4, y: 6)> Size: 288B
<COO: shape=(4, 6), dtype=float64, nnz=12, fill_value=0.0>"""
)
assert expected == repr(self.var)
def test_pickle(self):
v1 = self.var
v2 = pickle.loads(pickle.dumps(v1))
assert_sparse_equal(v1.data, v2.data)
def test_missing_values(self):
a = np.array([0, 1, np.nan, 3])
s = sparse.COO.from_numpy(a)
var_s = Variable("x", s)
assert np.all(var_s.fillna(2).data.todense() == np.arange(4))
assert np.all(var_s.count() == 3)
@pytest.mark.parametrize(
"prop",
[
"attrs",
"chunks",
"coords",
"data",
"dims",
"dtype",
"encoding",
"imag",
"indexes",
"loc",
"name",
"nbytes",
"ndim",
"plot",
"real",
"shape",
"size",
"sizes",
"str",
"variable",
],
)
def test_dataarray_property(prop):
arr = make_xrarray({"x": 10, "y": 5})
getattr(arr, prop)
@pytest.mark.parametrize(
"func,sparse_output",
[
(do("all"), False),
(do("any"), False),
(do("assign_attrs", {"foo": "bar"}), True),
(do("assign_coords", x=make_xrarray({"x": 10}).x + 1), True),
(do("astype", int), True),
(do("clip", min=0, max=1), True),
(do("compute"), True),
(do("conj"), True),
(do("copy"), True),
(do("count"), False),
(do("diff", "x"), True),
(do("drop_vars", "x"), True),
(do("expand_dims", {"z": 2}, axis=2), True),
(do("get_axis_num", "x"), False),
(do("get_index", "x"), False),
(do("identical", make_xrarray({"x": 5, "y": 5})), False),
(do("integrate", "x"), True),
(do("isel", {"x": slice(0, 3), "y": slice(2, 4)}), True),
(do("isnull"), True),
(do("load"), True),
(do("mean"), False),
(do("persist"), True),
(do("reindex", {"x": [1, 2, 3]}), True),
(do("rename", "foo"), True),
(do("reorder_levels"), True),
(do("reset_coords", drop=True), True),
(do("reset_index", "x"), True),
(do("round"), True),
(do("sel", x=[0, 1, 2]), True),
(do("shift"), True),
(do("sortby", "x", ascending=False), True),
(do("stack", z=["x", "y"]), True),
(do("transpose"), True),
# TODO
# set_index
# swap_dims
(do("broadcast_equals", make_xrvar({"x": 10, "y": 5})), False),
(do("equals", make_xrvar({"x": 10, "y": 5})), False),
param(
do("argmax"),
True,
marks=[
xfail(reason="Missing implementation for np.argmax"),
filterwarnings("ignore:Behaviour of argmin/argmax"),
],
),
param(
do("argmin"),
True,
marks=[
xfail(reason="Missing implementation for np.argmin"),
filterwarnings("ignore:Behaviour of argmin/argmax"),
],
),
param(
do("argsort"),
True,
marks=xfail(reason="'COO' object has no attribute 'argsort'"),
),
param(
do("bfill", dim="x"),
False,
marks=xfail(reason="Missing implementation for np.flip"),
),
(do("combine_first", make_xrarray({"x": 10, "y": 5})), True),
param(
do("conjugate"),
False,
marks=xfail(reason="'COO' object has no attribute 'conjugate'"),
),
param(
do("cumprod"),
True,
marks=xfail(reason="Missing implementation for np.nancumprod"),
),
param(
do("cumsum"),
True,
marks=xfail(reason="Missing implementation for np.nancumsum"),
),
param(
do("differentiate", "x"),
False,
marks=xfail(reason="Missing implementation for np.gradient"),
),
param(
do("dot", make_xrarray({"x": 10, "y": 5})),
True,
marks=xfail(reason="Missing implementation for np.einsum"),
),
param(do("dropna", "x"), False, marks=xfail(reason="Coercion to dense")),
param(do("ffill", "x"), False, marks=xfail(reason="Coercion to dense")),
(do("fillna", 0), True),
param(
do("interp", coords={"x": np.arange(10) + 0.5}),
True,
marks=xfail(reason="Coercion to dense"),
),
param(
do(
"interp_like",
make_xrarray(
{"x": 10, "y": 5},
coords={"x": np.arange(10) + 0.5, "y": np.arange(5) + 0.5},
),
),
True,
marks=xfail(reason="Indexing COO with more than one iterable index"),
),
param(do("interpolate_na", "x"), True, marks=xfail(reason="Coercion to dense")),
param(
do("isin", [1, 2, 3]),
False,
marks=xfail(reason="Missing implementation for np.isin"),
),
param(
do("item", (1, 1)),
False,
marks=xfail(reason="'COO' object has no attribute 'item'"),
),
param(do("max"), False),
param(do("min"), False),
param(
do("median"),
False,
marks=xfail(reason="Missing implementation for np.nanmedian"),
),
(do("notnull"), True),
(do("pipe", func="sum", axis=1), True),
(do("prod"), False),
param(
do("quantile", q=0.5),
False,
marks=xfail(reason="Missing implementation for np.nanpercentile"),
),
param(
do("rank", "x"),
False,
marks=xfail(reason="Only implemented for NumPy arrays (via bottleneck)"),
),
param(
do("reduce", func="sum", dim="x"),
False,
marks=xfail(reason="Coercion to dense"),
),
param(
do(
"reindex_like",
make_xrarray(
{"x": 10, "y": 5},
coords={"x": np.arange(10) + 0.5, "y": np.arange(5) + 0.5},
),
),
True,
marks=xfail(reason="Indexing COO with more than one iterable index"),
),
(do("roll", x=2, roll_coords=True), True),
param(
do("sel", x=[0, 1, 2], y=[2, 3]),
True,
marks=xfail(reason="Indexing COO with more than one iterable index"),
),
param(
do("std"), False, marks=xfail(reason="Missing implementation for np.nanstd")
),
(do("sum"), False),
param(
do("var"), False, marks=xfail(reason="Missing implementation for np.nanvar")
),
param(
do("where", make_xrarray({"x": 10, "y": 5}) > 0.5),
False,
marks=xfail(reason="Conversion of dense to sparse when using sparse mask"),
),
],
ids=repr,
)
def test_dataarray_method(func, sparse_output):
arr_s = make_xrarray(
{"x": 10, "y": 5}, coords={"x": np.arange(10), "y": np.arange(5)}
)
arr_d = xr.DataArray(arr_s.data.todense(), coords=arr_s.coords, dims=arr_s.dims)
ret_s = func(arr_s)
ret_d = func(arr_d)
if sparse_output:
assert isinstance(ret_s.data, sparse.SparseArray)
assert np.allclose(ret_s.data.todense(), ret_d.data, equal_nan=True)
else:
assert np.allclose(ret_s, ret_d, equal_nan=True)
@pytest.mark.parametrize(
"func,sparse_output",
[
(do("squeeze"), True),
param(
do("searchsorted", [1, 2, 3]),
False,
marks=xfail(reason="'COO' object has no attribute 'searchsorted'"),
),
],
)
def test_datarray_1d_method(func, sparse_output):
arr_s = make_xrarray({"x": 10}, coords={"x": np.arange(10)})
arr_d = xr.DataArray(arr_s.data.todense(), coords=arr_s.coords, dims=arr_s.dims)
ret_s = func(arr_s)
ret_d = func(arr_d)
if sparse_output:
assert isinstance(ret_s.data, sparse.SparseArray)
assert np.allclose(ret_s.data.todense(), ret_d.data, equal_nan=True)
else:
assert np.allclose(ret_s, ret_d, equal_nan=True)
| TestSparseVariable |
python | mlflow__mlflow | mlflow/types/chat.py | {
"start": 5953,
"end": 6068
} | class ____(BaseModel):
index: int
finish_reason: str | None = None
delta: ChatChoiceDelta
| ChatChunkChoice |
python | ApeWorX__ape | tests/functional/test_accounts.py | {
"start": 2153,
"end": 41707
} | class ____(EIP712Message):
_name_: "string" = "Foo" # type: ignore # noqa: F821
bar: "address" # type: ignore # noqa: F821
baz: Baz
def test_sign_message(signer, message):
signature = signer.sign_message(message)
assert signer.check_signature(message, signature)
def test_sign_transaction(signer, message, ethereum):
transaction = ethereum.create_transaction(nonce=0, max_fee=0, max_priority_fee=0)
signed_transaction = signer.sign_transaction(transaction)
assert signed_transaction.signature is not None
def test_sign_transaction_using_keyfile_account(keyfile_account, message, ethereum, runner):
transaction = ethereum.create_transaction(
nonce=0, max_fee=0, max_priority_fee=0, data="0x21314135413451"
)
with runner.isolation(f"y\n{PASSPHRASE}\ny"):
signed_transaction = keyfile_account.sign_transaction(transaction)
assert signed_transaction.signature is not None
def test_sign_string(signer):
message = "Hello Apes!"
signature = signer.sign_message(message)
assert signer.check_signature(message, signature)
def test_sign_int(signer):
message = 4
signature = signer.sign_message(message)
assert signer.check_signature(message, signature)
def test_sign_message_unsupported_type_returns_none(signer):
message = 1234.123
signature = signer.sign_message(message)
assert signature is None
def test_recover_signer(signer, message):
signature = signer.sign_message(message)
assert recover_signer(message, signature) == signer
def test_sign_eip712_message(signer):
baz = Baz(addr=signer.address) # type: ignore[call-arg]
foo = Foo(bar=signer.address, baz=baz) # type: ignore[call-arg]
signature = signer.sign_message(foo)
assert signer.check_signature(foo, signature)
def test_sign_message_with_prompts(runner, keyfile_account, message):
# "y\na\ny": yes sign, password, yes keep unlocked
start_nonce = keyfile_account.nonce
with runner.isolation(input=f"y\n{PASSPHRASE}\ny"):
signature = keyfile_account.sign_message(message)
assert keyfile_account.check_signature(message, signature)
# # "n": don't sign
with runner.isolation(input="n\n"):
signature = keyfile_account.sign_message(message)
assert signature is None
# Nonce should not change from signing messages.
end_nonce = keyfile_account.nonce
assert start_nonce == end_nonce
def test_sign_eip712_message_shows_custom_types(signer):
baz = Baz(addr=signer.address) # type: ignore[call-arg]
foo = Foo(bar=signer.address, baz=baz) # type: ignore[call-arg]
display_msg, msg = _get_signing_message_with_display(foo)
expected = """
Signing EIP712 Message
Domain
Name: Foo
Message
bar: 0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc
baz:
addr: 0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc
""".strip()
assert display_msg
assert display_msg.strip() == expected
def test_sign_raw_hash(runner, keyfile_account):
# NOTE: `message` is a 32 byte raw hash, which is treated specially
message = b"\xab" * 32
# "y\na\ny": yes sign raw hash, password, yes keep unlocked
with runner.isolation(input=f"y\n{PASSPHRASE}\ny"):
signature = keyfile_account.sign_raw_msghash(message)
assert keyfile_account.check_signature(message, signature, recover_using_eip191=False)
# "n\nn": no sign raw hash: don't sign
with runner.isolation(input="n"):
signature = keyfile_account.sign_message(message)
assert signature is None
def test_transfer(sender, receiver, eth_tester_provider, convert):
initial_receiver_balance = receiver.balance
initial_sender_balance = sender.balance
value_str = "24 gwei"
value_int = convert(value_str, int)
receipt = sender.transfer(receiver, value_str)
# Ensure each account balance was affected accordingly
expected_receiver_balance = initial_receiver_balance + value_int
expected_sender_loss = receipt.total_fees_paid + value_int
expected_sender_balance = initial_sender_balance - expected_sender_loss
assert receiver.balance == expected_receiver_balance
assert sender.balance == expected_sender_balance, (
f"difference: {abs(sender.balance - expected_sender_balance)}"
)
def test_transfer_with_negative_value(sender, receiver):
with pytest.raises(AccountsError, match="Value cannot be negative."):
sender.transfer(receiver, value=-1)
def test_transfer_without_value(sender, receiver):
with pytest.raises(AccountsError, match=MISSING_VALUE_TRANSFER_ERR_MSG):
sender.transfer(receiver)
def test_transfer_without_value_send_everything_false(sender, receiver):
with pytest.raises(AccountsError, match=MISSING_VALUE_TRANSFER_ERR_MSG):
sender.transfer(receiver, send_everything=False)
def test_transfer_without_value_send_everything_true_with_low_gas(sender, receiver):
initial_receiver_balance = receiver.balance
initial_sender_balance = sender.balance
# Clear balance of sender.
# Use small gas so for sure runs out of money.
receipt = sender.transfer(receiver, send_everything=True, gas=22000)
value_given = receipt.value
total_spent = value_given + receipt.total_fees_paid
assert sender.balance == initial_sender_balance - total_spent
assert receiver.balance == initial_receiver_balance + value_given
expected_err_regex = r"Sender does not have enough to cover transaction value and gas: \d*"
with pytest.raises(AccountsError, match=expected_err_regex):
sender.transfer(receiver, send_everything=True)
def test_transfer_without_value_send_everything_true_with_high_gas(sender, receiver):
initial_receiver_balance = receiver.balance
initial_sender_balance = sender.balance
# The gas selected here is very high compared to what actually gets used.
gas = 25000000
# Clear balance of sender
receipt = sender.transfer(receiver, send_everything=True, gas=gas)
value_given = receipt.value
total_spent = value_given + receipt.total_fees_paid
assert sender.balance == initial_sender_balance - total_spent
assert receiver.balance == initial_receiver_balance + value_given
# The sender is able to transfer again because they have so much left over
# from safely using such a high gas before.
# Use smaller (more expected) amount of gas this time.
sender.transfer(receiver, send_everything=True, gas=21000)
def test_transfer_with_value_send_everything_true(sender, receiver):
with pytest.raises(AccountsError, match="Cannot use 'send_everything=True' with 'VALUE'."):
sender.transfer(receiver, 1, send_everything=True)
def test_transfer_with_prompts(runner, receiver, keyfile_account):
# "y\na\ny": yes sign, password, yes keep unlocked
with runner.isolation(f"y\n{PASSPHRASE}\ny"):
receipt = keyfile_account.transfer(receiver, "1 gwei")
assert receipt.receiver == receiver
# "n": don't sign
with runner.isolation(input="n\n"):
with pytest.raises(SignatureError):
keyfile_account.transfer(receiver, "1 gwei")
def test_transfer_using_type_0(sender, receiver, convert):
initial_balance = receiver.balance
sender.transfer(receiver, "1 gwei", type=0)
expected = initial_balance + convert("1 gwei", int)
assert receiver.balance == expected
def test_transfer_value_of_0(sender, receiver):
"""
There was a bug where this failed, thinking there was no value.
"""
initial_balance = receiver.balance
sender.transfer(receiver, 0)
assert receiver.balance == initial_balance
# Also show conversion works.
sender.transfer(receiver, "0 wei")
assert receiver.balance == initial_balance
def test_transfer_mixed_up_sender_and_value(sender, receiver):
"""
Testing the case where the user mixes up the argument order,
it should show a nicer error than it was previously, as this is
a common and easy mistake.
"""
expected = (
r"Cannot use integer-type for the `receiver` "
r"argument in the `\.transfer\(\)` method \(this "
r"protects against accidentally passing the "
r"`value` as the `receiver`\)."
)
with pytest.raises(AccountsError, match=expected):
sender.transfer(123, receiver)
# Similarly show using currency-str (may fail for different error).
expected = r"Invalid `receiver` value: '123 wei'\."
with pytest.raises(AccountsError, match=expected):
sender.transfer("123 wei", receiver)
def test_transfer_sign_is_false(sender, receiver):
with pytest.raises(SignatureError):
sender.transfer(receiver, "1 gwei", sign=False)
def test_deploy(owner, contract_container, clean_contract_caches):
contract = owner.deploy(contract_container, 0)
assert contract.address
assert contract.txn_hash
# Deploy again to prove that we get the correct txn_hash below
owner.deploy(contract_container, 0)
# Verify can reload same contract from cache
contract_from_cache = ape.Contract(contract.address)
assert contract_from_cache.contract_type == contract.contract_type
assert contract_from_cache.address == contract.address
assert contract_from_cache.txn_hash == contract.txn_hash
@explorer_test
def test_deploy_and_publish_local_network(owner, contract_container):
with pytest.raises(ProjectError, match="Can only publish deployments on a live network"):
owner.deploy(contract_container, 0, publish=True)
@explorer_test
def test_deploy_and_publish_live_network_no_explorer(owner, contract_container, dummy_live_network):
dummy_live_network.__dict__["explorer"] = None
expected_message = "Unable to publish contract - no explorer plugin installed."
with pytest.raises(NetworkError, match=expected_message):
owner.deploy(contract_container, 0, publish=True, required_confirmations=0)
@explorer_test
def test_deploy_and_publish(
owner, contract_container, dummy_live_network_with_explorer, mock_explorer
):
contract = owner.deploy(contract_container, 0, publish=True, required_confirmations=0)
mock_explorer.publish_contract.assert_called_once_with(contract.address)
@explorer_test
def test_deploy_and_not_publish(
owner, contract_container, dummy_live_network_with_explorer, mock_explorer
):
owner.deploy(contract_container, 0, publish=True, required_confirmations=0)
assert not mock_explorer.call_count
def test_deploy_proxy(owner, vyper_contract_instance, project, chain):
target = vyper_contract_instance.address
proxy = owner.deploy(project.SimpleProxy, target)
# Ensure we can call both proxy and target methods on it.
assert proxy.implementation # No attr err
assert proxy.myNumber # No attr err
# Ensure was properly cached.
assert proxy.address in chain.contracts.contract_types
assert proxy.address in chain.contracts.proxy_infos
# Show the cached proxy info is correct.
proxy_info = chain.contracts.proxy_infos[proxy.address]
assert proxy_info.target == target
assert proxy_info.type == ProxyType.Delegate
assert proxy_info.abi.name == "implementation"
# Show we get the implementation contract type using the proxy address
re_contract = chain.contracts.instance_at(proxy.address)
assert re_contract.contract_type == proxy.contract_type
# Show proxy methods are not available on target alone.
target = chain.contracts.instance_at(proxy_info.target)
assert target.myNumber # No attr err
with pytest.raises(AttributeError):
_ = target.implementation
def test_deploy_instance(owner, vyper_contract_instance):
"""
Tests against a confusing scenario where you would get a SignatureError when
trying to deploy a ContractInstance because Ape would attempt to create a tx
by calling the contract's default handler.
"""
expected = (
r"contract argument must be a ContractContainer type, "
r"such as 'project\.MyContract' where 'MyContract' is the "
r"name of a contract in your project\."
)
with pytest.raises(TypeError, match=expected):
owner.deploy(vyper_contract_instance)
@pytest.mark.parametrize("bytecode", (None, {}, {"bytecode": "0x"}))
def test_deploy_no_deployment_bytecode(owner, bytecode):
"""
https://github.com/ApeWorX/ape/issues/1904
"""
expected = (
r"Cannot deploy: contract 'Apes' has no deployment-bytecode\. "
r"Are you attempting to deploy an interface\?"
)
contract_type = ContractType.model_validate(
{"abi": [], "contractName": "Apes", "deploymentBytecode": bytecode}
)
contract = ContractContainer(contract_type)
with pytest.raises(MissingDeploymentBytecodeError, match=expected):
owner.deploy(contract)
def test_deploy_contract_type(owner, project, clean_contract_caches):
contract_type = project.VyperContract.contract_type
contract = owner.deploy(contract_type, 0)
assert contract.address
assert contract.txn_hash
def test_deploy_sending_funds_to_non_payable_constructor(project, owner):
with pytest.raises(
MethodNonPayableError,
match=r"Sending funds to a non-payable constructor\.",
):
owner.deploy(project.SolidityContract, 1, value="1 ether")
def test_send_transaction_with_bad_nonce(sender, receiver):
# Bump the nonce so we can set one that is too low.
sender.transfer(receiver, "1 gwei", type=0)
with pytest.raises(AccountsError, match="Invalid nonce, will not publish."):
sender.transfer(receiver, "1 gwei", type=0, nonce=0)
def test_send_transaction_without_enough_funds(sender, receiver, eth_tester_provider, convert):
expected = (
rf"Transfer value meets or exceeds account balance for account '{sender.address}' .*"
rf"on chain '{eth_tester_provider.chain_id}' using provider '{eth_tester_provider.name}'\."
rf"\nAre you using the correct account / chain \/ provider combination\?"
rf"\n\(transfer_value=\d+, balance=\d+\)\."
)
with pytest.raises(AccountsError, match=expected):
sender.transfer(receiver, "10000000000000 ETH")
def test_send_transaction_without_enough_funds_impersonated_account(
receiver, accounts, eth_tester_provider, convert
):
address = "0x4838B106FCe9647Bdf1E7877BF73cE8B0BAD5f97" # Not a test account!
impersonated_account = ImpersonatedAccount(raw_address=address)
accounts._impersonated_accounts[address] = impersonated_account
# Basically, it failed anywhere else besides the AccountsError you get from not
# enough balance.
with pytest.raises(SignatureError):
impersonated_account.transfer(receiver, "10000000000000 ETH")
def test_send_transaction_sets_defaults(sender, receiver):
receipt = sender.transfer(receiver, "1 GWEI", gas_limit=None, required_confirmations=None)
assert receipt.gas_limit > 0
assert receipt.required_confirmations == 0
def test_account_index_access(accounts):
    """Accounts are addressable by positive and negative integer index."""
    first = accounts[0]
    assert first.index == 0
    final = accounts[-1]
    assert final.index == len(accounts) - 1
def test_accounts_splice_access(accounts):
    """Accounts support tuple-unpacking, negative indexing, and step slicing."""
    alice, bob = accounts[:2]
    assert alice == accounts[0]
    assert bob == accounts[1]
    cat = accounts[-1]
    assert cat == accounts[len(accounts) - 1]
    # Every-other-element slice has ceil(len / 2) entries; write that as a
    # single expression instead of the even/odd conditional.
    expected = (len(accounts) + 1) // 2
    assert len(accounts[::2]) == expected
def test_accounts_address_access(owner, accounts):
assert accounts[owner.address] == owner
def test_accounts_address_access_conversion_fail(account_manager):
with pytest.raises(
KeyError,
match=(
r"No account with ID 'FAILS'\. "
r"Do you have the necessary conversion plugins installed?"
),
):
_ = account_manager["FAILS"]
def test_accounts_address_access_not_found(accounts):
address = "0x1222262222222922222222222222222222222222"
with pytest.raises(KeyError, match=rf"No account with address '{address}'\."):
_ = accounts[address]
def test_test_accounts_address_access_conversion_fail(accounts):
with pytest.raises(KeyError, match=r"No account with ID 'FAILS'"):
_ = accounts["FAILS"]
def test_test_accounts_address_access_not_found(accounts):
address = "0x1222262222222922222222222222222222222222"
with pytest.raises(KeyError, match=rf"No account with address '{address}'\."):
_ = accounts[address]
def test_accounts_contains(accounts, owner):
assert owner.address in accounts
def test_autosign_messages(runner, keyfile_account, message):
keyfile_account.set_autosign(True, passphrase=PASSPHRASE)
signature = keyfile_account.sign_message(message)
assert keyfile_account.check_signature(message, signature)
# Re-enable prompted signing
keyfile_account.set_autosign(False)
with runner.isolation(input=f"y\n{PASSPHRASE}\n"):
signature = keyfile_account.sign_message(message)
assert keyfile_account.check_signature(message, signature)
def test_autosign_transactions(runner, keyfile_account, receiver):
keyfile_account.set_autosign(True, passphrase=PASSPHRASE)
assert keyfile_account.transfer(receiver, "1 gwei")
# Re-enable prompted signing
keyfile_account.set_autosign(False)
with runner.isolation(input=f"y\n{PASSPHRASE}\n"):
assert keyfile_account.transfer(receiver, "1 gwei")
def test_impersonate_not_implemented(accounts, address):
expected_err_msg = (
r"Provider 'test' does not support impersonating accounts:\\n"
rf"No account with address '{address}'\."
)
with pytest.raises(KeyError, match=expected_err_msg):
_ = accounts[address]
def test_impersonated_account_ignores_signature_check_on_txn(accounts, address):
account = ImpersonatedAccount(raw_address=address)
# Impersonate hack, since no providers in core actually support it.
accounts._impersonated_accounts[address] = account
other_0 = accounts[8]
other_1 = accounts[9]
txn = other_0.transfer(other_1, "1 gwei").transaction
# Hack in fake sender.
txn.sender = address
actual = txn.serialize_transaction()
# Normally, you'd get a signature error here, but since the account is registered
# as impersonated, ape lets it slide because it knows it won't match.
assert isinstance(actual, bytes)
def test_contract_as_sender_non_fork_network(contract_instance):
expected_err_msg = (
r"Provider 'test' does not support impersonating accounts:\\n"
rf"No account with address '{contract_instance}'\."
)
with pytest.raises(KeyError, match=expected_err_msg):
contract_instance.setNumber(5, sender=contract_instance)
def test_unlock_with_passphrase_and_sign_message(runner, keyfile_account, message):
keyfile_account.unlock(passphrase=PASSPHRASE)
# y: yes, sign (note: unlocking makes the key available but is not the same as autosign).
with runner.isolation(input="y\n"):
signature = keyfile_account.sign_message(message)
assert keyfile_account.check_signature(message, signature)
def test_unlock_from_prompt_and_sign_message(runner, keyfile_account, message):
# a = password
with runner.isolation(input=f"{PASSPHRASE}\n"):
keyfile_account.unlock()
# yes, sign the message
with runner.isolation(input="y\n"):
signature = keyfile_account.sign_message(message)
assert keyfile_account.check_signature(message, signature)
def test_unlock_with_passphrase_and_sign_transaction(runner, keyfile_account, receiver):
keyfile_account.unlock(passphrase=PASSPHRASE)
# y: yes, sign (note: unlocking makes the key available but is not the same as autosign).
with runner.isolation(input="y\n"):
receipt = keyfile_account.transfer(receiver, "1 gwei")
assert receipt.receiver == receiver
def test_unlock_from_prompt_and_sign_transaction(runner, keyfile_account, receiver):
# a = password
with runner.isolation(input=f"{PASSPHRASE}\n"):
keyfile_account.unlock()
# yes, sign the transaction
with runner.isolation(input="y\n"):
receipt = keyfile_account.transfer(receiver, "1 gwei")
assert receipt.receiver == receiver
def test_unlock_with_passphrase_from_env_and_sign_message(runner, keyfile_account, message):
    """Unlock via the APE_ACCOUNTS_<alias>_PASSPHRASE env var, then sign a message."""
    env_variable = f"APE_ACCOUNTS_{keyfile_account.alias}_PASSPHRASE"
    # Set environment variable with passphrase.
    environ[env_variable] = PASSPHRASE
    try:
        # Unlock should pick the passphrase up from the environment.
        keyfile_account.unlock()
        # Account should be unlocked.
        assert not keyfile_account.locked
        # y: yes, sign (note: unlocking makes the key available but is not autosign).
        with runner.isolation(input="y\n"):
            signature = keyfile_account.sign_message(message)
        assert keyfile_account.check_signature(message, signature)
    finally:
        # Remove the variable so the passphrase does not leak into other tests.
        environ.pop(env_variable, None)
def test_unlock_with_wrong_passphrase_from_env(keyfile_account):
    """A wrong passphrase in the env var raises and leaves the account locked."""
    env_variable = f"APE_ACCOUNTS_{keyfile_account.alias}_PASSPHRASE"
    # Set environment variable with an incorrect passphrase.
    environ[env_variable] = INVALID_PASSPHRASE
    try:
        with pytest.raises(AccountsError, match="Invalid password"):
            # Unlock attempts to read the passphrase from the environment.
            keyfile_account.unlock()
        # The failed unlock must not have changed the locked state.
        assert keyfile_account.locked
    finally:
        # Remove the variable so the bad passphrase does not leak into other tests.
        environ.pop(env_variable, None)
def test_unlock_and_reload(runner, account_manager, keyfile_account, message):
"""
Tests against a condition where reloading after unlocking
would not honor unlocked state.
"""
keyfile_account.unlock(passphrase=PASSPHRASE)
reloaded_account = account_manager.load(keyfile_account.alias)
# y: yes, sign (note: unlocking makes the key available but is not the same as autosign).
with runner.isolation(input="y\n"):
signature = reloaded_account.sign_message(message)
assert keyfile_account.check_signature(message, signature)
def test_custom_num_of_test_accounts_config(accounts, project):
custom_number_of_test_accounts = 25
test_config = {
"test": {
"number_of_accounts": custom_number_of_test_accounts,
}
}
with project.temp_config(**test_config):
assert len(accounts) == custom_number_of_test_accounts
def test_test_accounts_repr(accounts, config):
actual = repr(accounts)
assert config.get_config("test").hd_path in actual
# Show actual test-account repr (address should be checksummed).
actual = repr(accounts[0])
assert accounts[0].address in actual
def test_account_comparison_to_non_account(core_account):
    # Before, would get a ConversionError.
    # Comparing an account to an arbitrary non-account value must evaluate to
    # False rather than raise.
    assert core_account != "foo"
def test_create_account(accounts):
length_at_start = len(accounts)
created_account = accounts.generate_test_account()
assert isinstance(created_account, TestAccount)
assert created_account.index == length_at_start
length_at_start = len(accounts)
second_created_account = accounts.generate_test_account()
assert len(accounts) == length_at_start + 1
assert created_account.address != second_created_account.address
assert second_created_account.index == created_account.index + 1
# Last index should now refer to the last-created account.
last_idx_acct = accounts[-1]
assert last_idx_acct.index == second_created_account.index
assert last_idx_acct.address == second_created_account.address
assert last_idx_acct.address != accounts[0].address
assert last_idx_acct.address != created_account.address
def test_dir(core_account):
actual = dir(core_account)
expected = [
"address",
"alias",
"balance",
"call",
"delegate",
"delegate_to",
"deploy",
"nonce",
"prepare_transaction",
"provider",
"remove_delegate",
"set_delegate",
"sign_authorization",
"sign_message",
"sign_transaction",
"transfer",
]
assert sorted(actual) == sorted(expected)
def test_is_not_contract(owner, keyfile_account):
assert not owner.is_contract
assert not keyfile_account.is_contract
def test_using_different_hd_path(accounts, project, eth_tester_provider):
    """Changing the HD derivation path changes the derived account addresses."""
    test_config = {
        "test": {
            "hd_path": "m/44'/60'/0/0",
        }
    }
    old_address = accounts[0].address
    original_settings = eth_tester_provider.settings.model_dump(by_alias=True)
    try:
        with project.temp_config(**test_config):
            eth_tester_provider.update_settings(test_config["test"])
            new_address = accounts[0].address
    finally:
        # Restore provider settings even if the address lookup fails so
        # later tests are not affected.
        eth_tester_provider.update_settings(original_settings)
    assert old_address != new_address
def test_mnemonic(accounts):
actual = accounts.mnemonic
expected = DEFAULT_TEST_MNEMONIC
assert actual == expected
def test_mnemonic_setter(accounts):
    """Assigning a new mnemonic re-derives the test-account addresses."""
    original_mnemonic = accounts.mnemonic
    new_mnemonic = "candy maple cake sugar pudding cream honey rich smooth crumble sweet treat"
    original_address = accounts[0].address
    try:
        # Change.
        accounts.mnemonic = new_mnemonic
        new_address = accounts[0].address
    finally:
        # Put back — always, so other tests see the default mnemonic even if
        # the address lookup above raises.
        accounts.mnemonic = original_mnemonic
    # Assert.
    assert new_address != original_address
def test_iter_test_accounts(accounts):
    """Iterating the container yields every test account."""
    accounts.reset()
    # Keep the container and the materialized list in separate names.
    # The original rebound `accounts` to the list and then compared the
    # list's length to itself, making the assertion a tautology.
    iterated = list(iter(accounts))
    assert len(iterated) == len(accounts)
def test_declare(contract_container, sender):
receipt = sender.declare(contract_container)
assert not receipt.failed
def test_prepare_transaction(ethereum, sender):
tx = ethereum.create_transaction()
prepared_tx = sender.prepare_transaction(tx)
assert prepared_tx.sender == sender.address
assert prepared_tx.signature is None
def test_prepare_transaction_sign(sender, ethereum):
tx = ethereum.create_transaction()
prepared_tx = sender.prepare_transaction(tx, sign=True)
assert prepared_tx.sender == sender.address
assert prepared_tx.signature is not None
@pytest.mark.parametrize("tx_type", (TransactionType.STATIC, TransactionType.DYNAMIC))
def test_prepare_transaction_using_auto_gas(sender, ethereum, tx_type):
params = (
("gas_price",) if tx_type is TransactionType.STATIC else ("max_fee", "max_priority_fee")
)
def clear_network_property_cached():
for field in ("gas_limit", "auto_gas_multiplier"):
if field in ethereum.local.__dict__:
del ethereum.local.__dict__[field]
auto_gas = AutoGasLimit(multiplier=1.0)
original_limit = ethereum.config.local.gas_limit
try:
clear_network_property_cached()
ethereum.config.local.gas_limit = auto_gas
assert ethereum.local.gas_limit == auto_gas, "Setup failed - auto gas not set."
# NOTE: Must create tx _after_ setting network gas value.
tx = ethereum.create_transaction(type=tx_type)
# Show tx doesn't have these by default.
assert tx.nonce is None
for param in params:
# Custom fields depending on type.
assert getattr(tx, param) is None, f"'{param}' unexpectedly set."
# Gas should NOT yet be estimated, as that happens closer to sending.
assert tx.gas_limit is None
# Sets fields.
tx = sender.prepare_transaction(tx)
# We expect these fields to have been set.
assert tx.nonce is not None
assert tx.gas_limit is not None
# Show multipliers work. First, reset network to use one (hack).
gas_smaller = tx.gas_limit
auto_gas.multiplier = 1.1
ethereum.config.local.gas_limit = auto_gas
clear_network_property_cached()
assert ethereum.local.gas_limit == auto_gas, "Setup failed - auto gas multiplier not set."
tx2 = ethereum.create_transaction(type=tx_type)
tx2 = sender.prepare_transaction(tx2)
gas_bigger = tx2.gas_limit
assert gas_smaller < gas_bigger
for param in params:
assert getattr(tx, param) is not None
finally:
ethereum.config.local.gas_limit = original_limit
clear_network_property_cached()
@pytest.mark.parametrize("tx_type", (TransactionType.STATIC, TransactionType.DYNAMIC))
def test_prepare_transaction_and_call_using_max_gas(tx_type, ethereum, sender, eth_tester_provider):
tx = ethereum.create_transaction(type=tx_type.value)
tx = sender.prepare_transaction(tx)
assert tx.gas_limit == eth_tester_provider.max_gas, "Test setup failed - gas limit unexpected."
actual = sender.call(tx)
assert not actual.failed
def test_authorizations_transaction(sender, vyper_contract_instance):
assert not sender.delegate
# NOTE: 0x23fd0e40 is method_id for `myNumber()`
# NOTE: Must call something, since `__default__` raises
with sender.delegate_to(vyper_contract_instance, data="0x23fd0e40") as delegate:
assert sender.delegate == vyper_contract_instance
assert delegate.myNumber() == 0
sender.remove_delegate()
assert not sender.delegate
def test_public_key(runner, keyfile_account, owner):
    """public_key derives back to the account address for both account kinds."""
    with runner.isolation(input=f"{PASSPHRASE}\ny\n"):
        pub_key = keyfile_account.public_key
    # Reuse the captured key rather than reading the property again, which
    # could trigger a second passphrase prompt outside the isolation block.
    assert isinstance(pub_key, HexBytes)
    # Prove it is the correct public key by deriving the address.
    derived_address = to_checksum_address(keccak(pub_key)[-20:])
    assert derived_address == keyfile_account.address
    # Also, show the test accounts have access to their public key.
    derived_address = to_checksum_address(keccak(owner.public_key)[-20:])
    assert derived_address == owner.address
def test_load_public_key_from_keyfile(runner, keyfile_account):
with runner.isolation(input=f"{PASSPHRASE}\ny\n"):
assert isinstance(keyfile_account.public_key, HexBytes)
assert (
to_hex(keyfile_account.public_key)
== "0x8318535b54105d4a7aae60c08fc45f9687181b4fdfc625bd1a753fa7397fed753547f11ca8696646f2f3acb08e31016afac23e630c5d11f59f61fef57b0d2aa5" # noqa: E501
)
# no need for password when loading from the keyfile
assert keyfile_account.public_key
def test_generate_account(delete_account_after):
alias = "gentester"
with delete_account_after(alias):
account, mnemonic = generate_account(alias, PASSPHRASE)
assert len(mnemonic.split(" ")) == 12
assert isinstance(account, KeyfileAccount)
assert account.alias == alias
assert account.locked is True
account.unlock(PASSPHRASE)
assert account.locked is False
def test_generate_account_invalid_alias(delete_account_after):
with pytest.raises(AccountsError, match="Longer aliases cannot be hex strings."):
generate_account(
"3fbc0ce3e71421b94f7ff4e753849c540dec9ade57bad60ebbc521adcbcbc024", "asdf1234"
)
with pytest.raises(AccountsError, match="Alias must be a str"):
# Testing an invalid type as arg, so ignoring
generate_account(b"imma-bytestr", "asdf1234") # type: ignore
used_alias = "used"
with delete_account_after(used_alias):
generate_account(used_alias, "qwerty1")
with pytest.raises(AliasAlreadyInUseError):
generate_account(used_alias, "asdf1234")
def test_generate_account_invalid_passphrase():
with pytest.raises(AccountsError, match="Account file encryption passphrase must be provided."):
generate_account("invalid-passphrase", "")
with pytest.raises(AccountsError, match="Account file encryption passphrase must be provided."):
generate_account("invalid-passphrase", b"bytestring") # type: ignore
def test_generate_account_insecure_passphrase(delete_account_after):
short_alias = "shortaccount"
with delete_account_after(short_alias):
with pytest.warns(UserWarning, match="short"):
generate_account(short_alias, "short")
simple_alias = "simpleaccount"
with delete_account_after(simple_alias):
with pytest.warns(UserWarning, match="simple"):
generate_account(simple_alias, "simple")
def test_import_account_from_mnemonic(delete_account_after):
alias = "iafmtester"
with delete_account_after(alias):
account = import_account_from_mnemonic(alias, PASSPHRASE, MNEMONIC)
assert isinstance(account, KeyfileAccount)
assert account.alias == alias
assert account.address == "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"
assert account.locked is True
account.unlock(PASSPHRASE)
assert account.locked is False
def test_import_account_from_mnemonic_invalid_alias(delete_account_after):
with pytest.raises(AccountsError, match="Longer aliases cannot be hex strings."):
import_account_from_mnemonic(
"3fbc0ce3e71421b94f7ff4e753849c540dec9ade57bad60ebbc521adcbcbc024", "asdf1234", MNEMONIC
)
with pytest.raises(AccountsError, match="Alias must be a str"):
# Testing an invalid type as arg, so ignoring
import_account_from_mnemonic(b"imma-bytestr", "asdf1234", MNEMONIC) # type: ignore
used_alias = "iamfused"
with delete_account_after(used_alias):
import_account_from_mnemonic(used_alias, "qwerty1", MNEMONIC)
with pytest.raises(AliasAlreadyInUseError):
import_account_from_mnemonic(used_alias, "asdf1234", MNEMONIC)
def test_import_account_from_mnemonic_invalid_passphrase():
with pytest.raises(AccountsError, match="Account file encryption passphrase must be provided."):
import_account_from_mnemonic("invalid-passphrase", "", MNEMONIC)
with pytest.raises(AccountsError, match="Account file encryption passphrase must be provided."):
import_account_from_mnemonic("invalid-passphrase", b"bytestring", MNEMONIC) # type: ignore
def test_import_account_from_mnemonic_insecure_passphrase(delete_account_after):
short_alias = "iafmshortaccount"
with delete_account_after(short_alias):
with pytest.warns(UserWarning, match="short"):
import_account_from_mnemonic(short_alias, "short", MNEMONIC)
simple_alias = "iafmsimpleaccount"
with delete_account_after(simple_alias):
with pytest.warns(UserWarning, match="simple"):
import_account_from_mnemonic(simple_alias, "simple", MNEMONIC)
def test_import_account_from_private_key(delete_account_after):
alias = "iafpktester"
with delete_account_after(alias):
account = import_account_from_private_key(alias, PASSPHRASE, PRIVATE_KEY)
assert isinstance(account, KeyfileAccount)
assert account.alias == alias
assert account.address == "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"
assert account.locked is True
account.unlock(PASSPHRASE)
assert account.locked is False
def test_import_account_from_private_key_invalid_alias(delete_account_after):
with pytest.raises(AccountsError, match="Longer aliases cannot be hex strings."):
import_account_from_private_key(
"3fbc0ce3e71421b94f7ff4e753849c540dec9ade57bad60ebbc521adcbcbc024",
"asdf1234",
PRIVATE_KEY,
)
with pytest.raises(AccountsError, match="Alias must be a str"):
# Testing an invalid type as arg, so ignoring
import_account_from_private_key(b"imma-bytestr", "asdf1234", PRIVATE_KEY) # type: ignore
used_alias = "iafpkused"
with delete_account_after(used_alias):
import_account_from_private_key(used_alias, "qwerty1", PRIVATE_KEY)
with pytest.raises(AliasAlreadyInUseError):
import_account_from_private_key(used_alias, "asdf1234", PRIVATE_KEY)
def test_import_account_from_private_key_invalid_passphrase():
with pytest.raises(AccountsError, match="Account file encryption passphrase must be provided."):
import_account_from_private_key("invalid-passphrase", "", PRIVATE_KEY)
with pytest.raises(AccountsError, match="Account file encryption passphrase must be provided."):
import_account_from_private_key(
"invalid-passphrase",
b"bytestring", # type: ignore
PRIVATE_KEY,
)
def test_import_account_from_private_key_insecure_passphrase(delete_account_after):
short_alias = "iafpkshortaccount"
with delete_account_after(short_alias):
with pytest.warns(UserWarning, match="short"):
import_account_from_private_key(short_alias, "short", PRIVATE_KEY)
simple_alias = "iafpksimpleaccount"
with delete_account_after(simple_alias):
with pytest.warns(UserWarning, match="simple"):
import_account_from_private_key(simple_alias, "simple", PRIVATE_KEY)
def test_load(account_manager, keyfile_account):
account = account_manager.load(keyfile_account.alias)
assert account == keyfile_account
def test_get_deployment_address(owner, project):
deployment_address_1 = owner.get_deployment_address()
deployment_address_2 = owner.get_deployment_address(nonce=owner.nonce + 1)
instance_1 = owner.deploy(project.VyperContract, 490)
assert instance_1.address == deployment_address_1
instance_2 = owner.deploy(project.VyperContract, 490)
assert instance_2.address == deployment_address_2
def test_repr(account_manager):
"""
NOTE: __repr__ should be simple and fast!
Previously, we showed the repr of all the accounts.
That was a bad idea, as that can be very unnecessarily slow.
Hence, this test exists to ensure care is taken.
"""
actual = repr(account_manager)
assert actual == "<AccountManager>"
def test_call(owner, vyper_contract_instance):
    """An account can sign and send a prepared contract transaction via call()."""
    tx = vyper_contract_instance.setNumber.as_transaction(5991)
    receipt = owner.call(tx)
    assert not receipt.failed
def test_call_sign_false(owner, vyper_contract_instance):
    """call(sign=False) must refuse to send an unsigned transaction."""
    tx = vyper_contract_instance.setNumber.as_transaction(5991)
    with pytest.raises(SignatureError):
        owner.call(tx, sign=False)
def test_resolve_address(owner, keyfile_account, account_manager, vyper_contract_instance):
    """resolve_address accepts aliases, addresses, accounts, contracts, ints, and bytes."""
    # Test test-account alias input.
    actual = account_manager.resolve_address(owner.alias)
    assert actual == owner.address
    # Test keyfile-account alias input.
    actual = account_manager.resolve_address(keyfile_account.alias)
    assert actual == keyfile_account.address
    # Test address input.
    actual = account_manager.resolve_address(owner.address)
    assert actual == owner.address
    # Test account input.
    actual = account_manager.resolve_address(owner)
    assert actual == owner.address
    # Test contract input.
    actual = account_manager.resolve_address(vyper_contract_instance)
    assert actual == vyper_contract_instance.address
    # Test int input.
    actual = account_manager.resolve_address(int(owner.address, 16))
    assert actual == owner.address
    # Test bytes input (comment previously mislabeled this as "int input").
    actual = account_manager.resolve_address(HexBytes(owner.address))
    assert actual == owner.address
def test_use_ape_signer(accounts, project):
"""
Showing you can use the inner Ape account (base class) directly as an ape account in txns.
"""
signer = ApeSigner(private_key=accounts[5].private_key)
assert isinstance(signer, ApeSigner)
contract = project.VyperContract.deploy(1012, sender=signer)
assert contract.is_contract
| Foo |
python | python-markdown__markdown | markdown/preprocessors.py | {
"start": 2266,
"end": 2737
} | class ____(Preprocessor):
""" Normalize whitespace for consistent parsing. """
def run(self, lines: list[str]) -> list[str]:
source = '\n'.join(lines)
source = source.replace(util.STX, "").replace(util.ETX, "")
source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
source = source.expandtabs(self.md.tab_length)
source = re.sub(r'(?<=\n) +\n', '\n', source)
return source.split('\n')
| NormalizeWhitespace |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/identity_bijector_test.py | {
"start": 941,
"end": 1867
} | class ____(test.TestCase):
"""Tests correctness of the Y = g(X) = X transformation."""
def testBijector(self):
bijector = identity_bijector.Identity(validate_args=True)
self.assertEqual("identity", bijector.name)
x = [[[0.], [1.]]]
self.assertAllEqual(x, self.evaluate(bijector.forward(x)))
self.assertAllEqual(x, self.evaluate(bijector.inverse(x)))
self.assertAllEqual(
0.,
self.evaluate(
bijector.inverse_log_det_jacobian(x, event_ndims=3)))
self.assertAllEqual(
0.,
self.evaluate(
bijector.forward_log_det_jacobian(x, event_ndims=3)))
@test_util.run_deprecated_v1
def testScalarCongruency(self):
with self.cached_session():
bijector = identity_bijector.Identity()
bijector_test_util.assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
if __name__ == "__main__":
test.main()
| IdentityBijectorTest |
python | bokeh__bokeh | src/bokeh/models/filters.py | {
"start": 5144,
"end": 5633
} | class ____(Filter):
''' A ``BooleanFilter`` filters data by returning the subset of data corresponding to indices
where the values of the booleans array is True.
'''
booleans = Nullable(Seq(Bool), help="""
A list of booleans indicating which rows of data to select.
""")
def __init__(self, *args, **kwargs) -> None:
if len(args) == 1 and "booleans" not in kwargs:
kwargs["booleans"] = args[0]
super().__init__(**kwargs)
| BooleanFilter |
python | davidhalter__jedi | test/completion/decorators.py | {
"start": 3032,
"end": 3278
} | class ____():
def __init__(self, func):
self.func = func
@DecoratorWithoutCall
def f():
return 1
# cannot be resolved - should be ignored
@DecoratorWithoutCall(None)
def g():
return 1
#?
f()
#? int()
g()
| DecoratorWithoutCall |
python | GoogleCloudPlatform__python-docs-samples | dataflow/flex-templates/pipeline_with_dependencies/src/my_package/my_transforms.py | {
"start": 925,
"end": 1241
} | class ____(beam.PTransform):
"""Extracts words from text and finds the longest one."""
def expand(self, pcoll):
return (
pcoll
| "Extract words" >> beam.ParDo(WordExtractingDoFn())
| "Find longest" >> beam.combiners.Top.Largest(n=1, key=len)
)
| FindLongestWord |
python | TheAlgorithms__Python | graphs/markov_chain.py | {
"start": 96,
"end": 2085
} | class ____:
"""
Undirected Unweighted Graph for running Markov Chain Algorithm
"""
def __init__(self):
self.connections = {}
def add_node(self, node: str) -> None:
self.connections[node] = {}
def add_transition_probability(
self, node1: str, node2: str, probability: float
) -> None:
if node1 not in self.connections:
self.add_node(node1)
if node2 not in self.connections:
self.add_node(node2)
self.connections[node1][node2] = probability
def get_nodes(self) -> list[str]:
return list(self.connections)
def transition(self, node: str) -> str:
current_probability = 0
random_value = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def get_transitions(
start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
"""
Running Markov Chain algorithm and calculating the number of times each node is
visited
>>> transitions = [
... ('a', 'a', 0.9),
... ('a', 'b', 0.075),
... ('a', 'c', 0.025),
... ('b', 'a', 0.15),
... ('b', 'b', 0.8),
... ('b', 'c', 0.05),
... ('c', 'a', 0.25),
... ('c', 'b', 0.25),
... ('c', 'c', 0.5)
... ]
>>> result = get_transitions('a', transitions, 5000)
>>> result['a'] > result['b'] > result['c']
True
"""
graph = MarkovChainGraphUndirectedUnweighted()
for node1, node2, probability in transitions:
graph.add_transition_probability(node1, node2, probability)
visited = Counter(graph.get_nodes())
node = start
for _ in range(steps):
node = graph.transition(node)
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| MarkovChainGraphUndirectedUnweighted |
python | pandas-dev__pandas | asv_bench/benchmarks/ctors.py | {
"start": 2477,
"end": 2730
} | class ____:
def setup(self):
N = 10**4
self.iterables = [Index([f"i-{i}" for i in range(N)], dtype=object), range(20)]
def time_multiindex_from_iterables(self):
MultiIndex.from_product(self.iterables)
| MultiIndexConstructor |
python | keras-team__keras | keras/src/optimizers/loss_scale_optimizer_test.py | {
"start": 261,
"end": 11182
} | class ____(testing.TestCase):
def _skip_test_for_stateless(self, stateless):
if not stateless and backend.backend() == "jax":
self.skipTest(
"LossScaleOptimizer must use stateless_apply with JAX."
)
if stateless and backend.backend() == "tensorflow":
self.skipTest(
"stateless_apply is not supported with the TF backend."
)
def test_config(self):
inner_optimizer = SGD(
learning_rate=0.5,
momentum=0.06,
nesterov=True,
weight_decay=0.004,
)
optimizer = LossScaleOptimizer(inner_optimizer)
self.run_class_serialization_test(optimizer)
def test_apply_with_no_vars(self):
self._skip_test_for_stateless(False)
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(inner_optimizer)
grads = [ops.array([1.0, 6.0, 7.0, 2.0]) * optimizer.initial_scale]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
optimizer.build(vars)
optimizer.apply(grads)
self.assertAllClose(
vars, [[0.5, -1.0, -0.5, 3.0]], rtol=1e-4, atol=1e-4
)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_finite_step(self, stateless):
self._skip_test_for_stateless(stateless)
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(inner_optimizer)
grads = [ops.array([1.0, 6.0, 7.0, 2.0]) * optimizer.initial_scale]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
if stateless:
optimizer.build(vars)
vars, _ = optimizer.stateless_apply(
[v.value for v in optimizer.variables],
grads,
[v.value for v in vars],
)
else:
optimizer.apply(grads, vars)
self.assertAllClose(
vars, [[0.5, -1.0, -0.5, 3.0]], rtol=1e-4, atol=1e-4
)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_finite_step_with_inner_loss_scale(self, stateless):
self._skip_test_for_stateless(stateless)
# Ensure that the inner loss scale does not interfere with the update.
inner_optimizer = SGD(learning_rate=0.5, loss_scale_factor=100)
optimizer = LossScaleOptimizer(inner_optimizer)
grads = [ops.array([1.0, 6.0, 7.0, 2.0]) * optimizer.initial_scale]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
if stateless:
optimizer.build(vars)
vars, _ = optimizer.stateless_apply(
[v.value for v in optimizer.variables],
grads,
[v.value for v in vars],
)
else:
optimizer.apply(grads, vars)
self.assertAllClose(
vars, [[0.5, -1.0, -0.5, 3.0]], rtol=1e-4, atol=1e-4
)
    @parameterized.named_parameters(("stateless", True), ("stateful", False))
    def test_infinite_step(self, stateless):
        # Non-finite grads must be dropped: the variables stay unchanged.
        self._skip_test_for_stateless(stateless)
        inner_optimizer = SGD(learning_rate=0.5)
        optimizer = LossScaleOptimizer(inner_optimizer)
        grads = [ops.array([np.inf, np.inf, np.inf, np.inf])]
        vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
        if stateless:
            optimizer.build(vars)
            vars, _ = optimizer.stateless_apply(
                [v.value for v in optimizer.variables],
                grads,
                [v.value for v in vars],
            )
        else:
            optimizer.apply(grads, vars)
        self.assertAllClose(vars, [[1.0, 2.0, 3.0, 4.0]], rtol=1e-4, atol=1e-4)
    @parameterized.named_parameters(("stateless", True), ("stateful", False))
    def test_finite_step_with_overwrite(self, stateless):
        # With overwrite_with_gradient=True the variable is replaced by the
        # gradient verbatim; note the grads are NOT pre-scaled here.
        self._skip_test_for_stateless(stateless)
        inner_optimizer = SGD(learning_rate=0.5)
        optimizer = LossScaleOptimizer(inner_optimizer)
        grads = [ops.array([1.0, 6.0, 7.0, 2.0])]
        vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
        vars[0].overwrite_with_gradient = True
        if stateless:
            optimizer.build(vars)
            vars, _ = optimizer.stateless_apply(
                [v.value for v in optimizer.variables],
                grads,
                [v.value for v in vars],
            )
        else:
            optimizer.apply(grads, vars)
        self.assertAllClose(vars, grads)
    @parameterized.named_parameters(("stateless", True), ("stateful", False))
    def test_downscaling(self, stateless):
        self._skip_test_for_stateless(stateless)
        inner_optimizer = SGD(learning_rate=0.5)
        optimizer = LossScaleOptimizer(inner_optimizer, initial_scale=400.0)
        vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
        optimizer.build(vars)
        opt_var_values = [v.value for v in optimizer.variables]
        grads = [ops.array([np.inf, np.inf, np.inf, np.inf])]
        for _ in range(4):
            if stateless:
                _, opt_var_values = optimizer.stateless_apply(
                    opt_var_values, grads, [v.value for v in vars]
                )
                # Mirror the returned optimizer state back into the
                # variables so the next iteration sees the updated scale.
                for ref_v, v in zip(optimizer.variables, opt_var_values):
                    ref_v.assign(v)
            else:
                optimizer.apply(grads, vars)
        # Each non-finite step halves the scale: 400 / 2**4 == 25.
        self.assertAllClose(optimizer.scale_loss(1.0), 25.0)
    @parameterized.named_parameters(("stateless", True), ("stateful", False))
    def test_upscaling(self, stateless):
        self._skip_test_for_stateless(stateless)
        inner_optimizer = SGD(learning_rate=0.5)
        optimizer = LossScaleOptimizer(
            inner_optimizer,
            initial_scale=2.0,
            dynamic_growth_steps=2,
        )
        vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
        optimizer.build(vars)
        opt_var_values = [v.value for v in optimizer.variables]
        grads = [ops.array([1.0, 6.0, 7.0, 2.0])]
        for _ in range(8):
            if stateless:
                _, opt_var_values = optimizer.stateless_apply(
                    opt_var_values, grads, [v.value for v in vars]
                )
                for ref_v, v in zip(optimizer.variables, opt_var_values):
                    ref_v.assign(v)
            else:
                optimizer.apply(grads, vars)
        # 8 finite steps with dynamic_growth_steps=2 double the scale
        # every 2 steps: 2 * 2**4 == 32.
        self.assertAllClose(optimizer.scale_loss(1.0), 32.0)
    @parameterized.named_parameters(("stateless", True), ("stateful", False))
    def test_iterations_update(self, stateless):
        # `iterations` must advance by exactly one per apply, in both the
        # stateless and stateful paths.
        self._skip_test_for_stateless(stateless)
        inner_optimizer = SGD(learning_rate=0.5)
        optimizer = LossScaleOptimizer(inner_optimizer)
        vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
        optimizer.build(vars)
        opt_var_values = [v.value for v in optimizer.variables]
        grads = [ops.array([1.0, 6.0, 7.0, 2.0])]
        self.assertEqual(optimizer.iterations.value, 0)
        for i in range(3):
            if stateless:
                _, opt_var_values = optimizer.stateless_apply(
                    opt_var_values, grads, [v.value for v in vars]
                )
                for ref_v, v in zip(optimizer.variables, opt_var_values):
                    ref_v.assign(v)
            else:
                optimizer.apply(grads, vars)
            self.assertEqual(optimizer.iterations.value, i + 1)
    def test_serialization(self):
        # The config must contain exactly the four LossScaleOptimizer keys
        # (name, initial_scale, dynamic_growth_steps, inner_optimizer) and
        # deserialize without error.
        inner_optimizer = SGD(learning_rate=0.5)
        optimizer = LossScaleOptimizer(
            inner_optimizer,
            initial_scale=3.0,
            dynamic_growth_steps=2,
            name="test_opt",
        )
        config = optimizer.get_config()
        self.assertLen(config, 4)
        self.assertEqual(config["name"], "test_opt")
        self.assertEqual(config["initial_scale"], 3.0)
        self.assertEqual(config["dynamic_growth_steps"], 2)
        self.assertIn("inner_optimizer", config)
        LossScaleOptimizer.from_config(config)
    def test_init_dynamic_arg(self):
        inner_optimizer = SGD(learning_rate=0.5)

        # dynamic=True is supported
        LossScaleOptimizer(inner_optimizer, dynamic=True)

        # dynamic=False is not supported
        with self.assertRaisesRegex(ValueError, "set `loss_scale_factor`"):
            LossScaleOptimizer(inner_optimizer, dynamic=False)
    def test_init_unsupported_arg(self):
        # Unknown kwargs must be rejected with all offending names listed.
        inner_optimizer = SGD(learning_rate=0.5)
        with self.assertRaisesRegex(ValueError, "arguments: `foo`, `bar`"):
            LossScaleOptimizer(inner_optimizer, foo=True, bar=3)
    @parameterized.named_parameters(
        ("weight_decay", "weight_decay", 0.5),
        ("clipnorm", "clipnorm", 0.5),
        ("global_clipnorm", "global_clipnorm", 0.5),
        ("clipvalue", "clipvalue", 0.5),
        ("use_ema", "use_ema", True),
        ("ema_momentum", "ema_momentum", 0.5),
        ("ema_overwrite_frequency", "ema_overwrite_frequency", 2),
        ("loss_scale_factor", "loss_scale_factor", 0.5),
        ("gradient_accumulation_steps", "gradient_accumulation_steps", 2),
    )
    def test_init_base_optimizer_unsupported_args(self, arg_name, arg_value):
        # Base-optimizer tuning args must be passed to the wrapped optimizer,
        # not to the LossScaleOptimizer wrapper itself.
        inner_optimizer = SGD(learning_rate=0.5)
        with self.assertRaisesRegex(ValueError, "on the `inner_optimizer`"):
            LossScaleOptimizer(inner_optimizer, **{arg_name: arg_value})
    def test_deserialization_backwards_compatibility(self):
        # Test deserializing with a config that has all the unsupported
        # arguments from the base optimizer (which are no longer serialized).
        # Older Keras versions wrote these keys; from_config must still
        # accept them without raising.
        config = {
            "name": "loss_scale_optimizer",
            "weight_decay": None,
            "clipnorm": None,
            "global_clipnorm": None,
            "clipvalue": None,
            "use_ema": False,
            "ema_momentum": 0.99,
            "ema_overwrite_frequency": None,
            "loss_scale_factor": None,
            "gradient_accumulation_steps": None,
            "inner_optimizer": {
                "module": "keras.optimizers",
                "class_name": "SGD",
                "config": {
                    "name": "SGD",
                    "learning_rate": 0.5,
                    "weight_decay": None,
                    "clipnorm": None,
                    "global_clipnorm": None,
                    "clipvalue": None,
                    "use_ema": False,
                    "ema_momentum": 0.99,
                    "ema_overwrite_frequency": None,
                    "loss_scale_factor": None,
                    "gradient_accumulation_steps": None,
                    "momentum": 0.0,
                    "nesterov": False,
                },
                "registered_name": None,
            },
            "initial_scale": 2.0,
            "dynamic_growth_steps": 2,
        }
        LossScaleOptimizer.from_config(config)
| LossScaleOptimizerTest |
python | networkx__networkx | networkx/classes/coreviews.py | {
"start": 12489,
"end": 13251
class ____(FilterAdjacency):  # multiedgedict
    """A read-only Mapping of Mappings with filtering criteria
    for nodes and edges.

    It is a view into a dict-of-dict-of-dict-of-dict data structure,
    and it selects nodes and edges that satisfy specific criteria
    defined by ``NODE_OK`` and ``EDGE_OK``, respectively.

    See Also
    ========
    FilterAtlas
    FilterAdjacency
    FilterMultiInner
    """

    def __getitem__(self, node):
        if node in self._atlas and self.NODE_OK(node):
            # Close over `node` so the inner view can test each
            # (neighbor, key) pair against the multigraph EDGE_OK.
            def edge_ok(nbr, key):
                return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr, key)

            return FilterMultiInner(self._atlas[node], self.NODE_OK, edge_ok)
        raise KeyError(f"Key {node} not found")
| FilterMultiAdjacency |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/static_analysis/reaching_fndefs.py | {
"start": 2161,
"end": 3036
class ____(cfg.GraphVisitor):
    """CFG visitor that determines reaching definitions at statement level."""

    def __init__(self, graph, external_defs):
        super(Analyzer, self).__init__(graph)
        # This allows communicating that nodes have extra reaching definitions,
        # e.g. those that a function closes over.
        self.external_defs = external_defs

    def init_state(self, _):
        # Every node starts with an empty set of reaching function defs.
        return _NodeState()

    def visit_node(self, node):
        prev_defs_out = self.out[node]
        if node is self.graph.entry:
            # Seed the entry node with the externally visible definitions.
            defs_in = _NodeState(self.external_defs)
        else:
            # Forward analysis: in-set is the union of predecessors' out-sets.
            defs_in = prev_defs_out
            for n in node.prev:
                defs_in |= self.out[n]
        defs_out = defs_in
        if isinstance(node.ast_node, (gast.Lambda, gast.FunctionDef)):
            # A function/lambda definition adds itself to the reaching set.
            defs_out += node.ast_node
        self.in_[node] = defs_in
        self.out[node] = defs_out
        # True means the out-set changed; the fixed-point loop revisits.
        return prev_defs_out != defs_out
| Analyzer |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py | {
"start": 413,
"end": 598
} | class ____(Generic[S]):
var: S
# This case gets a diagnostic but not a fix because we can't look up the bounds
# or constraints on the TypeVar imported from another module
| Constrained |
python | kamyu104__LeetCode-Solutions | Python/range-addition-ii.py | {
"start": 53,
"end": 336
} | class ____(object):
def maxCount(self, m, n, ops):
"""
:type m: int
:type n: int
:type ops: List[List[int]]
:rtype: int
"""
for op in ops:
m = min(m, op[0])
n = min(n, op[1])
return m*n
| Solution |
python | realpython__materials | contact-book-python-textual/source_code/rpcontacts/tui.py | {
"start": 3906,
"end": 4906
class ____(Screen):
    """Modal screen that collects name, phone, and email for a contact."""

    def compose(self):
        # Grid layout: title row, three label/input rows, then the
        # Cancel/Ok button row (Static() fills the empty grid cell).
        yield Grid(
            Label("Add Contact", id="title"),
            Label("Name:", classes="label"),
            Input(placeholder="Contact Name", classes="input", id="name"),
            Label("Phone:", classes="label"),
            Input(placeholder="Contact Phone", classes="input", id="phone"),
            Label("Email:", classes="label"),
            Input(placeholder="Contact Email", classes="input", id="email"),
            Static(),
            Button("Cancel", variant="warning", id="cancel"),
            Button("Ok", variant="success", id="ok"),
            id="input-dialog",
        )

    def on_button_pressed(self, event):
        # Ok dismisses with the (name, phone, email) tuple; any other
        # button (Cancel) dismisses with an empty tuple.
        if event.button.id == "ok":
            name = self.query_one("#name", Input).value
            phone = self.query_one("#phone", Input).value
            email = self.query_one("#email", Input).value
            self.dismiss((name, phone, email))
        else:
            self.dismiss(())
| InputDialog |
python | rq__rq | tests/test_spawn_worker.py | {
"start": 422,
"end": 2257
class ____(RQTestCase):
    def test_work_and_quit(self):
        """SpawnWorker processes work, then quits."""
        queue = Queue('foo', connection=self.connection)
        worker = SpawnWorker([queue])
        self.assertEqual(worker.work(burst=True), False, 'Did not expect any work on the queue.')

        job = queue.enqueue(say_hello, name='Frank')
        worker.work(burst=True)
        # After the burst, the job must be in the finished registry and
        # the started registry must be empty.
        registry = FinishedJobRegistry(queue=queue)
        self.assertEqual(registry.get_job_ids(), [job.id])
        registry = queue.started_job_registry
        self.assertEqual(registry.get_job_ids(), [])

    def test_work_fails(self):
        """Failing jobs are put on the failed queue."""
        q = Queue(connection=self.connection)
        self.assertEqual(q.count, 0)

        # Action
        job = q.enqueue(div_by_zero)
        self.assertEqual(q.count, 1)

        # keep for later
        enqueued_at_date = job.enqueued_at

        w = SpawnWorker([q])
        w.work(burst=True)

        # Postconditions
        self.assertEqual(q.count, 0)
        failed_job_registry = FailedJobRegistry(queue=q)
        self.assertIn(job, failed_job_registry)
        self.assertEqual(w.get_current_job_id(), None)

        # Check the job
        job = Job.fetch(job.id, connection=self.connection)
        self.assertEqual(job.origin, q.name)

        # Should be the original enqueued_at date, not the date of enqueueing
        # to the failed queue
        self.assertEqual(job.enqueued_at.replace(tzinfo=timezone.utc).timestamp(), enqueued_at_date.timestamp())
        result = Result.fetch_latest(job)
        self.assertTrue(result.exc_string)
        self.assertEqual(result.type, Result.Type.FAILED)
def wait_and_kill_work_horse(pid, time_to_wait=0.0):
    # Optionally wait, then hard-kill the work-horse process. SIGKILL is
    # not catchable, so this simulates an abrupt crash.
    time.sleep(time_to_wait)
    os.kill(pid, signal.SIGKILL)
| TestWorker |
python | gevent__gevent | src/greentest/3.10/test_httplib.py | {
"start": 48349,
"end": 51821
} | class ____(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it returns not too much stuff
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def _verify_readline(self, readline, expected):
all = []
while True:
# short readlines
line = readline(5)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
| ExtendedReadTest |
python | kamyu104__LeetCode-Solutions | Python/count-operations-to-obtain-zero.py | {
"start": 62,
"end": 356
} | class ____(object):
def countOperations(self, num1, num2):
"""
:type num1: int
:type num2: int
:rtype: int
"""
result = 0
while num2:
result += num1//num2
num1, num2 = num2, num1%num2
return result
| Solution |
python | getsentry__sentry | src/sentry/monitors/constants.py | {
"start": 606,
"end": 1275
class ____(Enum):
    """Disposition applied to an incoming monitor check-in message."""

    ACCEPT = 0
    """
    Check-in should be fully accepted and shall be passed through
    the entire Monitor Check-In processing logic.
    """

    DROP = 1
    """
    Check-in should not be processed. All logic should be skipped
    and the consumer should halt work on this check-in immediately.
    """

    ACCEPTED_FOR_UPSERT = 2
    """
    The check-in should be accepted to allow a monitor to be auto-created
    if possible. However the check-in should be discarded once the monitor
    has been upserted.

    This status is used when an unknown monitor slug is seen and has yet to
    have been assigned a seat.
    """
python | pytorch__pytorch | test/dynamo/test_fx_graph_runnable.py | {
"start": 2158,
"end": 2387
class ____(logging.Filter):
    """Keep only log records that carry the ``fx_graph_runnable`` artifact."""

    def filter(self, record):
        # Records with no artifact metadata (or a different artifact name)
        # are filtered out.
        return (
            "artifact" in record.metadata
            and record.metadata["artifact"]["name"] == "fx_graph_runnable"
        )
| FxGraphRunnableArtifactFilter |
python | doocs__leetcode | solution/2000-2099/2090.K Radius Subarray Averages/Solution.py | {
"start": 0,
"end": 325
} | class ____:
def getAverages(self, nums: List[int], k: int) -> List[int]:
n = len(nums)
ans = [-1] * n
s = 0
for i, x in enumerate(nums):
s += x
if i >= k * 2:
ans[i - k] = s // (k * 2 + 1)
s -= nums[i - k * 2]
return ans
| Solution |
python | django-debug-toolbar__django-debug-toolbar | tests/test_integration.py | {
"start": 1749,
"end": 1918
class ____(Panel):
    """Panel whose ``content`` always raises — exercises the toolbar's
    error handling in the integration tests."""

    def title(self):
        return "BuggyPanel"

    @property
    def content(self):
        # Deliberately broken: any access to .content raises.
        raise Exception
@override_settings(DEBUG=True)
| BuggyPanel |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-databricks/llama_index/llms/databricks/base.py | {
"start": 104,
"end": 1736
class ____(OpenAILike):
    """
    Databricks LLM.

    Examples:
        `pip install llama-index-llms-databricks`

        ```python
        from llama_index.llms.databricks import Databricks

        # Set up the Databricks class with the required model, API key and serving endpoint
        llm = Databricks(model="databricks-dbrx-instruct", api_key="your_api_key", api_base="https://[your-work-space].cloud.databricks.com/serving-endpoints")

        # Call the complete method with a query
        response = llm.complete("Explain the importance of open source LLMs")

        print(response)
        ```
    """

    def __init__(
        self,
        model: str,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
        is_chat_model: bool = True,
        **kwargs: Any,
    ) -> None:
        # Fall back to the standard Databricks environment variables when
        # credentials/endpoint are not passed explicitly.
        api_key = api_key or os.environ.get("DATABRICKS_TOKEN", None)
        api_base = api_base or os.environ.get("DATABRICKS_SERVING_ENDPOINT", None)
        super().__init__(
            model=model,
            api_key=api_key,
            api_base=api_base,
            is_chat_model=is_chat_model,
            **kwargs,
        )

    @classmethod
    def class_name(cls) -> str:
        """Get class name."""
        return "Databricks"

    def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
        """Get the kwargs that need to be provided to the model invocation."""
        # Fix the input to work with the Databricks API:
        # `tool_choice` without `tools` is rejected, so drop it.
        if "tool_choice" in kwargs and "tools" not in kwargs:
            del kwargs["tool_choice"]
        return super()._get_model_kwargs(**kwargs)
| Databricks |
python | tensorflow__tensorflow | tensorflow/python/data/ops/options.py | {
"start": 6891,
"end": 10707
} | class ____(options_lib.OptionsBase):
"""Represents options for autotuning dataset performance.
```python
options = tf.data.Options()
options.autotune.enabled = False
dataset = dataset.with_options(options)
```
"""
enabled = options_lib.create_option(
name="enabled",
ty=bool,
docstring="Whether to automatically tune performance knobs. If None, "
"defaults to True.")
cpu_budget = options_lib.create_option(
name="cpu_budget",
ty=int,
docstring="When autotuning is enabled (through `autotune`), determines "
"the CPU budget to use. Values greater than the number of schedulable "
"CPU cores are allowed but may result in CPU contention. If None, "
"defaults to the number of schedulable CPU cores.")
ram_budget = options_lib.create_option(
name="ram_budget",
ty=int,
docstring="When autotuning is enabled (through `autotune`), determines "
"the RAM budget to use. Values greater than the available RAM in bytes "
"may result in OOM. If None, defaults to half of the available RAM in "
"bytes.")
autotune_algorithm = options_lib.create_option(
name="autotune_algorithm",
ty=AutotuneAlgorithm,
docstring="When autotuning is enabled (through `autotune`), determines "
"the algorithm to use.")
initial_parallelism = options_lib.create_option(
name="initial_parallelism",
ty=int,
docstring=(
"The initial parallelism to use for parallel transformations before"
" autotune has a chance to run. A higher value can help with quick"
" startup, but may cause the ram_budget to temporarily be exceeded."
" Memory-sensitive datasets should consider setting this to `1` to"
" avoid running out of memory. Defaults to 16."
),
)
min_parallelism = options_lib.create_option(
name="min_parallelism",
ty=int,
docstring=(
"When true, `.map(num_parallel_calls=AUTOTUNE)` and"
" `.batch(num_parallel_calls=AUTOTUNE)` will be at least"
" parallelized by `min_parallelism` threads."
),
)
def _to_proto(self):
pb = dataset_options_pb2.AutotuneOptions()
if self.enabled is not None:
pb.enabled = self.enabled
if self.cpu_budget is not None:
pb.cpu_budget = self.cpu_budget
if self.ram_budget is not None:
pb.ram_budget = self.ram_budget
if self.autotune_algorithm is not None:
pb.autotune_algorithm = AutotuneAlgorithm._to_proto( # pylint: disable=protected-access
self.autotune_algorithm)
if self.initial_parallelism is not None:
pb.initial_parallelism = self.initial_parallelism
if self.min_parallelism is not None:
pb.min_parallelism = self.min_parallelism
return pb
def _from_proto(self, pb):
if pb.WhichOneof("optional_enabled") is not None:
self.enabled = pb.enabled
if pb.WhichOneof("optional_cpu_budget") is not None:
self.cpu_budget = pb.cpu_budget
if pb.WhichOneof("optional_ram_budget") is not None:
self.ram_budget = pb.ram_budget
if pb.WhichOneof("optional_autotune_algorithm") is not None:
self.autotune_algorithm = AutotuneAlgorithm._from_proto( # pylint: disable=protected-access
pb.autotune_algorithm)
if pb.WhichOneof("optional_initial_parallelism") is not None:
self.initial_parallelism = pb.initial_parallelism
if pb.WhichOneof("optional_min_parallelism") is not None:
self.min_parallelism = pb.min_parallelism
def _set_mutable(self, mutable):
"""Change the mutability value to `mutable` on this options and children."""
# pylint: disable=protected-access
object.__setattr__(self, "_mutable", mutable)
@tf_export("data.experimental.DistributeOptions")
| AutotuneOptions |
python | ray-project__ray | python/ray/_common/test_utils.py | {
"start": 5541,
"end": 7712
class ____(Enum):
    """Where a telemetry-producing call runs: driver, actor, or task."""

    DRIVER = "driver"
    ACTOR = "actor"
    TASK = "task"
def _get_library_usages() -> Set[str]:
    # Snapshot of the library-usage names currently recorded in the GCS.
    return set(
        ray_usage_lib.get_library_usages_to_report(
            ray.experimental.internal_kv.internal_kv_get_gcs_client()
        )
    )
def _get_extra_usage_tags() -> Dict[str, str]:
    # Snapshot of the extra usage tags currently recorded in the GCS.
    return ray_usage_lib.get_extra_usage_tags_to_report(
        ray.experimental.internal_kv.internal_kv_get_gcs_client()
    )
def check_library_usage_telemetry(
    use_lib_fn: Callable[[], None],
    *,
    callsite: TelemetryCallsite,
    expected_library_usages: List[Set[str]],
    expected_extra_usage_tags: Optional[Dict[str, str]] = None,
):
    """Helper for writing tests to validate library usage telemetry.

    `use_lib_fn` is a callable that will be called from the provided callsite.
    After calling it, the telemetry data to export will be validated against
    expected_library_usages and expected_extra_usage_tags.
    """
    # Precondition: no library usage has been recorded yet.
    assert len(_get_library_usages()) == 0, _get_library_usages()

    if callsite == TelemetryCallsite.DRIVER:
        use_lib_fn()
    elif callsite == TelemetryCallsite.ACTOR:
        # Run the callable inside an actor constructor.
        @ray.remote
        class A:
            def __init__(self):
                use_lib_fn()

        a = A.remote()
        ray.get(a.__ray_ready__.remote())
    elif callsite == TelemetryCallsite.TASK:
        # Run the callable inside a remote task.
        @ray.remote
        def f():
            use_lib_fn()

        ray.get(f.remote())
    else:
        assert False, f"Unrecognized callsite: {callsite}"

    library_usages = _get_library_usages()
    extra_usage_tags = _get_extra_usage_tags()
    assert library_usages in expected_library_usages, library_usages
    if expected_extra_usage_tags:
        # Only the expected keys are checked; extra recorded tags are allowed.
        assert all(
            [extra_usage_tags[k] == v for k, v in expected_extra_usage_tags.items()]
        ), extra_usage_tags
def is_named_tuple(cls):
    """Return True if cls is a namedtuple and False otherwise."""
    # namedtuple classes subclass tuple directly (and only tuple) and
    # expose a ``_fields`` tuple containing plain ``str`` names.
    base_classes = cls.__bases__
    if len(base_classes) != 1 or base_classes[0] is not tuple:
        return False
    fields = getattr(cls, "_fields", None)
    return isinstance(fields, tuple) and all(
        type(name) is str for name in fields
    )
| TelemetryCallsite |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/missingSuper1.py | {
"start": 720,
"end": 810
class ____(ParentA, ParentB):
    def __init__(self):
        # NOTE(review): only ParentB is initialized explicitly; ParentA's
        # __init__ is never invoked — presumably this sample exercises the
        # missing-super diagnostic (confirm against the checker's test).
        ParentB.__init__(self)
| ChildC1 |
python | optuna__optuna | optuna/storages/_rdb/models.py | {
"start": 1368,
"end": 2470
class ____(BaseModel):
    """ORM model for the ``studies`` table (one row per study)."""

    __tablename__ = "studies"
    # Surrogate primary key.
    study_id = _Column(Integer, primary_key=True)
    # Human-readable unique name, indexed for lookup by name.
    study_name = _Column(
        String(MAX_INDEXED_STRING_LENGTH), index=True, unique=True, nullable=False
    )

    @classmethod
    def find_or_raise_by_id(
        cls, study_id: int, session: orm.Session, for_update: bool = False
    ) -> "StudyModel":
        # `for_update` takes a row lock (SELECT ... FOR UPDATE) so that
        # concurrent writers serialize on this study row.
        query = session.query(cls).filter(cls.study_id == study_id)
        if for_update:
            query = query.with_for_update()
        study = query.one_or_none()
        if study is None:
            raise KeyError(NOT_FOUND_MSG)
        return study

    @classmethod
    def find_by_name(cls, study_name: str, session: orm.Session) -> "StudyModel" | None:
        # Returns None when no study has this name.
        study = session.query(cls).filter(cls.study_name == study_name).one_or_none()
        return study

    @classmethod
    def find_or_raise_by_name(cls, study_name: str, session: orm.Session) -> "StudyModel":
        study = cls.find_by_name(study_name, session)
        if study is None:
            raise KeyError(NOT_FOUND_MSG)
        return study
| StudyModel |
python | networkx__networkx | networkx/readwrite/gml.py | {
"start": 9127,
"end": 31193
class ____(NamedTuple):
    """A single lexical token produced by the GML tokenizer."""

    category: Pattern  # which tokenizer pattern matched (None at EOF)
    value: Any  # parsed payload (str/int/float); None at EOF
    line: int  # 1-based line number, used in error messages
    position: int  # 1-based column, used in error messages


# Sentinel prepended to parsed GML lists so a one-element list can be
# distinguished from a scalar when the dict is cleaned up.
LIST_START_VALUE = "_networkx_list_start"
def parse_gml_lines(lines, label, destringizer):
"""Parse GML `lines` into a graph."""
def tokenize():
patterns = [
r"[A-Za-z][0-9A-Za-z_]*\b", # keys
# reals
r"[+-]?(?:[0-9]*\.[0-9]+|[0-9]+\.[0-9]*|INF)(?:[Ee][+-]?[0-9]+)?",
r"[+-]?[0-9]+", # ints
r'".*?"', # strings
r"\[", # dict start
r"\]", # dict end
r"#.*$|\s+", # comments and whitespaces
]
tokens = re.compile("|".join(f"({pattern})" for pattern in patterns))
lineno = 0
multilines = [] # entries spread across multiple lines
for line in lines:
pos = 0
# deal with entries spread across multiple lines
#
# should we actually have to deal with escaped "s then do it here
if multilines:
multilines.append(line.strip())
if line[-1] == '"': # closing multiline entry
# multiline entries will be joined by space. cannot
# reintroduce newlines as this will break the tokenizer
line = " ".join(multilines)
multilines = []
else: # continued multiline entry
lineno += 1
continue
else:
if line.count('"') == 1: # opening multiline entry
if line.strip()[0] != '"' and line.strip()[-1] != '"':
# since we expect something like key "value", the " should not be found at ends
# otherwise tokenizer will pick up the formatting mistake.
multilines = [line.rstrip()]
lineno += 1
continue
length = len(line)
while pos < length:
match = tokens.match(line, pos)
if match is None:
m = f"cannot tokenize {line[pos:]} at ({lineno + 1}, {pos + 1})"
raise NetworkXError(m)
for i in range(len(patterns)):
group = match.group(i + 1)
if group is not None:
if i == 0: # keys
value = group.rstrip()
elif i == 1: # reals
value = float(group)
elif i == 2: # ints
value = int(group)
else:
value = group
if i != 6: # comments and whitespaces
yield Token(Pattern(i), value, lineno + 1, pos + 1)
pos += len(group)
break
lineno += 1
yield Token(None, None, lineno + 1, 1) # EOF
def unexpected(curr_token, expected):
category, value, lineno, pos = curr_token
value = repr(value) if value is not None else "EOF"
raise NetworkXError(f"expected {expected}, found {value} at ({lineno}, {pos})")
def consume(curr_token, category, expected):
if curr_token.category == category:
return next(tokens)
unexpected(curr_token, expected)
def parse_kv(curr_token):
dct = defaultdict(list)
while curr_token.category == Pattern.KEYS:
key = curr_token.value
curr_token = next(tokens)
category = curr_token.category
if category == Pattern.REALS or category == Pattern.INTS:
value = curr_token.value
curr_token = next(tokens)
elif category == Pattern.STRINGS:
value = unescape(curr_token.value[1:-1])
if destringizer:
try:
value = destringizer(value)
except ValueError:
pass
# Special handling for empty lists and tuples
if value == "()":
value = ()
if value == "[]":
value = []
curr_token = next(tokens)
elif category == Pattern.DICT_START:
curr_token, value = parse_dict(curr_token)
else:
# Allow for string convertible id and label values
if key in ("id", "label", "source", "target"):
try:
# String convert the token value
value = unescape(str(curr_token.value))
if destringizer:
try:
value = destringizer(value)
except ValueError:
pass
curr_token = next(tokens)
except Exception:
msg = (
"an int, float, string, '[' or string"
+ " convertible ASCII value for node id or label"
)
unexpected(curr_token, msg)
# Special handling for nan and infinity. Since the gml language
# defines unquoted strings as keys, the numeric and string branches
# are skipped and we end up in this special branch, so we need to
# convert the current token value to a float for NAN and plain INF.
# +/-INF are handled in the pattern for 'reals' in tokenize(). This
# allows labels and values to be nan or infinity, but not keys.
elif curr_token.value in {"NAN", "INF"}:
value = float(curr_token.value)
curr_token = next(tokens)
else: # Otherwise error out
unexpected(curr_token, "an int, float, string or '['")
dct[key].append(value)
def clean_dict_value(value):
if not isinstance(value, list):
return value
if len(value) == 1:
return value[0]
if value[0] == LIST_START_VALUE:
return value[1:]
return value
dct = {key: clean_dict_value(value) for key, value in dct.items()}
return curr_token, dct
def parse_dict(curr_token):
# dict start
curr_token = consume(curr_token, Pattern.DICT_START, "'['")
# dict contents
curr_token, dct = parse_kv(curr_token)
# dict end
curr_token = consume(curr_token, Pattern.DICT_END, "']'")
return curr_token, dct
def parse_graph():
curr_token, dct = parse_kv(next(tokens))
if curr_token.category is not None: # EOF
unexpected(curr_token, "EOF")
if "graph" not in dct:
raise NetworkXError("input contains no graph")
graph = dct["graph"]
if isinstance(graph, list):
raise NetworkXError("input contains more than one graph")
return graph
tokens = tokenize()
graph = parse_graph()
directed = graph.pop("directed", False)
multigraph = graph.pop("multigraph", False)
if not multigraph:
G = nx.DiGraph() if directed else nx.Graph()
else:
G = nx.MultiDiGraph() if directed else nx.MultiGraph()
graph_attr = {k: v for k, v in graph.items() if k not in ("node", "edge")}
G.graph.update(graph_attr)
def pop_attr(dct, category, attr, i):
try:
return dct.pop(attr)
except KeyError as err:
raise NetworkXError(f"{category} #{i} has no {attr!r} attribute") from err
nodes = graph.get("node", [])
mapping = {}
node_labels = set()
for i, node in enumerate(nodes if isinstance(nodes, list) else [nodes]):
id = pop_attr(node, "node", "id", i)
if id in G:
raise NetworkXError(f"node id {id!r} is duplicated")
if label is not None and label != "id":
node_label = pop_attr(node, "node", label, i)
if node_label in node_labels:
raise NetworkXError(f"node label {node_label!r} is duplicated")
node_labels.add(node_label)
mapping[id] = node_label
G.add_node(id, **node)
edges = graph.get("edge", [])
for i, edge in enumerate(edges if isinstance(edges, list) else [edges]):
source = pop_attr(edge, "edge", "source", i)
target = pop_attr(edge, "edge", "target", i)
if source not in G:
raise NetworkXError(f"edge #{i} has undefined source {source!r}")
if target not in G:
raise NetworkXError(f"edge #{i} has undefined target {target!r}")
if not multigraph:
if not G.has_edge(source, target):
G.add_edge(source, target, **edge)
else:
arrow = "->" if directed else "--"
msg = f"edge #{i} ({source!r}{arrow}{target!r}) is duplicated"
raise nx.NetworkXError(msg)
else:
key = edge.pop("key", None)
if key is not None and G.has_edge(source, target, key):
arrow = "->" if directed else "--"
msg = f"edge #{i} ({source!r}{arrow}{target!r}, {key!r})"
msg2 = 'Hint: If multigraph add "multigraph 1" to file header.'
raise nx.NetworkXError(msg + " is duplicated\n" + msg2)
G.add_edge(source, target, key, **edge)
if label is not None and label != "id":
G = nx.relabel_nodes(G, mapping)
return G
def literal_stringizer(value):
"""Convert a `value` to a Python literal in GML representation.
Parameters
----------
value : object
The `value` to be converted to GML representation.
Returns
-------
rep : string
A double-quoted Python literal representing value. Unprintable
characters are replaced by XML character references.
Raises
------
ValueError
If `value` cannot be converted to GML.
Notes
-----
The original value can be recovered using the
:func:`networkx.readwrite.gml.literal_destringizer` function.
"""
def stringize(value):
if isinstance(value, int | bool) or value is None:
if value is True: # GML uses 1/0 for boolean values.
buf.write(str(1))
elif value is False:
buf.write(str(0))
else:
buf.write(str(value))
elif isinstance(value, str):
text = repr(value)
if text[0] != "u":
try:
value.encode("latin1")
except UnicodeEncodeError:
text = "u" + text
buf.write(text)
elif isinstance(value, float | complex | str | bytes):
buf.write(repr(value))
elif isinstance(value, list):
buf.write("[")
first = True
for item in value:
if not first:
buf.write(",")
else:
first = False
stringize(item)
buf.write("]")
elif isinstance(value, tuple):
if len(value) > 1:
buf.write("(")
first = True
for item in value:
if not first:
buf.write(",")
else:
first = False
stringize(item)
buf.write(")")
elif value:
buf.write("(")
stringize(value[0])
buf.write(",)")
else:
buf.write("()")
elif isinstance(value, dict):
buf.write("{")
first = True
for key, value in value.items():
if not first:
buf.write(",")
else:
first = False
stringize(key)
buf.write(":")
stringize(value)
buf.write("}")
elif isinstance(value, set):
buf.write("{")
first = True
for item in value:
if not first:
buf.write(",")
else:
first = False
stringize(item)
buf.write("}")
else:
msg = f"{value!r} cannot be converted into a Python literal"
raise ValueError(msg)
buf = StringIO()
stringize(value)
return buf.getvalue()
def generate_gml(G, stringizer=None):
r"""Generate a single entry of the graph `G` in GML format.
Parameters
----------
G : NetworkX graph
The graph to be converted to GML.
stringizer : callable, optional
A `stringizer` which converts non-int/non-float/non-dict values into
strings. If it cannot convert a value into a string, it should raise a
`ValueError` to indicate that. Default value: None.
Returns
-------
lines: generator of strings
Lines of GML data. Newlines are not appended.
Raises
------
NetworkXError
If `stringizer` cannot convert a value into a string, or the value to
convert is not a string while `stringizer` is None.
See Also
--------
literal_stringizer
Notes
-----
Graph attributes named 'directed', 'multigraph', 'node' or
'edge', node attributes named 'id' or 'label', edge attributes
named 'source' or 'target' (or 'key' if `G` is a multigraph)
are ignored because these attribute names are used to encode the graph
structure.
GML files are stored using a 7-bit ASCII encoding with any extended
ASCII characters (iso8859-1) appearing as HTML character entities.
Without specifying a `stringizer`/`destringizer`, the code is capable of
writing `int`/`float`/`str`/`dict`/`list` data as required by the GML
specification. For writing other data types, and for reading data other
than `str` you need to explicitly supply a `stringizer`/`destringizer`.
For additional documentation on the GML file format, please see the
`GML url <https://web.archive.org/web/20190207140002/http://www.fim.uni-passau.de/index.php?id=17297&L=1>`_.
See the module docstring :mod:`networkx.readwrite.gml` for more details.
Examples
--------
>>> G = nx.Graph()
>>> G.add_node("1")
>>> print("\n".join(nx.generate_gml(G)))
graph [
node [
id 0
label "1"
]
]
>>> G = nx.MultiGraph([("a", "b"), ("a", "b")])
>>> print("\n".join(nx.generate_gml(G)))
graph [
multigraph 1
node [
id 0
label "a"
]
node [
id 1
label "b"
]
edge [
source 0
target 1
key 0
]
edge [
source 0
target 1
key 1
]
]
"""
valid_keys = re.compile("^[A-Za-z][0-9A-Za-z_]*$")
def stringize(key, value, ignored_keys, indent, in_list=False):
if not isinstance(key, str):
raise NetworkXError(f"{key!r} is not a string")
if not valid_keys.match(key):
raise NetworkXError(f"{key!r} is not a valid key")
if not isinstance(key, str):
key = str(key)
if key not in ignored_keys:
if isinstance(value, int | bool):
if key == "label":
yield indent + key + ' "' + str(value) + '"'
elif value is True:
# python bool is an instance of int
yield indent + key + " 1"
elif value is False:
yield indent + key + " 0"
# GML only supports signed 32-bit integers
elif value < -(2**31) or value >= 2**31:
yield indent + key + ' "' + str(value) + '"'
else:
yield indent + key + " " + str(value)
elif isinstance(value, float):
text = repr(value).upper()
# GML matches INF to keys, so prepend + to INF. Use repr(float(*))
# instead of string literal to future proof against changes to repr.
if text == repr(float("inf")).upper():
text = "+" + text
else:
# GML requires that a real literal contain a decimal point, but
# repr may not output a decimal point when the mantissa is
# integral and hence needs fixing.
epos = text.rfind("E")
if epos != -1 and text.find(".", 0, epos) == -1:
text = text[:epos] + "." + text[epos:]
if key == "label":
yield indent + key + ' "' + text + '"'
else:
yield indent + key + " " + text
elif isinstance(value, dict):
yield indent + key + " ["
next_indent = indent + " "
for key, value in value.items():
yield from stringize(key, value, (), next_indent)
yield indent + "]"
elif isinstance(value, tuple) and key == "label":
yield indent + key + f' "({",".join(repr(v) for v in value)})"'
elif isinstance(value, list | tuple) and key != "label" and not in_list:
if len(value) == 0:
yield indent + key + " " + f'"{value!r}"'
if len(value) == 1:
yield indent + key + " " + f'"{LIST_START_VALUE}"'
for val in value:
yield from stringize(key, val, (), indent, True)
else:
if stringizer:
try:
value = stringizer(value)
except ValueError as err:
raise NetworkXError(
f"{value!r} cannot be converted into a string"
) from err
if not isinstance(value, str):
raise NetworkXError(f"{value!r} is not a string")
yield indent + key + ' "' + escape(value) + '"'
multigraph = G.is_multigraph()
yield "graph ["
# Output graph attributes
if G.is_directed():
yield " directed 1"
if multigraph:
yield " multigraph 1"
ignored_keys = {"directed", "multigraph", "node", "edge"}
for attr, value in G.graph.items():
yield from stringize(attr, value, ignored_keys, " ")
# Output node data
node_id = dict(zip(G, range(len(G))))
ignored_keys = {"id", "label"}
for node, attrs in G.nodes.items():
yield " node ["
yield " id " + str(node_id[node])
yield from stringize("label", node, (), " ")
for attr, value in attrs.items():
yield from stringize(attr, value, ignored_keys, " ")
yield " ]"
# Output edge data
ignored_keys = {"source", "target"}
kwargs = {"data": True}
if multigraph:
ignored_keys.add("key")
kwargs["keys"] = True
for e in G.edges(**kwargs):
yield " edge ["
yield " source " + str(node_id[e[0]])
yield " target " + str(node_id[e[1]])
if multigraph:
yield from stringize("key", e[2], (), " ")
for attr, value in e[-1].items():
yield from stringize(attr, value, ignored_keys, " ")
yield " ]"
yield "]"
@open_file(1, mode="wb")
def write_gml(G, path, stringizer=None):
    """Write a graph `G` in GML format to the file or file handle `path`.

    Parameters
    ----------
    G : NetworkX graph
        The graph to be converted to GML.
    path : string or file
        Filename or file handle to write to.
        Filenames ending in .gz or .bz2 will be compressed.
    stringizer : callable, optional
        A `stringizer` which converts non-int/non-float/non-dict values into
        strings. If it cannot convert a value into a string, it should raise a
        `ValueError` to indicate that. Default value: None.

    Raises
    ------
    NetworkXError
        If `stringizer` cannot convert a value into a string, or the value to
        convert is not a string while `stringizer` is None.

    See Also
    --------
    read_gml, generate_gml
    literal_stringizer

    Notes
    -----
    Graph attributes named 'directed', 'multigraph', 'node' or 'edge', node
    attributes named 'id' or 'label', and edge attributes named 'source' or
    'target' (or 'key' if `G` is a multigraph) are ignored because these
    attribute names are used to encode the graph structure.

    GML files are stored using a 7-bit ASCII encoding with any extended
    ASCII characters (iso8859-1) appearing as HTML character entities.
    Without specifying a `stringizer`/`destringizer`, the code is capable of
    writing `int`/`float`/`str`/`dict`/`list` data as required by the GML
    specification. For writing other data types, and for reading data other
    than `str` you need to explicitly supply a `stringizer`/`destringizer`.

    Note that while we allow non-standard GML to be read from a file, we make
    sure to write GML format. In particular, underscores are not allowed in
    attribute names.

    For additional documentation on the GML file format, please see the
    `GML url <https://web.archive.org/web/20190207140002/http://www.fim.uni-passau.de/index.php?id=17297&L=1>`_.

    See the module docstring :mod:`networkx.readwrite.gml` for more details.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> nx.write_gml(G, "test_path5.gml")

    Filenames ending in .gz or .bz2 will be compressed.

    >>> nx.write_gml(G, "test_path5.gml.gz")
    """
    # Stream every generated line into the (binary-mode) file object,
    # terminating each line with a newline and encoding as ASCII, which is
    # what the GML format requires.
    path.writelines((line + "\n").encode("ascii") for line in generate_gml(G, stringizer))
| Token |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/twitter/tests.py | {
"start": 296,
"end": 4150
} | class ____(OAuthTestsMixin, TestCase):
provider_id = TwitterProvider.id
def get_mocked_response(self):
# TODO: Replace with actual/complete Twitter response
return [
MockedResponse(
HTTPStatus.OK,
r"""
{"follow_request_sent": false,
"profile_use_background_image": true,
"id": 45671919, "verified": false, "profile_text_color": "333333",
"profile_image_url_https":
"https://pbs.twimg.com/profile_images/793142149/r_normal.png",
"profile_sidebar_fill_color": "DDEEF6",
"is_translator": false, "geo_enabled": false, "entities":
{"description": {"urls": []}}, "followers_count": 43, "protected": false,
"location": "The Netherlands", "default_profile_image": false,
"id_str": "45671919", "status": {"contributors": null, "truncated":
false, "text": "RT @denibertovic: Okay I'm definitely using django-allauth from now on. So easy to set up, far less time consuming, and it just works. #dja\u2026", "in_reply_to_status_id": null, "id": 400658301702381568, "favorite_count": 0, "source": "<a href=\"http://x.com\" rel=\"nofollow\">Twitter Web Client</a>", "retweeted": true, "coordinates": null, "entities": {"symbols": [], "user_mentions": [{"indices": [3, 16], "screen_name": "denibertovic", "id": 23508244, "name": "Deni Bertovic", "id_str": "23508244"}], "hashtags": [{"indices": [135, 139], "text": "dja"}], "urls": []}, "in_reply_to_screen_name": null, "id_str": "400658301702381568", "retweet_count": 6, "in_reply_to_user_id": null, "favorited": false, "retweeted_status": {"lang": "en", "favorited": false, "in_reply_to_user_id": null, "contributors": null, "truncated": false, "text": "Okay I'm definitely using django-allauth from now on. So easy to set up, far less time consuming, and it just works. #django", "created_at": "Sun Jul 28 19:56:26 +0000 2013", "retweeted": true, "in_reply_to_status_id": null, "coordinates": null, "id": 361575897674956800, "entities": {"symbols": [], "user_mentions": [], "hashtags": [{"indices": [117, 124], "text": "django"}], "urls": []}, "in_reply_to_status_id_str": null, "in_reply_to_screen_name": null, "source": "web", "place": null, "retweet_count": 6, "geo": null, "in_reply_to_user_id_str": null, "favorite_count": 8, "id_str": "361575897674956800"}, "geo": null, "in_reply_to_user_id_str": null, "lang": "en", "created_at": "Wed Nov 13 16:15:57 +0000 2013", "in_reply_to_status_id_str": null, "place": null}, "utc_offset": 3600, "statuses_count": 39, "description": "", "friends_count": 83, "profile_link_color": "0084B4", "profile_image_url": "http://pbs.twimg.com/profile_images/793142149/r_normal.png", "notifications": false, "profile_background_image_url_https": "https://abs.twimg.com/images/themes/theme1/bg.png", "profile_background_color": "C0DEED", 
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme1/bg.png", "name": "Raymond Penners", "lang": "nl", "profile_background_tile": false, "favourites_count": 0, "screen_name": "pennersr", "url": null, "created_at": "Mon Jun 08 21:10:45 +0000 2009", "contributors_enabled": false, "time_zone": "Amsterdam", "profile_sidebar_border_color": "C0DEED", "default_profile": true, "following": false, "listed_count": 1} """,
)
] # noqa
def get_expected_to_str(self):
return "pennersr"
def test_login(self):
super().test_login()
account = SocialAccount.objects.get(uid="45671919")
tw_account = account.get_provider_account()
self.assertEqual(tw_account.get_screen_name(), "pennersr")
self.assertEqual(
tw_account.get_avatar_url(),
"http://pbs.twimg.com/profile_images/793142149/r.png",
)
self.assertEqual(tw_account.get_profile_url(), "https://x.com/pennersr")
self.assertEqual(tw_account.to_str(), "pennersr")
| TwitterTests |
python | huggingface__transformers | src/transformers/utils/dummy_pt_objects.py | {
"start": 13435,
"end": 13702
} | class ____(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def torch_distributed_zero_first(*args, **kwargs):
requires_backends(torch_distributed_zero_first, ["torch"])
| Trainer |
python | readthedocs__readthedocs.org | readthedocs/proxito/exceptions.py | {
"start": 323,
"end": 1519
} | class ____(Http404):
"""
Base class for contextualized HTTP 404 handling.
Subclasses may define their own template name,
HTTP status and object that was not found.
The contextualized exception is handled by proxito's 404 handler
"""
template_name = "errors/proxito/404/base.html"
not_found_subject = pgettext_lazy(_not_found_subject_translation_context, "page")
def __init__(self, http_status=404, path_not_found=None, **kwargs):
"""
Constructor that all subclasses should call.
:param kwargs: all kwargs are added as page context for rendering the 404 template
:param http_status: 404 view should respect this and set the HTTP status.
:param path_not_found: Inform the template and 404 view about a different path from
request.path
"""
self.http_status = http_status
self.path_not_found = path_not_found
self.kwargs = kwargs
def get_context(self):
c = {
"not_found_subject": self.not_found_subject,
"path_not_found": self.path_not_found,
}
c.update(self.kwargs)
return c
| ContextualizedHttp404 |
python | django-guardian__django-guardian | guardian/mixins.py | {
"start": 10984,
"end": 15286
} | class ____:
"""A view mixin that filter a queryset by user and permission.
This mixin filter object retrieved by a queryset that the
logged-in user has the specified permission for.
Example:
```python
from django.views.generic import ListView
from guardian.mixins import PermissionListMixin
class SecureView(PermissionListMixin, ListView):
...
permission_required = 'articles.view_article'
...
# or
class SecureView(PermissionListMixin, ListView):
...
permission_required = 'auth.change_user'
get_objects_for_user_extra_kwargs = {'use_groups': False}
...
```
Attributes:
permission_required (str | list[str]): permissions to check
in format: `"<app_label>.<permission codename>"`.
Default is `None`
get_objects_for_user_extra_kwargs (dict): Extra params to pass to `guardian.shortcuts.get_objects_for_user`.
Default to `{}`,
"""
permission_required: Union[str, list[str], None] = None
# rename get_objects_for_user_kwargs to when get_get_objects_for_user_kwargs is removed
get_objects_for_user_extra_kwargs: dict = {}
def get_required_permissions(self, request: Optional[HttpRequest] = None) -> list[str]:
"""Get the required permissions.
Returns list of permissions in format *<app_label>.<codename>* that
should be checked against *request.user* and *object*.
By default, it returns a list from `permission_required` attribute.
Parameters:
request (HttpRequest): Original request.
Returns:
List of the required permissions.
"""
if isinstance(self.permission_required, str):
perms = [self.permission_required]
elif isinstance(self.permission_required, GeneratorType):
# This feature will be removed in v4. (#666)
warnings.warn(
"Using generators for 'permission_required' attribute is deprecated and will be removed in v4. "
"Use a list or tuple instead as generators can only be consumed once, "
"potentially leading to security issues.",
DeprecationWarning,
stacklevel=2,
)
perms = [p for p in self.permission_required]
elif isinstance(self.permission_required, Iterable):
perms = [p for p in self.permission_required]
else:
raise ImproperlyConfigured(
"'PermissionRequiredMixin' requires "
"'permission_required' attribute to be set to "
"'<app_label>.<permission codename>' but is set to '%s' instead" % self.permission_required
)
return perms
@deprecated(
"This method is deprecated and will be removed in future versions. Use get_user_object_kwargs instead which has identical behavior."
)
def get_get_objects_for_user_kwargs(self, queryset: QuerySet) -> dict:
"""Get kwargs to pass to `get_objects_for_user`.
Returns:
dict of kwargs to be passed to `get_objects_for_user`.
Parameters:
queryset (QuerySet): Queryset to filter.
Warning: Deprecation Warning
This method is deprecated and will be removed in future versions.
Use `get_user_object_kwargs` instead which has identical behavior.
"""
return self.get_user_object_kwargs(queryset)
def get_user_object_kwargs(self, queryset: QuerySet) -> dict:
"""Get kwargs to pass to `get_objects_for_user`.
Returns:
dict of kwargs to be passed to `get_objects_for_user`.
Parameters:
queryset (QuerySet): Queryset to filter.
"""
return dict(
user=self.request.user, # type: ignore[attr-defined]
perms=self.get_required_permissions(self.request), # type: ignore[attr-defined]
klass=queryset,
**self.get_objects_for_user_extra_kwargs,
)
def get_queryset(self, *args, **kwargs):
qs = super().get_queryset(*args, **kwargs)
return get_objects_for_user(**self.get_user_object_kwargs(qs))
| PermissionListMixin |
python | huggingface__transformers | src/transformers/models/grounding_dino/modular_grounding_dino.py | {
"start": 2491,
"end": 5616
} | class ____(DetrImageProcessorFast):
def post_process_object_detection(
self,
outputs: "GroundingDinoObjectDetectionOutput",
threshold: float = 0.1,
target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
):
"""
Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format.
Args:
outputs ([`GroundingDinoObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.1):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the following keys:
- "scores": The confidence scores for each predicted box on the image.
- "labels": Indexes of the classes predicted by the model on the image.
- "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.
"""
batch_logits, batch_boxes = outputs.logits, outputs.pred_boxes
batch_size = len(batch_logits)
if target_sizes is not None and len(target_sizes) != batch_size:
raise ValueError("Make sure that you pass in as many target sizes as images")
# batch_logits of shape (batch_size, num_queries, num_classes)
batch_class_logits = torch.max(batch_logits, dim=-1)
batch_scores = torch.sigmoid(batch_class_logits.values)
batch_labels = batch_class_logits.indices
# Convert to [x0, y0, x1, y1] format
batch_boxes = center_to_corners_format(batch_boxes)
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
batch_boxes = _scale_boxes(batch_boxes, target_sizes)
results = []
for scores, labels, boxes in zip(batch_scores, batch_labels, batch_boxes):
keep = scores > threshold
scores = scores[keep]
labels = labels[keep]
boxes = boxes[keep]
results.append({"scores": scores, "labels": labels, "boxes": boxes})
return results
def post_process_instance_segmentation(self):
raise NotImplementedError("Segmentation post-processing is not implemented for Grounding-Dino yet.")
def post_process_semantic_segmentation(self):
raise NotImplementedError("Semantic segmentation post-processing is not implemented for Grounding-Dino yet.")
def post_process_panoptic_segmentation(self):
raise NotImplementedError("Panoptic segmentation post-processing is not implemented for Grounding-Dino yet.")
__all__ = ["GroundingDinoImageProcessorFast"]
| GroundingDinoImageProcessorFast |
python | encode__httpx | tests/client/test_auth.py | {
"start": 311,
"end": 829
} | class ____:
"""
A mock app to test auth credentials.
"""
def __init__(self, auth_header: str = "", status_code: int = 200) -> None:
self.auth_header = auth_header
self.status_code = status_code
def __call__(self, request: httpx.Request) -> httpx.Response:
headers = {"www-authenticate": self.auth_header} if self.auth_header else {}
data = {"auth": request.headers.get("Authorization")}
return httpx.Response(self.status_code, headers=headers, json=data)
| App |
python | pytest-dev__pytest | src/_pytest/python.py | {
"start": 42158,
"end": 44650
} | class ____:
"""A planned parameterized invocation of a test function.
Calculated during collection for a given test function's Metafunc.
Once collection is over, each callspec is turned into a single Item
and stored in item.callspec.
"""
# arg name -> arg value which will be passed to a fixture or pseudo-fixture
# of the same name. (indirect or direct parametrization respectively)
params: dict[str, object] = dataclasses.field(default_factory=dict)
# arg name -> arg index.
indices: dict[str, int] = dataclasses.field(default_factory=dict)
# arg name -> parameter scope.
# Used for sorting parametrized resources.
_arg2scope: Mapping[str, Scope] = dataclasses.field(default_factory=dict)
# Parts which will be added to the item's name in `[..]` separated by "-".
_idlist: Sequence[str] = dataclasses.field(default_factory=tuple)
# Marks which will be applied to the item.
marks: list[Mark] = dataclasses.field(default_factory=list)
def setmulti(
self,
*,
argnames: Iterable[str],
valset: Iterable[object],
id: str | _HiddenParam,
marks: Iterable[Mark | MarkDecorator],
scope: Scope,
param_index: int,
nodeid: str,
) -> CallSpec2:
params = self.params.copy()
indices = self.indices.copy()
arg2scope = dict(self._arg2scope)
for arg, val in zip(argnames, valset, strict=True):
if arg in params:
raise nodes.Collector.CollectError(
f"{nodeid}: duplicate parametrization of {arg!r}"
)
params[arg] = val
indices[arg] = param_index
arg2scope[arg] = scope
return CallSpec2(
params=params,
indices=indices,
_arg2scope=arg2scope,
_idlist=self._idlist if id is HIDDEN_PARAM else [*self._idlist, id],
marks=[*self.marks, *normalize_mark_list(marks)],
)
def getparam(self, name: str) -> object:
try:
return self.params[name]
except KeyError as e:
raise ValueError(name) from e
@property
def id(self) -> str:
return "-".join(self._idlist)
def get_direct_param_fixture_func(request: FixtureRequest) -> Any:
return request.param
# Used for storing pseudo fixturedefs for direct parametrization.
name2pseudofixturedef_key = StashKey[dict[str, FixtureDef[Any]]]()
@final
| CallSpec2 |
python | numpy__numpy | benchmarks/benchmarks/bench_function_base.py | {
"start": 795,
"end": 1214
} | class ____(Benchmark):
def setup(self):
self.d = np.linspace(0, 100, 200000).reshape((-1, 2))
def time_full_coverage(self):
np.histogramdd(self.d, (200, 200), ((0, 100), (0, 100)))
def time_small_coverage(self):
np.histogramdd(self.d, (200, 200), ((50, 51), (50, 51)))
def time_fine_binning(self):
np.histogramdd(self.d, (10000, 10000), ((0, 100), (0, 100)))
| Histogram2D |
python | gevent__gevent | src/greentest/3.10/test_signal.py | {
"start": 40369,
"end": 47608
} | class ____(unittest.TestCase):
"""
Stress signal delivery, especially when a signal arrives in
the middle of recomputing the signal state or executing
previously tripped signal handlers.
"""
def setsig(self, signum, handler):
old_handler = signal.signal(signum, handler)
self.addCleanup(signal.signal, signum, old_handler)
def measure_itimer_resolution(self):
N = 20
times = []
def handler(signum=None, frame=None):
if len(times) < N:
times.append(time.perf_counter())
# 1 µs is the smallest possible timer interval,
# we want to measure what the concrete duration
# will be on this platform
signal.setitimer(signal.ITIMER_REAL, 1e-6)
self.addCleanup(signal.setitimer, signal.ITIMER_REAL, 0)
self.setsig(signal.SIGALRM, handler)
handler()
while len(times) < N:
time.sleep(1e-3)
durations = [times[i+1] - times[i] for i in range(len(times) - 1)]
med = statistics.median(durations)
if support.verbose:
print("detected median itimer() resolution: %.6f s." % (med,))
return med
def decide_itimer_count(self):
# Some systems have poor setitimer() resolution (for example
# measured around 20 ms. on FreeBSD 9), so decide on a reasonable
# number of sequential timers based on that.
reso = self.measure_itimer_resolution()
if reso <= 1e-4:
return 10000
elif reso <= 1e-2:
return 100
else:
self.skipTest("detected itimer resolution (%.3f s.) too high "
"(> 10 ms.) on this platform (or system too busy)"
% (reso,))
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_dependent(self):
"""
This test uses dependent signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def first_handler(signum, frame):
# 1e-6 is the minimum non-zero value for `setitimer()`.
# Choose a random delay so as to improve chances of
# triggering a race condition. Ideally the signal is received
# when inside critical signal-handling routines such as
# Py_MakePendingCalls().
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
def second_handler(signum=None, frame=None):
sigs.append(signum)
# Here on Linux, SIGPROF > SIGALRM > SIGUSR1. By using both
# ascending and descending sequences (SIGUSR1 then SIGALRM,
# SIGPROF then SIGALRM), we maximize chances of hitting a bug.
self.setsig(signal.SIGPROF, first_handler)
self.setsig(signal.SIGUSR1, first_handler)
self.setsig(signal.SIGALRM, second_handler) # for ITIMER_REAL
expected_sigs = 0
deadline = time.monotonic() + support.SHORT_TIMEOUT
while expected_sigs < N:
os.kill(os.getpid(), signal.SIGPROF)
expected_sigs += 1
# Wait for handlers to run to avoid signal coalescing
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
os.kill(os.getpid(), signal.SIGUSR1)
expected_sigs += 1
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_simultaneous(self):
"""
This test uses simultaneous signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def handler(signum, frame):
sigs.append(signum)
self.setsig(signal.SIGUSR1, handler)
self.setsig(signal.SIGALRM, handler) # for ITIMER_REAL
expected_sigs = 0
deadline = time.monotonic() + support.SHORT_TIMEOUT
while expected_sigs < N:
# Hopefully the SIGALRM will be received somewhere during
# initial processing of SIGUSR1.
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
os.kill(os.getpid(), signal.SIGUSR1)
expected_sigs += 2
# Wait for handlers to run to avoid signal coalescing
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
@unittest.skipUnless(hasattr(signal, "SIGUSR1"),
"test needs SIGUSR1")
def test_stress_modifying_handlers(self):
# bpo-43406: race condition between trip_signal() and signal.signal
signum = signal.SIGUSR1
num_sent_signals = 0
num_received_signals = 0
do_stop = False
def custom_handler(signum, frame):
nonlocal num_received_signals
num_received_signals += 1
def set_interrupts():
nonlocal num_sent_signals
while not do_stop:
signal.raise_signal(signum)
num_sent_signals += 1
def cycle_handlers():
while num_sent_signals < 100:
for i in range(20000):
# Cycle between a Python-defined and a non-Python handler
for handler in [custom_handler, signal.SIG_IGN]:
signal.signal(signum, handler)
old_handler = signal.signal(signum, custom_handler)
self.addCleanup(signal.signal, signum, old_handler)
t = threading.Thread(target=set_interrupts)
try:
ignored = False
with support.catch_unraisable_exception() as cm:
t.start()
cycle_handlers()
do_stop = True
t.join()
if cm.unraisable is not None:
# An unraisable exception may be printed out when
# a signal is ignored due to the aforementioned
# race condition, check it.
self.assertIsInstance(cm.unraisable.exc_value, OSError)
self.assertIn(
f"Signal {signum:d} ignored due to race condition",
str(cm.unraisable.exc_value))
ignored = True
# bpo-43406: Even if it is unlikely, it's technically possible that
# all signals were ignored because of race conditions.
if not ignored:
# Sanity check that some signals were received, but not all
self.assertGreater(num_received_signals, 0)
self.assertLess(num_received_signals, num_sent_signals)
finally:
do_stop = True
t.join()
| StressTest |
python | django__django | tests/admin_views/admin.py | {
"start": 21120,
"end": 21245
} | class ____(admin.ModelAdmin):
ordering = ("order",)
list_display = ("stuff", "some_order")
| AdminOrderedModelMethodAdmin |
python | openai__gym | gym/wrappers/resize_observation.py | {
"start": 172,
"end": 2399
} | class ____(gym.ObservationWrapper):
"""Resize the image observation.
This wrapper works on environments with image observations (or more generally observations of shape AxBxC) and resizes
the observation to the shape given by the 2-tuple :attr:`shape`. The argument :attr:`shape` may also be an integer.
In that case, the observation is scaled to a square of side-length :attr:`shape`.
Example:
>>> import gym
>>> env = gym.make('CarRacing-v1')
>>> env.observation_space.shape
(96, 96, 3)
>>> env = ResizeObservation(env, 64)
>>> env.observation_space.shape
(64, 64, 3)
"""
def __init__(self, env: gym.Env, shape: Union[tuple, int]):
"""Resizes image observations to shape given by :attr:`shape`.
Args:
env: The environment to apply the wrapper
shape: The shape of the resized observations
"""
super().__init__(env)
if isinstance(shape, int):
shape = (shape, shape)
assert all(x > 0 for x in shape), shape
self.shape = tuple(shape)
assert isinstance(
env.observation_space, Box
), f"Expected the observation space to be Box, actual type: {type(env.observation_space)}"
obs_shape = self.shape + env.observation_space.shape[2:]
self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)
def observation(self, observation):
"""Updates the observations by resizing the observation to shape given by :attr:`shape`.
Args:
observation: The observation to reshape
Returns:
The reshaped observations
Raises:
DependencyNotInstalled: opencv-python is not installed
"""
try:
import cv2
except ImportError:
raise DependencyNotInstalled(
"opencv is not install, run `pip install gym[other]`"
)
observation = cv2.resize(
observation, self.shape[::-1], interpolation=cv2.INTER_AREA
)
if observation.ndim == 2:
observation = np.expand_dims(observation, -1)
return observation
| ResizeObservation |
python | pennersr__django-allauth | allauth/socialaccount/providers/evernote/views.py | {
"start": 201,
"end": 1112
} | class ____(OAuthAdapter):
provider_id = "evernote"
settings = app_settings.PROVIDERS.get(provider_id, {})
request_token_url = "https://%s/oauth" % (
settings.get("EVERNOTE_HOSTNAME", "sandbox.evernote.com")
)
access_token_url = "https://%s/oauth" % (
settings.get("EVERNOTE_HOSTNAME", "sandbox.evernote.com")
)
authorize_url = "https://%s/OAuth.action" % (
settings.get("EVERNOTE_HOSTNAME", "sandbox.evernote.com")
)
def complete_login(self, request, app, token, response):
token.expires_at = datetime.fromtimestamp(
int(response["edam_expires"]) / 1000.0
)
extra_data = response
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth_login = OAuthLoginView.adapter_view(EvernoteOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(EvernoteOAuthAdapter)
| EvernoteOAuthAdapter |
python | vyperlang__vyper | vyper/codegen/context.py | {
"start": 646,
"end": 976
} | class ____:
name: str
offset: int
typ: VyperType
size: int
_id: int
# special metadata for calloca. hint for venom to tie calloca to call site.
_callsite: Optional[str] = None
def __post_init__(self):
assert self.typ.memory_bytes_required == self.size
# Function variable
@dataclass
| Alloca |
python | conda__conda | conda/gateways/repodata/__init__.py | {
"start": 15517,
"end": 22802
} | class ____:
"""
Handle caching for a single repodata.json + repodata.info.json
(<hex-string>*.json inside `dir`)
Avoid race conditions while loading, saving repodata.json and cache state.
Also support bytes as in repodata_shards.msgpack.zst
"""
def __init__(self, base, repodata_fn):
"""
base: directory and filename prefix for cache, e.g. /cache/dir/abc123;
writes /cache/dir/abc123.json
"""
cache_path_base = pathlib.Path(base)
self.cache_dir = cache_path_base.parent
self.name = cache_path_base.name
# XXX can we skip repodata_fn or include the full url for debugging
self.repodata_fn = repodata_fn
self.state = RepodataState(
self.cache_path_json, self.cache_path_state, repodata_fn
)
@property
def cache_path_json(self):
return pathlib.Path(
self.cache_dir,
self.name + ("1" if context.use_only_tar_bz2 else "") + ".json",
)
@property
def cache_path_shards(self):
return pathlib.Path(
self.cache_dir,
self.name + ("1" if context.use_only_tar_bz2 else "") + ".msgpack.zst",
)
@property
def cache_path_state(self):
"""Out-of-band etag and other state needed by the RepoInterface."""
return self.cache_path_json.with_suffix(CACHE_STATE_SUFFIX)
def load(self, *, state_only=False, binary=False) -> str | bytes:
# read state and repodata.json with locking
# lock {CACHE_STATE_SUFFIX} file
# read {CACHE_STATES_SUFFIX} file
# read repodata.json
# check stat, if wrong clear cache information
with self.lock("r+") as state_file:
# cannot use pathlib.read_text / write_text on any locked file, as
# it will release the lock early
state = json.loads(state_file.read())
cache_path = self.cache_path_shards if binary else self.cache_path_json
# json and state files should match. must read json before checking
# stat (if json_data is to be trusted)
if state_only:
json_data = b"" if binary else ""
else:
if binary:
json_data = cache_path.read_bytes()
else:
json_data = cache_path.read_text()
json_stat = cache_path.stat()
if not (
state.get("mtime_ns") == json_stat.st_mtime_ns
and state.get("size") == json_stat.st_size
):
# clear mod, etag, cache_control to encourage re-download
state.update(
{
ETAG_KEY: "",
LAST_MODIFIED_KEY: "",
CACHE_CONTROL_KEY: "",
"size": 0,
}
)
# Replace data in special self.state dict subclass with key aliases
self.state.clear()
self.state.update(state)
return json_data
def load_state(self, binary=False):
"""
Update self.state without reading repodata.
Return self.state.
"""
try:
self.load(state_only=True, binary=binary)
except (FileNotFoundError, json.JSONDecodeError) as e:
if isinstance(e, json.JSONDecodeError):
log.warning(f"{e.__class__.__name__} loading {self.cache_path_state}")
self.state.clear()
return self.state
def save(self, data: str | bytes):
"""Write data to <repodata> cache path, by calling self.replace()."""
temp_path = self.cache_dir / f"{self.name}.{os.urandom(2).hex()}.tmp"
if isinstance(data, bytes):
mode = "bx"
target = self.cache_path_shards
else:
mode = "x"
target = self.cache_path_json
try:
with temp_path.open(mode) as temp: # exclusive mode, error if exists
temp.write(data)
return self.replace(temp_path, target)
finally:
try:
temp_path.unlink()
except OSError:
pass
def replace(self, temp_path: Path, target=None):
"""
Rename path onto <repodata> path, synchronize state.
Relies on path's mtime not changing on move. `temp_path` should be
adjacent to `self.cache_path_json` to be on the same filesystem.
"""
if target is None:
target = self.cache_path_json
with self.lock() as state_file:
# "a+" creates the file if necessary, does not trunctate file.
state_file.seek(0)
state_file.truncate()
stat = temp_path.stat()
# UserDict has inscrutable typing, which we ignore
self.state["mtime_ns"] = stat.st_mtime_ns # type: ignore
self.state["size"] = stat.st_size # type: ignore
self.state["refresh_ns"] = time.time_ns() # type: ignore
try:
temp_path.rename(target)
except FileExistsError: # Windows
target.unlink()
temp_path.rename(target)
state_file.write(json.dumps(dict(self.state), indent=2))
def refresh(self, refresh_ns=0):
"""
Update access time in cache info file to indicate a HTTP 304 Not Modified response.
"""
# Note this is not thread-safe.
with self.lock() as state_file:
# "a+" creates the file if necessary, does not trunctate file.
state_file.seek(0)
state_file.truncate()
self.state["refresh_ns"] = refresh_ns or time.time_ns()
state_file.write(json.dumps(dict(self.state), indent=2))
@contextmanager
def lock(self, mode="a+"):
"""
Lock .info.json file. Hold lock while modifying related files.
mode: "a+" then seek(0) to write/create; "r+" to read.
"""
with self.cache_path_state.open(mode) as state_file, lock(state_file):
yield state_file
def stale(self):
"""
Compare state refresh_ns against cache control header and
context.local_repodata_ttl.
"""
if context.local_repodata_ttl > 1:
max_age = context.local_repodata_ttl
elif context.local_repodata_ttl == 1:
max_age = get_cache_control_max_age(self.state.cache_control)
else:
max_age = 0
max_age *= 10**9 # nanoseconds
now = time.time_ns()
refresh = self.state.get("refresh_ns", 0)
return (now - refresh) > max_age
def timeout(self):
"""
Return number of seconds until cache times out (<= 0 if already timed
out).
"""
if context.local_repodata_ttl > 1:
max_age = context.local_repodata_ttl
elif context.local_repodata_ttl == 1:
max_age = get_cache_control_max_age(self.state.cache_control)
else:
max_age = 0
max_age *= 10**9 # nanoseconds
now = time.time_ns()
refresh = self.state.get("refresh_ns", 0)
return ((now - refresh) + max_age) / 1e9
| RepodataCache |
python | aio-libs__aiohttp | tests/test_payload.py | {
"start": 1689,
"end": 4989
} | class ____(payload.Payload):
def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str:
assert False
async def write(self, writer: AbstractStreamWriter) -> None:
pass
def test_register_type(registry: payload.PayloadRegistry) -> None:
class TestProvider:
pass
payload.register_payload(Payload, TestProvider)
p = payload.get_payload(TestProvider())
assert isinstance(p, Payload)
def test_register_unsupported_order(registry: payload.PayloadRegistry) -> None:
class TestProvider:
pass
with pytest.raises(ValueError):
payload.register_payload(
Payload, TestProvider, order=object() # type: ignore[arg-type]
)
def test_payload_ctor() -> None:
p = Payload("test", encoding="utf-8", filename="test.txt")
assert p._value == "test"
assert p._encoding == "utf-8"
assert p.size is None
assert p.filename == "test.txt"
assert p.content_type == "text/plain"
def test_payload_content_type() -> None:
p = Payload("test", headers={"content-type": "application/json"})
assert p.content_type == "application/json"
def test_bytes_payload_default_content_type() -> None:
p = payload.BytesPayload(b"data")
assert p.content_type == "application/octet-stream"
def test_bytes_payload_explicit_content_type() -> None:
p = payload.BytesPayload(b"data", content_type="application/custom")
assert p.content_type == "application/custom"
def test_bytes_payload_bad_type() -> None:
with pytest.raises(TypeError):
payload.BytesPayload(object()) # type: ignore[arg-type]
def test_bytes_payload_memoryview_correct_size() -> None:
mv = memoryview(array.array("H", [1, 2, 3]))
p = payload.BytesPayload(mv)
assert p.size == 6
def test_string_payload() -> None:
p = payload.StringPayload("test")
assert p.encoding == "utf-8"
assert p.content_type == "text/plain; charset=utf-8"
p = payload.StringPayload("test", encoding="koi8-r")
assert p.encoding == "koi8-r"
assert p.content_type == "text/plain; charset=koi8-r"
p = payload.StringPayload("test", content_type="text/plain; charset=koi8-r")
assert p.encoding == "koi8-r"
assert p.content_type == "text/plain; charset=koi8-r"
def test_string_io_payload() -> None:
s = StringIO("ű" * 5000)
p = payload.StringIOPayload(s)
assert p.encoding == "utf-8"
assert p.content_type == "text/plain; charset=utf-8"
assert p.size == 10000
def test_async_iterable_payload_default_content_type() -> None:
async def gen() -> AsyncIterator[bytes]:
return
yield b"abc" # type: ignore[unreachable] # pragma: no cover
p = payload.AsyncIterablePayload(gen())
assert p.content_type == "application/octet-stream"
def test_async_iterable_payload_explicit_content_type() -> None:
async def gen() -> AsyncIterator[bytes]:
return
yield b"abc" # type: ignore[unreachable] # pragma: no cover
p = payload.AsyncIterablePayload(gen(), content_type="application/custom")
assert p.content_type == "application/custom"
def test_async_iterable_payload_not_async_iterable() -> None:
with pytest.raises(TypeError):
payload.AsyncIterablePayload(object()) # type: ignore[arg-type]
| Payload |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/deep_learning/activation_functions.py | {
"start": 1076,
"end": 1344
} | class ____():
def __init__(self, alpha=0.1):
self.alpha = alpha
def __call__(self, x):
return np.where(x >= 0.0, x, self.alpha * (np.exp(x) - 1))
def gradient(self, x):
return np.where(x >= 0.0, 1, self.__call__(x) + self.alpha)
| ELU |
python | kamyu104__LeetCode-Solutions | Python/string-matching-in-an-array.py | {
"start": 2937,
"end": 4354
} | class ____(object):
def stringMatching(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
def getPrefix(pattern):
prefix = [-1]*len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j != -1 and pattern[j+1] != pattern[i]:
j = prefix[j]
if pattern[j+1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
def kmp(text, pattern, prefix):
if not pattern:
return 0
if len(text) < len(pattern):
return -1
j = -1
for i in xrange(len(text)):
while j != -1 and pattern[j+1] != text[i]:
j = prefix[j]
if pattern[j+1] == text[i]:
j += 1
if j+1 == len(pattern):
return i-j
return -1
result = []
for i, pattern in enumerate(words):
prefix = getPrefix(pattern)
for j, text in enumerate(words):
if i != j and kmp(text, pattern, prefix) != -1:
result.append(pattern)
break
return result
# Time: O(n^2 * l^2), n is the number of strings
# Space: O(1) , l is the max length of strings
| Solution2 |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/relativity/relativity.py | {
"start": 7931,
"end": 9220
} | class ____(pTypes.GroupParameter):
def __init__(self, **kwds):
defs = dict(name="Clock", autoIncrementName=True, renamable=True, removable=True, children=[
dict(name='Initial Position', type='float', value=0.0, step=0.1),
#dict(name='V0', type='float', value=0.0, step=0.1),
AccelerationGroup(),
dict(name='Rest Mass', type='float', value=1.0, step=0.1, limits=[1e-9, None]),
dict(name='Color', type='color', value=(100,100,150)),
dict(name='Size', type='float', value=0.5),
dict(name='Vertical Position', type='float', value=0.0, step=0.1),
])
#defs.update(kwds)
pTypes.GroupParameter.__init__(self, **defs)
self.restoreState(kwds, removeChildren=False)
def buildClocks(self):
x0 = self['Initial Position']
y0 = self['Vertical Position']
color = self['Color']
m = self['Rest Mass']
size = self['Size']
prog = self.param('Acceleration').generate()
c = Clock(x0=x0, m0=m, y0=y0, color=color, prog=prog, size=size)
return {self.name(): c}
def clockNames(self):
return [self.name()]
pTypes.registerParameterType('Clock', ClockParam)
| ClockParam |
python | getsentry__sentry | src/sentry/logging/__init__.py | {
"start": 0,
"end": 65
} | class ____:
HUMAN = "human"
MACHINE = "machine"
| LoggingFormat |
python | spack__spack | lib/spack/spack/util/elf.py | {
"start": 17660,
"end": 22567
} | class ____:
def __init__(self, old_value: bytes, new_value: bytes, offset: int):
self.old_value = old_value
self.new_value = new_value
self.offset = offset
@property
def inplace(self) -> bool:
return len(self.new_value) <= len(self.old_value)
def apply(self, f: BinaryIO) -> None:
assert self.inplace
f.seek(self.offset)
f.write(self.new_value)
# We zero out the bits we shortened because (a) it should be a
# C-string and (b) it's nice not to have spurious parts of old
# paths in the output of `strings file`. Note that we're all
# good when pad == 0; the original terminating null is used.
f.write(b"\x00" * (len(self.old_value) - len(self.new_value)))
def _get_rpath_substitution(
elf: ElfFile, regex: Pattern, substitutions: Dict[bytes, bytes]
) -> Optional[UpdateCStringAction]:
"""Make rpath substitutions in-place."""
# If there's no RPATH, then there's no need to replace anything.
if not elf.has_rpath:
return None
# Get the non-empty rpaths. Sometimes there's a bunch of trailing
# colons ::::: used for padding, we don't add them back to make it
# more likely that the string doesn't grow.
rpaths = list(filter(len, elf.dt_rpath_str.split(b":")))
num_rpaths = len(rpaths)
if num_rpaths == 0:
return None
changed = False
for i in range(num_rpaths):
old_rpath = rpaths[i]
match = regex.match(old_rpath)
if match:
changed = True
rpaths[i] = substitutions[match.group()] + old_rpath[match.end() :]
# Nothing to replace!
if not changed:
return None
return UpdateCStringAction(
old_value=elf.dt_rpath_str,
new_value=b":".join(rpaths),
# The rpath is at a given offset in the string table used by the dynamic section.
offset=elf.pt_dynamic_strtab_offset + elf.rpath_strtab_offset,
)
def _get_pt_interp_substitution(
elf: ElfFile, regex: Pattern, substitutions: Dict[bytes, bytes]
) -> Optional[UpdateCStringAction]:
"""Make interpreter substitutions in-place."""
if not elf.has_pt_interp:
return None
match = regex.match(elf.pt_interp_str)
if not match:
return None
return UpdateCStringAction(
old_value=elf.pt_interp_str,
new_value=substitutions[match.group()] + elf.pt_interp_str[match.end() :],
offset=elf.pt_interp_p_offset,
)
def substitute_rpath_and_pt_interp_in_place_or_raise(
path: str, substitutions: Dict[bytes, bytes]
) -> bool:
"""Returns true if the rpath and interpreter were modified, false if there was nothing to do.
Raises ElfCStringUpdatesFailed if the ELF file cannot be updated in-place. This exception
contains a list of actions to perform with other tools. The file is left untouched in this
case."""
regex = re.compile(b"|".join(re.escape(p) for p in substitutions.keys()))
try:
with open(path, "rb+") as f:
elf = parse_elf(f, interpreter=True, dynamic_section=True)
# Get the actions to perform.
rpath = _get_rpath_substitution(elf, regex, substitutions)
pt_interp = _get_pt_interp_substitution(elf, regex, substitutions)
# Nothing to do.
if not rpath and not pt_interp:
return False
# If we can't update in-place, leave it to other tools, don't do partial updates.
if rpath and not rpath.inplace or pt_interp and not pt_interp.inplace:
raise ElfCStringUpdatesFailed(rpath, pt_interp)
# Otherwise, apply the updates.
if rpath:
rpath.apply(f)
if pt_interp:
pt_interp.apply(f)
return True
except ElfParsingError:
# This just means the file wasn't an elf file, so there's no point
# in updating its rpath anyways; ignore this problem.
return False
def pt_interp(path: str) -> Optional[str]:
"""Retrieve the interpreter of an executable at ``path``."""
try:
with open(path, "rb") as f:
elf = parse_elf(f, interpreter=True)
except (OSError, ElfParsingError):
return None
if not elf.has_pt_interp:
return None
return elf.pt_interp_str.decode("utf-8")
def get_elf_compat(path: str) -> Tuple[bool, bool, int]:
"""Get a triplet (is_64_bit, is_little_endian, e_machine) from an ELF file, which can be used
to see if two ELF files are compatible."""
# On ELF platforms supporting, we try to be a bit smarter when it comes to shared
# libraries, by dropping those that are not host compatible.
with open(path, "rb") as f:
elf = parse_elf(f, only_header=True)
return (elf.is_64_bit, elf.is_little_endian, elf.elf_hdr.e_machine)
| UpdateCStringAction |
python | tensorflow__tensorflow | tensorflow/python/compiler/mlir/mlir_test.py | {
"start": 4233,
"end": 5648
} | class ____(test.TestCase):
@test_util.run_v2_only
def testImport(self):
@def_function.function
def sqr(i):
return i * i
concrete_function = sqr.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
mlir_module = mlir.convert_function(concrete_function, show_debug_info=True)
self.assertRegex(mlir_module, r'func @.*sqr.*\(')
self.assertRegex(mlir_module, r'loc\(".*mlir_test.py":.*:1\)')
@test_util.run_v2_only
def testImportWithCall(self):
@def_function.function
def callee(i):
return i
@def_function.function
def caller(i):
return callee(i)
concrete_function = caller.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
mlir_module = mlir.convert_function(concrete_function)
self.assertRegex(mlir_module, r'func @.*caller.*\(')
self.assertRegex(mlir_module, r'func private @.*callee.*\(')
@test_util.run_v2_only
def testImportWithControlRet(self):
@def_function.function
def logging():
logging_ops.print_v2('some message')
concrete_function = logging.get_concrete_function()
mlir_module = mlir.convert_function(concrete_function, pass_pipeline='')
self.assertRegex(mlir_module, r'tf\.PrintV2')
self.assertRegex(mlir_module, r'tf_executor.fetch.*: !tf_executor.control')
if __name__ == '__main__':
test.main()
| MLIRConcreteFunctionImportTest |
python | django__django | tests/auth_tests/models/custom_user.py | {
"start": 3783,
"end": 4680
} | class ____(AbstractBaseUser):
pk = models.CompositePrimaryKey("email", "date_of_birth")
email = models.EmailField(verbose_name="email address", max_length=255, unique=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
date_of_birth = models.DateField()
custom_objects = CustomUserManager()
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ["date_of_birth"]
# The extension user is a simple extension of the built-in user class,
# adding a required date_of_birth field. This allows us to check for
# any hard references to the name "User" in forms/handlers etc.
with RemoveGroupsAndPermissions():
class ExtensionUser(AbstractUser):
date_of_birth = models.DateField()
custom_objects = UserManager()
REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ["date_of_birth"]
| CustomUserCompositePrimaryKey |
python | encode__django-rest-framework | tests/test_versioning.py | {
"start": 12946,
"end": 15015
} | class ____(URLPatternsTestCase, APITestCase):
nested = [
path('namespaced/<int:pk>/', dummy_pk_view, name='nested'),
]
included = [
path('namespaced/<int:pk>/', dummy_pk_view, name='namespaced'),
path('nested/', include((nested, 'nested-namespace'), namespace='nested-namespace'))
]
urlpatterns = [
path('v1/', include((included, 'restframeworkv1'), namespace='v1')),
path('v2/', include((included, 'restframeworkv2'), namespace='v2')),
path('non-api/<int:pk>/', dummy_pk_view, name='non-api-view')
]
def _create_field(self, view_name, version):
request = factory.get("/")
request.versioning_scheme = NamespaceVersioning()
request.version = version
field = serializers.HyperlinkedRelatedField(
view_name=view_name,
read_only=True)
field._context = {'request': request}
return field
def test_api_url_is_properly_reversed_with_v1(self):
field = self._create_field('namespaced', 'v1')
assert field.to_representation(PKOnlyObject(3)) == 'http://testserver/v1/namespaced/3/'
def test_api_url_is_properly_reversed_with_v2(self):
field = self._create_field('namespaced', 'v2')
assert field.to_representation(PKOnlyObject(5)) == 'http://testserver/v2/namespaced/5/'
def test_api_url_is_properly_reversed_with_nested(self):
field = self._create_field('nested', 'v1:nested-namespace')
assert field.to_representation(PKOnlyObject(3)) == 'http://testserver/v1/nested/namespaced/3/'
def test_non_api_url_is_properly_reversed_regardless_of_the_version(self):
"""
Regression test for #2711
"""
field = self._create_field('non-api-view', 'v1')
assert field.to_representation(PKOnlyObject(10)) == 'http://testserver/non-api/10/'
field = self._create_field('non-api-view', 'v2')
assert field.to_representation(PKOnlyObject(10)) == 'http://testserver/non-api/10/'
| TestNamespaceVersioningHyperlinkedRelatedFieldScheme |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_allocated_device_status.py | {
"start": 383,
"end": 10542
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'conditions': 'list[V1Condition]',
'data': 'object',
'device': 'str',
'driver': 'str',
'network_data': 'V1beta2NetworkDeviceData',
'pool': 'str',
'share_id': 'str'
}
attribute_map = {
'conditions': 'conditions',
'data': 'data',
'device': 'device',
'driver': 'driver',
'network_data': 'networkData',
'pool': 'pool',
'share_id': 'shareID'
}
def __init__(self, conditions=None, data=None, device=None, driver=None, network_data=None, pool=None, share_id=None, local_vars_configuration=None): # noqa: E501
"""V1beta2AllocatedDeviceStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._conditions = None
self._data = None
self._device = None
self._driver = None
self._network_data = None
self._pool = None
self._share_id = None
self.discriminator = None
if conditions is not None:
self.conditions = conditions
if data is not None:
self.data = data
self.device = device
self.driver = driver
if network_data is not None:
self.network_data = network_data
self.pool = pool
if share_id is not None:
self.share_id = share_id
@property
def conditions(self):
"""Gets the conditions of this V1beta2AllocatedDeviceStatus. # noqa: E501
Conditions contains the latest observation of the device's state. If the device has been configured according to the class and claim config references, the `Ready` condition should be True. Must not contain more than 8 entries. # noqa: E501
:return: The conditions of this V1beta2AllocatedDeviceStatus. # noqa: E501
:rtype: list[V1Condition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1beta2AllocatedDeviceStatus.
Conditions contains the latest observation of the device's state. If the device has been configured according to the class and claim config references, the `Ready` condition should be True. Must not contain more than 8 entries. # noqa: E501
:param conditions: The conditions of this V1beta2AllocatedDeviceStatus. # noqa: E501
:type: list[V1Condition]
"""
self._conditions = conditions
@property
def data(self):
"""Gets the data of this V1beta2AllocatedDeviceStatus. # noqa: E501
Data contains arbitrary driver-specific data. The length of the raw data must be smaller or equal to 10 Ki. # noqa: E501
:return: The data of this V1beta2AllocatedDeviceStatus. # noqa: E501
:rtype: object
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this V1beta2AllocatedDeviceStatus.
Data contains arbitrary driver-specific data. The length of the raw data must be smaller or equal to 10 Ki. # noqa: E501
:param data: The data of this V1beta2AllocatedDeviceStatus. # noqa: E501
:type: object
"""
self._data = data
@property
def device(self):
"""Gets the device of this V1beta2AllocatedDeviceStatus. # noqa: E501
Device references one device instance via its name in the driver's resource pool. It must be a DNS label. # noqa: E501
:return: The device of this V1beta2AllocatedDeviceStatus. # noqa: E501
:rtype: str
"""
return self._device
@device.setter
def device(self, device):
"""Sets the device of this V1beta2AllocatedDeviceStatus.
Device references one device instance via its name in the driver's resource pool. It must be a DNS label. # noqa: E501
:param device: The device of this V1beta2AllocatedDeviceStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and device is None: # noqa: E501
raise ValueError("Invalid value for `device`, must not be `None`") # noqa: E501
self._device = device
@property
def driver(self):
"""Gets the driver of this V1beta2AllocatedDeviceStatus. # noqa: E501
Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. # noqa: E501
:return: The driver of this V1beta2AllocatedDeviceStatus. # noqa: E501
:rtype: str
"""
return self._driver
@driver.setter
def driver(self, driver):
"""Sets the driver of this V1beta2AllocatedDeviceStatus.
Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. # noqa: E501
:param driver: The driver of this V1beta2AllocatedDeviceStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and driver is None: # noqa: E501
raise ValueError("Invalid value for `driver`, must not be `None`") # noqa: E501
self._driver = driver
@property
def network_data(self):
"""Gets the network_data of this V1beta2AllocatedDeviceStatus. # noqa: E501
:return: The network_data of this V1beta2AllocatedDeviceStatus. # noqa: E501
:rtype: V1beta2NetworkDeviceData
"""
return self._network_data
@network_data.setter
def network_data(self, network_data):
"""Sets the network_data of this V1beta2AllocatedDeviceStatus.
:param network_data: The network_data of this V1beta2AllocatedDeviceStatus. # noqa: E501
:type: V1beta2NetworkDeviceData
"""
self._network_data = network_data
@property
def pool(self):
"""Gets the pool of this V1beta2AllocatedDeviceStatus. # noqa: E501
This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`). Must not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes. # noqa: E501
:return: The pool of this V1beta2AllocatedDeviceStatus. # noqa: E501
:rtype: str
"""
return self._pool
@pool.setter
def pool(self, pool):
"""Sets the pool of this V1beta2AllocatedDeviceStatus.
This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`). Must not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes. # noqa: E501
:param pool: The pool of this V1beta2AllocatedDeviceStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pool is None: # noqa: E501
raise ValueError("Invalid value for `pool`, must not be `None`") # noqa: E501
self._pool = pool
@property
def share_id(self):
"""Gets the share_id of this V1beta2AllocatedDeviceStatus. # noqa: E501
ShareID uniquely identifies an individual allocation share of the device. # noqa: E501
:return: The share_id of this V1beta2AllocatedDeviceStatus. # noqa: E501
:rtype: str
"""
return self._share_id
@share_id.setter
def share_id(self, share_id):
"""Sets the share_id of this V1beta2AllocatedDeviceStatus.
ShareID uniquely identifies an individual allocation share of the device. # noqa: E501
:param share_id: The share_id of this V1beta2AllocatedDeviceStatus. # noqa: E501
:type: str
"""
self._share_id = share_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2AllocatedDeviceStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2AllocatedDeviceStatus):
return True
return self.to_dict() != other.to_dict()
| V1beta2AllocatedDeviceStatus |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datastore.py | {
"start": 6415,
"end": 6957
} | class ____:
@mock.patch(HOOK_PATH)
def test_execute(self, mock_hook):
op = CloudDatastoreRollbackOperator(
task_id="test_task",
gcp_conn_id=CONN_ID,
project_id=PROJECT_ID,
transaction=TRANSACTION,
)
op.execute({})
mock_hook.assert_called_once_with(gcp_conn_id=CONN_ID, impersonation_chain=None)
mock_hook.return_value.rollback.assert_called_once_with(
project_id=PROJECT_ID, transaction=TRANSACTION
)
| TestCloudDatastoreRollback |
python | pytorch__pytorch | torch/testing/_internal/common_distributed.py | {
"start": 40117,
"end": 46102
} | class ____(MultiProcessTestCase):
def setUp(self):
super().setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
self._spawn_processes()
def tearDown(self):
try:
torch.distributed.destroy_process_group()
except AssertionError:
pass
try:
os.remove(self.file_name)
except OSError:
pass
def backend(self, device) -> str:
if "cuda" in device:
return "nccl"
elif "hpu" in device: # intel gaudi
return "hccl"
elif "xpu" in device:
return "xccl"
else:
return "gloo"
def create_pg(self, device, world_size=None):
if world_size is None:
world_size = self.world_size
num_visible_devices = torch.get_device_module(device).device_count()
store = torch.distributed.FileStore(self.file_name, num_visible_devices)
torch.distributed.init_process_group(
backend=self.backend(device),
world_size=world_size,
rank=self.rank,
store=store,
)
if "nccl" in self.backend(device) or "xccl" in self.backend(device):
torch.accelerator.set_device_index(self.rank)
return torch.distributed.distributed_c10d._get_default_group()
def rank_to_device(self, device):
num_visible_devices = torch.get_device_module(device).device_count()
return {i: [i % num_visible_devices] for i in range(self.world_size)}
def run_subtests(
cls_inst,
subtest_config: dict[str, list[Any]],
test_fn: Callable,
*test_args,
**test_kwargs: Any,
):
"""
Runs a test function given by ``test_fn`` as a subtest according to the
configurations specified by ``subtest_config``. This amortizes the
costly setup overhead (including process spawn and initializing the
process group) over the subtests.
Args:
subtest_config (Dict[str, List[Any]]): A mapping from subtest
keyword argument name to a list of its possible values.
test_fn (Callable): A callable that runs the actual test.
test_args: Positional arguments to pass to ``test_fn``.
test_kwargs: Keyword arguments to pass to ``test_fn``.
"""
# Convert the config mapping to a list to have a fixed order
subtest_config_items: list[tuple[str, list[Any]]] = list(subtest_config.items())
subtest_config_keys: list[str] = [item[0] for item in subtest_config_items]
subtest_config_values: list[list[Any]] = [item[1] for item in subtest_config_items]
for values in itertools.product(*subtest_config_values):
# Map keyword to chosen value
subtest_kwargs = dict(zip(subtest_config_keys, values, strict=True))
with cls_inst.subTest(**subtest_kwargs):
torch._dynamo.reset()
test_fn(*test_args, **test_kwargs, **subtest_kwargs)
torch._dynamo.reset()
c10d.barrier()
@functools.cache
def has_efa() -> bool:
"""
If shell command `fi_info -p efa -t FI_EP_RDM` returns exit code 0 then we assume that the machine has
Libfabric EFA interfaces and EFA software components installed,
see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html.
"""
try:
return (
subprocess.run(
["fi_info", "-p", "efa", "-t", "FI_EP_RDM"], check=False
).returncode
== 0
)
except FileNotFoundError:
pass
return False
def tp_transports():
"""
If the machine has Libfabric EFA interfaces and EFA software components installed it may cause
'RuntimeError: In operator() at tensorpipe/common/ibv.h:172 "": Operation not supported' if tensorpipe
uses InfiniBand transport, so we exclude it from tensorpipe transports,
see https://github.com/pytorch/pytorch/issues/73885 and https://github.com/pytorch/pytorch/issues/65022
"""
return ["shm", "uv"] if has_efa() else None
def spawn_threads_and_init_comms(
func=None, timeout=TIMEOUT_DEFAULT, world_size=DEFAULT_WORLD_SIZE
):
"""
Wrapper to use with a test method
"""
if func is None:
return partial(
spawn_threads_and_init_comms, timeout=timeout, world_size=world_size
)
def _run_test_method_with_multi_threads(world_size, callback):
world = _install_threaded_pg()
global_store = c10d.HashStore()
def world_is_valid():
return world == c10d.distributed_c10d._world
def worker(rank, world_pg, store):
c10d.init_process_group(
backend="threaded", rank=rank, world_size=world_size, store=store
)
try:
callback()
except BaseException as ex: # noqa: B036
# Exceptions are handled in MultiThreadedTestCase
MultiThreadedTestCase.exception_queue.put((rank, sys.exc_info()))
ProcessLocalGroup.exception_handle(
ex
) # trigger _terminate event and awaken worker threads
finally:
if world_is_valid():
c10d.destroy_process_group()
threads = []
for rank in range(world_size):
t = threading.Thread(target=worker, args=(rank, world, global_store))
t.start()
threads.append(t)
return threads
@wraps(func)
def wrapper(self, *args, **kwargs):
# TODO: get test name from kwargs
torch._C._distributed_c10d._set_thread_isolation_mode(True)
try:
threads = _run_test_method_with_multi_threads(
world_size, lambda: func(self, *args, **kwargs)
)
# join and error handling
MultiThreadedTestCase._join_threads(threads, func)
finally:
torch._C._distributed_c10d._set_thread_isolation_mode(False)
return wrapper
| DistributedTestBase |
python | getsentry__sentry | tests/sentry/integrations/aws_lambda/test_utils.py | {
"start": 2991,
"end": 4056
} | class ____(TestCase):
mock_client = MagicMock()
mock_client.get_paginator.return_value.paginate.return_value = [
{
"Functions": [
{"FunctionName": "lambdaA", "Runtime": "nodejs12.x"},
{"FunctionName": "lambdaB", "Runtime": "nodejs10.x"},
]
},
{
"Functions": [
{"FunctionName": "lambdaC", "Runtime": "nodejs12.x"},
{"FunctionName": "lambdaD", "Runtime": "python3.6"},
{"FunctionName": "lambdaE", "Runtime": "nodejs14.x"},
]
},
]
assert get_supported_functions(mock_client) == [
{"FunctionName": "lambdaA", "Runtime": "nodejs12.x"},
{"FunctionName": "lambdaB", "Runtime": "nodejs10.x"},
{"FunctionName": "lambdaC", "Runtime": "nodejs12.x"},
{"FunctionName": "lambdaD", "Runtime": "python3.6"},
{"FunctionName": "lambdaE", "Runtime": "nodejs14.x"},
]
mock_client.get_paginator.assert_called_once_with("list_functions")
| GetSupportedFunctionsTest |
python | tornadoweb__tornado | tornado/test/httpserver_test.py | {
"start": 52379,
"end": 53616
} | class ____(AsyncHTTPTestCase):
def get_app(self):
# The old request_callback interface does not implement the
# delegate interface, and writes its response via request.write
# instead of request.connection.write_headers.
def handle_request(request):
self.http1 = request.version.startswith("HTTP/1.")
if not self.http1:
# This test will be skipped if we're using HTTP/2,
# so just close it out cleanly using the modern interface.
request.connection.write_headers(
ResponseStartLine("", 200, "OK"), HTTPHeaders()
)
request.connection.finish()
return
message = b"Hello world"
request.connection.write(
utf8("HTTP/1.1 200 OK\r\n" "Content-Length: %d\r\n\r\n" % len(message))
)
request.connection.write(message)
request.connection.finish()
return handle_request
def test_legacy_interface(self):
response = self.fetch("/")
if not self.http1:
self.skipTest("requires HTTP/1.x")
self.assertEqual(response.body, b"Hello world")
| LegacyInterfaceTest |
python | django__django | tests/model_forms/models.py | {
"start": 4237,
"end": 4325
} | class ____(models.Model):
f = CustomFileField(upload_to="unused", blank=True)
| CustomFF |
python | milvus-io__pymilvus | pymilvus/grpc_gen/milvus_pb2_grpc.py | {
"start": 196494,
"end": 197385
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def RegisterLink(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/milvus.proto.milvus.ProxyService/RegisterLink',
milvus__pb2.RegisterLinkRequest.SerializeToString,
milvus__pb2.RegisterLinkResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
| ProxyService |
python | tornadoweb__tornado | tornado/auth.py | {
"start": 22040,
"end": 27029
} | class ____:
"""Abstract implementation of OAuth 2.0.
See `FacebookGraphMixin` or `GoogleOAuth2Mixin` below for example
implementations.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url.
"""
def authorize_redirect(
self,
redirect_uri: Optional[str] = None,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
extra_params: Optional[Dict[str, Any]] = None,
scope: Optional[List[str]] = None,
response_type: str = "code",
) -> None:
"""Redirects the user to obtain OAuth authorization for this service.
Some providers require that you register a redirect URL with
your application instead of passing one via this method. You
should call this method to log the user in, and then call
``get_authenticated_user`` in the handler for your
redirect URL to complete the authorization process.
.. versionchanged:: 6.0
The ``callback`` argument and returned awaitable were removed;
this is now an ordinary synchronous function.
.. deprecated:: 6.4
The ``client_secret`` argument (which has never had any effect)
is deprecated and will be removed in Tornado 7.0.
"""
if client_secret is not None:
warnings.warn("client_secret argument is deprecated", DeprecationWarning)
handler = cast(RequestHandler, self)
args = {"response_type": response_type}
if redirect_uri is not None:
args["redirect_uri"] = redirect_uri
if client_id is not None:
args["client_id"] = client_id
if extra_params:
args.update(extra_params)
if scope:
args["scope"] = " ".join(scope)
url = self._OAUTH_AUTHORIZE_URL # type: ignore
handler.redirect(url_concat(url, args))
def _oauth_request_token_url(
self,
redirect_uri: Optional[str] = None,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
code: Optional[str] = None,
extra_params: Optional[Dict[str, Any]] = None,
) -> str:
url = self._OAUTH_ACCESS_TOKEN_URL # type: ignore
args = {} # type: Dict[str, str]
if redirect_uri is not None:
args["redirect_uri"] = redirect_uri
if code is not None:
args["code"] = code
if client_id is not None:
args["client_id"] = client_id
if client_secret is not None:
args["client_secret"] = client_secret
if extra_params:
args.update(extra_params)
return url_concat(url, args)
async def oauth2_request(
self,
url: str,
access_token: Optional[str] = None,
post_args: Optional[Dict[str, Any]] = None,
**args: Any,
) -> Any:
"""Fetches the given URL auth an OAuth2 access token.
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
Example usage:
..testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
async def get(self):
new_entry = await self.oauth2_request(
"https://graph.facebook.com/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!")
.. versionadded:: 4.3
.. versionchanged::: 6.0
The ``callback`` argument was removed. Use the returned awaitable object instead.
"""
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(args)
if all_args:
url += "?" + urllib.parse.urlencode(all_args)
http = self.get_auth_http_client()
if post_args is not None:
response = await http.fetch(
url, method="POST", body=urllib.parse.urlencode(post_args)
)
else:
response = await http.fetch(url)
return escape.json_decode(response.body)
def get_auth_http_client(self) -> httpclient.AsyncHTTPClient:
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
.. versionadded:: 4.3
"""
return httpclient.AsyncHTTPClient()
| OAuth2Mixin |
python | django__django | tests/admin_scripts/management/commands/suppress_base_options_command.py | {
"start": 49,
"end": 650
} | class ____(BaseCommand):
help = "Test suppress base options command."
requires_system_checks = []
suppressed_base_arguments = {
"-v",
"--traceback",
"--settings",
"--pythonpath",
"--no-color",
"--force-color",
"--version",
"file",
}
def add_arguments(self, parser):
super().add_arguments(parser)
self.add_base_argument(parser, "file", nargs="?", help="input file")
def handle(self, *labels, **options):
print("EXECUTE:SuppressBaseOptionsCommand options=%s" % sorted(options.items()))
| Command |
python | davidhalter__jedi | test/completion/goto.py | {
"start": 3297,
"end": 3490
} | class ____():
def class_func(func):
return func
#! 14 ['def class_func']
@ClassDec.class_func
def x():
pass
#! 2 ['class ClassDec']
@ClassDec.class_func
def z():
pass
| ClassDec |
python | getsentry__sentry | src/sentry/workflow_engine/typings/notification_action.py | {
"start": 21532,
"end": 21960
} | class ____(DataBlob):
"""
TicketDataBlob is a specific type that represents the data blob for a ticket creation action.
"""
# Dynamic form fields from customer configuration
dynamic_form_fields: list[dict[str, Any]] = field(default_factory=list)
# Store any additional fields that aren't part of standard fields
additional_fields: dict[str, Any] = field(default_factory=dict)
@dataclass
| TicketDataBlob |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 80404,
"end": 81460
} | class ____:
"""Test de_CH address provider methods"""
def test_canton_name(self, faker, num_samples):
for _ in range(num_samples):
canton_name = faker.canton_name()
assert isinstance(canton_name, str)
assert any(canton_name == cantons[1] for cantons in DeChAddressProvider.cantons)
def test_canton_code(self, faker, num_samples):
for _ in range(num_samples):
canton_code = faker.canton_code()
assert isinstance(canton_code, str)
assert any(canton_code == cantons[0] for cantons in DeChAddressProvider.cantons)
def test_canton(self, faker, num_samples):
for _ in range(num_samples):
canton = faker.canton()
assert isinstance(canton, tuple)
assert canton in DeChAddressProvider.cantons
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city_name()
assert isinstance(city, str)
assert city in DeChAddressProvider.cities
| TestDeCh |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.