language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | vyperlang__vyper | vyper/codegen/module.py | {
"start": 1052,
"end": 21300
} | class ____:
def __init__(self):
self._id = 0
def ensure_id(self, fn_t):
if fn_t._function_id is None:
fn_t._function_id = self._id
self._id += 1
def _is_constructor(func_ast):
return func_ast._metadata["func_type"].is_constructor
def _is_fallback(func_ast):
return func_ast._metadata["func_type"].is_fallback
def _is_internal(func_ast):
return func_ast._metadata["func_type"].is_internal
def _is_payable(func_ast):
return func_ast._metadata["func_type"].is_payable
def _annotated_method_id(abi_sig):
method_id = method_id_int(abi_sig)
annotation = f"{hex(method_id)}: {abi_sig}"
return IRnode(method_id, annotation=annotation)
def label_for_entry_point(abi_sig, entry_point):
method_id = method_id_int(abi_sig)
return f"{entry_point.func_t._ir_info.ir_identifier}{method_id}"
# adapt whatever generate_ir_for_function gives us into an IR node
def _ir_for_fallback_or_ctor(func_ast, *args, **kwargs):
func_t = func_ast._metadata["func_type"]
assert func_t.is_fallback or func_t.is_constructor
ret = ["seq"]
if not func_t.is_payable:
callvalue_check = ["assert", ["iszero", "callvalue"]]
ret.append(IRnode.from_list(callvalue_check, error_msg="nonpayable check"))
func_ir = generate_ir_for_external_function(func_ast, *args, **kwargs)
assert len(func_ir.entry_points) == 1
# add a goto to make the function entry look like other functions
# (for zksync interpreter)
ret.append(["goto", func_t._ir_info.external_function_base_entry_label])
ret.append(func_ir.common_ir)
return IRnode.from_list(ret)
def _ir_for_internal_function(func_ast, *args, **kwargs):
return generate_ir_for_internal_function(func_ast, *args, **kwargs).func_ir
def _generate_external_entry_points(external_functions, module_t):
entry_points = {} # map from ABI sigs to ir code
sig_of = {} # reverse map from method ids to abi sig
for code in external_functions:
func_ir = generate_ir_for_external_function(code, module_t)
for abi_sig, entry_point in func_ir.entry_points.items():
method_id = method_id_int(abi_sig)
assert abi_sig not in entry_points
assert method_id not in sig_of
entry_points[abi_sig] = entry_point
sig_of[method_id] = abi_sig
# stick function common body into final entry point to save a jump
ir_node = IRnode.from_list(["seq", entry_point.ir_node, func_ir.common_ir])
entry_point.ir_node = ir_node
return entry_points, sig_of
# codegen for all runtime functions + callvalue/calldata checks,
# with O(1) jumptable for selector table.
# uses two level strategy: uses `method_id % n_buckets` to descend
# into a bucket (of about 8-10 items), and then uses perfect hash
# to select the final function.
# costs about 212 gas for typical function and 8 bytes of code (+ ~87 bytes of global overhead)
def _selector_section_dense(external_functions, module_t):
function_irs = []
if len(external_functions) == 0:
return IRnode.from_list(["seq"])
entry_points, sig_of = _generate_external_entry_points(external_functions, module_t)
# generate the label so the jumptable works
for abi_sig, entry_point in entry_points.items():
label = label_for_entry_point(abi_sig, entry_point)
ir_node = ["label", label, ["var_list"], entry_point.ir_node]
function_irs.append(IRnode.from_list(ir_node))
n_buckets, jumptable_info = jumptable_utils.generate_dense_jumptable_info(entry_points.keys())
# note: we are guaranteed by jumptable_utils that there are no buckets
# which are empty. sanity check that the bucket ids are well-behaved:
assert n_buckets == len(jumptable_info)
for i, (bucket_id, _) in enumerate(sorted(jumptable_info.items())):
assert i == bucket_id
# bucket magic <2 bytes> | bucket location <2 bytes> | bucket size <1 byte>
# TODO: can make it smaller if the largest bucket magic <= 255
SZ_BUCKET_HEADER = 5
selector_section = ["seq"]
bucket_id = ["mod", "_calldata_method_id", n_buckets]
bucket_hdr_location = [
"add",
["symbol", "BUCKET_HEADERS"],
["mul", bucket_id, SZ_BUCKET_HEADER],
]
# get bucket header
dst = 32 - SZ_BUCKET_HEADER
assert dst >= 0
if _is_debug_mode():
selector_section.append(["assert", ["eq", "msize", 0]])
selector_section.append(["codecopy", dst, bucket_hdr_location, SZ_BUCKET_HEADER])
# figure out the minimum number of bytes we can use to encode
# min_calldatasize in function info
largest_mincalldatasize = max(f.min_calldatasize for f in entry_points.values())
FN_METADATA_BYTES = (largest_mincalldatasize.bit_length() + 7) // 8
func_info_size = 4 + 2 + FN_METADATA_BYTES
# grab function info.
# method id <4 bytes> | label <2 bytes> | func info <1-3 bytes>
# func info (1-3 bytes, packed) for: expected calldatasize, is_nonpayable bit
# NOTE: might be able to improve codesize if we use variable # of bytes
# per bucket
hdr_info = IRnode.from_list(["mload", 0])
with hdr_info.cache_when_complex("hdr_info") as (b1, hdr_info):
bucket_location = ["and", 0xFFFF, shr(8, hdr_info)]
bucket_magic = shr(24, hdr_info)
bucket_size = ["and", 0xFF, hdr_info]
# ((method_id * bucket_magic) >> BITS_MAGIC) % bucket_size
func_id = [
"mod",
shr(jumptable_utils.BITS_MAGIC, ["mul", bucket_magic, "_calldata_method_id"]),
bucket_size,
]
func_info_location = ["add", bucket_location, ["mul", func_id, func_info_size]]
dst = 32 - func_info_size
assert func_info_size >= SZ_BUCKET_HEADER # otherwise mload will have dirty bytes
assert dst >= 0
selector_section.append(b1.resolve(["codecopy", dst, func_info_location, func_info_size]))
func_info = IRnode.from_list(["mload", 0])
fn_metadata_mask = 2 ** (FN_METADATA_BYTES * 8) - 1
calldatasize_mask = fn_metadata_mask - 1 # ex. 0xFFFE
with func_info.cache_when_complex("func_info") as (b1, func_info):
x = ["seq"]
# expected calldatasize always satisfies (x - 4) % 32 == 0
# the lower 5 bits are always 0b00100, so we can use those
# bits for other purposes.
is_nonpayable = ["and", 1, func_info]
expected_calldatasize = ["and", calldatasize_mask, func_info]
label_bits_ofst = FN_METADATA_BYTES * 8
function_label = ["and", 0xFFFF, shr(label_bits_ofst, func_info)]
method_id_bits_ofst = (FN_METADATA_BYTES + 2) * 8
function_method_id = shr(method_id_bits_ofst, func_info)
# check method id is right, if not then fallback.
# need to check calldatasize >= 4 in case there are
# trailing 0s in the method id.
calldatasize_valid = ["gt", "calldatasize", 3]
method_id_correct = ["eq", function_method_id, "_calldata_method_id"]
should_fallback = ["iszero", ["and", calldatasize_valid, method_id_correct]]
x.append(["if", should_fallback, ["goto", "fallback"]])
# assert callvalue == 0 if nonpayable
bad_callvalue = ["mul", is_nonpayable, "callvalue"]
# assert calldatasize at least minimum for the abi type
bad_calldatasize = ["lt", "calldatasize", expected_calldatasize]
failed_entry_conditions = ["or", bad_callvalue, bad_calldatasize]
check_entry_conditions = IRnode.from_list(
["assert", ["iszero", failed_entry_conditions]],
error_msg="bad calldatasize or callvalue",
)
x.append(check_entry_conditions)
jump_targets = [func.args[0].value for func in function_irs]
jump_instr = IRnode.from_list(["djump", function_label, *jump_targets])
x.append(jump_instr)
selector_section.append(b1.resolve(x))
bucket_headers = ["data", "BUCKET_HEADERS"]
for bucket_id, bucket in sorted(jumptable_info.items()):
bucket_headers.append(bucket.magic.to_bytes(2, "big"))
bucket_headers.append(["symbol", f"bucket_{bucket_id}"])
# note: buckets are usually ~10 items. to_bytes would
# fail if the int is too big.
bucket_headers.append(bucket.bucket_size.to_bytes(1, "big"))
selector_section.append(bucket_headers)
for bucket_id, bucket in jumptable_info.items():
function_infos = ["data", f"bucket_{bucket_id}"]
# sort function infos by their image.
for method_id in bucket.method_ids_image_order:
abi_sig = sig_of[method_id]
entry_point = entry_points[abi_sig]
method_id_bytes = method_id.to_bytes(4, "big")
symbol = ["symbol", label_for_entry_point(abi_sig, entry_point)]
func_metadata_int = entry_point.min_calldatasize | int(
not entry_point.func_t.is_payable
)
func_metadata = func_metadata_int.to_bytes(FN_METADATA_BYTES, "big")
function_infos.extend([method_id_bytes, symbol, func_metadata])
selector_section.append(function_infos)
ret = ["seq", ["with", "_calldata_method_id", shr(224, ["calldataload", 0]), selector_section]]
ret.extend(function_irs)
return ret
# codegen for all runtime functions + callvalue/calldata checks,
# with O(1) jumptable for selector table.
# uses two level strategy: uses `method_id % n_methods` to calculate
# a bucket, and then descends into linear search from there.
# costs about 126 gas for typical (nonpayable, >0 args, avg bucket size 1.5)
# function and 24 bytes of code (+ ~23 bytes of global overhead)
def _selector_section_sparse(external_functions, module_t):
ret = ["seq"]
if len(external_functions) == 0:
return ret
entry_points, sig_of = _generate_external_entry_points(external_functions, module_t)
n_buckets, buckets = jumptable_utils.generate_sparse_jumptable_buckets(entry_points.keys())
# 2 bytes for bucket location
SZ_BUCKET_HEADER = 2
if n_buckets > 1:
bucket_id = ["mod", "_calldata_method_id", n_buckets]
bucket_hdr_location = [
"add",
["symbol", "selector_buckets"],
["mul", bucket_id, SZ_BUCKET_HEADER],
]
# get bucket header
dst = 32 - SZ_BUCKET_HEADER
assert dst >= 0
if _is_debug_mode():
ret.append(["assert", ["eq", "msize", 0]])
ret.append(["codecopy", dst, bucket_hdr_location, SZ_BUCKET_HEADER])
jump_targets = []
for i in range(n_buckets):
if i in buckets:
bucket_label = f"selector_bucket_{i}"
jump_targets.append(bucket_label)
else:
# empty bucket
jump_targets.append("fallback")
jumptable_data = ["data", "selector_buckets"]
jumptable_data.extend(["symbol", label] for label in jump_targets)
jumpdest = IRnode.from_list(["mload", 0])
jump_instr = IRnode.from_list(["djump", jumpdest, *jump_targets])
ret.append(jump_instr)
ret.append(jumptable_data)
for bucket_id, bucket in buckets.items():
bucket_label = f"selector_bucket_{bucket_id}"
ret.append(["label", bucket_label, ["var_list"], ["seq"]])
handle_bucket = ["seq"]
for method_id in bucket:
sig = sig_of[method_id]
entry_point = entry_points[sig]
func_t = entry_point.func_t
expected_calldatasize = entry_point.min_calldatasize
dispatch = ["seq"] # code to dispatch into the function
skip_callvalue_check = func_t.is_payable
skip_calldatasize_check = expected_calldatasize == 4
bad_callvalue = [0] if skip_callvalue_check else ["callvalue"]
bad_calldatasize = (
[0] if skip_calldatasize_check else ["lt", "calldatasize", expected_calldatasize]
)
dispatch.append(
IRnode.from_list(
["assert", ["iszero", ["or", bad_callvalue, bad_calldatasize]]],
error_msg="bad calldatasize or callvalue",
)
)
# we could skip a jumpdest per method if we out-lined the entry point
# so the dispatcher looks just like -
# ```(if (eq <calldata_method_id> method_id)
# (goto entry_point_label))```
# it would another optimization for patterns like
# `if ... (goto)` though.
dispatch.append(entry_point.ir_node)
method_id_check = ["eq", "_calldata_method_id", _annotated_method_id(sig)]
has_trailing_zeroes = method_id.to_bytes(4, "big").endswith(b"\x00")
if has_trailing_zeroes:
# if the method id check has trailing 0s, we need to include
# a calldatasize check to distinguish from when not enough
# bytes are provided for the method id in calldata.
method_id_check = ["and", ["ge", "calldatasize", 4], method_id_check]
handle_bucket.append(["if", method_id_check, dispatch])
# close out the bucket with a goto fallback so we don't keep searching
handle_bucket.append(["goto", "fallback"])
ret.append(handle_bucket)
ret = ["seq", ["with", "_calldata_method_id", shr(224, ["calldataload", 0]), ret]]
return ret
# codegen for all runtime functions + callvalue/calldata checks,
# O(n) linear search for the method id
# mainly keep this in for backends which cannot handle the indirect jump
# in selector_section_dense and selector_section_sparse
def _selector_section_linear(external_functions, module_t):
ret = ["seq"]
if len(external_functions) == 0:
return ret
ret.append(["if", ["lt", "calldatasize", 4], ["goto", "fallback"]])
entry_points, sig_of = _generate_external_entry_points(external_functions, module_t)
dispatcher = ["seq"]
for sig, entry_point in entry_points.items():
func_t = entry_point.func_t
expected_calldatasize = entry_point.min_calldatasize
dispatch = ["seq"] # code to dispatch into the function
if not func_t.is_payable:
callvalue_check = ["assert", ["iszero", "callvalue"]]
dispatch.append(IRnode.from_list(callvalue_check, error_msg="nonpayable check"))
good_calldatasize = ["ge", "calldatasize", expected_calldatasize]
calldatasize_check = ["assert", good_calldatasize]
dispatch.append(IRnode.from_list(calldatasize_check, error_msg="calldatasize check"))
dispatch.append(entry_point.ir_node)
method_id_check = ["eq", "_calldata_method_id", _annotated_method_id(sig)]
dispatcher.append(["if", method_id_check, dispatch])
ret.append(["with", "_calldata_method_id", shr(224, ["calldataload", 0]), dispatcher])
return ret
# take a ModuleT, and generate the runtime and deploy IR
def generate_ir_for_module(module_t: ModuleT) -> tuple[IRnode, IRnode]:
# order functions so that each function comes after all of its callees
id_generator = IDGenerator()
runtime_reachable = _runtime_reachable_functions(module_t, id_generator)
function_defs = [fn_t.ast_def for fn_t in runtime_reachable]
runtime_functions = [f for f in function_defs if not _is_constructor(f)]
internal_functions = [f for f in runtime_functions if _is_internal(f)]
external_functions = [
f for f in runtime_functions if not _is_internal(f) and not _is_fallback(f)
]
default_function = next((f for f in runtime_functions if _is_fallback(f)), None)
internal_functions_ir: list[IRnode] = []
# module_t internal functions first so we have the function info
for func_ast in internal_functions:
func_ir = _ir_for_internal_function(func_ast, module_t, False)
internal_functions_ir.append(IRnode.from_list(func_ir))
# TODO: add option to specifically force linear selector section,
# useful for testing and downstream tooling.
if core._opt_none():
selector_section = _selector_section_linear(external_functions, module_t)
# dense vs sparse global overhead is amortized after about 4 methods.
# (--debug will force dense selector table anyway if _opt_codesize is selected.)
elif core._opt_codesize() and (len(external_functions) > 4 or _is_debug_mode()):
selector_section = _selector_section_dense(external_functions, module_t)
else:
selector_section = _selector_section_sparse(external_functions, module_t)
if default_function:
fallback_ir = _ir_for_fallback_or_ctor(default_function, module_t)
else:
fallback_ir = IRnode.from_list(
["revert", 0, 0], annotation="Default function", error_msg="fallback function"
)
runtime = ["seq", selector_section]
runtime.append(["goto", "fallback"])
runtime.append(["label", "fallback", ["var_list"], fallback_ir])
runtime.extend(internal_functions_ir)
deploy_code: List[Any] = ["seq"]
immutables_len = module_t.immutable_section_bytes
if (init_func_t := module_t.init_function) is not None:
# cleanly rerun codegen for internal functions with `is_ctor_ctx=True`
id_generator.ensure_id(init_func_t)
ctor_internal_func_irs = []
reachable_from_ctor = init_func_t.reachable_internal_functions
for func_t in reachable_from_ctor:
id_generator.ensure_id(func_t)
fn_ast = func_t.ast_def
func_ir = _ir_for_internal_function(fn_ast, module_t, is_ctor_context=True)
ctor_internal_func_irs.append(func_ir)
# generate init_func_ir after callees to ensure they have analyzed
# memory usage.
# TODO might be cleaner to separate this into an _init_ir helper func
init_func_ir = _ir_for_fallback_or_ctor(init_func_t.ast_def, module_t)
# pass the amount of memory allocated for the init function
# so that deployment does not clobber while preparing immutables
# note: (deploy mem_ofst, code, extra_padding)
init_mem_used = init_func_t._ir_info.frame_info.mem_used
# force msize to be initialized past the end of immutables section
# so that builtins which use `msize` for "dynamic" memory
# allocation do not clobber uninitialized immutables.
# cf. GH issue 3101.
# note mload/iload X touches bytes from X to X+32, and msize rounds up
# to the nearest 32, so `iload`ing `immutables_len - 32` guarantees
# that `msize` will refer to a memory location of at least
# `<immutables_start> + immutables_len` (where <immutables_start> ==
# `_mem_deploy_end` as defined in the assembler).
# note:
# mload 32 => msize == 64
# mload 33 => msize == 96
# assumption in general: (mload X) => msize == ceil32(X + 32)
# see py-evm extend_memory: after_size = ceil32(start_position + size)
if immutables_len > 0:
deploy_code.append(["iload", max(0, immutables_len - 32)])
deploy_code.append(init_func_ir)
deploy_code.append(["deploy", init_mem_used, runtime, immutables_len])
# internal functions come at end of initcode
deploy_code.extend(ctor_internal_func_irs)
else:
if immutables_len != 0: # pragma: nocover
raise CompilerPanic("unreachable")
deploy_code.append(["deploy", 0, runtime, 0])
# compile all remaining internal functions so that _ir_info is populated
# (whether or not it makes it into the final IR artifact)
to_visit: OrderedSet = OrderedSet()
for func_ast in module_t.function_defs:
fn_t = func_ast._metadata["func_type"]
if fn_t.is_internal:
to_visit.update(fn_t.reachable_internal_functions)
to_visit.add(fn_t)
for fn_t in to_visit:
if fn_t._ir_info is None:
id_generator.ensure_id(fn_t)
_ = _ir_for_internal_function(fn_t.ast_def, module_t, False)
return IRnode.from_list(deploy_code), IRnode.from_list(runtime)
| IDGenerator |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/inputs.py | {
"start": 1557,
"end": 2284
} | class ____:
"""Holds information for how to prepare an input for an ExecutionStep."""
name: str
dagster_type_key: str
source: "StepInputSource"
@property
def dependency_keys(self) -> AbstractSet[str]:
return self.source.step_key_dependencies
def get_step_output_handle_dependencies(self) -> Sequence[StepOutputHandle]:
return self.source.step_output_handle_dependencies
def join_and_hash(*args: Optional[str]) -> Optional[str]:
lst = [check.opt_str_param(elem, "elem") for elem in args]
if None in lst:
return None
str_lst = cast("list[str]", lst)
unhashed = "".join(sorted(str_lst))
return hashlib.sha1(unhashed.encode("utf-8")).hexdigest()
| StepInput |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosmongo/llama_index/vector_stores/azurecosmosmongo/base.py | {
"start": 650,
"end": 15786
} | class ____(BasePydanticVectorStore):
"""
Azure CosmosDB MongoDB vCore Vector Store.
To use, you should have both:
- the ``pymongo`` python package installed
- a connection string associated with an Azure Cosmodb MongoDB vCore Cluster
Examples:
`pip install llama-index-vector-stores-azurecosmosmongo`
```python
import pymongo
from llama_index.vector_stores.azurecosmosmongo import AzureCosmosDBMongoDBVectorSearch
# Set up the connection string with your Azure CosmosDB MongoDB URI
connection_string = "YOUR_AZURE_COSMOSDB_MONGODB_URI"
mongodb_client = pymongo.MongoClient(connection_string)
# Create an instance of AzureCosmosDBMongoDBVectorSearch
vector_store = AzureCosmosDBMongoDBVectorSearch(
mongodb_client=mongodb_client,
db_name="demo_vectordb",
collection_name="paul_graham_essay",
)
```
"""
stores_text: bool = True
flat_metadata: bool = True
_collection: Any = PrivateAttr()
_index_name: str = PrivateAttr()
_embedding_key: str = PrivateAttr()
_id_key: str = PrivateAttr()
_text_key: str = PrivateAttr()
_metadata_key: str = PrivateAttr()
_insert_kwargs: dict = PrivateAttr()
_db_name: str = PrivateAttr()
_collection_name: str = PrivateAttr()
_cosmos_search_kwargs: dict = PrivateAttr()
_mongodb_client: Any = PrivateAttr()
def __init__(
self,
mongodb_client: Optional[Any] = None,
db_name: str = "default_db",
collection_name: str = "default_collection",
index_name: str = "default_vector_search_index",
id_key: str = "id",
embedding_key: str = "content_vector",
text_key: str = "text",
metadata_key: str = "metadata",
cosmos_search_kwargs: Optional[Dict] = None,
insert_kwargs: Optional[Dict] = None,
**kwargs: Any,
) -> None:
"""
Initialize the vector store.
Args:
mongodb_client: An Azure CosmoDB MongoDB client (type: MongoClient, shown any for lazy import).
db_name: An Azure CosmosDB MongoDB database name.
collection_name: An Azure CosmosDB collection name.
index_name: An Azure CosmosDB MongoDB vCore Vector Search index name.
id_key: The data field to use as the id.
embedding_key: An Azure CosmosDB MongoDB field that will contain
the embedding for each document.
text_key: An Azure CosmosDB MongoDB field that will contain the text for each document.
metadata_key: An Azure CosmosDB MongoDB field that will contain
the metadata for each document.
cosmos_search_kwargs: An Azure CosmosDB MongoDB field that will
contain search options, such as kind, numLists, similarity, and dimensions.
insert_kwargs: The kwargs used during `insert`.
"""
super().__init__()
if mongodb_client is not None:
self._mongodb_client = cast(pymongo.MongoClient, mongodb_client)
else:
if "AZURE_COSMOSDB_MONGODB_URI" not in os.environ:
raise ValueError(
"Must specify Azure cosmodb 'AZURE_COSMOSDB_MONGODB_URI' via env variable "
"if not directly passing in client."
)
self._mongodb_client = pymongo.MongoClient(
os.environ["AZURE_COSMOSDB_MONGODB_URI"],
appname="LLAMAINDEX_PYTHON",
)
self._collection = self._mongodb_client[db_name][collection_name]
self._index_name = index_name
self._embedding_key = embedding_key
self._id_key = id_key
self._text_key = text_key
self._metadata_key = metadata_key
self._insert_kwargs = insert_kwargs or {}
self._db_name = db_name
self._collection_name = collection_name
self._cosmos_search_kwargs = cosmos_search_kwargs or {}
self._create_vector_search_index()
def _create_vector_search_index(self) -> None:
db = self._mongodb_client[self._db_name]
create_index_commands = {}
kind = self._cosmos_search_kwargs.get("kind", "vector-hnsw")
if kind == "vector-ivf":
create_index_commands = self._get_vector_index_ivf(kind)
elif kind == "vector-hnsw":
create_index_commands = self._get_vector_index_hnsw(kind)
elif kind == "vector-diskann":
create_index_commands = self._get_vector_index_diskann(kind)
db.command(create_index_commands)
def _get_vector_index_ivf(
self,
kind: str,
) -> Dict[str, Any]:
indexes = {
"name": self._index_name,
"key": {self._embedding_key: "cosmosSearch"},
"cosmosSearchOptions": {
"kind": kind,
"numLists": self._cosmos_search_kwargs.get("numLists", 1),
"similarity": self._cosmos_search_kwargs.get("similarity", "COS"),
"dimensions": self._cosmos_search_kwargs.get("dimensions", 1536),
},
}
if self._cosmos_search_kwargs.get("compression", None) == "half":
indexes["cosmosSearchOptions"]["compression"] = "half"
return {
"createIndexes": self._collection_name,
"indexes": [indexes],
}
def _get_vector_index_hnsw(
self,
kind: str,
) -> Dict[str, Any]:
indexes = {
"name": self._index_name,
"key": {self._embedding_key: "cosmosSearch"},
"cosmosSearchOptions": {
"kind": kind,
"m": self._cosmos_search_kwargs.get("m", 2),
"efConstruction": self._cosmos_search_kwargs.get("efConstruction", 64),
"similarity": self._cosmos_search_kwargs.get("similarity", "COS"),
"dimensions": self._cosmos_search_kwargs.get("dimensions", 1536),
},
}
if self._cosmos_search_kwargs.get("compression", None) == "half":
indexes["cosmosSearchOptions"]["compression"] = "half"
return {
"createIndexes": self._collection_name,
"indexes": [indexes],
}
def _get_vector_index_diskann(
self,
kind: str,
) -> Dict[str, Any]:
indexes = {
"name": self._index_name,
"key": {self._embedding_key: "cosmosSearch"},
"cosmosSearchOptions": {
"kind": kind,
"maxDegree": self._cosmos_search_kwargs.get("maxDegree", 32),
"lBuild": self._cosmos_search_kwargs.get("lBuild", 50),
"similarity": self._cosmos_search_kwargs.get("similarity", "COS"),
"dimensions": self._cosmos_search_kwargs.get("dimensions", 1536),
},
}
if self._cosmos_search_kwargs.get("compression", None) == "pq":
indexes["cosmosSearchOptions"]["compression"] = "pq"
indexes["cosmosSearchOptions"]["pqCompressedDims"] = (
self._cosmos_search_kwargs.get(
"pqCompressedDims",
self._cosmos_search_kwargs.get("dimensions", 1536),
),
)
indexes["cosmosSearchOptions"]["pqSampleSize"] = (
self._cosmos_search_kwargs.get("pqSampleSize", 1000),
)
return {
"createIndexes": self._collection_name,
"indexes": [indexes],
}
def create_filter_index(
self,
property_to_filter: str,
index_name: str,
) -> dict[str, Any]:
db = self._mongodb_client[self._db_name]
command = {
"createIndexes": self._collection.name,
"indexes": [
{
"key": {property_to_filter: 1},
"name": index_name,
}
],
}
create_index_responses: dict[str, Any] = db.command(command)
return create_index_responses
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
Returns:
A List of ids for successfully added nodes.
"""
ids = []
data_to_insert = []
for node in nodes:
metadata = node_to_metadata_dict(
node, remove_text=True, flat_metadata=self.flat_metadata
)
entry = {
self._id_key: node.node_id,
self._embedding_key: node.get_embedding(),
self._text_key: node.get_content(metadata_mode=MetadataMode.NONE) or "",
self._metadata_key: metadata,
}
data_to_insert.append(entry)
ids.append(node.node_id)
logger.debug("Inserting data into MongoDB: %s", data_to_insert)
insert_result = self._collection.insert_many(
data_to_insert, **self._insert_kwargs
)
logger.debug("Result of insert: %s", insert_result)
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
# delete by filtering on the doc_id metadata
self._collection.delete_one(
filter={self._metadata_key + ".ref_doc_id": ref_doc_id}, **delete_kwargs
)
@property
def client(self) -> Any:
"""Return MongoDB client."""
return self._mongodb_client
def _query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
pipeline: List[dict[str, Any]] = []
kind = self._cosmos_search_kwargs.get("kind", "vector-hnsw")
if kind == "vector-ivf":
pipeline = self._get_pipeline_vector_ivf(
query, kwargs.get("oversampling", 1.0), kwargs.get("pre_filter", {})
)
elif kind == "vector-hnsw":
pipeline = self._get_pipeline_vector_hnsw(
query,
kwargs.get("ef_search", 40),
kwargs.get("oversampling", 1.0),
kwargs.get("pre_filter", {}),
)
elif kind == "vector-diskann":
pipeline = self._get_pipeline_vector_diskann(
query,
kwargs.get("lSearch", 40),
kwargs.get("oversampling", 1.0),
kwargs.get("pre_filter", {}),
)
logger.debug("Running query pipeline: %s", pipeline)
cursor = self._collection.aggregate(pipeline) # type: ignore
top_k_nodes = []
top_k_ids = []
top_k_scores = []
for res in cursor:
text = res["document"].pop(self._text_key)
score = res.pop("similarityScore")
id = res["document"].pop(self._id_key)
metadata_dict = res["document"].pop(self._metadata_key)
try:
node = metadata_dict_to_node(metadata_dict)
node.set_content(text)
except Exception:
# NOTE: deprecated legacy logic for backward compatibility
metadata, node_info, relationships = legacy_metadata_dict_to_node(
metadata_dict
)
node = TextNode(
text=text,
id_=id,
metadata=metadata,
start_char_idx=node_info.get("start", None),
end_char_idx=node_info.get("end", None),
relationships=relationships,
)
top_k_ids.append(id)
top_k_nodes.append(node)
top_k_scores.append(score)
result = VectorStoreQueryResult(
nodes=top_k_nodes, similarities=top_k_scores, ids=top_k_ids
)
logger.debug("Result of query: %s", result)
return result
def _get_pipeline_vector_ivf(
self, query: VectorStoreQuery, oversampling: float, pre_filter: Optional[Dict]
) -> List[dict[str, Any]]:
params = {
"vector": query.query_embedding,
"path": self._embedding_key,
"k": query.similarity_top_k,
"oversampling": oversampling,
}
if pre_filter:
params["filter"] = pre_filter
pipeline: List[dict[str, Any]] = [
{
"$search": {
"cosmosSearch": params,
"returnStoredSource": True,
}
},
{
"$project": {
"similarityScore": {"$meta": "searchScore"},
"document": "$$ROOT",
}
},
]
return pipeline
def _get_pipeline_vector_hnsw(
self,
query: VectorStoreQuery,
ef_search: int,
oversampling: float,
pre_filter: Optional[Dict],
) -> List[dict[str, Any]]:
params = {
"vector": query.query_embedding,
"path": self._embedding_key,
"k": query.similarity_top_k,
"efSearch": ef_search,
"oversampling": oversampling,
}
if pre_filter:
params["filter"] = pre_filter
pipeline: List[dict[str, Any]] = [
{
"$search": {
"cosmosSearch": params,
}
},
{
"$project": {
"similarityScore": {"$meta": "searchScore"},
"document": "$$ROOT",
}
},
]
return pipeline
def _get_pipeline_vector_diskann(
self,
query: VectorStoreQuery,
l_search: int,
oversampling: float,
pre_filter: Optional[Dict],
) -> List[dict[str, Any]]:
params = {
"vector": query.query_embedding,
"path": self._embedding_key,
"k": query.similarity_top_k,
"lSearch": l_search,
"oversampling": oversampling,
}
if pre_filter:
params["filter"] = pre_filter
pipeline: List[dict[str, Any]] = [
{
"$search": {
"cosmosSearch": params,
}
},
{
"$project": {
"similarityScore": {"$meta": "searchScore"},
"document": "$$ROOT",
}
},
]
return pipeline
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""
Query index for top k most similar nodes.
Args:
query: a VectorStoreQuery object.
Returns:
A VectorStoreQueryResult containing the results of the query.
"""
return self._query(query, **kwargs)
| AzureCosmosDBMongoDBVectorSearch |
python | pikepdf__pikepdf | src/pikepdf/_methods.py | {
"start": 2997,
"end": 5713
} | class ____:
def _ipython_key_completions_(self):
if isinstance(self, Dictionary | Stream):
return self.keys()
return None
def emplace(self, other: Object, retain=(Name.Parent,)):
if not self.same_owner_as(other):
raise TypeError("Objects must have the same owner for emplace()")
# .keys() returns strings, so make all strings
retain = {str(k) for k in retain}
self_keys = set(self.keys())
other_keys = set(other.keys())
assert all(isinstance(k, str) for k in (retain | self_keys | other_keys))
del_keys = self_keys - other_keys - retain
for k in (k for k in other_keys if k not in retain):
self[k] = other[k] # pylint: disable=unsupported-assignment-operation
for k in del_keys:
del self[k] # pylint: disable=unsupported-delete-operation
def _type_check_write(self, filter_, decode_parms):
if isinstance(filter_, list):
filter_ = Array(filter_)
filter_ = filter_.wrap_in_array()
if isinstance(decode_parms, list):
decode_parms = Array(decode_parms)
elif decode_parms is None:
decode_parms = Array([])
else:
decode_parms = decode_parms.wrap_in_array()
if not all(isinstance(item, Name) for item in filter_):
raise TypeError(
"filter must be: pikepdf.Name or pikepdf.Array([pikepdf.Name])"
)
if not all(
(isinstance(item, Dictionary) or item is None) for item in decode_parms
):
raise TypeError(
"decode_parms must be: pikepdf.Dictionary or "
"pikepdf.Array([pikepdf.Dictionary])"
)
if len(decode_parms) != 0 and len(filter_) != len(decode_parms):
raise ValueError(
f"filter ({repr(filter_)}) and decode_parms "
f"({repr(decode_parms)}) must be arrays of same length"
)
if len(filter_) == 1:
filter_ = filter_[0]
if len(decode_parms) == 0:
decode_parms = None
elif len(decode_parms) == 1:
decode_parms = decode_parms[0]
return filter_, decode_parms
def write(
self,
data: bytes,
*,
filter: Name | Array | None = None,
decode_parms: Dictionary | Array | None = None,
type_check: bool = True,
): # pylint: disable=redefined-builtin
if type_check and filter is not None:
filter, decode_parms = self._type_check_write(filter, decode_parms)
self._write(data, filter=filter, decode_parms=decode_parms)
@augments(Pdf)
| Extend_Object |
python | openai__openai-python | src/openai/__init__.py | {
"start": 4378,
"end": 7467
} | class ____(OpenAI):
# Note: we have to use type: ignores here as overriding class members
# with properties is technically unsafe but it is fine for our use case
@property # type: ignore
@override
def api_key(self) -> str | None:
return api_key
@api_key.setter # type: ignore
def api_key(self, value: str | None) -> None: # type: ignore
global api_key
api_key = value
@property # type: ignore
@override
def organization(self) -> str | None:
return organization
@organization.setter # type: ignore
def organization(self, value: str | None) -> None: # type: ignore
global organization
organization = value
@property # type: ignore
@override
def project(self) -> str | None:
return project
@project.setter # type: ignore
def project(self, value: str | None) -> None: # type: ignore
global project
project = value
@property # type: ignore
@override
def webhook_secret(self) -> str | None:
return webhook_secret
@webhook_secret.setter # type: ignore
def webhook_secret(self, value: str | None) -> None: # type: ignore
global webhook_secret
webhook_secret = value
@property
@override
def base_url(self) -> _httpx.URL:
if base_url is not None:
return _httpx.URL(base_url)
return super().base_url
@base_url.setter
def base_url(self, url: _httpx.URL | str) -> None:
super().base_url = url # type: ignore[misc]
@property # type: ignore
@override
def timeout(self) -> float | Timeout | None:
return timeout
@timeout.setter # type: ignore
def timeout(self, value: float | Timeout | None) -> None: # type: ignore
global timeout
timeout = value
@property # type: ignore
@override
def max_retries(self) -> int:
return max_retries
@max_retries.setter # type: ignore
def max_retries(self, value: int) -> None: # type: ignore
global max_retries
max_retries = value
@property # type: ignore
@override
def _custom_headers(self) -> _t.Mapping[str, str] | None:
return default_headers
@_custom_headers.setter # type: ignore
def _custom_headers(self, value: _t.Mapping[str, str] | None) -> None: # type: ignore
global default_headers
default_headers = value
@property # type: ignore
@override
def _custom_query(self) -> _t.Mapping[str, object] | None:
return default_query
@_custom_query.setter # type: ignore
def _custom_query(self, value: _t.Mapping[str, object] | None) -> None: # type: ignore
global default_query
default_query = value
@property # type: ignore
@override
def _client(self) -> _httpx.Client:
return http_client or super()._client
@_client.setter # type: ignore
def _client(self, value: _httpx.Client) -> None: # type: ignore
global http_client
http_client = value
| _ModuleClient |
python | encode__django-rest-framework | tests/test_pagination.py | {
"start": 423,
"end": 4785
} | class ____:
"""
Integration tests.
"""
def setup_method(self):
class PassThroughSerializer(serializers.BaseSerializer):
def to_representation(self, item):
return item
class EvenItemsOnly(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
return [item for item in queryset if item % 2 == 0]
class BasicPagination(pagination.PageNumberPagination):
page_size = 5
page_size_query_param = 'page_size'
max_page_size = 20
self.view = generics.ListAPIView.as_view(
serializer_class=PassThroughSerializer,
queryset=range(1, 101),
filter_backends=[EvenItemsOnly],
pagination_class=BasicPagination
)
def test_filtered_items_are_paginated(self):
request = factory.get('/', {'page': 2})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [12, 14, 16, 18, 20],
'previous': 'http://testserver/',
'next': 'http://testserver/?page=3',
'count': 50
}
def test_setting_page_size(self):
"""
When 'paginate_by_param' is set, the client may choose a page size.
"""
request = factory.get('/', {'page_size': 10})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20],
'previous': None,
'next': 'http://testserver/?page=2&page_size=10',
'count': 50
}
def test_setting_page_size_over_maximum(self):
"""
When page_size parameter exceeds maximum allowable,
then it should be capped to the maximum.
"""
request = factory.get('/', {'page_size': 1000})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [
2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
22, 24, 26, 28, 30, 32, 34, 36, 38, 40
],
'previous': None,
'next': 'http://testserver/?page=2&page_size=1000',
'count': 50
}
def test_setting_page_size_to_zero(self):
"""
When page_size parameter is invalid it should return to the default.
"""
request = factory.get('/', {'page_size': 0})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [2, 4, 6, 8, 10],
'previous': None,
'next': 'http://testserver/?page=2&page_size=0',
'count': 50
}
def test_additional_query_params_are_preserved(self):
request = factory.get('/', {'page': 2, 'filter': 'even'})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [12, 14, 16, 18, 20],
'previous': 'http://testserver/?filter=even',
'next': 'http://testserver/?filter=even&page=3',
'count': 50
}
def test_empty_query_params_are_preserved(self):
request = factory.get('/', {'page': 2, 'filter': ''})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [12, 14, 16, 18, 20],
'previous': 'http://testserver/?filter=',
'next': 'http://testserver/?filter=&page=3',
'count': 50
}
def test_404_not_found_for_zero_page(self):
request = factory.get('/', {'page': '0'})
response = self.view(request)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert response.data == {
'detail': 'Invalid page.'
}
def test_404_not_found_for_invalid_page(self):
request = factory.get('/', {'page': 'invalid'})
response = self.view(request)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert response.data == {
'detail': 'Invalid page.'
}
| TestPaginationIntegration |
python | wandb__wandb | wandb/sdk/artifacts/_generated/project_artifacts.py | {
"start": 748,
"end": 1189
} | class ____(GQLResult):
typename__: Typename[
Literal["ArtifactCollection", "ArtifactPortfolio", "ArtifactSequence"]
]
artifacts: Optional[VersionedArtifactConnectionFragment]
ProjectArtifacts.model_rebuild()
ProjectArtifactsProject.model_rebuild()
ProjectArtifactsProjectArtifactType.model_rebuild()
ProjectArtifactsProjectArtifactTypeArtifactCollection.model_rebuild()
| ProjectArtifactsProjectArtifactTypeArtifactCollection |
python | ray-project__ray | python/ray/_private/thirdparty/dacite/exceptions.py | {
"start": 1317,
"end": 1560
} | class ____(WrongTypeError):
def __str__(self) -> str:
return (
f'can not match type "{_name(type(self.value))}" to any type '
f'of "{self.field_path}" union: {_name(self.field_type)}'
)
| UnionMatchError |
python | celery__celery | t/unit/utils/test_deprecated.py | {
"start": 1591,
"end": 1739
} | class ____:
@patch('warnings.warn')
def test_warn_deprecated(self, warn):
deprecated.warn('Foo')
warn.assert_called()
| test_warn |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/types.py | {
"start": 4054,
"end": 5266
} | class ____:
def _literal_processor_datetime(self, dialect):
def process(value):
if getattr(value, "microsecond", None):
value = (
f"""TO_TIMESTAMP"""
f"""('{value.isoformat().replace("T", " ")}', """
"""'YYYY-MM-DD HH24:MI:SS.FF')"""
)
else:
value = (
f"""TO_DATE"""
f"""('{value.isoformat().replace("T", " ")}', """
"""'YYYY-MM-DD HH24:MI:SS')"""
)
return value
return process
def _literal_processor_date(self, dialect):
def process(value):
if getattr(value, "microsecond", None):
value = (
f"""TO_TIMESTAMP"""
f"""('{value.isoformat().split("T")[0]}', """
"""'YYYY-MM-DD')"""
)
else:
value = (
f"""TO_DATE"""
f"""('{value.isoformat().split("T")[0]}', """
"""'YYYY-MM-DD')"""
)
return value
return process
| _OracleDateLiteralRender |
python | django__django | tests/admin_inlines/admin.py | {
"start": 1522,
"end": 1607
} | class ____(admin.TabularInline):
model = EditablePKBook
| EditablePKBookTabularInline |
python | jazzband__django-model-utils | tests/test_fields/test_uuid_field.py | {
"start": 177,
"end": 1260
} | class ____(TestCase):
def test_uuid_version_default(self) -> None:
instance = UUIDField()
self.assertEqual(instance.default, uuid.uuid4)
def test_uuid_version_1(self) -> None:
instance = UUIDField(version=1)
self.assertEqual(instance.default, uuid.uuid1)
def test_uuid_version_2_error(self) -> None:
self.assertRaises(ValidationError, UUIDField, 'version', 2)
def test_uuid_version_3(self) -> None:
instance = UUIDField(version=3)
self.assertEqual(instance.default, uuid.uuid3)
def test_uuid_version_4(self) -> None:
instance = UUIDField(version=4)
self.assertEqual(instance.default, uuid.uuid4)
def test_uuid_version_5(self) -> None:
instance = UUIDField(version=5)
self.assertEqual(instance.default, uuid.uuid5)
def test_uuid_version_bellow_min(self) -> None:
self.assertRaises(ValidationError, UUIDField, 'version', 0)
def test_uuid_version_above_max(self) -> None:
self.assertRaises(ValidationError, UUIDField, 'version', 6)
| UUIDFieldTests |
python | walkccc__LeetCode | solutions/807. Max Increase to Keep City Skyline/807.py | {
"start": 0,
"end": 239
} | class ____:
def maxIncreaseKeepingSkyline(self, grid: list[list[int]]) -> int:
rowMax = list(map(max, grid))
colMax = list(map(max, zip(*grid)))
return sum(min(i, j) for i in rowMax for j in colMax) - sum(map(sum, grid))
| Solution |
python | streamlit__streamlit | lib/streamlit/runtime/caching/cache_errors.py | {
"start": 1659,
"end": 2934
} | class ____(StreamlitAPIException):
def __init__(
self,
cache_type: CacheType,
func: Callable[..., Any],
arg_name: str | None,
arg_value: Any,
orig_exc: BaseException,
) -> None:
msg = self._create_message(cache_type, func, arg_name, arg_value)
super().__init__(msg)
self.with_traceback(orig_exc.__traceback__)
@staticmethod
def _create_message(
cache_type: CacheType,
func: Callable[..., Any],
arg_name: str | None,
arg_value: Any,
) -> str:
arg_name_str = arg_name if arg_name is not None else "(unnamed)"
arg_type = type_util.get_fqn_type(arg_value)
func_name = func.__name__ if hasattr(func, "__name__") else "unknown"
arg_replacement_name = f"_{arg_name}" if arg_name is not None else "_arg"
return (
f"""
Cannot hash argument '{arg_name_str}' (of type `{arg_type}`) in '{func_name}'.
To address this, you can tell Streamlit not to hash this argument by adding a
leading underscore to the argument's name in the function signature:
```
@st.{get_decorator_api_name(cache_type)}
def {func_name}({arg_replacement_name}, ...):
...
```
"""
).strip("\n")
| UnhashableParamError |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/gradients_test.py | {
"start": 8363,
"end": 13419
} | class ____(tf_layers.Layer):
def __init__(self, data_format):
"""Creates a model for classifying a hand-written digit.
Args:
data_format: Either 'channels_first' or 'channels_last'.
"""
super(Mnist, self).__init__()
if data_format == "channels_first":
self._input_shape = [-1, 1, 28, 28]
else:
assert data_format == "channels_last"
self._input_shape = [-1, 28, 28, 1]
self.conv1 = tf_layers.Conv2D(
32, 5, padding="same", data_format=data_format, activation=nn.relu)
self.conv2 = tf_layers.Conv2D(
64, 5, padding="same", data_format=data_format, activation=nn.relu)
self.fc1 = tf_layers.Dense(1024, activation=nn.relu)
self.fc2 = tf_layers.Dense(10)
self.dropout = tf_layers.Dropout(0.4)
self.max_pool2d = tf_layers.MaxPooling2D(
(2, 2), (2, 2), padding="same", data_format=data_format)
def __call__(self, inputs, training):
"""Add operations to classify a batch of input images.
Args:
inputs: A Tensor representing a batch of input images.
training: A boolean. Set to True to add operations required only when
training the classifier.
Returns:
A logits Tensor with shape [<batch_size>, 10].
"""
y = array_ops.reshape(inputs, self._input_shape)
y = self.conv1(y)
y = self.max_pool2d(y)
y = self.conv2(y)
y = self.max_pool2d(y)
y = tf_layers.flatten(y)
y = self.fc1(y)
y = self.dropout(y, training=training)
return self.fc2(y)
def create_mnist_autobatch(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
model = Mnist(data_format)
manual = model(images, training=training)
def loop_fn(i):
image = array_ops.gather(images, i)
return model(image, training=training)
pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
while_outputs = control_flow_ops.for_loop(
loop_fn, dtypes.float32, batch_size)
return pfor_outputs, while_outputs, manual
def create_mnist_per_eg_grad(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
sparse_labels = np.random.randint(
low=0, high=10, size=[batch_size]).astype(np.int32)
labels = np.zeros((batch_size, 10)).astype(np.float32)
labels[np.arange(batch_size), sparse_labels] = 1.
model = Mnist(data_format)
def loop_fn(i):
image = array_ops.gather(images, i)
label = array_ops.gather(labels, i)
logits = array_ops.reshape(model(image, training=training), [-1])
loss = losses.softmax_cross_entropy(
logits=logits, onehot_labels=label, reduction=losses.Reduction.NONE)
return gradient_ops.gradients(loss, variables.trainable_variables())
pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
while_outputs = control_flow_ops.for_loop(
loop_fn, [dtypes.float32] * len(variables.trainable_variables()),
batch_size)
return pfor_outputs, while_outputs
def create_mnist_batch_jacobian(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
model = Mnist(data_format)
logits = model(images, training=training)
pfor_jacobian = gradients.batch_jacobian(logits, images, use_pfor=True)
while_jacobian = gradients.batch_jacobian(logits, images, use_pfor=False)
return pfor_jacobian, while_jacobian
def create_mnist_per_eg_jacobian(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
model = Mnist(data_format)
def loop_fn(i, use_pfor):
image = array_ops.gather(images, i)
logits = array_ops.reshape(model(image, training=training), [-1])
return gradients.jacobian(
logits, variables.trainable_variables(), use_pfor=use_pfor)
pfor_outputs = control_flow_ops.pfor(
functools.partial(loop_fn, use_pfor=True),
batch_size)
while_outputs = control_flow_ops.for_loop(
functools.partial(loop_fn, use_pfor=False),
[dtypes.float32] * len(variables.trainable_variables()), batch_size)
return pfor_outputs, while_outputs
def create_fc_per_eg_jacobians(batch_size, activation_size, num_layers):
model = FullyConnectedModel(activation_size=activation_size,
num_layers=num_layers)
inp = random_ops.random_normal([batch_size, activation_size])
output = model(inp)
jacobians = gradients.jacobian(output, variables.trainable_variables())
def loop_fn(i, use_pfor):
inp_i = array_ops.expand_dims(array_ops.gather(inp, i), 0)
output = array_ops.reshape(model(inp_i), [-1])
return gradients.jacobian(
output, variables.trainable_variables(), use_pfor=use_pfor)
per_eg_jacobians_pfor = control_flow_ops.pfor(
functools.partial(loop_fn, use_pfor=True),
batch_size)
per_eg_jacobians_while = control_flow_ops.for_loop(
functools.partial(loop_fn, use_pfor=False),
[dtypes.float32] * len(variables.trainable_variables()), batch_size)
return jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while
@test_util.run_v1_only("b/122612051")
| Mnist |
python | getsentry__sentry | src/sentry/analytics/events/auth_v2.py | {
"start": 83,
"end": 204
} | class ____(analytics.Event):
event: str
@analytics.eventclass("auth_v2.csrf_token.delete_login")
| AuthV2CsrfTokenRotated |
python | huggingface__transformers | src/transformers/models/granitemoeshared/modeling_granitemoeshared.py | {
"start": 20463,
"end": 23509
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: GraniteMoeSharedConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[GraniteMoeSharedConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
| GraniteMoeSharedRotaryEmbedding |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/interfaces.py | {
"start": 4658,
"end": 4957
} | class ____(TypedDict):
name: str
# TODO: add python_type and sql_type here; combining them
# into "type" is a bad idea
type: Union[Type[Any], TypeEngine[Any]]
aliased: bool
expr: _ColumnsClauseArgument[Any]
entity: Optional[_ColumnsClauseArgument[Any]]
| ORMColumnDescription |
python | kamyu104__LeetCode-Solutions | Python/the-number-of-beautiful-subsets.py | {
"start": 86,
"end": 668
} | class ____(object):
def beautifulSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def count(x):
y = x
while y-k in cnt:
y -= k
dp = [1, 0] # dp[0]: count without i, dp[1]: count with i
for i in xrange(y, x+1, k):
dp = [dp[0]+dp[1], dp[0]*((1<<cnt[i])-1)]
return sum(dp)
cnt = collections.Counter(nums)
return reduce(operator.mul, (count(i) for i in cnt.iterkeys() if i+k not in cnt))-1
| Solution |
python | gevent__gevent | src/greentest/3.10/test_threading.py | {
"start": 2462,
"end": 2704
} | class ____(unittest.TestCase):
def setUp(self):
self._threads = threading_helper.threading_setup()
def tearDown(self):
threading_helper.threading_cleanup(*self._threads)
test.support.reap_children()
| BaseTestCase |
python | django__django | tests/select_for_update/models.py | {
"start": 70,
"end": 138
} | class ____(Entity):
name = models.CharField(max_length=30)
| Country |
python | mlflow__mlflow | mlflow/system_metrics/metrics/disk_monitor.py | {
"start": 150,
"end": 689
} | class ____(BaseMetricsMonitor):
"""Class for monitoring disk stats."""
def collect_metrics(self):
# Get disk usage metrics.
disk_usage = psutil.disk_usage(os.sep)
self._metrics["disk_usage_percentage"].append(disk_usage.percent)
self._metrics["disk_usage_megabytes"].append(disk_usage.used / 1e6)
self._metrics["disk_available_megabytes"].append(disk_usage.free / 1e6)
def aggregate_metrics(self):
return {k: round(sum(v) / len(v), 1) for k, v in self._metrics.items()}
| DiskMonitor |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-transform-array.py | {
"start": 38,
"end": 549
} | class ____(object):
def minOperations(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
result = 0
cnt = float("inf")
for i in xrange(len(nums1)):
result += abs(nums1[i]-nums2[i])
if (nums2[-1]-nums1[i])*(nums2[-1]-nums2[i]) <= 0:
cnt = 0
cnt = min(cnt, abs(nums2[-1]-nums1[i]), abs(nums2[-1]-nums2[i]))
result += 1+cnt
return result
| Solution |
python | pennersr__django-allauth | allauth/idp/oidc/admin.py | {
"start": 252,
"end": 1191
} | class ____(admin.ModelAdmin):
raw_id_fields = ("owner",)
list_display = (
"name",
"id",
"type",
"owner",
"skip_consent",
"allow_uri_wildcards",
"created_at",
)
readonly_fields = ("secret", "created_at")
list_filter = ("type", "skip_consent", "allow_uri_wildcards")
def save_model(self, request, obj, form, change):
if not change:
adapter = get_adapter()
secret = adapter.generate_client_secret()
obj.set_secret(secret)
self.message_user(
request,
mark_safe(
f'The client secret is only shown once: <input readonly size="{len(secret)}" type="text" value="{escape(secret)}">'
), # nosec
level=messages.WARNING,
)
return super().save_model(request, obj, form, change)
@admin.register(Token)
| ClientAdmin |
python | walkccc__LeetCode | solutions/3213. Construct String with Minimum Cost/3213.py | {
"start": 0,
"end": 734
} | class ____:
def minimumCost(self, target: str, words: list[str], costs: list[int]) -> int:
n = len(target)
# dp[i] := the minimum cost to construct target[0..i)
dp = [0] + [math.inf] * n
# minCost[c][word] := the minimum cost to construct word starting with `c`
minCost: dict[str, dict[str, int]] = collections.defaultdict(dict)
for word, cost in zip(words, costs):
c = word[0]
minCost[c][word] = min(minCost[c].get(word, math.inf), cost)
for i, c in enumerate(target):
for word, cost in minCost[c].items():
j = i + len(word)
if j <= n and cost + dp[i] < dp[j] and target[i:j] == word:
dp[j] = cost + dp[i]
return -1 if dp[n] == math.inf else dp[n]
| Solution |
python | django__django | django/utils/safestring.py | {
"start": 397,
"end": 653
} | class ____:
__slots__ = ()
def __html__(self):
"""
Return the html representation of a string for interoperability.
This allows other template engines to understand Django's SafeData.
"""
return self
| SafeData |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/matrix_multiply.py | {
"start": 3935,
"end": 7012
} | class ____(MatrixMultiplyOperator):
"""Operator for additive matrix multiplication (torch.addmm)."""
def __init__(self):
super().__init__("addmm")
self.weight = 5.0
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.addmm"
def can_produce(self, output_spec: Spec) -> bool:
"""Addmm requires exactly 2D tensors."""
if not isinstance(output_spec, TensorSpec):
return False
# Must have exactly 2 dimensions for torch.addmm
if len(output_spec.size) != 2:
return False
# Matrix multiply doesn't work with bool or integer types for gradients
if output_spec.dtype in [
torch.bool,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
]:
return False
return True
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for additive matrix multiplication."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("AddmmOperator can only produce TensorSpec outputs")
if len(output_spec.size) != 2:
raise ValueError("torch.addmm requires 2D output tensor")
m, n = output_spec.size
# Choose a random inner dimension k
k = random.randint(1, 16)
dtypes = self._get_compatible_dtype(output_spec.dtype)
# Bias tensor: [m, n] (same shape as output)
bias_spec = TensorSpec(
size=(m, n),
stride=(n, 1), # Contiguous stride
dtype=dtypes[0],
)
# First matrix: [m, k]
input1_spec = TensorSpec(
size=(m, k),
stride=(k, 1), # Contiguous stride
dtype=dtypes[1] if len(dtypes) > 1 else dtypes[0],
)
# Second matrix: [k, n]
input2_spec = TensorSpec(
size=(k, n),
stride=(n, 1), # Contiguous stride
dtype=dtypes[1] if len(dtypes) > 1 else dtypes[0],
)
return [bias_spec, input1_spec, input2_spec]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for additive matrix multiplication."""
if len(input_names) != 3:
raise ValueError("torch.addmm requires exactly 3 inputs")
# Get target dtype
if isinstance(output_spec, TensorSpec):
target_dtype_str = f"torch.{output_spec.dtype}".replace(
"torch.torch.", "torch."
)
# Cast inputs to ensure compatible types
return (
f"{output_name} = torch.addmm("
f"{input_names[0]}.to({target_dtype_str}), "
f"{input_names[1]}.to({target_dtype_str}), "
f"{input_names[2]}.to({target_dtype_str}))"
)
else:
return f"{output_name} = torch.addmm({input_names[0]}, {input_names[1]}, {input_names[2]})"
| AddmmOperator |
python | django-guardian__django-guardian | guardian/testapp/models.py | {
"start": 2403,
"end": 2552
} | class ____(UserObjectPermissionBase):
content_object = models.ForeignKey("ReverseMixed", on_delete=models.CASCADE)
| ReverseMixedUserObjectPermission |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pep8_naming/N805.py | {
"start": 723,
"end": 862
} | class ____(abc.ABCMeta):
def bad_method(self):
pass
def good_method(cls):
pass
def func(x):
return x
| MetaClass |
python | huggingface__transformers | src/transformers/models/gemma3n/modeling_gemma3n.py | {
"start": 110638,
"end": 119409
} | class ____(Gemma3nPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
def __init__(self, config: Gemma3nConfig):
super().__init__(config)
self.model = Gemma3nModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_image_features(self, pixel_values):
return self.model.get_image_features(pixel_values)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None, # text inputs
pixel_values: Optional[torch.FloatTensor] = None, # vision inputs
input_features: Optional[torch.FloatTensor] = None, # audio inputs
attention_mask: Optional[torch.Tensor] = None,
input_features_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
token_type_ids: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**lm_kwargs,
) -> Gemma3nCausalLMOutputWithPast:
r"""
input_features_mask (torch.Tensor, *optional*, defaults to None):
The attention mask for the input audio.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in
`[0, ..., config.text_config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Gemma3ForConditionalGeneration
>>> model = Gemma3ForConditionalGeneration.from_pretrained("google/gemma-3-4b-it")
>>> processor = AutoProcessor.from_pretrained("google/gemma-3-4b-it")
>>> messages = [
... {
... "role": "system",
... "content": [
... {"type": "text", "text": "You are a helpful assistant."}
... ]
... },
... {
... "role": "user", "content": [
... {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"},
... {"type": "text", "text": "Where is the cat standing?"},
... ]
... },
... ]
>>> inputs = processor.apply_chat_template(
... messages,
... tokenizer=True,
... return_dict=True,
... return_tensors="pt",
... add_generation_prompt=True
... )
>>> # Generate
>>> generate_ids = model.generate(**inputs)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"user\nYou are a helpful assistant.\n\n\n\n\n\nWhere is the cat standing?\nmodel\nBased on the image, the cat is standing in a snowy area, likely outdoors. It appears to"
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
input_features=input_features,
attention_mask=attention_mask,
input_features_mask=input_features_mask,
position_ids=position_ids,
past_key_values=past_key_values,
token_type_ids=token_type_ids,
cache_position=cache_position,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
**lm_kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
if (final_logit_softcapping := self.config.get_text_config().final_logit_softcapping) is not None:
logits = logits / final_logit_softcapping
logits = torch.tanh(logits)
logits = logits * final_logit_softcapping
loss = None
if labels is not None:
# Upcast to float if we need to compute the loss to avoid potential precision issues
logits = logits.float()
shift_logits = logits[..., :-1, :]
shift_labels = labels[..., 1:]
if attention_mask is not None:
# we use the input attention mask to shift the logits and labels, because it is 2D.
# we also crop attn mask in case it is longer, which happens in PrefixTuning with peft
shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device)
shift_logits = shift_logits[shift_attention_mask.to(logits.device) != 0].contiguous()
shift_labels = shift_labels[shift_attention_mask.to(shift_labels.device) != 0].contiguous()
else:
shift_logits = shift_logits.contiguous()
shift_labels = shift_labels.contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss()
flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size)
flat_labels = shift_labels.view(-1).to(shift_logits.device)
loss = loss_fct(flat_logits, flat_labels)
return Gemma3nCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
audio_hidden_states=outputs.audio_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
pixel_values=None,
input_features=None,
attention_mask=None,
input_features_mask=None,
token_type_ids=None,
use_cache=True,
logits_to_keep=None,
labels=None,
**kwargs,
):
# Overwritten -- custom `position_ids` and `pixel_values` handling
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
position_ids=position_ids,
cache_position=cache_position,
use_cache=use_cache,
logits_to_keep=logits_to_keep,
token_type_ids=token_type_ids,
**kwargs,
)
# If we're in cached decoding stage, multimodal inputs should be None because input ids do not contain special
# tokens anymore. Otherwise multimodal inputs should be passed to model.
# NOTE: use_cache=False always needs pixel_values, input_features, and input_features_mask
if cache_position[0] == 0:
model_inputs["pixel_values"] = pixel_values
model_inputs["input_features"] = input_features
model_inputs["input_features_mask"] = input_features_mask
return model_inputs
__all__ = [
"Gemma3nAudioEncoder",
"Gemma3nForCausalLM",
"Gemma3nForConditionalGeneration",
"Gemma3nModel",
"Gemma3nPreTrainedModel",
"Gemma3nTextModel",
]
| Gemma3nForConditionalGeneration |
python | huggingface__transformers | src/transformers/models/ctrl/configuration_ctrl.py | {
"start": 838,
"end": 4644
} | class ____(PreTrainedConfig):
"""
This is the configuration class to store the configuration of a [`CTRLModel`]. It is used to
instantiate a CTRL model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the
[Salesforce/ctrl](https://huggingface.co/Salesforce/ctrl) architecture from SalesForce.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 246534):
Vocabulary size of the CTRL model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`CTRLModel`].
n_positions (`int`, *optional*, defaults to 256):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 1280):
Dimensionality of the embeddings and hidden states.
dff (`int`, *optional*, defaults to 8192):
Dimensionality of the inner dimension of the feed forward networks (FFN).
n_layer (`int`, *optional*, defaults to 48):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`int`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-06):
The epsilon to use in the layer normalization layers
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
Examples:
```python
>>> from transformers import CTRLConfig, CTRLModel
>>> # Initializing a CTRL configuration
>>> configuration = CTRLConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = CTRLModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "ctrl"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(
self,
vocab_size=246534,
n_positions=256,
n_embd=1280,
dff=8192,
n_layer=48,
n_head=16,
resid_pdrop=0.1,
embd_pdrop=0.1,
layer_norm_epsilon=1e-6,
initializer_range=0.02,
use_cache=True,
**kwargs,
):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.dff = dff
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
super().__init__(**kwargs)
__all__ = ["CTRLConfig"]
| CTRLConfig |
python | streamlit__streamlit | lib/streamlit/runtime/session_manager.py | {
"start": 2123,
"end": 2974
} | class ____:
"""Type containing data related to an AppSession."""
client: SessionClient | None
session: AppSession
# The number of times the script has been run for this session.
# At the moment, this is only used for testing and debugging purposes.
script_run_count: int = 0
def is_active(self) -> bool:
return self.client is not None
def to_active(self) -> ActiveSessionInfo:
if not self.is_active():
raise RuntimeError("A SessionInfo with no client cannot be active!")
# NOTE: The cast here (rather than copying this SessionInfo's fields into a new
# ActiveSessionInfo) is important as the Runtime expects to be able to mutate
# what's returned from get_active_session_info to increment script_run_count.
return cast("ActiveSessionInfo", self)
| SessionInfo |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum3.py | {
"start": 124,
"end": 170
} | class ____(Enum):
bad = 0
good = 1
| EnumA |
python | PrefectHQ__prefect | src/prefect/settings/models/server/logs.py | {
"start": 221,
"end": 756
} | class ____(PrefectBaseSettings):
"""
Settings for controlling behavior of the logs subsystem
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(
("server", "logs")
)
stream_out_enabled: bool = Field(
default=False,
description="Whether or not to stream logs out to the API via websockets.",
)
stream_publishing_enabled: bool = Field(
default=False,
description="Whether or not to publish logs to the streaming system.",
)
| ServerLogsSettings |
python | ansible__ansible | test/lib/ansible_test/_internal/classification/python.py | {
"start": 8587,
"end": 13886
} | class ____(ast.NodeVisitor):
"""AST visitor to find valid module_utils imports."""
def __init__(self, path: str, module_utils: set[str]) -> None:
self.path = path
self.module_utils = module_utils
self.imports: set[str] = set()
# implicitly import parent package
if path.endswith('/__init__.py'):
path = os.path.split(path)[0]
if path.startswith('lib/ansible/module_utils/'):
package = os.path.split(path)[0].replace('/', '.')[4:]
if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
self.add_import(package, 0)
self.module = None
if data_context().content.is_ansible:
# Various parts of the Ansible source tree execute within different modules.
# To support import analysis, each file which uses relative imports must reside under a path defined here.
# The mapping is a tuple consisting of a path pattern to match and a replacement path.
# During analysis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
path_map = (
('^lib/ansible/', 'ansible/'),
('^test/lib/ansible_test/_util/controller/sanity/validate-modules/', 'validate_modules/'),
('^test/units/', 'test/units/'),
('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
('^test/integration/targets/.*/library/', 'ansible/modules/'),
)
for pattern, replacement in path_map:
if re.search(pattern, self.path):
revised_path = re.sub(pattern, replacement, self.path)
self.module = path_to_module(revised_path)
break
else:
# This assumes that all files within the collection are executed by Ansible as part of the collection.
# While that will usually be true, there are exceptions which will result in this resolution being incorrect.
self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))
# pylint: disable=locally-disabled, invalid-name
def visit_Import(self, node: ast.Import) -> None:
"""Visit an import node."""
self.generic_visit(node)
# import ansible.module_utils.MODULE[.MODULE]
# import ansible_collections.{ns}.{col}.plugins.module_utils.module_utils.MODULE[.MODULE]
self.add_imports([alias.name for alias in node.names], node.lineno)
# pylint: disable=locally-disabled, invalid-name
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
"""Visit an import from node."""
self.generic_visit(node)
if not node.module:
return
module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)
if not module.startswith('ansible'):
return
# from ansible.module_utils import MODULE[, MODULE]
# from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)
def add_import(self, name: str, line_number: int) -> None:
"""Record the specified import."""
import_name = name
while self.is_module_util_name(name):
if name in self.module_utils:
if name not in self.imports:
display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
self.imports.add(name)
return # duplicate imports are ignored
name = '.'.join(name.split('.')[:-1])
if is_subdir(self.path, data_context().content.test_path):
return # invalid imports in tests are ignored
# Treat this error as a warning so tests can be executed as best as possible.
# This error should be detected by unit or integration tests.
display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
def add_imports(self, names: list[str], line_no: int) -> None:
"""Add the given import names if they are module_utils imports."""
for name in names:
if self.is_module_util_name(name):
self.add_import(name, line_no)
@staticmethod
def is_module_util_name(name: str) -> bool:
"""Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
return True
if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name):
return True
return False
| ModuleUtilFinder |
python | getsentry__sentry | tests/apidocs/endpoints/releases/test_organization_sessions.py | {
"start": 240,
"end": 1166
} | class ____(APIDocsTestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.organization = self.create_organization(owner=self.user, name="foo")
self.project = self.create_project(name="bar", organization=self.organization, teams=[])
self.url = reverse(
"sentry-api-0-organization-sessions",
kwargs={"organization_id_or_slug": self.organization.slug},
)
self.login_as(user=self.user)
def test_get(self) -> None:
query = {
"project": [self.project.id],
"statsPeriod": "30d",
"field": ["sum(session)"],
"groupBy": ["release"],
}
request = RequestFactory().get(self.url)
response = self.client.get(self.url, query)
assert response.status_code == 200, response.content
self.validate_schema(request, response)
| OrganizationSessionsDocsTest |
python | ray-project__ray | rllib/examples/centralized_critic.py | {
"start": 8918,
"end": 11236
} | class ____(PPO):
@classmethod
@override(PPO)
def get_default_policy_class(cls, config):
if config["framework"] == "torch":
return CCPPOTorchPolicy
elif config["framework"] == "tf":
return CCPPOStaticGraphTFPolicy
else:
return CCPPOEagerTFPolicy
if __name__ == "__main__":
ray.init(local_mode=True)
args = parser.parse_args()
ModelCatalog.register_custom_model(
"cc_model",
TorchCentralizedCriticModel
if args.framework == "torch"
else CentralizedCriticModel,
)
config = (
PPOConfig()
.api_stack(
enable_env_runner_and_connector_v2=False,
enable_rl_module_and_learner=False,
)
.environment(TwoStepGame)
.framework(args.framework)
.env_runners(batch_mode="complete_episodes", num_env_runners=0)
.training(model={"custom_model": "cc_model"})
.multi_agent(
policies={
"pol1": (
None,
Discrete(6),
TwoStepGame.action_space,
# `framework` would also be ok here.
PPOConfig.overrides(framework_str=args.framework),
),
"pol2": (
None,
Discrete(6),
TwoStepGame.action_space,
# `framework` would also be ok here.
PPOConfig.overrides(framework_str=args.framework),
),
},
policy_mapping_fn=lambda agent_id, episode, worker, **kwargs: "pol1"
if agent_id == 0
else "pol2",
)
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
.resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")))
)
stop = {
TRAINING_ITERATION: args.stop_iters,
NUM_ENV_STEPS_SAMPLED_LIFETIME: args.stop_timesteps,
f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": args.stop_reward,
}
tuner = tune.Tuner(
CentralizedCritic,
param_space=config.to_dict(),
run_config=tune.RunConfig(stop=stop, verbose=1),
)
results = tuner.fit()
if args.as_test:
check_learning_achieved(results, args.stop_reward)
| CentralizedCritic |
python | pytorch__pytorch | torch/testing/_internal/distributed/multi_threaded_pg.py | {
"start": 7915,
"end": 9516
} | class ____:
def __init__(self, op):
if op != dist.ReduceOp.SUM and op != dist.ReduceOp.AVG:
raise NotImplementedError(f"ReduceScatter does not support {op}")
self.op = op
@torch.no_grad()
def work(self, data):
start_reduction = [False for _ in range(len(data))]
for each_rank_data in data:
# Can't handle reduce_scatter with multiple scatter list
assert len(each_rank_data[1]) == 1
to_scatter = each_rank_data[1][0]
for i in range(len(to_scatter)):
dest_tensor_on_rank_i = data[i][0]
# Can't handle reduce_scatter with multiple output tensor
assert len(dest_tensor_on_rank_i) == 1
dst_tensor_device = dest_tensor_on_rank_i[0].device
if not start_reduction[i]:
# See Note [Hide collectives mutation from autograd]
dest_tensor_on_rank_i[0].detach().copy_(
to_scatter[i].to(dst_tensor_device)
)
start_reduction[i] = True
else:
# See Note [Hide collectives mutation from autograd]
dest_tensor_on_rank_i[0].detach().add_(
to_scatter[i].to(dst_tensor_device)
)
if self.op == dist.ReduceOp.AVG:
num_ranks = len(data)
for each_rank_data in data:
# See Note [Hide collectives mutation from autograd]
each_rank_data[0][0].detach().div_(num_ranks)
| ReduceScatter |
python | huggingface__transformers | src/transformers/models/lightglue/image_processing_lightglue.py | {
"start": 5381,
"end": 21913
} | class ____(BaseImageProcessor):
r"""
Constructs a LightGlue image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
by `do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 480, "width": 640}`):
Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to
`True`. Can be overridden by `size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_grayscale (`bool`, *optional*, defaults to `True`):
Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: float = 1 / 255,
do_grayscale: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 480, "width": 640}
size = get_size_dict(size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_grayscale = do_grayscale
def resize(
self,
image: np.ndarray,
size: dict[str, int],
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
):
"""
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be inferred from the input
image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
size = get_size_dict(size, default_to_square=False)
return resize(
image,
size=(size["height"], size["width"]),
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def preprocess(
self,
images,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_grayscale: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image pairs to preprocess. Expects either a list of 2 images or a list of list of 2 images list with
pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set
`do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
`(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_grayscale (`bool`, *optional*, defaults to `self.do_grayscale`):
Whether to convert the image to grayscale.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_grayscale = do_grayscale if do_grayscale is not None else self.do_grayscale
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
# Validate and convert the input images into a flattened list of images for all subsequent processing steps.
images = validate_and_format_image_pairs(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if is_scaled_image(images[0]) and do_rescale:
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_grayscale:
image = convert_to_grayscale(image, input_data_format=input_data_format)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
all_images.append(image)
# Convert back the flattened list of images into a list of pairs of images.
image_pairs = [all_images[i : i + 2] for i in range(0, len(all_images), 2)]
data = {"pixel_values": image_pairs}
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_keypoint_matching(
self,
outputs: "LightGlueKeypointMatchingOutput",
target_sizes: Union[TensorType, list[tuple]],
threshold: float = 0.0,
) -> list[dict[str, torch.Tensor]]:
"""
Converts the raw output of [`LightGlueKeypointMatchingOutput`] into lists of keypoints, scores and descriptors
with coordinates absolute to the original image sizes.
Args:
outputs ([`LightGlueKeypointMatchingOutput`]):
Raw outputs of the model.
target_sizes (`torch.Tensor` or `list[tuple[tuple[int, int]]]`, *optional*):
Tensor of shape `(batch_size, 2, 2)` or list of tuples of tuples (`tuple[int, int]`) containing the
target size `(height, width)` of each image in the batch. This must be the original image size (before
any processing).
threshold (`float`, *optional*, defaults to 0.0):
Threshold to filter out the matches with low scores.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the keypoints in the first and second image
of the pair, the matching scores and the matching indices.
"""
if outputs.mask.shape[0] != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the mask")
if not all(len(target_size) == 2 for target_size in target_sizes):
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
if isinstance(target_sizes, list):
image_pair_sizes = torch.tensor(target_sizes, device=outputs.mask.device)
else:
if target_sizes.shape[1] != 2 or target_sizes.shape[2] != 2:
raise ValueError(
"Each element of target_sizes must contain the size (h, w) of each image of the batch"
)
image_pair_sizes = target_sizes
keypoints = outputs.keypoints.clone()
keypoints = keypoints * image_pair_sizes.flip(-1).reshape(-1, 2, 1, 2)
keypoints = keypoints.to(torch.int32)
results = []
for mask_pair, keypoints_pair, matches, scores in zip(
outputs.mask, keypoints, outputs.matches[:, 0], outputs.matching_scores[:, 0]
):
mask0 = mask_pair[0] > 0
mask1 = mask_pair[1] > 0
keypoints0 = keypoints_pair[0][mask0]
keypoints1 = keypoints_pair[1][mask1]
matches0 = matches[mask0]
scores0 = scores[mask0]
# Filter out matches with low scores, invalid matches, and out-of-bounds indices
valid_matches = (scores0 > threshold) & (matches0 > -1) & (matches0 < keypoints1.shape[0])
matched_keypoints0 = keypoints0[valid_matches]
matched_keypoints1 = keypoints1[matches0[valid_matches]]
matching_scores = scores0[valid_matches]
results.append(
{
"keypoints0": matched_keypoints0,
"keypoints1": matched_keypoints1,
"matching_scores": matching_scores,
}
)
return results
def visualize_keypoint_matching(
self,
images: ImageInput,
keypoint_matching_output: list[dict[str, torch.Tensor]],
) -> list["Image.Image"]:
"""
Plots the image pairs side by side with the detected keypoints as well as the matching between them.
Args:
images (`ImageInput`):
Image pairs to plot. Same as `LightGlueImageProcessor.preprocess`. Expects either a list of 2
images or a list of list of 2 images list with pixel values ranging from 0 to 255.
keypoint_matching_output (List[Dict[str, torch.Tensor]]]):
A post processed keypoint matching output
Returns:
`List[PIL.Image.Image]`: A list of PIL images, each containing the image pairs side by side with the detected
keypoints as well as the matching between them.
"""
images = validate_and_format_image_pairs(images)
images = [to_numpy_array(image) for image in images]
image_pairs = [images[i : i + 2] for i in range(0, len(images), 2)]
results = []
for image_pair, pair_output in zip(image_pairs, keypoint_matching_output):
height0, width0 = image_pair[0].shape[:2]
height1, width1 = image_pair[1].shape[:2]
plot_image = np.zeros((max(height0, height1), width0 + width1, 3), dtype=np.uint8)
plot_image[:height0, :width0] = image_pair[0]
plot_image[:height1, width0:] = image_pair[1]
plot_image_pil = Image.fromarray(plot_image)
draw = ImageDraw.Draw(plot_image_pil)
keypoints0_x, keypoints0_y = pair_output["keypoints0"].unbind(1)
keypoints1_x, keypoints1_y = pair_output["keypoints1"].unbind(1)
for keypoint0_x, keypoint0_y, keypoint1_x, keypoint1_y, matching_score in zip(
keypoints0_x, keypoints0_y, keypoints1_x, keypoints1_y, pair_output["matching_scores"]
):
color = self._get_color(matching_score)
draw.line(
(keypoint0_x, keypoint0_y, keypoint1_x + width0, keypoint1_y),
fill=color,
width=3,
)
draw.ellipse((keypoint0_x - 2, keypoint0_y - 2, keypoint0_x + 2, keypoint0_y + 2), fill="black")
draw.ellipse(
(keypoint1_x + width0 - 2, keypoint1_y - 2, keypoint1_x + width0 + 2, keypoint1_y + 2),
fill="black",
)
results.append(plot_image_pil)
return results
def _get_color(self, score):
"""Maps a score to a color."""
r = int(255 * (1 - score))
g = int(255 * score)
b = 0
return (r, g, b)
__all__ = ["LightGlueImageProcessor"]
| LightGlueImageProcessor |
python | scrapy__scrapy | tests/test_command_runspider.py | {
"start": 3223,
"end": 7194
} | class ____(scrapy.Spider):
name = 'myspider'
start_urls = ['http://localhost:12345']
custom_settings = {
"ROBOTSTXT_OBEY": False,
"RETRY_ENABLED": False,
}
def parse(self, response):
return {'test': 'value'}
"""
log = self.get_log(
tmp_path, dnscache_spider, args=("-s", "DNSCACHE_ENABLED=False")
)
assert "DNSLookupError" not in log
assert "INFO: Spider opened" in log
@pytest.mark.parametrize("value", [False, True])
def test_runspider_log_short_names(self, tmp_path: Path, value: bool) -> None:
log1 = self.get_log(
tmp_path, self.debug_log_spider, args=("-s", f"LOG_SHORT_NAMES={value}")
)
assert "[myspider] DEBUG: It Works!" in log1
assert ("[scrapy]" in log1) is value
assert ("[scrapy.core.engine]" in log1) is not value
def test_runspider_no_spider_found(self, tmp_path: Path) -> None:
log = self.get_log(tmp_path, "from scrapy.spiders import Spider\n")
assert "No spider found in file" in log
def test_runspider_file_not_found(self) -> None:
_, _, log = proc("runspider", "some_non_existent_file")
assert "File not found: some_non_existent_file" in log
def test_runspider_unable_to_load(self, tmp_path: Path) -> None:
log = self.get_log(tmp_path, "", name="myspider.txt")
assert "Unable to load" in log
def test_start_errors(self, tmp_path: Path) -> None:
log = self.get_log(tmp_path, self.badspider, name="badspider.py")
assert "start" in log
assert "badspider.py" in log, log
def test_asyncio_enabled_true(self, tmp_path: Path) -> None:
log = self.get_log(
tmp_path,
self.debug_log_spider,
args=[
"-s",
"TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor",
],
)
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
in log
)
def test_asyncio_enabled_default(self, tmp_path: Path) -> None:
log = self.get_log(tmp_path, self.debug_log_spider)
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
in log
)
def test_asyncio_enabled_false(self, tmp_path: Path) -> None:
log = self.get_log(
tmp_path,
self.debug_log_spider,
args=["-s", "TWISTED_REACTOR=twisted.internet.selectreactor.SelectReactor"],
)
assert "Using reactor: twisted.internet.selectreactor.SelectReactor" in log
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
not in log
)
@pytest.mark.requires_uvloop
def test_custom_asyncio_loop_enabled_true(self, tmp_path: Path) -> None:
log = self.get_log(
tmp_path,
self.debug_log_spider,
args=[
"-s",
"TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"-s",
"ASYNCIO_EVENT_LOOP=uvloop.Loop",
],
)
assert "Using asyncio event loop: uvloop.Loop" in log
def test_custom_asyncio_loop_enabled_false(self, tmp_path: Path) -> None:
log = self.get_log(
tmp_path,
self.debug_log_spider,
args=[
"-s",
"TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor",
],
)
if sys.platform != "win32":
loop = asyncio.new_event_loop()
else:
loop = asyncio.SelectorEventLoop()
assert (
f"Using asyncio event loop: {loop.__module__}.{loop.__class__.__name__}"
in log
)
def test_output(self, tmp_path: Path) -> None:
spider_code = """
import scrapy
| MySpider |
python | django__django | tests/multiple_database/routers.py | {
"start": 41,
"end": 664
} | class ____:
"""
Vaguely behave like primary/replica, but the databases aren't assumed to
propagate changes.
"""
def db_for_read(self, model, instance=None, **hints):
if instance:
return instance._state.db or "other"
return "other"
def db_for_write(self, model, **hints):
return DEFAULT_DB_ALIAS
def allow_relation(self, obj1, obj2, **hints):
return obj1._state.db in ("default", "other") and obj2._state.db in (
"default",
"other",
)
def allow_migrate(self, db, app_label, **hints):
return True
| TestRouter |
python | pandas-dev__pandas | pandas/tests/arrays/sparse/test_accessor.py | {
"start": 181,
"end": 3084
} | class ____:
def test_to_dense(self):
ser = pd.Series([0, 1, 0, 10], dtype="Sparse[int64]")
result = ser.sparse.to_dense()
expected = pd.Series([0, 1, 0, 10])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("attr", ["npoints", "density", "fill_value", "sp_values"])
def test_get_attributes(self, attr):
arr = SparseArray([0, 1])
ser = pd.Series(arr)
result = getattr(ser.sparse, attr)
expected = getattr(arr, attr)
assert result == expected
def test_from_coo(self):
scipy_sparse = pytest.importorskip("scipy.sparse")
row = [0, 3, 1, 0]
col = [0, 3, 1, 2]
data = [4, 5, 7, 9]
sp_array = scipy_sparse.coo_matrix((data, (row, col)))
result = pd.Series.sparse.from_coo(sp_array)
index = pd.MultiIndex.from_arrays(
[
np.array([0, 0, 1, 3], dtype=np.int32),
np.array([0, 2, 1, 3], dtype=np.int32),
],
)
expected = pd.Series([4, 9, 7, 5], index=index, dtype="Sparse[int]")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"sort_labels, expected_rows, expected_cols, expected_values_pos",
[
(
False,
[("b", 2), ("a", 2), ("b", 1), ("a", 1)],
[("z", 1), ("z", 2), ("x", 2), ("z", 0)],
{1: (1, 0), 3: (3, 3)},
),
(
True,
[("a", 1), ("a", 2), ("b", 1), ("b", 2)],
[("x", 2), ("z", 0), ("z", 1), ("z", 2)],
{1: (1, 2), 3: (0, 1)},
),
],
)
def test_to_coo(
self, sort_labels, expected_rows, expected_cols, expected_values_pos
):
sp_sparse = pytest.importorskip("scipy.sparse")
values = SparseArray([0, np.nan, 1, 0, None, 3], fill_value=0)
index = pd.MultiIndex.from_tuples(
[
("b", 2, "z", 1),
("a", 2, "z", 2),
("a", 2, "z", 1),
("a", 2, "x", 2),
("b", 1, "z", 1),
("a", 1, "z", 0),
]
)
ss = pd.Series(values, index=index)
expected_A = np.zeros((4, 4))
for value, (row, col) in expected_values_pos.items():
expected_A[row, col] = value
A, rows, cols = ss.sparse.to_coo(
row_levels=(0, 1), column_levels=(2, 3), sort_labels=sort_labels
)
assert isinstance(A, sp_sparse.coo_matrix)
tm.assert_numpy_array_equal(A.toarray(), expected_A)
assert rows == expected_rows
assert cols == expected_cols
def test_non_sparse_raises(self):
ser = pd.Series([1, 2, 3])
with pytest.raises(AttributeError, match=".sparse"):
ser.sparse.density
| TestSeriesAccessor |
python | huggingface__transformers | src/transformers/models/olmo/modular_olmo.py | {
"start": 1985,
"end": 2470
} | class ____(LlamaMLP):
def __init__(self, config):
super().__init__(config)
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
# This is identical to LlamaRotaryEmbedding except the output cos and sin are returned
# as float32 rather than the input type.
| OlmoMLP |
python | spyder-ide__spyder | spyder/plugins/outlineexplorer/api.py | {
"start": 273,
"end": 3288
} | class ____(OutlineExplorerProxy):
...
def handle_go_to(name, line, text):
...
outlineexplorer = OutlineExplorerWidget(None)
oe_proxy = OutlineExplorerProxyCustom(name)
outlineexplorer.set_current_editor(oe_proxy, update=True, clear=False)
outlineexplorer.edit_goto.connect(handle_go_to)
"""
# Standard library imports
import re
# Third party imports
from qtpy.QtCore import Signal, QObject
from qtpy.QtGui import QTextBlock
# Local imports
from spyder.api.translations import _
from spyder.config.base import running_under_pytest
def document_cells(block, forward=True, cell_list=None):
"""
Get cells oedata before or after block in the document.
Parameters
----------
forward : bool, optional
Whether to iterate forward or backward from the current block.
cell_list: list of tuple containing (block_number, oedata)
This is the list of all cells in a file to avoid having to parse
the file every time.
"""
if not block.isValid():
# Not a valid block
return
if forward:
block = block.next()
else:
block = block.previous()
if not block.isValid():
return
if cell_list is not None:
cell_list = sorted(cell_list)
block_line = block.blockNumber()
if forward:
for cell_line, oedata in cell_list:
if cell_line >= block_line:
yield oedata
else:
for cell_line, oedata in cell_list[::-1]:
if cell_line <= block_line:
yield oedata
return
# If the cell_list was not provided, search the cells
while block.isValid():
data = block.userData()
if (data
and data.oedata
and data.oedata.def_type == OutlineExplorerData.CELL):
yield data.oedata
if forward:
block = block.next()
else:
block = block.previous()
def is_cell_header(block):
"""Check if the given block is a cell header."""
if not block.isValid():
return False
data = block.userData()
return (data
and data.oedata
and data.oedata.def_type == OutlineExplorerData.CELL)
def cell_index(block):
"""Get the cell index of the given block."""
index = len(list(document_cells(block, forward=False)))
if is_cell_header(block):
return index + 1
return index
def cell_name(block):
"""
Get the cell name the block is in.
If the cell is unnamed, return the cell index instead.
"""
if is_cell_header(block):
header = block.userData().oedata
else:
try:
header = next(document_cells(block, forward=False))
except StopIteration:
# This cell has no header, so it is the first cell.
return 0
if header.has_name():
return header.def_name
else:
# No name, return the index
return cell_index(block)
| OutlineExplorerProxyCustom |
python | doocs__leetcode | solution/1600-1699/1644.Lowest Common Ancestor of a Binary Tree II/Solution.py | {
"start": 164,
"end": 758
} | class ____:
def lowestCommonAncestor(
self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode'
) -> 'TreeNode':
def dfs(root, p, q):
if root is None:
return False
l = dfs(root.left, p, q)
r = dfs(root.right, p, q)
nonlocal ans
if l and r:
ans = root
if (l or r) and (root.val == p.val or root.val == q.val):
ans = root
return l or r or root.val == p.val or root.val == q.val
ans = None
dfs(root, p, q)
return ans
| Solution |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 153538,
"end": 170830
} | class ____:
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
# - defer if other has __array_ufunc__ and it is None
# or other is not a subclass and has higher array priority
# - else, call ufunc
@pytest.mark.xfail(IS_PYPY, reason="Bug in pypy3.{9, 10}-v7.3.13, #24862")
def test_ufunc_binop_interaction(self):
# Python method name (without underscores)
# -> (numpy ufunc, has_in_place_version, preferred_dtype)
ops = {
'add': (np.add, True, float),
'sub': (np.subtract, True, float),
'mul': (np.multiply, True, float),
'truediv': (np.true_divide, True, float),
'floordiv': (np.floor_divide, True, float),
'mod': (np.remainder, True, float),
'divmod': (np.divmod, False, float),
'pow': (np.power, True, int),
'lshift': (np.left_shift, True, int),
'rshift': (np.right_shift, True, int),
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
'matmul': (np.matmul, True, float),
# 'ge': (np.less_equal, False),
# 'gt': (np.less, False),
# 'le': (np.greater_equal, False),
# 'lt': (np.greater, False),
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
class Coerced(Exception):
pass
def array_impl(self):
raise Coerced
def op_impl(self, other):
return "forward"
def rop_impl(self, other):
return "reverse"
def iop_impl(self, other):
return "in-place"
def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
return ("__array_ufunc__", ufunc, method, args, kwargs)
# Create an object with the given base, in the given module, with a
# bunch of placeholder __op__ methods, and optionally a
# __array_ufunc__ and __array_priority__.
def make_obj(base, array_priority=False, array_ufunc=False,
alleged_module="__main__"):
class_namespace = {"__array__": array_impl}
if array_priority is not False:
class_namespace["__array_priority__"] = array_priority
for op in ops:
class_namespace[f"__{op}__"] = op_impl
class_namespace[f"__r{op}__"] = rop_impl
class_namespace[f"__i{op}__"] = iop_impl
if array_ufunc is not False:
class_namespace["__array_ufunc__"] = array_ufunc
eval_namespace = {"base": base,
"class_namespace": class_namespace,
"__name__": alleged_module,
}
MyType = eval("type('MyType', (base,), class_namespace)",
eval_namespace)
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
return np.arange(3, 7).reshape(2, 2).view(MyType)
else:
return MyType()
def check(obj, binop_override_expected, ufunc_override_expected,
inplace_override_expected, check_scalar=True):
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
arr_method = getattr(arr, f"__{op}__")
def first_out_arg(result):
if op == "divmod":
assert_(isinstance(result, tuple))
return result[0]
else:
return result
# arr __op__ obj
if binop_override_expected:
assert_equal(arr_method(obj), NotImplemented, err_msg)
elif ufunc_override_expected:
assert_equal(arr_method(obj)[0], "__array_ufunc__",
err_msg)
elif (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_method(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_method, obj, err_msg=err_msg)
# obj __op__ arr
arr_rmethod = getattr(arr, f"__r{op}__")
if ufunc_override_expected:
res = arr_rmethod(obj)
assert_equal(res[0], "__array_ufunc__",
err_msg=err_msg)
assert_equal(res[1], ufunc, err_msg=err_msg)
elif (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_rmethod(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
# __array_ufunc__ = "asdf" creates a TypeError
assert_raises((TypeError, Coerced),
arr_rmethod, obj, err_msg=err_msg)
# arr __iop__ obj
# array scalars don't have in-place operators
if has_inplace and isinstance(arr, np.ndarray):
arr_imethod = getattr(arr, f"__i{op}__")
if inplace_override_expected:
assert_equal(arr_method(obj), NotImplemented,
err_msg=err_msg)
elif ufunc_override_expected:
res = arr_imethod(obj)
assert_equal(res[0], "__array_ufunc__", err_msg)
assert_equal(res[1], ufunc, err_msg)
assert_(type(res[-1]["out"]) is tuple, err_msg)
assert_(res[-1]["out"][0] is arr, err_msg)
elif (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
assert_(arr_imethod(obj) is arr, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_imethod, obj,
err_msg=err_msg)
op_fn = getattr(operator, op, None)
if op_fn is None:
op_fn = getattr(operator, op + "_", None)
if op_fn is None:
op_fn = getattr(builtins, op)
assert_equal(op_fn(obj, arr), "forward", err_msg)
if not isinstance(obj, np.ndarray):
if binop_override_expected:
assert_equal(op_fn(arr, obj), "reverse", err_msg)
elif ufunc_override_expected:
assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
err_msg)
if ufunc_override_expected:
assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
err_msg)
# No array priority, no array_ufunc -> nothing called
check(make_obj(object), False, False, False)
# Negative array priority, no array_ufunc -> nothing called
# (has to be very negative, because scalar priority is -1000000.0)
check(make_obj(object, array_priority=-2**30), False, False, False)
# Positive array priority, no array_ufunc -> binops and iops only
check(make_obj(object, array_priority=1), True, False, True)
# ndarray ignores array_priority for ndarray subclasses
check(make_obj(np.ndarray, array_priority=1), False, False, False,
check_scalar=False)
# Positive array_priority and array_ufunc -> array_ufunc only
check(make_obj(object, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
check(make_obj(np.ndarray, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
# array_ufunc set to None -> defer binops only
check(make_obj(object, array_ufunc=None), True, False, False)
check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
check_scalar=False)
@pytest.mark.parametrize("priority", [None, "runtime error"])
def test_ufunc_binop_bad_array_priority(self, priority):
# Mainly checks that this does not crash. The second array has a lower
# priority than -1 ("error value"). If the __radd__ actually exists,
# bad things can happen (I think via the scalar paths).
# In principle both of these can probably just be errors in the future.
class BadPriority:
@property
def __array_priority__(self):
if priority == "runtime error":
raise RuntimeError("RuntimeError in __array_priority__!")
return priority
def __radd__(self, other):
return "result"
class LowPriority(np.ndarray):
__array_priority__ = -1000
# Priority failure uses the same as scalars (smaller -1000). So the
# LowPriority wins with 'result' for each element (inner operation).
res = np.arange(3).view(LowPriority) + BadPriority()
assert res.shape == (3,)
assert res[0] == 'result'
@pytest.mark.parametrize("scalar", [
np.longdouble(1), np.timedelta64(120, 'm')])
@pytest.mark.parametrize("op", [operator.add, operator.xor])
def test_scalar_binop_guarantees_ufunc(self, scalar, op):
# Test that __array_ufunc__ will always cause ufunc use even when
# we have to protect some other calls from recursing (see gh-26904).
class SomeClass:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return "result"
assert SomeClass() + scalar == "result"
assert scalar + SomeClass() == "result"
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_array_ufunc_index(self):
# Check that index is set appropriately, also if only an output
# is passed on (latter is another regression tests for github bug 4753)
# This also checks implicitly that 'out' is always a tuple.
class CheckIndex:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
return i
# calls below mean we must be in an output.
for j, a in enumerate(kw['out']):
if a is self:
return (j,)
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), (0,))
assert_equal(np.sin(dummy, out=a), (0,))
assert_equal(np.sin(dummy, out=(a,)), (0,))
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), (0,))
assert_equal(np.modf(dummy, None, a), (1,))
assert_equal(np.modf(dummy, dummy, a), (1,))
assert_equal(np.modf(dummy, out=(a, None)), (0,))
assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
with assert_raises(TypeError):
# Out argument must be tuple, since there are multiple outputs
np.modf(dummy, out=a)
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), (0,))
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), (0,))
assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
def test_pow_array_object_dtype(self):
# test pow on arrays of object dtype
class SomeClass:
def __init__(self, num=None):
self.num = num
# want to ensure a fast pow path is not taken
def __mul__(self, other):
raise AssertionError('__mul__ should not be called')
def __truediv__(self, other):
raise AssertionError('__truediv__ should not be called')
def __pow__(self, exp):
return SomeClass(num=self.num ** exp)
def __eq__(self, other):
if isinstance(other, SomeClass):
return self.num == other.num
__rpow__ = __pow__
def pow_for(exp, arr):
return np.array([x ** exp for x in arr])
obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
def test_pow_calls_square_structured_dtype(self):
# gh-29388
dt = np.dtype([('a', 'i4'), ('b', 'i4')])
a = np.array([(1, 2), (3, 4)], dtype=dt)
with pytest.raises(TypeError, match="ufunc 'square' not supported"):
a ** 2
def test_pos_array_ufunc_override(self):
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*[i.view(np.ndarray) for
i in inputs], **kwargs)
tst = np.array('foo').view(A)
with assert_raises(TypeError):
+tst
| TestBinop |
python | astropy__astropy | astropy/units/format/console.py | {
"start": 262,
"end": 1869
} | class ____(base.Base):
"""
Output-only format for to display pretty formatting at the
console.
For example::
>>> import astropy.units as u
>>> print(u.Ry.decompose().to_string('console')) # doctest: +FLOAT_CMP
2.1798721*10^-18 m^2 kg s^-2
>>> print(u.Ry.decompose().to_string('console', fraction='multiline')) # doctest: +FLOAT_CMP
m^2 kg
2.1798721*10^-18 ------
s^2
>>> print(u.Ry.decompose().to_string('console', fraction='inline')) # doctest: +FLOAT_CMP
2.1798721*10^-18 m^2 kg / s^2
"""
_line: ClassVar[str] = "-"
_space: ClassVar[str] = " "
@classmethod
def _format_superscript(cls, number: str) -> str:
return f"^{number}"
@classmethod
def _format_multiline_fraction(
cls, scale: str, numerator: str, denominator: str
) -> str:
fraclength = max(len(numerator), len(denominator))
f = f"{{0:<{len(scale)}s}}{{1:^{fraclength}s}}"
return "\n".join(
(
f.format("", numerator),
f.format(scale, cls._line * fraclength),
f.format("", denominator),
)
)
@classmethod
def to_string(
cls,
unit: UnitBase,
fraction: bool | Literal["inline", "multiline"] = False,
deprecations: DeprecatedUnitAction = DeprecatedUnitAction.WARN,
) -> str:
# Change default of fraction to False, i.e., we typeset
# without a fraction by default.
return super().to_string(unit, fraction=fraction)
| Console |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/int32_test.py | {
"start": 2235,
"end": 3145
} | class ____(trt_test.TfTrtIntegrationTestBase):
"""Test execution of calibration with int32 input"""
def GraphFn(self, inp):
# Can use any op that is converted to TRT with int32 inputs
inp_transposed = array_ops.transpose(inp, [0, 3, 2, 1], name='transpose_0')
return array_ops.identity(inp_transposed, name='output_0')
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.int32, [[3, 4, 5, 6]],
[[3, 6, 5, 4]])
def ShouldRunTest(self, run_params):
# Although test passes with all configurations but only
# execute INT8 with use_calibration=True because
# that is the purpose of the test.
return trt_test.IsQuantizationWithCalibration(
run_params), 'test calibration and INT8'
def ExpectedEnginesToBuild(self, run_params):
return ['TRTEngineOp_000']
if __name__ == '__main__':
test.main()
| CalibrationInt32Support |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_connection_command.py | {
"start": 1561,
"end": 2294
} | class ____:
parser = cli_parser.get_parser()
def setup_method(self):
clear_db_connections(add_default_connections_back=True)
def test_cli_connection_get(self, stdout_capture):
with stdout_capture as capture:
connection_command.connections_get(
self.parser.parse_args(["connections", "get", "google_cloud_default", "--output", "json"])
)
assert "google-cloud-platform:///default" in capture.getvalue()
def test_cli_connection_get_invalid(self):
with pytest.raises(SystemExit, match=re.escape("Connection not found.")):
connection_command.connections_get(self.parser.parse_args(["connections", "get", "INVALID"]))
| TestCliGetConnection |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_length.py | {
"start": 124,
"end": 209
} | class ____:
def __len__(self):
return 3.05 # [invalid-length-return]
| Float |
python | django-haystack__django-haystack | haystack/exceptions.py | {
"start": 305,
"end": 428
} | class ____(HaystackError):
"""Raised when a library a backend depends on can not be found."""
pass
| MissingDependency |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/assets.py | {
"start": 2763,
"end": 2892
} | class ____(BaseModel):
"""Asset alias serializer for responses."""
id: int
name: str
group: str
| AssetAliasResponse |
python | great-expectations__great_expectations | docs/docusaurus/versioned_docs/version-0.18/oss/guides/expectations/creating_custom_expectations/expect_multicolumn_values_to_be_multiples_of_three.py | {
"start": 869,
"end": 2254
} | class ____(MulticolumnMapMetricProvider):
# </snippet>
"""MetricProvider Class for Multicolumn Values Multiple Of Three MetricProvider"""
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_multicolumn_values_to_be_multiples_of_three.py condition_metric_name">
condition_metric_name = "multicolumn_values.multiple_three"
# </snippet>
condition_domain_keys = ("column_list",)
condition_value_keys = ()
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_multicolumn_values_to_be_multiples_of_three.py _pandas">
@multicolumn_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column_list, **kwargs):
return column_list.apply(lambda x: abs(x) % 3 == 0)
# </snippet>
@multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column_list, **kwargs):
return sa.and_(sa.func.abs(x) % 3 == 0 for x in column_list)
@multicolumn_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column_list, **kwargs):
raise NotImplementedError
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_multicolumn_values_to_be_multiples_of_three.py ExpectMulticolumnValuesToBeMultiplesOfThree class_def">
| MulticolumnValuesMultipleThree |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/kubernetes_engine.py | {
"start": 1729,
"end": 2388
} | class ____(BaseGoogleLink):
"""Helper class for constructing Kubernetes Engine Cluster Link."""
name = "Kubernetes Cluster"
key = "kubernetes_cluster_conf"
format_str = KUBERNETES_CLUSTER_LINK
@classmethod
def persist(cls, context: Context, **value):
cluster = value.get("cluster")
if isinstance(cluster, dict):
cluster = Cluster.from_json(json.dumps(cluster))
if not cluster:
raise ValueError("Cluster must be provided for KubernetesEngineClusterLink.")
super().persist(
context=context,
cluster_name=cluster.name,
)
| KubernetesEngineClusterLink |
python | scipy__scipy | scipy/optimize/_trustregion_constr/report.py | {
"start": 765,
"end": 1066
} | class ____(ReportBase):
COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
"opt", "c viol"]
COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10]
ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e",
"^10.2e", "^10.2e", "^10.2e"]
| BasicReport |
python | mlflow__mlflow | tests/genai/judges/optimizers/conftest.py | {
"start": 21104,
"end": 21991
} | class ____(dspy.BaseLM):
"""Mock DSPy LM class for testing that inherits from DSPy's BaseLM."""
def __init__(self, model_name):
super().__init__(model_name)
self.model = model_name
self.name = model_name
self._context_calls = []
def basic_request(self, prompt, **kwargs):
# Track that this LM was called
self._context_calls.append(
{
"model": self.model,
"prompt": prompt,
"kwargs": kwargs,
"context": "lm_basic_request_called",
}
)
# Return a default answer
return [{"text": '{"result": "pass", "rationale": "test rationale"}'}]
def __call__(self, *args, **kwargs):
return self.basic_request(str(args), **kwargs)
@property
def context_calls(self):
return self._context_calls
| MockDSPyLM |
python | ipython__ipython | IPython/core/prefilter.py | {
"start": 21347,
"end": 21656
} | class ____(PrefilterHandler):
handler_name = Unicode("macro")
def handle(self, line_info):
obj = self.shell.user_ns.get(line_info.ifun)
pre_space = line_info.pre_whitespace
line_sep = "\n" + pre_space
return pre_space + line_sep.join(obj.value.splitlines())
| MacroHandler |
python | pallets__werkzeug | src/werkzeug/datastructures/headers.py | {
"start": 19459,
"end": 21540
} | class ____(ImmutableHeadersMixin, Headers): # type: ignore[misc]
"""Read only version of the headers from a WSGI environment. This
provides the same interface as `Headers` and is constructed from
a WSGI environment.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
HTTP exceptions.
"""
def __init__(self, environ: WSGIEnvironment) -> None:
super().__init__()
self.environ = environ
def __eq__(self, other: object) -> bool:
if not isinstance(other, EnvironHeaders):
return NotImplemented
return self.environ is other.environ
__hash__ = None
def __getitem__(self, key: str) -> str: # type: ignore[override]
return self._get_key(key)
def _get_key(self, key: str) -> str:
if not isinstance(key, str):
raise BadRequestKeyError(key)
key = key.upper().replace("-", "_")
if key in {"CONTENT_TYPE", "CONTENT_LENGTH"}:
return self.environ[key] # type: ignore[no-any-return]
return self.environ[f"HTTP_{key}"] # type: ignore[no-any-return]
def __len__(self) -> int:
return sum(1 for _ in self)
def __iter__(self) -> cabc.Iterator[tuple[str, str]]:
for key, value in self.environ.items():
if key.startswith("HTTP_") and key not in {
"HTTP_CONTENT_TYPE",
"HTTP_CONTENT_LENGTH",
}:
yield key[5:].replace("_", "-").title(), value
elif key in {"CONTENT_TYPE", "CONTENT_LENGTH"} and value:
yield key.replace("_", "-").title(), value
def copy(self) -> t.NoReturn:
raise TypeError(f"cannot create {type(self).__name__!r} copies")
def __or__(self, other: t.Any) -> t.NoReturn:
raise TypeError(f"cannot create {type(self).__name__!r} copies")
# circular dependencies
from .. import http # noqa: E402
| EnvironHeaders |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 60056,
"end": 60940
} | class ____(Glyph, LineGlyph, FillGlyph, HatchGlyph):
""" Horizontal strips of infinite width. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/HStrip.py"
_args = ("y0", "y1")
y0 = NumberSpec(default=field("y0"), help="""
The y-coordinates of the coordinates of one side of the strips.
""")
y1 = NumberSpec(default=field("y1"), help="""
The y-coordinates of the coordinates of the other side of the strips.
""")
line_props = Include(LineProps, help="""
The {prop} values for the strips.
""")
fill_props = Include(FillProps, help="""
The {prop} values for the strips.
""")
hatch_props = Include(HatchProps, help="""
The {prop} values for the strips.
""")
| HStrip |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/packaging.py | {
"start": 4102,
"end": 5175
} | class ____(PackagingCheck):
name = "Connectors must be licensed under MIT or Elv2"
description = f"Connectors must be licensed under the MIT or Elv2 license. This is to ensure that all connectors are licensed under a permissive license. More details in our [License FAQ]({consts.LICENSE_FAQ_URL})."
def _run(self, connector: Connector) -> CheckResult:
metadata_license = get(connector.metadata, "license")
if metadata_license is None:
return self.fail(
connector=connector,
message="License is missing in the metadata file",
)
elif metadata_license.upper() not in consts.VALID_LICENSES:
return self.fail(
connector=connector,
message=f"Connector is not using a valid license. Please use any of: {', '.join(consts.VALID_LICENSES)}",
)
else:
return self.pass_(
connector=connector,
message=f"Connector is licensed under {metadata_license}",
)
| CheckConnectorLicense |
python | huggingface__transformers | tests/models/dinov2/test_modeling_dinov2.py | {
"start": 11907,
"end": 12181
} | class ____(unittest.TestCase, BackboneTesterMixin):
all_model_classes = (Dinov2Backbone,) if is_torch_available() else ()
config_class = Dinov2Config
has_attentions = False
def setUp(self):
self.model_tester = Dinov2ModelTester(self)
| Dinov2BackboneTest |
python | ray-project__ray | python/ray/data/preprocessors/encoder.py | {
"start": 14038,
"end": 20562
} | class ____(SerializablePreprocessorBase):
r"""Multi-hot encode categorical data.
This preprocessor replaces each list of categories with an :math:`m`-length binary
list, where :math:`m` is the number of unique categories in the column or the value
specified in ``max_categories``. The :math:`i\\text{-th}` element of the binary list
is :math:`1` if category :math:`i` is in the input list and :math:`0` otherwise.
Columns must contain hashable objects or lists of hashable objects.
Also, you can't have both types in the same column.
.. note::
The logic is similar to scikit-learn's [MultiLabelBinarizer][1]
Examples:
>>> import pandas as pd
>>> import ray
>>> from ray.data.preprocessors import MultiHotEncoder
>>>
>>> df = pd.DataFrame({
... "name": ["Shaolin Soccer", "Moana", "The Smartest Guys in the Room"],
... "genre": [
... ["comedy", "action", "sports"],
... ["animation", "comedy", "action"],
... ["documentary"],
... ],
... })
>>> ds = ray.data.from_pandas(df) # doctest: +SKIP
>>>
>>> encoder = MultiHotEncoder(columns=["genre"])
>>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP
name genre
0 Shaolin Soccer [1, 0, 1, 0, 1]
1 Moana [1, 1, 1, 0, 0]
2 The Smartest Guys in the Room [0, 0, 0, 1, 0]
:class:`MultiHotEncoder` can also be used in append mode by providing the
name of the output_columns that should hold the encoded values.
>>> encoder = MultiHotEncoder(columns=["genre"], output_columns=["genre_encoded"])
>>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP
name genre genre_encoded
0 Shaolin Soccer [comedy, action, sports] [1, 0, 1, 0, 1]
1 Moana [animation, comedy, action] [1, 1, 1, 0, 0]
2 The Smartest Guys in the Room [documentary] [0, 0, 0, 1, 0]
If you specify ``max_categories``, then :class:`MultiHotEncoder`
creates features for only the most frequent categories.
>>> encoder = MultiHotEncoder(columns=["genre"], max_categories={"genre": 3})
>>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP
name genre
0 Shaolin Soccer [1, 1, 1]
1 Moana [1, 1, 0]
2 The Smartest Guys in the Room [0, 0, 0]
>>> encoder.stats_ # doctest: +SKIP
OrderedDict([('unique_values(genre)', {'comedy': 0, 'action': 1, 'sports': 2})])
Args:
columns: The columns to separately encode.
max_categories: The maximum number of features to create for each column.
If a value isn't specified for a column, then a feature is created
for every unique category in that column.
output_columns: The names of the transformed columns. If None, the transformed
columns will be the same as the input columns. If not None, the length of
``output_columns`` must match the length of ``columns``, othwerwise an error
will be raised.
.. seealso::
:class:`OneHotEncoder`
If you're encoding individual categories instead of lists of
categories, use :class:`OneHotEncoder`.
:class:`OrdinalEncoder`
If your categories are ordered, you may want to use
:class:`OrdinalEncoder`.
[1]: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MultiLabelBinarizer.html
"""
def __init__(
    self,
    columns: List[str],
    *,
    max_categories: Optional[Dict[str, int]] = None,
    output_columns: Optional[List[str]] = None,
):
    """Configure the encoder for the given columns.

    Args:
        columns: Columns to encode.
        max_categories: Optional per-column cap on the number of categories.
        output_columns: Optional names for the encoded output columns.
    """
    super().__init__()
    # TODO: add `drop` parameter.
    self.columns = columns
    # A missing (or empty) mapping is normalized to a fresh empty dict.
    self.max_categories = max_categories if max_categories else {}
    self.output_columns = Preprocessor._derive_and_validate_output_columns(
        columns, output_columns
    )
def _fit(self, dataset: "Dataset") -> Preprocessor:
    """Register computation of unique value indices for each column and return self."""

    # Named inner function instead of an inline lambda for the stat computation.
    def compute_stat(key_gen):
        return compute_unique_value_indices(
            dataset=dataset,
            columns=self.columns,
            encode_lists=True,
            key_gen=key_gen,
            max_categories=self.max_categories,
        )

    self.stat_computation_plan.add_callable_stat(
        stat_fn=compute_stat,
        post_process_fn=unique_post_fn(),
        stat_key_fn=lambda col: f"unique({col})",
        post_key_fn=lambda col: f"unique_values({col})",
        columns=self.columns,
    )
    return self
def _transform_pandas(self, df: pd.DataFrame):
    """Multi-hot encode the configured columns of ``df`` and return it."""
    _validate_df(df, *self.columns)

    def encode_list(element: list, *, name: str):
        # Normalize numpy arrays and bare scalars into plain Python lists.
        if isinstance(element, np.ndarray):
            element = element.tolist()
        elif not isinstance(element, list):
            element = [element]
        # Known categories (in index order) for this column, from fitted stats.
        categories = self.stats_[f"unique_values({name})"]
        counts = Counter(element)
        # NOTE(review): entries are occurrence counts, not strictly 0/1.
        return [counts.get(category, 0) for category in categories]

    for source_col, target_col in zip(self.columns, self.output_columns):
        df[target_col] = df[source_col].map(partial(encode_list, name=source_col))
    return df
def _get_serializable_fields(self) -> Dict[str, Any]:
    """Return the state required to reconstruct this preprocessor."""
    fields: Dict[str, Any] = {
        "columns": self.columns,
        "output_columns": self.output_columns,
        "max_categories": self.max_categories,
    }
    # `_fitted` only exists after fitting; default to None when absent.
    fields["_fitted"] = getattr(self, "_fitted", None)
    return fields
def _set_serializable_fields(self, fields: Dict[str, Any], version: int):
    """Restore state previously produced by `_get_serializable_fields`."""
    # Required fields: a malformed payload raises KeyError here.
    for attr in ("columns", "output_columns", "max_categories"):
        setattr(self, attr, fields[attr])
    # Optional field: older payloads may not carry it.
    self._fitted = fields.get("_fitted")
def __repr__(self):
    """Return a debug representation of the encoder's configuration."""
    cls_name = self.__class__.__name__
    parts = (
        f"columns={self.columns!r}",
        f"max_categories={self.max_categories!r}",
        f"output_columns={self.output_columns}",
    )
    return f"{cls_name}({', '.join(parts)})"
@PublicAPI(stability="alpha")
@SerializablePreprocessor(version=1, identifier="io.ray.preprocessors.label_encoder")
| MultiHotEncoder |
python | lxml__lxml | src/lxml/html/__init__.py | {
"start": 48368,
"end": 50295
class ____(SetMixin):
    """
    Represents all the selected options in a ``<select multiple>`` element.

    You can add to this set-like option to select an option, or remove
    to unselect the option.
    """

    def __init__(self, select):
        self.select = select

    @property
    def options(self):
        """
        Iterator of all the ``<option>`` elements.
        """
        return iter(_options_xpath(self.select))

    @staticmethod
    def _option_value(option):
        # An <option> without an explicit value attribute falls back to its
        # (stripped) text content, per the HTML forms model.
        value = option.get('value')
        if value is None:
            value = (option.text or '').strip()
        return value

    def __iter__(self):
        # Yield the value of every currently-selected option.
        for option in self.options:
            if 'selected' in option.attrib:
                yield self._option_value(option)

    def add(self, item):
        """Select the option with the given value.

        Raises ValueError if no option has that value.
        """
        for option in self.options:
            if self._option_value(option) == item:
                option.set('selected', '')
                break
        else:
            raise ValueError(
                "There is no option with the value %r" % item)

    def remove(self, item):
        """Unselect the option with the given value.

        Raises ValueError if no option has that value, or if the option
        exists but is not currently selected.
        """
        for option in self.options:
            if self._option_value(option) == item:
                if 'selected' in option.attrib:
                    del option.attrib['selected']
                else:
                    raise ValueError(
                        "The option %r is not currently selected" % item)
                break
        else:
            # Fixed: message previously read "There is not option ...",
            # inconsistent with the grammatically correct message in add().
            raise ValueError(
                "There is no option with the value %r" % item)

    def __repr__(self):
        return '<%s {%s} for select name=%r>' % (
            self.__class__.__name__,
            ', '.join([repr(v) for v in self]),
            self.select.name)
| MultipleSelectOptions |
python | getsentry__sentry | tests/sentry/sentry_apps/api/bases/test_sentryapps.py | {
"start": 3870,
"end": 5074
class ____(TestCase):
    """Checks that superusers and staff hold object permission on sentry apps."""

    def setUp(self) -> None:
        self.permission = SentryAppAndStaffPermission()
        self.sentry_app = self.create_sentry_app(name="foo", organization=self.organization)

    def _assert_get_and_post_allowed(self, request) -> None:
        # The permission must hold for both the read (GET) and write (POST) verbs.
        assert self.permission.has_object_permission(request, APIView(), self.sentry_app)
        request.method = "POST"
        assert self.permission.has_object_permission(request, APIView(), self.sentry_app)

    def test_superuser_has_permission(self) -> None:
        superuser = self.create_user(is_superuser=True)
        request = drf_request_from_request(
            self.make_request(user=superuser, method="GET", is_superuser=True)
        )
        self._assert_get_and_post_allowed(request)

    def test_staff_has_permission(self) -> None:
        staff_user = self.create_user(is_staff=True)
        self.login_as(user=staff_user, staff=True)
        request = drf_request_from_request(
            self.make_request(user=staff_user, method="GET", is_staff=True)
        )
        self._assert_get_and_post_allowed(request)
@control_silo_test
| SentryAppAndStaffPermissionTest |
python | kennethreitz__tablib | src/tablib/formats/_csv.py | {
"start": 68,
"end": 1611
class ____:
    """CSV import/export format handler."""

    title = 'csv'
    extensions = ('csv',)
    DEFAULT_DELIMITER = ','

    @classmethod
    def export_stream_set(cls, dataset, **kwargs):
        """Returns CSV representation of Dataset as file-like."""
        kwargs.setdefault('delimiter', cls.DEFAULT_DELIMITER)
        stream = StringIO()
        writer = csv.writer(stream, **kwargs)
        writer.writerows(dataset._package(dicts=False))
        # Rewind so callers can read the stream from the start.
        stream.seek(0)
        return stream

    @classmethod
    def export_set(cls, dataset, **kwargs):
        """Returns CSV representation of Dataset."""
        return cls.export_stream_set(dataset, **kwargs).getvalue()

    @classmethod
    def import_set(cls, dset, in_stream, headers=True, **kwargs):
        """Returns dataset from CSV stream."""
        dset.wipe()
        kwargs.setdefault('delimiter', cls.DEFAULT_DELIMITER)
        for i, row in enumerate(csv.reader(in_stream, **kwargs)):
            if i == 0 and headers:
                dset.headers = row
            elif row:
                if i > 0 and len(row) < dset.width:
                    # Pad short rows so every row matches the dataset width.
                    row += [''] * (dset.width - len(row))
                dset.append(row)

    @classmethod
    def detect(cls, stream, delimiter=None):
        """Returns True if given stream is valid CSV."""
        try:
            # Sniff only a sample; any sniffing/reading failure means "not CSV".
            csv.Sniffer().sniff(
                stream.read(1024), delimiters=delimiter or cls.DEFAULT_DELIMITER
            )
            return True
        except Exception:
            return False
python | huggingface__transformers | tests/trainer/test_trainer_callback.py | {
"start": 3266,
"end": 17822
class ____(unittest.TestCase):
    """Tests for the Trainer callback system: registration, removal, the exact
    event sequence fired during training, and (re)storing stateful callbacks
    from checkpoints."""

    def setUp(self):
        # Fresh temp dir per test; checkpoints are written here.
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        """Build a small regression Trainer; extra kwargs go to TrainingArguments."""
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        """Assert two callback lists match, comparing classes and instances
        interchangeably and ignoring order."""
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            # Each side may be a class or an instance; compare at class level.
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        """Reconstruct, from the trainer's configuration, the exact sequence of
        callback events a full `train()` run should fire."""
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        # NOTE(review): train loop length is derived from the *eval* dataloader;
        # presumably both datasets have equal length here — confirm if changed.
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]

        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_pre_optimizer_step", "on_optimizer_step", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.eval_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0 or step == trainer.state.max_steps:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.eval_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        with warnings.catch_warnings():
            warnings.simplefilter(action="ignore", category=UserWarning)

            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

            # Independent log/save/eval
            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, eval_strategy="steps")
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_strategy="epoch")
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

            # A bit of everything
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback],
                logging_steps=3,
                save_steps=10,
                eval_steps=5,
                eval_strategy="steps",
            )
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]

    def test_stateful_callbacks(self):
        # Use something with non-defaults
        cb = EarlyStoppingCallback(early_stopping_patience=5, early_stopping_threshold=0.2)
        trainer = self.get_trainer(
            callbacks=[cb],
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
        )
        trainer.train()

        # Create a new trainer with defaults
        trainer = self.get_trainer(
            callbacks=[EarlyStoppingCallback()],
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
            restore_callback_states_from_checkpoint=True,
        )

        # Load it back in and verify values
        checkpoint = os.path.join(self.output_dir, "checkpoint-2")
        trainer.train(resume_from_checkpoint=checkpoint)
        cb = [
            callback for callback in trainer.callback_handler.callbacks if isinstance(callback, EarlyStoppingCallback)
        ][0]
        # The restored callback must carry the checkpointed (non-default) values.
        assert cb.early_stopping_patience == 5
        assert cb.early_stopping_threshold == 0.2

    def test_stateful_mixed_callbacks(self):
        # Use two callbacks, one stateful one not
        # Use something with non-defaults
        cbs = [
            MyTestTrainerCallback(my_test_state="another value"),
            EarlyStoppingCallback(early_stopping_patience=5, early_stopping_threshold=0.2),
        ]
        trainer = self.get_trainer(
            callbacks=cbs,
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
        )
        trainer.train()

        # Create a new trainer with defaults
        trainer = self.get_trainer(
            callbacks=[EarlyStoppingCallback(), MyTestTrainerCallback()],
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
            restore_callback_states_from_checkpoint=True,
        )

        # Load it back in and verify values
        checkpoint = os.path.join(self.output_dir, "checkpoint-2")
        trainer.train(resume_from_checkpoint=checkpoint)
        cbs = [
            callback
            for callback in trainer.callback_handler.callbacks
            if isinstance(callback, (EarlyStoppingCallback, MyTestTrainerCallback))
        ]
        assert len(cbs) == 2
        my_test, early_stopping = cbs
        # Only the stateful callback is restored; the plain one keeps its default.
        assert early_stopping.early_stopping_patience == 5
        assert early_stopping.early_stopping_threshold == 0.2
        assert my_test.my_test_state == "test"

    def test_stateful_duplicate_callbacks(self):
        # Use something with non-defaults
        cbs = [MyTestExportableCallback("first"), MyTestExportableCallback("second")]
        trainer = self.get_trainer(
            callbacks=cbs,
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
        )
        trainer.train()

        # Create a new trainer with defaults
        trainer = self.get_trainer(
            callbacks=[MyTestExportableCallback(), MyTestExportableCallback()],
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
            restore_callback_states_from_checkpoint=True,
        )

        # Load it back in and verify values
        checkpoint = os.path.join(self.output_dir, "checkpoint-2")
        trainer.train(resume_from_checkpoint=checkpoint)
        cbs = [
            callback
            for callback in trainer.callback_handler.callbacks
            if isinstance(callback, MyTestExportableCallback)
        ]
        # Duplicated callbacks are restored positionally, keeping their own state.
        assert len(cbs) == 2
        assert cbs[0].my_test_state == "first"
        assert cbs[1].my_test_state == "second"

    def test_missing_stateful_callback(self):
        cb = EarlyStoppingCallback()
        trainer = self.get_trainer(
            callbacks=[cb],
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
        )
        trainer.train()

        # Create a new trainer with defaults
        trainer = self.get_trainer(
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
            restore_callback_states_from_checkpoint=True,
        )

        # Load it back in and verify values
        checkpoint = os.path.join(self.output_dir, "checkpoint-2")
        # warning should be emitted for not-present callbacks
        with patch("transformers.trainer.logger.warning") as warn_mock:
            trainer.train(resume_from_checkpoint=checkpoint)
            assert "EarlyStoppingCallback" in warn_mock.call_args[0][0]

    def test_stateful_control(self):
        trainer = self.get_trainer(
            max_steps=2,
            save_strategy="steps",
            save_steps=2,
        )
        trainer.train()

        # Load it back in and verify values
        trainer = self.get_trainer(max_steps=2, restore_callback_states_from_checkpoint=True)
        checkpoint = os.path.join(self.output_dir, "checkpoint-2")
        trainer.state = TrainerState.load_from_json(os.path.join(checkpoint, TRAINER_STATE_NAME))
        trainer._load_callback_state()
        # The restored control must remember that training already finished.
        assert trainer.control.should_training_stop

    def test_no_duplicate_save_on_epoch_save_strategy(self):
        times_saved = 0

        # Counts how many distinct hooks observe `should_save` in one run.
        class OnEndCallback(TrainerCallback):
            def on_step_end(self, args: TrainingArguments, state: TrainerState, control, **kwargs):
                nonlocal times_saved
                if control.should_save:
                    times_saved += 1

            def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control, **kwargs):
                nonlocal times_saved
                if control.should_save:
                    times_saved += 1

        trainer = self.get_trainer(max_steps=2, save_strategy="epoch", callbacks=[OnEndCallback])
        trainer.train()
        assert times_saved == 1
| TrainerCallbackTest |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/widgets/main_widget.py | {
"start": 3607,
"end": 106698
} | class ____(PluginMainWidget, CachedKernelMixin): # noqa: PLR0904
"""
IPython Console plugin
This is a widget with tabs where each one is a ClientWidget.
"""
# Signals
sig_open_preferences_requested = Signal()
"""
Signal to open the main interpreter preferences.
"""
sig_append_to_history_requested = Signal(str, str)
"""
This signal is emitted when the plugin requires to add commands to a
history file.
Parameters
----------
filename: str
History file filename.
text: str
Text to append to the history file.
"""
sig_history_requested = Signal(str)
"""
This signal is emitted when the plugin wants a specific history file
to be shown.
Parameters
----------
path: str
Path to history file.
"""
sig_focus_changed = Signal()
"""
This signal is emitted when the plugin focus changes.
"""
sig_switch_to_plugin_requested = Signal()
"""
This signal will request to change the focus to the plugin.
"""
sig_edit_goto_requested = Signal(str, int, str)
"""
This signal will request to open a file in a given row and column
using a code editor.
Parameters
----------
path: str
Path to file.
row: int
Cursor starting row position.
word: str
Word to select on given row.
"""
sig_edit_new = Signal(str)
"""
This signal will request to create a new file in a code editor.
Parameters
----------
path: str
Path to file.
"""
sig_shellwidget_created = Signal(object)
"""
This signal is emitted when a shellwidget is created.
Parameters
----------
shellwidget: spyder.plugins.ipyconsole.widgets.shell.ShellWidget
The shellwigdet.
"""
sig_shellwidget_deleted = Signal(object)
"""
This signal is emitted when a shellwidget is deleted/removed.
Parameters
----------
shellwidget: spyder.plugins.ipyconsole.widgets.shell.ShellWidget
The shellwigdet.
"""
sig_shellwidget_changed = Signal(object)
"""
This signal is emitted when the current shellwidget changes.
Parameters
----------
shellwidget: spyder.plugins.ipyconsole.widgets.shell.ShellWidget
The shellwigdet.
"""
sig_shellwidget_errored = Signal(object)
"""
This signal is emitted when the current shellwidget failed to start.
Parameters
----------
shellwidget: spyder.plugins.ipyconsole.widgets.shell.ShellWidget
The shellwigdet.
"""
sig_render_plain_text_requested = Signal(str)
"""
This signal is emitted to request a plain text help render.
Parameters
----------
plain_text: str
The plain text to render.
"""
sig_render_rich_text_requested = Signal(str, bool)
"""
This signal is emitted to request a rich text help render.
Parameters
----------
rich_text: str
The rich text.
collapse: bool
If the text contains collapsed sections, show them closed (True) or
open (False).
"""
sig_help_requested = Signal(dict)
"""
This signal is emitted to request help on a given object `name`.
Parameters
----------
help_data: dict
Example `{'name': str, 'ignore_unknown': bool}`.
"""
sig_current_directory_changed = Signal(str, str)
"""
This signal is emitted when the current directory of the active shell
widget has changed.
Parameters
----------
working_directory: str
The new working directory path.
server_id: str
The server identification from where the working directory is
reachable.
"""
sig_interpreter_changed = Signal(str)
"""
This signal is emitted when the interpreter of the active shell widget has
changed.
Parameters
----------
path: str
Path to the new interpreter.
"""
sig_edit_action_enabled = Signal(str, bool)
"""
This signal is emitted to enable or disable an edit action.
Parameters
----------
action_name: str
Name of the edit action to be enabled or disabled.
enabled: bool
True if the action should be enabled, False if it should disabled.
"""
def __init__(self, name=None, plugin=None, parent=None):
super().__init__(name, plugin, parent)
self.menu_actions = None
self.master_clients = 0
self.clients = []
self.filenames = []
self.mainwindow_close = False
self.active_project_path = None
self.create_new_client_if_empty = True
self.run_cell_filename = None
self.interrupt_action = None
self.registered_spyder_kernel_handlers = {}
self.envs = {}
# Attributes needed for the restart dialog
self._restart_dialog = ConsoleRestartDialog(self)
self._initial_conf_options = self.get_conf_options()
self._last_time_for_restart_dialog = None
# Disable infowidget if requested by the user
self.enable_infowidget = True
if plugin:
cli_options = plugin.get_command_line_options()
if cli_options.no_web_widgets or not WEBENGINE:
self.enable_infowidget = False
# Attrs for testing
self._testing = bool(os.environ.get('IPYCONSOLE_TESTING'))
layout = QVBoxLayout()
layout.setSpacing(0)
self.tabwidget = Tabs(self, rename_tabs=True, split_char='/',
split_index=0)
if (hasattr(self.tabwidget, 'setDocumentMode')
and not sys.platform == 'darwin'):
# Don't set document mode to true on OSX because it generates
# a crash when the console is detached from the main window
# Fixes spyder-ide/spyder#561.
self.tabwidget.setDocumentMode(True)
self.tabwidget.currentChanged.connect(
lambda idx: self.refresh_container(give_focus=True))
self.tabwidget.tabBar().tabMoved.connect(self.move_tab)
self.tabwidget.tabBar().sig_name_changed.connect(
self.rename_tabs_after_change)
self.tabwidget.set_close_function(self.close_client)
if sys.platform == 'darwin':
tab_container = QWidget()
tab_container.setObjectName('tab-container')
tab_layout = QHBoxLayout(tab_container)
tab_layout.setContentsMargins(0, 0, 0, 0)
tab_layout.addWidget(self.tabwidget)
layout.addWidget(tab_container)
else:
layout.addWidget(self.tabwidget)
# Info widget
if self.enable_infowidget:
self._infowidget = self._create_info_widget()
layout.addWidget(self._infowidget)
else:
self._infowidget = None
# Label to inform users how to get out of the pager
self.pager_label = QLabel(_("Press <b>Q</b> to exit pager"), self)
pager_label_css = qstylizer.style.StyleSheet()
pager_label_css.setValues(**{
'background-color': f'{SpyderPalette.COLOR_ACCENT_2}',
'color': f'{SpyderPalette.COLOR_TEXT_1}',
'margin': '2px 1px 1px 1px',
'padding': '5px',
'qproperty-alignment': 'AlignCenter',
'border-radius': f'{SpyderPalette.SIZE_BORDER_RADIUS}'
})
self.pager_label.setStyleSheet(pager_label_css.toString())
self.pager_label.hide()
layout.addWidget(self.pager_label)
# Find/replace widget
self.find_widget = FindReplace(self)
self.find_widget.hide()
# Manually adjust margins for the find/replace widget
if not self.get_conf('vertical_tabs', section='main'):
# Remove an extra pixel that it's not present in other plugins
layout.addSpacing(-1)
# Align close button with the one in other plugins
self.find_widget.setStyleSheet("margin-left: 1px")
else:
self.find_widget.setStyleSheet("margin-bottom: 1px")
layout.addWidget(self.find_widget)
# Manually adjust pane margins, don't know why this is necessary.
# Note: Do this before setting the layout.
if not self.get_conf('vertical_tabs', section='main'):
self._margin_left = self._margin_right = AppStyle.MarginSize - 1
else:
self._margin_right = self._margin_bottom = AppStyle.MarginSize - 1
self.setLayout(layout)
# Accepting drops
self.setAcceptDrops(True)
# Needed to start Spyder in Windows with Python 3.8
# See spyder-ide/spyder#11880
self._init_asyncio_patch()
# Create status widgets
self.matplotlib_status = MatplotlibStatus(self)
self.pythonenv_status = PythonEnvironmentStatus(self)
self.pythonenv_status.sig_interpreter_changed.connect(
self.sig_interpreter_changed
)
self.pythonenv_status.sig_open_preferences_requested.connect(
self.sig_open_preferences_requested)
# Initial value for the current working directory
self._current_working_directory = get_home_dir()
# Remote Consoles menu
self._remote_consoles_menu = None
# ---- PluginMainWidget API and settings handling
# ------------------------------------------------------------------------
def get_title(self):
return _('IPython Console')
def get_focus_widget(self):
client = self.tabwidget.currentWidget()
if client is not None:
return client.get_control()
def setup(self):
# --- Console environments menu
self.console_environment_menu = self.create_menu(
IPythonConsoleWidgetMenus.EnvironmentConsoles,
_('New console in environment')
)
css = self.console_environment_menu.css
css["QMenu"]["menu-scrollable"].setValue("1")
self.console_environment_menu.setStyleSheet(css.toString())
self.console_environment_menu.aboutToShow.connect(
self._update_environment_menu
)
# Submenus
self.conda_envs_menu = self.create_menu(
EnvironmentConsolesSubmenus.CondaMenu,
"Conda",
)
self.pyenv_envs_menu = self.create_menu(
EnvironmentConsolesSubmenus.PyenvMenu,
"Pyenv",
)
self.custom_envs_menu = self.create_menu(
EnvironmentConsolesSubmenus.CustomMenu,
_("Custom"),
)
# --- Main and options menu actions
self.create_client_action = self.create_action(
IPythonConsoleWidgetActions.CreateNewClient,
text=_("New console (default settings)"),
icon=self.create_icon('ipython_console'),
triggered=self.create_new_client,
register_shortcut=True
)
self.restart_action = self.create_action(
IPythonConsoleWidgetActions.Restart,
text=_("Restart kernel"),
icon=self.create_icon('restart'),
triggered=lambda checked: self.restart_kernel(),
register_shortcut=True
)
self.reset_action = self.create_action(
IPythonConsoleWidgetActions.ResetNamespace,
text=_("Remove all variables"),
icon=self.create_icon('editdelete'),
triggered=self.reset_namespace,
register_shortcut=True
)
self.interrupt_action = self.create_action(
IPythonConsoleWidgetActions.Interrupt,
text=_("Interrupt kernel"),
icon=self.create_icon('stop'),
triggered=self.interrupt_kernel,
)
self.connect_to_kernel_action = self.create_action(
IPythonConsoleWidgetActions.ConnectToKernel,
text=_("Connect to existing kernel..."),
tip=_("Open an IPython console connected to an existing kernel"),
triggered=self._create_client_for_kernel,
)
self.rename_tab_action = self.create_action(
IPythonConsoleWidgetActions.RenameTab,
text=_("Rename tab"),
icon=self.create_icon('rename'),
triggered=self.tab_name_editor,
)
next_console_action = self.create_action(
IPythonConsoleWidgetActions.NextConsole,
text=_("Switch to next console"),
icon=self.create_icon('next_wng'),
triggered=lambda: self.tabwidget.tab_navigate(+1),
register_shortcut=True
)
previous_console_action = self.create_action(
IPythonConsoleWidgetActions.PreviousConsole,
text=_("Switch to previous console"),
icon=self.create_icon('prev_wng'),
triggered=lambda: self.tabwidget.tab_navigate(-1),
register_shortcut=True
)
# Register shortcuts to switch to the right/left console
self.register_shortcut_for_widget(
IPythonConsoleWidgetActions.NextConsole,
lambda: self.tabwidget.tab_navigate(+1),
)
self.register_shortcut_for_widget(
IPythonConsoleWidgetActions.PreviousConsole,
lambda: self.tabwidget.tab_navigate(-1),
)
# --- For the client
self.env_action = self.create_action(
IPythonConsoleWidgetActions.ShowEnvironmentVariables,
text=_("Show environment variables"),
icon=self.create_icon('environ'),
triggered=lambda:
self.get_current_shellwidget().request_env()
if self.get_current_shellwidget() else None,
)
self.syspath_action = self.create_action(
IPythonConsoleWidgetActions.ShowSystemPath,
text=_("Show sys.path contents"),
icon=self.create_icon('syspath'),
triggered=lambda:
self.get_current_shellwidget().request_syspath()
if self.get_current_shellwidget() else None,
)
self.show_time_action = self.create_action(
IPythonConsoleWidgetActions.ToggleElapsedTime,
text=_("Show elapsed time"),
toggled=self.set_show_elapsed_time_current_client,
initial=self.get_conf('show_elapsed_time')
)
# --- Context menu actions
# TODO: Shortcut registration not working
self.cut_action = self.create_action(
ClientContextMenuActions.Cut,
text=_("Cut"),
icon=self.create_icon("editcut"),
triggered=self.current_client_cut
)
self.cut_action.setShortcut(QKeySequence.Cut)
self.copy_action = self.create_action(
ClientContextMenuActions.Copy,
text=_("Copy"),
icon=self.create_icon("editcopy"),
triggered=self.current_client_copy
)
self.copy_action.setShortcut(QKeySequence.Copy)
self.copy_raw_action = self.create_action(
ClientContextMenuActions.CopyRaw,
text=_("Copy (raw text)"),
triggered=self._current_client_copy_raw
)
self.paste_action = self.create_action(
ClientContextMenuActions.Paste,
text=_("Paste"),
icon=self.create_icon("editpaste"),
triggered=self.current_client_paste
)
self.paste_action.setShortcut(QKeySequence.Paste)
self.select_all_action = self.create_action(
ClientContextMenuActions.SelectAll,
text=_("Select all"),
icon=self.create_icon("selectall"),
triggered=self.current_client_select_all
)
self.inspect_object_action = self.create_action(
ClientContextMenuActions.InspectObject,
text=_("Inspect current object"),
icon=self.create_icon('MessageBoxInformation'),
triggered=self._current_client_inspect_object,
register_shortcut=True
)
self.enter_array_table_action = self.create_action(
ClientContextMenuActions.ArrayTable,
text=_("Enter array table"),
icon=self.create_icon("arredit"),
triggered=self._current_client_enter_array_table,
register_shortcut=True
)
self.enter_array_inline_action = self.create_action(
ClientContextMenuActions.ArrayInline,
text=_("Enter array inline"),
triggered=self._current_client_enter_array_inline,
register_shortcut=True
)
self.export_html_action = self.export_action = self.create_action(
ClientContextMenuActions.Export,
text=_("Save as html..."),
icon=self.create_icon("CodeFileIcon"),
triggered=self._current_client_export
)
self.print_action = self.create_action(
ClientContextMenuActions.Print,
text=_("Print..."),
icon=self.create_icon("print"),
triggered=self._current_client_print
)
self.clear_line_action = self.create_action(
ClientContextMenuActions.ClearLine,
text=_("Clear line or block"),
icon=self.create_icon("clear_text"),
triggered=self._current_client_clear_line,
register_shortcut=True
)
self.clear_console_action = self.create_action(
ClientContextMenuActions.ClearConsole,
text=_("Clear console"),
icon=self.create_icon("clear_console"),
triggered=self._current_client_clear_console,
register_shortcut=True
)
self.copy_image_action = self.create_action(
ClientContextMenuActions.CopyImage,
text=_("Copy image"),
triggered=self._current_client_copy_image
)
self.save_image_action = self.create_action(
ClientContextMenuActions.SaveImage,
text=_("Save image as..."),
triggered=self._current_client_save_image
)
self.copy_svg_action = self.create_action(
ClientContextMenuActions.CopySvg,
text=_("Copy SVG"),
triggered=self._current_client_copy_svg
)
self.save_svg_action = self.create_action(
ClientContextMenuActions.SaveSvg,
text=_("Save SVG as..."),
triggered=self._current_client_save_svg
)
# The Quit entry was available in Spyder 5 and before, and some users
# were accustomed to click on it.
# Fixes spyder-ide/spyder#24096
self.quit_action = self.create_action(
ClientContextMenuActions.Quit,
_("&Quit"),
icon=self.create_icon('exit'),
triggered=self._current_client_quit,
)
# --- Context menu
self.create_menu(IPythonConsoleWidgetMenus.ClientContextMenu)
# --- Setting options menu
options_menu = self.get_options_menu()
self.special_console_menu = self.create_menu(
IPythonConsoleWidgetMenus.SpecialConsoles,
_('New special console'))
for item in [
self.interrupt_action,
self.restart_action,
self.reset_action,
self.rename_tab_action]:
self.add_item_to_menu(
item,
menu=options_menu,
section=IPythonConsoleWidgetOptionsMenuSections.Edit,
)
for item in [
self.env_action,
self.syspath_action,
self.show_time_action]:
self.add_item_to_menu(
item,
menu=options_menu,
section=IPythonConsoleWidgetOptionsMenuSections.View,
)
for item in [next_console_action, previous_console_action]:
self.add_item_to_menu(
item,
menu=options_menu,
section=IPythonConsoleWidgetOptionsMenuSections.Switch,
)
create_pylab_action = self.create_action(
IPythonConsoleWidgetActions.CreatePyLabClient,
text=_("New Pylab console (data plotting)"),
icon=self.create_icon('ipython_console'),
triggered=self.create_pylab_client,
)
create_sympy_action = self.create_action(
IPythonConsoleWidgetActions.CreateSymPyClient,
text=_("New SymPy console (symbolic math)"),
icon=self.create_icon('ipython_console'),
triggered=self.create_sympy_client,
)
create_cython_action = self.create_action(
IPythonConsoleWidgetActions.CreateCythonClient,
_("New Cython console (Python with C extensions)"),
icon=self.create_icon('ipython_console'),
triggered=self.create_cython_client,
)
for item in [
create_pylab_action,
create_sympy_action,
create_cython_action]:
self.add_item_to_menu(
item,
menu=self.special_console_menu
)
# --- Widgets for the tab corner
self.clear_button = self.create_toolbutton(
IPythonConsoleWidgetCornerWidgets.ClearButton,
text=_("Clear console"),
tip=_("Clear console"),
icon=self.create_icon("clear_console"),
triggered=self._current_client_clear_console,
)
self.stop_button = self.create_toolbutton(
IPythonConsoleWidgetCornerWidgets.InterruptButton,
text=_("Interrupt kernel"),
tip=_("Interrupt kernel"),
icon=self.create_icon('stop'),
triggered=self.interrupt_kernel,
)
self.time_label = QLabel("")
self.time_label.name = (
IPythonConsoleWidgetCornerWidgets.TimeElapsedLabel
)
# --- Add tab corner widgets.
self.add_corner_widget(self.stop_button)
self.add_corner_widget(self.clear_button)
self.add_corner_widget(self.time_label)
# --- Tabs context menu
tabs_context_menu = self.create_menu(
IPythonConsoleWidgetMenus.TabsContextMenu)
for item in [
self.create_client_action,
self.console_environment_menu,
self.special_console_menu,
self.connect_to_kernel_action]:
self.add_item_to_menu(
item,
menu=tabs_context_menu,
section=IPythonConsoleWidgetTabsContextMenuSections.Consoles,
)
for item in [
self.interrupt_action,
self.restart_action,
self.reset_action,
self.rename_tab_action]:
self.add_item_to_menu(
item,
menu=tabs_context_menu,
section=IPythonConsoleWidgetTabsContextMenuSections.Edit,
)
self.tabwidget.menu = tabs_context_menu
# --- Create IPython documentation menu
self.ipython_menu = self.create_menu(
menu_id=IPythonConsoleWidgetMenus.Documentation,
title=_("IPython documentation"))
intro_action = self.create_action(
IPythonConsoleWidgetActions.IPythonDocumentation,
text=_("Intro to IPython"),
triggered=self.show_intro
)
quickref_action = self.create_action(
IPythonConsoleWidgetActions.QuickReference,
text=_("Quick reference"),
triggered=self.show_quickref
)
guiref_action = self.create_action(
IPythonConsoleWidgetActions.ConsoleHelp,
text=_("Console help"),
triggered=self.show_guiref
)
for help_action in [
intro_action, guiref_action, quickref_action]:
self.ipython_menu.add_action(help_action)
def set_show_elapsed_time_current_client(self, state):
if self.get_current_client():
client = self.get_current_client()
client.set_show_elapsed_time(state)
self.refresh_container()
def update_actions(self):
client = self.get_current_client()
if client is not None:
# Executing state
executing = client.is_client_executing()
self.interrupt_action.setEnabled(executing)
self.stop_button.setEnabled(executing)
# Client is loading or showing a kernel error
if (
client.infowidget is not None
and client.info_page is not None
):
error_or_loading = not client.is_kernel_active()
self.restart_action.setEnabled(not error_or_loading)
self.reset_action.setEnabled(not error_or_loading)
self.env_action.setEnabled(not error_or_loading)
self.syspath_action.setEnabled(not error_or_loading)
self.show_time_action.setEnabled(not error_or_loading)
# ---- GUI options
@on_conf_change(section='help', option='connect/ipython_console')
def change_clients_help_connection(self, value):
for idx, client in enumerate(self.clients):
self._change_client_conf(
client,
client.get_control(pager=False).set_help_enabled,
value
)
@on_conf_change(section='appearance', option=['selected', 'ui_theme'])
def change_clients_color_scheme(self, option, value):
if option == 'ui_theme':
value = self.get_conf('selected', section='appearance')
for idx, client in enumerate(self.clients):
self._change_client_conf(
client,
client.set_color_scheme,
value)
@on_conf_change(option='show_elapsed_time')
def change_clients_show_elapsed_time(self, value):
for idx, client in enumerate(self.clients):
self._change_client_conf(
client,
client.set_show_elapsed_time,
value)
if self.get_current_client():
self.refresh_container()
@on_conf_change(option='show_reset_namespace_warning')
def change_clients_show_reset_namespace_warning(self, value):
for idx, client in enumerate(self.clients):
def change_client_reset_warning(value=value):
client.reset_warning = value
self._change_client_conf(
client,
change_client_reset_warning,
value)
@on_conf_change(option='show_calltips')
def change_clients_show_calltips(self, value):
for idx, client in enumerate(self.clients):
self._change_client_conf(
client,
client.shellwidget.set_show_calltips,
value)
@on_conf_change(option='buffer_size')
def change_clients_buffer_size(self, value):
for idx, client in enumerate(self.clients):
self._change_client_conf(
client,
client.shellwidget.set_buffer_size,
value)
@on_conf_change(option='completion_type')
def change_clients_completion_type(self, value):
for idx, client in enumerate(self.clients):
self._change_client_conf(
client,
client.shellwidget._set_completion_widget,
COMPLETION_WIDGET_TYPE[value])
# ---- Advanced GUI options
@on_conf_change(option='in_prompt')
def change_clients_in_prompt(self, value):
if bool(value):
for idx, client in enumerate(self.clients):
self._change_client_conf(
client,
client.shellwidget.set_in_prompt,
value)
@on_conf_change(option='out_prompt')
def change_clients_out_prompt(self, value):
if bool(value):
for idx, client in enumerate(self.clients):
self._change_client_conf(
client,
client.shellwidget.set_out_prompt,
value)
# ---- Advanced options
@on_conf_change(option='greedy_completer')
def change_clients_greedy_completer(self, value):
for idx, client in enumerate(self.clients):
self._change_client_conf(
client,
client.shellwidget.set_greedy_completer,
value)
@on_conf_change(option='jedi_completer')
def change_clients_jedi_completer(self, value):
for idx, client in enumerate(self.clients):
self._change_client_conf(
client,
client.shellwidget.set_jedi_completer,
value)
@on_conf_change(option='autocall')
def change_clients_autocall(self, value):
for idx, client in enumerate(self.clients):
self._change_client_conf(
client,
client.shellwidget.set_autocall,
value)
    @on_conf_change(
        option=[
            "symbolic_math",
            "hide_cmd_windows",
            "startup/run_lines",
            "startup/use_run_file",
            "startup/run_file",
            "pylab",
            "pylab/backend",
            "pylab/autoload",
            "pylab/inline/figure_format",
            "pylab/inline/resolution",
            "pylab/inline/width",
            "pylab/inline/height",
            "pylab/inline/fontsize",
            "pylab/inline/bottom",
            "pylab/inline/bbox_inches",
        ]
    )
    def change_possible_restart_and_mpl_conf(self, option, value):
        """
        Apply options that possibly require a kernel restart or related to
        Matplotlib inline backend options.

        Parameters
        ----------
        option: str
            Name of the option that changed (one of those listed in the
            decorator above).
        value: object
            New value for the option.
        """
        # Check that we are not triggering validations in the initial
        # notification sent when Spyder is starting.
        if not self._testing:
            if option in self._initial_conf_options:
                self._initial_conf_options.remove(option)
                return

        restart_needed = False
        restart_options = []

        # Startup options (needs a restart)
        autoload_n = "pylab/autoload"
        run_lines_n = 'startup/run_lines'
        use_run_file_n = 'startup/use_run_file'
        run_file_n = 'startup/run_file'

        # Graphic options
        pylab_n = 'pylab'
        pylab_o = self.get_conf(pylab_n)
        pylab_backend_n = 'pylab/backend'

        # Advanced options (needs a restart)
        symbolic_math_n = 'symbolic_math'
        hide_cmd_windows_n = 'hide_cmd_windows'

        restart_options += [
            autoload_n,
            run_lines_n,
            use_run_file_n,
            run_file_n,
            symbolic_math_n,
            hide_cmd_windows_n,
        ]
        restart_needed = (
            option in restart_options
            # Deactivating graphics support
            or (option == pylab_n and not value)
        )

        inline_backend = 'inline'
        pylab_restart = False
        clients_backend_require_restart = [False] * len(self.clients)
        current_client = self.get_current_client()
        current_client_backend_require_restart = False

        # A backend change only matters when graphics support is on and
        # there is at least one client to apply it to.
        if pylab_o and pylab_backend_n == option and current_client:
            pylab_backend_o = self.get_conf(pylab_backend_n)

            # Check if clients require a restart due to a change in
            # interactive backend.
            clients_backend_require_restart = []
            for client in self.clients:
                if pylab_backend_o == inline_backend:
                    # No restart is needed if the new backend is inline
                    clients_backend_require_restart.append(False)
                    continue

                # Need to know the interactive state
                sw = client.shellwidget
                if sw._starting:
                    # If the kernel didn't start and no backend was requested,
                    # the backend is inline
                    interactive_backend = inline_backend
                else:
                    # Must ask the kernel. Will not work if the kernel was set
                    # to another backend and is not now inline
                    interactive_backend = sw.get_mpl_interactive_backend()

                if (
                    # There was an error getting the interactive backend in
                    # the kernel, so we can't proceed.
                    interactive_backend is not None
                    # There has to be an interactive backend (i.e. different
                    # from inline) set in the kernel before. Else, a restart
                    # is not necessary.
                    and interactive_backend != inline_backend
                    # The interactive backend to switch to has to be different
                    # from the current one
                    and interactive_backend != pylab_backend_o
                    # There's no need to request a restart for the auto backend
                    and pylab_backend_o != "auto"
                ):
                    clients_backend_require_restart.append(True)

                    # Detect if current client requires restart
                    if id(client) == id(current_client):
                        current_client_backend_require_restart = True

                        # For testing
                        if self._testing:
                            os.environ['BACKEND_REQUIRE_RESTART'] = 'true'
                else:
                    clients_backend_require_restart.append(False)

            pylab_restart = any(clients_backend_require_restart)

        if (restart_needed or pylab_restart) and not running_under_pytest():
            # This allows us to decide if we need to show the restart dialog.
            # For that we compare the last time it was shown with the current
            # one. If difference is less than 300 ms, it means this method was
            # called after users changed several options at the same time in
            # Preferences. And if that's case, we don't need to show the
            # dialog each time.
            show_restart_dialog = True
            if self._last_time_for_restart_dialog is not None:
                current_time = time.time()
                if current_time - self._last_time_for_restart_dialog < 0.3:
                    show_restart_dialog = False
                    self._last_time_for_restart_dialog = current_time
                else:
                    self._last_time_for_restart_dialog = None

            if show_restart_dialog:
                self._restart_dialog.exec_()
                (
                    restart_all,
                    restart_current,
                    no_restart,
                ) = self._restart_dialog.get_action_value()

                if self._last_time_for_restart_dialog is None:
                    self._last_time_for_restart_dialog = time.time()
            else:
                # If there's no need to show the dialog, we reuse the values
                # saved on it the last time it was used.
                restart_all = self._restart_dialog.restart_all
                restart_current = self._restart_dialog.restart_current
                no_restart = self._restart_dialog.no_restart
        else:
            restart_all = False
            restart_current = False
            no_restart = True

        # Apply settings
        options = {option: value}
        for idx, client in enumerate(self.clients):
            restart = (
                (pylab_restart and clients_backend_require_restart[idx]) or
                restart_needed
            )

            _options = options.copy()
            if autoload_n in options:
                # Autoload can't be applied without a restart. This avoids an
                # incorrect message in the console too.
                _options.pop(autoload_n)

            if pylab_n in _options and pylab_o:
                # Activating support requires sending all inline configs
                _options = None

            if not (restart and restart_all) or no_restart:
                sw = client.shellwidget
                if sw.is_debugging() and sw._executing:
                    # Apply conf when the next Pdb prompt is available.
                    # NOTE(review): ``sw`` and ``change_client_mpl_conf`` are
                    # late-bound in this closure; if callbacks for several
                    # clients are pending at once they resolve to the last
                    # loop iteration — confirm this is intended.
                    def change_client_mpl_conf(o=_options, c=client):
                        self._change_client_mpl_conf(o, c)
                        sw.sig_pdb_prompt_ready.disconnect(
                            change_client_mpl_conf)

                    sw.sig_pdb_prompt_ready.connect(change_client_mpl_conf)
                else:
                    self._change_client_mpl_conf(_options, client)
            elif restart and restart_all:
                self.restart_kernel(client, ask_before_restart=False)

        # Finally, restart the current client if the user asked for it and
        # it's affected by the change.
        if (
            (
                (pylab_restart and current_client_backend_require_restart)
                or restart_needed
            )
            and restart_current
            and current_client
        ):
            self.restart_kernel(current_client, ask_before_restart=False)
# ---- Private methods
# -------------------------------------------------------------------------
def _create_info_widget(self):
from spyder.widgets.browser import FrameWebView
infowidget = FrameWebView(self)
if WEBENGINE:
infowidget.page().setBackgroundColor(
QColor(MAIN_BG_COLOR))
else:
infowidget.setStyleSheet(
"background:{}".format(MAIN_BG_COLOR)
)
return infowidget
    def _change_client_conf(self, client, client_conf_func, value):
        """
        Change a client configuration option, taking into account if it is
        in a debugging session.

        Parameters
        ----------
        client : ClientWidget
            Client to update configuration.
        client_conf_func : Callable
            Client method to use to change the configuration.
        value : any
            New value for the client configuration.

        Returns
        -------
        None.
        """
        sw = client.shellwidget
        if not client.is_client_executing():
            # The client is idle, so the new value can be applied right away.
            client_conf_func(value)
        elif client.shellwidget.is_debugging():
            # While debugging, defer the change until the next Pdb prompt.
            # The closure captures client/func/value as defaults and
            # disconnects itself so it only runs once.
            def change_conf(c=client, ccf=client_conf_func, value=value):
                ccf(value)
                c.shellwidget.sig_pdb_prompt_ready.disconnect(change_conf)
            sw.sig_pdb_prompt_ready.connect(change_conf)
        else:
            # Busy running regular code: defer until the next prompt.
            def change_conf(c=client, ccf=client_conf_func, value=value):
                ccf(value)
                c.shellwidget.sig_prompt_ready.disconnect(change_conf)
            sw.sig_prompt_ready.connect(change_conf)
def _change_client_mpl_conf(self, options, client):
"""Apply Matplotlib related configurations to a client."""
client.shellwidget.send_mpl_backend(options)
    def _init_asyncio_patch(self):
        """
        Work around the Tornado 6.0 / Proactor incompatibility on Windows.

        - This was fixed in Tornado 6.1!
        - Same workaround fix as ipython/ipykernel#564
        - ref: tornadoweb/tornado#2608
        - On Python 3.8+, Tornado 6.0 is not compatible with the default
          asyncio implementation on Windows. Pick the older
          SelectorEventLoopPolicy if the known-incompatible default policy is
          in use.
        - Do this as early as possible to make it a low priority and
          overrideable.
        """
        # Only relevant on Windows with Python >= 3.8 (where Proactor became
        # the default event loop policy).
        if os.name == 'nt' and sys.version_info[:2] >= (3, 8):
            # Tests on Linux hang if we don't leave this import here.
            import tornado
            if tornado.version_info >= (6, 1):
                # Tornado already handles the Proactor loop itself.
                return

            import asyncio
            try:
                from asyncio import (
                    WindowsProactorEventLoopPolicy,
                    WindowsSelectorEventLoopPolicy,
                )
            except ImportError:
                # not affected
                pass
            else:
                if isinstance(
                        asyncio.get_event_loop_policy(),
                        WindowsProactorEventLoopPolicy):
                    # WindowsProactorEventLoopPolicy is not compatible
                    # with tornado 6 fallback to the pre-3.8
                    # default of Selector
                    asyncio.set_event_loop_policy(
                        WindowsSelectorEventLoopPolicy())
@Slot()
def _create_client_for_kernel(self):
"""Create a client connected to an existing kernel"""
connect_output = KernelConnectionDialog.get_connection_parameters(self)
(connection_file, hostname, sshkey, password, ok) = connect_output
if not ok:
return
try:
# Fix path
connection_file = self.find_connection_file(connection_file)
except (IOError, UnboundLocalError):
QMessageBox.critical(self, _('IPython'),
_("Unable to connect to "
"<b>%s</b>") % connection_file)
return
self.create_client_for_kernel(
connection_file, hostname, sshkey, password)
    def _update_environment_menu(self):
        """Update submenu with entries for available interpreters."""
        # Clear menu and submenus before rebuilding them
        self.console_environment_menu.clear_actions()
        self.conda_envs_menu.clear_actions()
        self.pyenv_envs_menu.clear_actions()
        self.custom_envs_menu.clear_actions()

        # One action per detected environment, grouped by provenance.
        internal_action = None
        conda_actions = []
        pyenv_actions = []
        custom_actions = []
        for env_key, env_info in self.envs.items():
            # env_key is expected to end with the env name (e.g.
            # "Conda: myenv") — the last whitespace-separated token is used.
            env_name = env_key.split()[-1]
            path_to_interpreter, python_version = env_info

            # Elide long entries so the menu doesn't grow too wide.
            fm = QFontMetrics(self.font())
            env_elided = fm.elidedText(
                env_key, Qt.ElideMiddle, 250
            )

            # Text for actions
            text = f"{env_elided} ({python_version})"

            # Change text in case env is the default or internal (i.e. same as
            # Spyder) one
            default_interpreter = self.get_conf(
                "executable", section="main_interpreter"
            )
            if path_to_interpreter == default_interpreter:
                text = _("Default") + " / " + text
            elif (
                path_to_interpreter == sys.executable
                and default_interpreter != sys.executable
            ):
                text = _("Internal") + " / " + text

            # Create action
            action = self.create_action(
                name=env_key,
                text=text,
                icon=self.create_icon('ipython_console'),
                triggered=(
                    self.create_new_client
                    if path_to_interpreter == default_interpreter
                    else functools.partial(
                        self.create_environment_client,
                        env_name,
                        path_to_interpreter
                    )
                ),
                overwrite=True,
                register_action=False,
                tip=text,
            )

            # Add default env as the first entry in the menu
            if text.startswith(_("Default")):
                self.add_item_to_menu(
                    action,
                    menu=self.console_environment_menu,
                    section=EnvironmentConsolesMenuSections.Default
                )

            # Group other actions to add them later
            if text.startswith(_("Internal")):
                internal_action = action
            elif text.startswith("Conda"):
                conda_actions.append(action)
            elif text.startswith("Pyenv"):
                pyenv_actions.append(action)
            elif text.startswith(_("Custom")):
                custom_actions.append(action)

        # Add internal action, if available
        if internal_action:
            self.add_item_to_menu(
                internal_action,
                menu=self.console_environment_menu,
                section=EnvironmentConsolesMenuSections.Default
            )

        # Add other envs to their respective submenus or sections
        max_actions_in_menu = 20
        n_categories = len(
            [
                actions
                for actions in [conda_actions, pyenv_actions, custom_actions]
                if len(actions) > 0
            ]
        )
        actions = conda_actions + pyenv_actions + custom_actions

        # We use submenus if there are more envs than we'd like to see
        # displayed in the consoles menu, and there are at least two non-empty
        # categories.
        if len(actions) > max_actions_in_menu and n_categories > 1:
            conda_menu = self.conda_envs_menu
            if conda_actions:
                self.add_item_to_menu(
                    conda_menu,
                    menu=self.console_environment_menu,
                    section=EnvironmentConsolesMenuSections.Submenus,
                )

            pyenv_menu = self.pyenv_envs_menu
            if pyenv_actions:
                self.add_item_to_menu(
                    pyenv_menu,
                    menu=self.console_environment_menu,
                    section=EnvironmentConsolesMenuSections.Submenus,
                )

            custom_menu = self.custom_envs_menu
            if custom_actions:
                self.add_item_to_menu(
                    custom_menu,
                    menu=self.console_environment_menu,
                    section=EnvironmentConsolesMenuSections.Submenus,
                )

            # Submenus don't have sections
            conda_section = pyenv_section = custom_section = None
        else:
            # If there are few envs, we add their actions to the consoles menu.
            # But we use sections only if there are two or more envs per
            # category. Otherwise we group them in a single section called
            # "Other". We do that because having many menu sections with a
            # single entry makes the UI look odd.
            conda_menu = (
                pyenv_menu
            ) = custom_menu = self.console_environment_menu

            conda_section = (
                EnvironmentConsolesMenuSections.Conda
                if len(conda_actions) > 1
                else EnvironmentConsolesMenuSections.Other
            )
            pyenv_section = (
                EnvironmentConsolesMenuSections.Pyenv
                if len(pyenv_actions) > 1
                else EnvironmentConsolesMenuSections.Other
            )
            custom_section = (
                EnvironmentConsolesMenuSections.Custom
                if len(custom_actions) > 1
                else EnvironmentConsolesMenuSections.Other
            )

        # Add actions to menu or submenus
        for action in actions:
            if action in conda_actions:
                menu = conda_menu
                section = conda_section
            elif action in pyenv_actions:
                menu = pyenv_menu
                section = pyenv_section
            else:
                menu = custom_menu
                section = custom_section

            self.add_item_to_menu(
                action,
                menu=menu,
                section=section,
            )

        # Render consoles menu and submenus
        self.console_environment_menu.render()
def _connect_new_client_to_kernel(
self, cache, path_to_custom_interpreter, client, future
):
"""Connect kernel to client after environment variables are obtained"""
try:
# Create new kernel
kernel_spec = SpyderKernelSpec(
path_to_custom_interpreter=path_to_custom_interpreter
)
kernel_spec.env = future.result()
kernel_handler = self.get_cached_kernel(kernel_spec, cache=cache)
except Exception as e:
client.show_kernel_error(e)
return
# Connect kernel to client
client.connect_kernel(kernel_handler)
    def _run_script(
        self,
        filename,
        wdir,
        args,
        post_mortem,
        clear_variables,
        console_namespace,
        method,
        client,
        current_client
    ):
        """
        Run or debug *filename* in *client*.

        Builds a ``%runfile``/``%debugfile`` magic line for Spyder kernels,
        or a plain ``%run`` line for external IPython kernels, and executes
        it through ``self.execute_code``.
        """
        if method is None:
            # Default to plain execution (as opposed to e.g. "debugfile").
            method = "runfile"

        def norm(text):
            # Normalize Windows path backslashes before embedding in a magic.
            return remove_backslashes(str(text))

        # The kernel must be connected before the following condition is
        # tested. This is why self._run_script must wait for sig_prompt_ready
        # if a new client was created.
        if client.shellwidget.is_spyder_kernel:
            # If spyder-kernels, use runfile
            magic_arguments = [norm(filename)]
            if args:
                magic_arguments.append("--args")
                magic_arguments.append(norm(args))
            if wdir:
                if wdir == os.path.dirname(filename):
                    # No working directory for external kernels
                    # if it has not been explicitly given.
                    if not client.shellwidget.is_external_kernel:
                        magic_arguments.append("--wdir")
                else:
                    magic_arguments.append("--wdir")
                    magic_arguments.append(norm(wdir))
            if post_mortem:
                magic_arguments.append("--post-mortem")
            if console_namespace:
                magic_arguments.append("--current-namespace")

            line = "%{} {}".format(method, shlex.join(magic_arguments))
        elif method in ["runfile", "debugfile"]:
            # External, non spyder-kernels, use %run
            magic_arguments = []

            if method == "debugfile":
                magic_arguments.append("-d")
            magic_arguments.append(filename)
            if args:
                magic_arguments.append(norm(args))

            line = "%run " + shlex.join(magic_arguments)
        else:
            # Any other method needs spyder-kernels; tell the user why
            # nothing happened.
            client.shellwidget.append_html_message(
                _(
                    "The console is not running a Spyder-kernel, so it can't "
                    "execute <b>{}</b>.<br><br>"
                    "Please use a Spyder-kernel for this."
                ).format(method),
                before_prompt=True
            )
            return

        try:
            if client.shellwidget._executing:
                # Don't allow multiple executions when there's
                # still an execution taking place
                # Fixes spyder-ide/spyder#7293.
                pass
            else:
                self.execute_code(
                    line,
                    current_client,
                    clear_variables,
                    shellwidget=client.shellwidget,
                )
        except AttributeError:
            pass
# ---- Public API
# -------------------------------------------------------------------------
    def find_connection_file(self, connection_file):
        """
        Fix connection file path.

        Resolves *connection_file* (possibly a bare filename or id) to an
        existing ``.json`` connection file, or returns None if none is found.
        """
        cf_path = osp.dirname(connection_file)
        cf_filename = osp.basename(connection_file)

        # To change a possible empty string to None
        cf_path = cf_path if cf_path else None

        # This error is raised when find_connection_file can't find the file
        try:
            # NOTE(review): this resolves to the module-level helper of the
            # same name (presumably jupyter_client's find_connection_file,
            # which this method shadows) — confirm against the file's imports.
            connection_file = find_connection_file(
                filename=cf_filename, path=cf_path
            )
        except OSError:
            connection_file = None

        if connection_file and os.path.splitext(connection_file)[1] != ".json":
            # There might be a file with the same id in the path.
            connection_file = find_connection_file(
                filename=cf_filename + ".json", path=cf_path
            )
        return connection_file
@property
def infowidget(self):
"""
This is necessary to prevent an error when, in some situations, Python
garbage collects _infowidget.
Notes
-----
* See spyder-ide/spyder#21509 and spyder-ide/spyder#23529
"""
try:
# We need to call a method to detect if the object was garbage
# collected and trigger the RuntimeError we want to catch here.
self._infowidget.isVisible()
return self._infowidget
except RuntimeError:
self._infowidget = self._create_info_widget()
return self._infowidget
except AttributeError:
return None
# ---- General
# -------------------------------------------------------------------------
def update_edit_menu(self) -> None:
"""
Enable edition related actions when a client is available.
"""
undo_action_enabled = False
redo_action_enabled = False
cut_action_enabled = False
copy_action_enabled = False
paste_action_enabled = False
client = self.get_current_client()
if client:
undo_action_enabled = (
client.shellwidget._control.document().isUndoAvailable()
)
redo_action_enabled = (
client.shellwidget._control.document().isRedoAvailable()
)
cut_action_enabled = client.shellwidget.can_cut()
copy_action_enabled = client.shellwidget.can_copy()
paste_action_enabled = client.shellwidget.can_paste()
for action, enabled in [
(ApplicationActions.Undo, undo_action_enabled),
(ApplicationActions.Redo, redo_action_enabled),
(ApplicationActions.Cut, cut_action_enabled),
(ApplicationActions.Copy, copy_action_enabled),
(ApplicationActions.Paste, paste_action_enabled),
]:
self.sig_edit_action_enabled.emit(action, enabled)
def update_font(self, font, app_font):
self._font = font
self._app_font = app_font
if self.enable_infowidget and self.infowidget is not None:
self.infowidget.set_font(app_font)
for client in self.clients:
client.set_font(font)
def update_envs(self, envs: dict):
"""Update the detected environments in the system."""
self.envs = envs
    def refresh_container(self, give_focus=False):
        """
        Refresh interface depending on the current widget client available.

        Refreshes corner widgets and actions as well as the info widget and
        sets the shellwidget and client signals
        """
        client = None
        if self.tabwidget.count():
            # Stop elapsed-time updates for all clients; the current one is
            # reconnected below.
            for instance_client in self.clients:
                try:
                    instance_client.timer.timeout.disconnect()
                except (RuntimeError, TypeError):
                    # No connection (TypeError) or already-deleted Qt object
                    # (RuntimeError) — safe to ignore.
                    pass

            client = self.tabwidget.currentWidget()

            # Decide what to show for each client
            if (client.info_page != client.blank_page and
                    self.enable_infowidget):
                # Show info_page if it has content
                client.set_info_page()
                client.shellwidget.hide()
                client.layout.addWidget(self.infowidget)
                if self.infowidget is not None:
                    self.infowidget.show()
            else:
                if self.enable_infowidget and self.infowidget is not None:
                    self.infowidget.hide()
                client.shellwidget.show()

            # Get reference for the control widget of the selected tab
            # and give focus if needed
            control = client.get_control()
            if give_focus:
                control.setFocus()

            # The pager label is only relevant when the control is a pager.
            if isinstance(control, PageControlWidget):
                self.pager_label.show()
            else:
                self.pager_label.hide()

            # Setup elapsed time
            show_elapsed_time = client.show_elapsed_time
            self.show_time_action.setChecked(show_elapsed_time)
            client.timer.timeout.connect(client.show_time)
            client.timer.start(1000)
            client.timer.timeout.emit()
        else:
            control = None
        self.find_widget.set_editor(control)

        if client:
            sw = client.shellwidget
            self.sig_shellwidget_changed.emit(sw)

            # This is necessary to sync the current client cwd with the working
            # directory displayed by other plugins in Spyder (e.g. Files).
            # NOTE: Instead of emitting sig_current_directory_changed directly,
            # we call on_working_directory_changed to validate that the cwd
            # exists (this couldn't be the case for remote kernels).
            if sw.get_cwd() != self.get_working_directory():
                self.on_working_directory_changed(sw.get_cwd(), sw.server_id)

        self.update_tabs_text()
        self.update_actions()
# ---- For tabs
# -------------------------------------------------------------------------
def add_tab(self, client, name, filename='', give_focus=True):
"""Add tab."""
if not isinstance(client, ClientWidget):
return
self.clients.append(client)
index = self.tabwidget.addTab(client, name)
self.filenames.insert(index, filename)
self.tabwidget.setCurrentIndex(index)
if self.dockwidget and give_focus:
self.sig_switch_to_plugin_requested.emit()
self.activateWindow()
client.get_control().setFocus()
self.update_tabs_text()
# Register client
self.register_client(client)
def select_tab(self, shellwidget):
"""
Select tab with given shellwidget.
"""
for client in self.clients:
if client.shellwidget == shellwidget:
self.tabwidget.setCurrentWidget(client)
return
def move_tab(self, index_from, index_to):
"""
Move tab (tabs themselves have already been moved by the tabwidget).
"""
filename = self.filenames.pop(index_from)
client = self.clients.pop(index_from)
self.filenames.insert(index_to, filename)
self.clients.insert(index_to, client)
self.update_tabs_text()
def disambiguate_fname(self, fname):
"""Generate a file name without ambiguation."""
files_path_list = [filename for filename in self.filenames
if filename]
return sourcecode.disambiguate_fname(files_path_list, fname)
def update_tabs_text(self):
"""Update the text from the tabs."""
# This is needed to prevent that hanged consoles make reference
# to an index that doesn't exist. See spyder-ide/spyder#4881.
try:
for index, fname in enumerate(self.filenames):
client = self.clients[index]
if fname:
self.rename_client_tab(client,
self.disambiguate_fname(fname))
else:
self.rename_client_tab(client, None)
except IndexError:
pass
def rename_client_tab(self, client, given_name):
"""Rename a client's tab."""
index = self.get_client_index_from_id(id(client))
if given_name is not None:
client.given_name = given_name
self.tabwidget.setTabText(index, client.get_name())
def rename_tabs_after_change(self, given_name):
"""Rename tabs after a change in name."""
client = self.get_current_client()
# Prevent renames that want to assign the same name of a previous tab
repeated = False
for cl in self.clients:
if id(client) != id(cl) and given_name == cl.given_name:
repeated = True
break
# Rename current client tab to add str_id
if client.allow_rename and u'/' not in given_name and not repeated:
self.rename_client_tab(client, given_name)
else:
self.rename_client_tab(client, None)
# Rename related clients
if client.allow_rename and u'/' not in given_name and not repeated:
for cl in self.get_related_clients(client):
self.rename_client_tab(cl, given_name)
def rename_remote_clients(self, server_id):
"""Rename all clients connected to a remote server."""
hostname = self._plugin._remote_client.get_server_name(server_id)
for client in self.clients:
if (
client.is_remote()
and client.jupyter_api.server_id == server_id
):
client.hostname = hostname
index = self.get_client_index_from_id(id(client))
self.tabwidget.setTabText(index, client.get_name())
def tab_name_editor(self):
"""Trigger the tab name editor."""
index = self.tabwidget.currentIndex()
self.tabwidget.tabBar().tab_name_editor.edit_tab(index)
# --- For clients
# -------------------------------------------------------------------------
# ---- For magics and configurations
@Slot(object, object)
def edit_file(self, filename, line):
"""Handle %edit magic petitions."""
if not osp.isfile(filename):
self.sig_edit_new.emit(filename)
if encoding.is_text_file(filename):
# The default line number sent by ipykernel is always the last
# one, but we prefer to use the first.
self.sig_edit_goto_requested.emit(filename, 1, '')
    def config_options(self):
        """
        Generate a Trailets Config instance for shell widgets using our
        config system

        This lets us create each widget with its own config
        """
        # ---- Jupyter config ----
        try:
            # Honor the user's qtconsole settings, if a config file exists.
            full_cfg = load_pyconfig_files(['jupyter_qtconsole_config.py'],
                                           jupyter_config_dir())

            # From the full config we only select the JupyterWidget section
            # because the others have no effect here.
            cfg = Config({'JupyterWidget': full_cfg.JupyterWidget})
        except Exception:
            # Fall back to an empty config on any read/parse problem.
            cfg = Config()

        # ---- Spyder config ----
        spy_cfg = Config()

        # Make the pager widget a rich one (i.e a QTextEdit)
        spy_cfg.JupyterWidget.kind = 'rich'

        # Gui completion widget
        completion_type_o = self.get_conf('completion_type')
        completions = COMPLETION_WIDGET_TYPE
        spy_cfg.JupyterWidget.gui_completion = completions[completion_type_o]

        # Calltips
        calltips_o = self.get_conf('show_calltips')
        spy_cfg.JupyterWidget.enable_calltips = calltips_o

        # Buffer size
        buffer_size_o = self.get_conf('buffer_size')
        spy_cfg.JupyterWidget.buffer_size = buffer_size_o

        # Prompts (empty values keep the qtconsole defaults)
        in_prompt_o = self.get_conf('in_prompt')
        out_prompt_o = self.get_conf('out_prompt')
        if bool(in_prompt_o):
            spy_cfg.JupyterWidget.in_prompt = in_prompt_o
        if bool(out_prompt_o):
            spy_cfg.JupyterWidget.out_prompt = out_prompt_o

        # Style
        color_scheme = self.get_conf('selected', section='appearance')
        style_sheet = create_qss_style(color_scheme)[0]
        spy_cfg.JupyterWidget.style_sheet = style_sheet
        spy_cfg.JupyterWidget.syntax_style = color_scheme

        # Merge QtConsole and Spyder configs. Spyder prefs will have
        # prevalence over QtConsole ones
        cfg._merge(spy_cfg)
        return cfg
def additional_options(self, special=None):
"""
Additional options for shell widgets that are not defined
in JupyterWidget config options
"""
options = dict(
pylab=self.get_conf('pylab'),
autoload_pylab=self.get_conf('pylab/autoload'),
sympy=self.get_conf('symbolic_math'),
show_banner=self.get_conf('show_banner')
)
if special == "pylab":
options['autoload_pylab'] = True
options['sympy'] = False
elif special == "sympy":
options['autoload_pylab'] = False
options['sympy'] = True
return options
# ---- For client widgets
def get_focus_client(self) -> ClientWidget | None:
"""Return current client with focus, if any"""
widget = QApplication.focusWidget()
for client in self.clients:
if widget is client or widget is client.get_control():
return client
def get_current_client(self) -> ClientWidget | None:
"""Return the currently selected client"""
client = self.tabwidget.currentWidget()
if client is not None:
return client
def get_current_shellwidget(self) -> ShellWidget | None:
"""Return the shellwidget of the current client"""
client = self.get_current_client()
if client is not None:
return client.shellwidget
    @Slot()
    @Slot(bool)
    @Slot(str)
    @Slot(bool, str)
    @Slot(bool, str, str)
    @Slot(bool, bool)
    @Slot(bool, str, bool)
    def create_new_client(
        self,
        give_focus=True,
        filename='',
        special=None,
        given_name=None,
        cache=True,
        initial_cwd=None,
        path_to_custom_interpreter=None
    ):
        """
        Create a new client.
        Uses asynchronous get_user_environment_variables and connects to kernel
        upon future completion.
        """
        self.master_clients += 1
        client_id = dict(int_id=str(self.master_clients),
                         str_id='A')
        # Find what kind of kernel we want
        # NOTE(review): these preferences override any `special` value the
        # caller passed (e.g. create_cython_client passes "cython") — confirm
        # this is intended.
        if self.get_conf('pylab/autoload'):
            special = "pylab"
        elif self.get_conf('symbolic_math'):
            special = "sympy"
        client = ClientWidget(
            self,
            id_=client_id,
            config_options=self.config_options(),
            additional_options=self.additional_options(special),
            given_name=given_name,
            give_focus=give_focus,
            handlers=self.registered_spyder_kernel_handlers,
            initial_cwd=initial_cwd,
            forcing_custom_interpreter=path_to_custom_interpreter is not None,
            special_kernel=special
        )
        # Connect to the kernel only after the user's environment variables
        # have been fetched asynchronously.
        future = get_user_environment_variables()
        future.connect(
            AsyncDispatcher.QtSlot(
                functools.partial(
                    self._connect_new_client_to_kernel,
                    cache,
                    path_to_custom_interpreter,
                    client,
                )
            )
        )
        # Add client to widget
        self.add_tab(
            client, name=client.get_name(), filename=filename,
            give_focus=give_focus)
        return client
    def create_client_for_kernel(
        self,
        connection_file,
        hostname,
        sshkey,
        password,
        jupyter_api=None,
        files_api=None,
        give_focus=False,
        can_close=True
    ):
        """Create a client connected to an existing kernel."""
        given_name = None
        master_client = None
        # Find already-open clients that share this connection file and the
        # same connection credentials.
        related_clients = []
        for cl in self.clients:
            if cl.connection_file and connection_file in cl.connection_file:
                if (
                    cl.kernel_handler is not None and
                    hostname == cl.kernel_handler.hostname and
                    sshkey == cl.kernel_handler.sshkey and
                    password == cl.kernel_handler.password
                ):
                    related_clients.append(cl)
        if len(related_clients) > 0:
            # Get master client
            master_client = related_clients[0]
            given_name = master_client.given_name
            # Pick the letter after the highest existing slave id so the new
            # client gets a unique "<int_id>/<letter>" identity.
            slave_ord = ord('A') - 1
            for cl in related_clients:
                new_slave_ord = ord(cl.id_['str_id'])
                if new_slave_ord > slave_ord:
                    slave_ord = new_slave_ord
            # Set full client name
            client_id = dict(int_id=master_client.id_['int_id'],
                             str_id=chr(slave_ord + 1))
        else:
            # If we couldn't find a client with the same connection file,
            # it means this is a new master client
            self.master_clients += 1
            # Set full client name
            client_id = dict(int_id=str(self.master_clients), str_id='A')
        # Creating the client
        client = ClientWidget(
            self,
            id_=client_id,
            given_name=given_name,
            config_options=self.config_options(),
            additional_options=self.additional_options(),
            handlers=self.registered_spyder_kernel_handlers,
            jupyter_api=jupyter_api,
            files_api=files_api,
            give_focus=give_focus,
            can_close=can_close,
        )
        # add hostname for get_name
        client.hostname = hostname
        # Adding a new tab for the client
        self.add_tab(client, name=client.get_name())
        # Set elapsed time, if possible
        if master_client is not None:
            client.t0 = master_client.t0
            client.timer.timeout.connect(client.show_time)
            client.timer.start(1000)
            client.timer.timeout.emit()
        if jupyter_api is not None:
            # This is a client created by the RemoteClient plugin. So, we only
            # create the client and show it as loading because the kernel
            # connection part will be done by that plugin.
            client._show_loading_page()
        else:
            try:
                # Get new client for kernel
                if master_client is not None:
                    kernel_handler = master_client.kernel_handler.copy()
                else:
                    kernel_handler = KernelHandler.from_connection_file(
                        connection_file, hostname, sshkey, password)
            except Exception as e:
                client.show_kernel_error(e)
                return
            # Connect kernel
            client.connect_kernel(kernel_handler)
        return client
    def create_pylab_client(self):
        """Force creation of Pylab client"""
        self.create_new_client(special="pylab", given_name="Pylab")
    def create_sympy_client(self):
        """Force creation of SymPy client"""
        self.create_new_client(special="sympy", given_name="SymPy")
    def create_cython_client(self):
        """Force creation of Cython client"""
        self.create_new_client(special="cython", given_name="Cython")
    def create_environment_client(
        self, environment, path_to_custom_interpreter
    ):
        """Create a client for a Python environment."""
        self.create_new_client(
            given_name=environment,
            path_to_custom_interpreter=path_to_custom_interpreter
        )
    @Slot(str)
    def create_client_from_path(self, path):
        """Create a client with its cwd pointing to path."""
        self.create_new_client(initial_cwd=path)
    def create_client_for_file(self, filename, is_cython=False):
        """Create a client to execute code related to a file."""
        special = None
        if is_cython:
            special = "cython"
        # Create client
        client = self.create_new_client(
            filename=filename, special=special
        )
        # Don't increase the count of master clients
        # (create_new_client incremented it, but a file-dedicated console
        # shouldn't count towards the numbering of regular ones).
        self.master_clients -= 1
        # Rename client tab with filename
        if client is not None:
            client.allow_rename = False
            tab_text = self.disambiguate_fname(filename)
            self.rename_client_tab(client, tab_text)
        return client
def get_client_for_file(self, filename):
"""Get client associated with a given file."""
client = None
for idx, cl in enumerate(self.clients):
if self.filenames[idx] == filename:
self.tabwidget.setCurrentIndex(idx)
client = cl
break
return client
    def register_client(self, client):
        """Register a new client by wiring its signals into this widget."""
        client.connect_shellwidget_signals()
        # Local vars
        shellwidget = client.shellwidget
        control = shellwidget._control
        # Create new clients with Ctrl+T shortcut
        shellwidget.sig_new_client.connect(self.create_new_client)
        # For tracebacks
        control.sig_go_to_error_requested.connect(self.go_to_error)
        # For help requests
        control.sig_help_requested.connect(self.sig_help_requested)
        # To handle %edit magic petitions
        shellwidget.custom_edit_requested.connect(self.edit_file)
        # Connect client to history log
        self.sig_history_requested.emit(client.history_filename)
        client.sig_append_to_history_requested.connect(
            self.sig_append_to_history_requested)
        # Set font for client
        client.set_font(self._font)
        # Set editor for the find widget
        self.find_widget.set_editor(control)
        # Connect to working directory
        shellwidget.sig_working_directory_changed.connect(
            self.on_working_directory_changed)
        # Connect client execution state to be reflected in the interface
        client.sig_execution_state_changed.connect(self.update_actions)
        # Show time label
        client.sig_time_label.connect(self.time_label.setText)
        # Force an immediate refresh of the elapsed-time label.
        client.timer.timeout.emit()
        # Exception handling
        shellwidget.sig_exception_occurred.connect(
            self.sig_exception_occurred)
        # Signals re-emitted so other plugins can track shellwidget lifecycle
        shellwidget.sig_shellwidget_deleted.connect(
            self.sig_shellwidget_deleted)
        shellwidget.sig_shellwidget_created.connect(
            self.sig_shellwidget_created)
        shellwidget.sig_shellwidget_errored.connect(
            self.sig_shellwidget_errored)
        shellwidget.sig_restart_kernel.connect(self.restart_kernel)
    def close_client(self, index=None, client=None, ask_recursive=True):
        """
        Close client tab from index or widget (or close current tab).

        Parameters
        ----------
        index : int, optional
            Tab index of the client to close.
        client : ClientWidget, optional
            Client to close; takes precedence over `index`.
        ask_recursive : bool, optional
            Whether to ask about closing other clients on the same kernel.
        """
        if not self.tabwidget.count():
            return
        if client is not None:
            # Client already closed
            if client not in self.clients:
                return
            # if index is not found in tabwidget it's because this client was
            # already closed and the call was performed by the exit callback
            index = self.tabwidget.indexOf(client)
            if index == -1:
                return
        if index is None and client is None:
            index = self.tabwidget.currentIndex()
        if index is not None:
            client = self.tabwidget.widget(index)
        if not client.can_close:
            return
        # Check if related clients or kernels are opened
        # and eventually ask before closing them
        if not self.mainwindow_close and ask_recursive:
            close_all = True
            if self.get_conf('ask_before_closing'):
                close = QMessageBox.question(
                    self,
                    self._plugin.get_name(),
                    _("Do you want to close this console?"),
                    QMessageBox.Yes | QMessageBox.No)
                if close == QMessageBox.No:
                    return
            if len(self.get_related_clients(client)) > 0:
                close_all = QMessageBox.question(
                    self,
                    self._plugin.get_name(),
                    _("Do you want to close all other consoles connected "
                      "to the same kernel as this one?"),
                    QMessageBox.Yes | QMessageBox.No)
                if close_all == QMessageBox.Yes:
                    self.close_related_clients(client)
        # Note: client index may have changed after closing related widgets
        self.tabwidget.removeTab(self.tabwidget.indexOf(client))
        self.clients.remove(client)
        # The kernel is shut down only when no other client still uses it.
        is_last_client = len(self.get_related_clients(client)) == 0
        client.close_client(is_last_client)
        # This is needed to prevent that hanged consoles make reference
        # to an index that doesn't exist. See spyder-ide/spyder#4881
        try:
            self.filenames.pop(index)
        except IndexError:
            pass
        self.update_tabs_text()
        # Create a new client if the console is about to become empty
        if not self.tabwidget.count() and self.create_new_client_if_empty:
            self.create_new_client()
    def close_all_clients(self):
        """
        Perform close actions for each running client.
        Returns
        -------
        bool
            If the closing action was successful.
        """
        # IMPORTANT: **Do not** change this way of closing clients, which uses
        # a copy of `self.clients`, because it preserves the main window layout
        # when Spyder is closed.
        # Fixes spyder-ide/spyder#19084
        open_clients = self.clients.copy()
        for client in self.clients:
            # A client is the "last" one for its kernel when no other
            # still-open client shares its connection file.
            is_last_client = (
                len(self.get_related_clients(client, open_clients)) == 0
            )
            client.close_client(is_last_client, close_console=True)
            open_clients.remove(client)
        # Wait for all KernelHandler threads to shutdown.
        KernelHandler.wait_all_shutdown_threads()
        # Close cached kernel
        self.close_cached_kernel()
        self.filenames = []
        return True
    def close_remote_clients(self, server_id):
        """Close all clients connected to a remote server."""
        # Same pattern as close_all_clients: work on a shrinking copy so the
        # "last client" check only considers clients still open.
        open_clients = self.clients.copy()
        for client in self.clients:
            if (
                client.is_remote()
                and client.jupyter_api.server_id == server_id
            ):
                # NOTE: get_related_clients returns [] for remote clients, so
                # is_last_client is always True here; kept for symmetry with
                # close_all_clients.
                is_last_client = (
                    len(self.get_related_clients(client, open_clients)) == 0
                )
                client.close_client(is_last_client)
                open_clients.remove(client)
def get_client_index_from_id(self, client_id):
"""Return client index from id"""
for index, client in enumerate(self.clients):
if id(client) == client_id:
return index
def get_related_clients(self, client, clients_list=None):
"""
Get all other clients that are connected to the same kernel as `client`
"""
# At the moment it's not possible to have two clients connected to the
# same remote kernel.
if client.is_remote():
return []
if clients_list is None:
clients_list = self.clients
related_clients = []
for cl in clients_list:
if (cl.connection_file == client.connection_file and
cl is not client):
related_clients.append(cl)
return related_clients
def close_related_clients(self, client):
"""Close all clients related to *client*, except itself"""
related_clients = self.get_related_clients(client)
for client in related_clients:
self.close_client(client=client, ask_recursive=False)
    def restart(self):
        """
        Restart the console
        This is needed when we switch projects to update PYTHONPATH
        and the selected interpreter
        """
        self.master_clients = 0
        self.create_new_client_if_empty = False
        # Iterate a fixed number of times instead of `while self.clients`:
        # close_client() can return early (e.g. when a client can't be
        # closed), so a while loop could spin forever.
        for i in range(len(self.clients)):
            client = self.clients[-1]
            self.close_client(client=client, ask_recursive=False)
        self.create_new_client(give_focus=False, cache=False)
        self.create_new_client_if_empty = True
    def current_client_undo(self):
        """Undo last edit in the current client, if any."""
        client = self.get_current_client()
        if client:
            client.shellwidget.undo()
    def current_client_redo(self):
        """Redo last undone edit in the current client, if any."""
        client = self.get_current_client()
        if client:
            client.shellwidget.redo()
    def current_client_cut(self):
        """Cut selection in the current client, if any."""
        client = self.get_current_client()
        if client:
            client.shellwidget.cut()
    def current_client_copy(self):
        """Copy selection in the current client, if any."""
        client = self.get_current_client()
        if client:
            client.shellwidget.copy()
    def _current_client_copy_raw(self):
        """Copy raw (unformatted) selection in the current client."""
        client = self.get_current_client()
        if client:
            client.shellwidget.copy_raw()
    def current_client_paste(self):
        """Paste clipboard contents into the current client, if any."""
        client = self.get_current_client()
        if client:
            client.shellwidget.paste()
    def current_client_select_all(self):
        """Select all text in the current client, if any."""
        client = self.get_current_client()
        if client:
            client.shellwidget.select_all_smart()
    def _current_client_inspect_object(self):
        """Request help on the object under the cursor in the current client."""
        client = self.get_current_client()
        if client:
            client.inspect_object()
    def _current_client_enter_array_inline(self):
        """Open the inline array builder in the current client."""
        client = self.get_current_client()
        if client:
            client.enter_array_inline()
    def _current_client_enter_array_table(self):
        """Open the table array builder in the current client."""
        client = self.get_current_client()
        if client:
            client.enter_array_table()
    def _current_client_export(self):
        """Export the current client's contents to HTML."""
        client = self.get_current_client()
        if client:
            client.shellwidget.export_html()
    def _current_client_print(self):
        """Print the current client's console contents."""
        client = self.get_current_client()
        if client:
            # This makes the print dialog have the same style as the rest of
            # the app.
            printer = SpyderPrinter(mode=QPrinter.HighResolution)
            if (QPrintDialog(printer, self).exec_() != QPrintDialog.Accepted):
                return
            client.shellwidget._control.print_(printer)
    def _current_client_clear_line(self):
        """Clear the current input line of the current client."""
        client = self.get_current_client()
        if client:
            client.clear_line()
    def _current_client_clear_console(self):
        """Clear the whole console of the current client."""
        client = self.get_current_client()
        if client:
            client.clear_console()
    def _current_client_copy_image(self):
        """Copy the image stored in the context-menu action to the clipboard."""
        client = self.get_current_client()
        if client:
            action = self.get_action(ClientContextMenuActions.CopyImage)
            client.shellwidget._copy_image(action.data())
    def _current_client_save_image(self):
        """Save the image stored in the context-menu action to a file."""
        client = self.get_current_client()
        if client:
            action = self.get_action(ClientContextMenuActions.SaveImage)
            client.shellwidget._save_image(action.data())
    def _current_client_copy_svg(self):
        """Copy the SVG stored in the context-menu action to the clipboard."""
        client = self.get_current_client()
        if client:
            action = self.get_action(ClientContextMenuActions.CopySvg)
            svg_to_clipboard(action.data())
    def _current_client_save_svg(self):
        """Save the SVG stored in the context-menu action to a file."""
        client = self.get_current_client()
        if client:
            action = self.get_action(ClientContextMenuActions.SaveSvg)
            save_svg(action.data(), client.shellwidget._control)
    def _current_client_quit(self):
        """Run the exit callback of the current client (close it)."""
        client = self.get_current_client()
        if client:
            client.exit_callback()
# ---- For kernels
# -------------------------------------------------------------------------
    def register_spyder_kernel_call_handler(self, handler_id, handler):
        """
        Register a callback for it to be available for newly created
        client kernels.
        Parameters
        ----------
        handler_id : str
            Handler name to be registered and that will be used to
            call the respective handler from the Spyder kernel.
        handler : func
            Callback function that will be called when the kernel request
            the handler_id identifier.
        Returns
        -------
        None.
        """
        # Only affects clients created after this call: the dict is passed to
        # ClientWidget at construction time (see create_new_client).
        self.registered_spyder_kernel_handlers[handler_id] = handler
    def unregister_spyder_kernel_call_handler(self, handler_id):
        """
        Unregister and remove a handler to not be added to newly created
        client kernels.
        Parameters
        ----------
        handler_id : str
            Handler name that was registered and will be removed from
            the Spyder kernel available handlers.
        Returns
        -------
        None.
        """
        # pop with default so unregistering an unknown id is a no-op.
        self.registered_spyder_kernel_handlers.pop(handler_id, None)
    @qdebounced(timeout=200)
    def restart_kernel(self, client=None, ask_before_restart=True):
        """Restart kernel of current client."""
        if client is None:
            client = self.get_current_client()
        if client is None:
            return
        # NOTE(review): assumes client.kernel_handler is not None here —
        # confirm for clients whose kernel never connected.
        km = client.kernel_handler.kernel_manager
        if km is None and not client.is_remote():
            client.shellwidget._append_plain_text(
                _('Cannot restart a kernel not started by Spyder\n'),
                before_prompt=True
            )
            return
        self.sig_switch_to_plugin_requested.emit()
        ask_before_restart = (
            ask_before_restart and self.get_conf('ask_before_restart'))
        do_restart = True
        if ask_before_restart and not running_under_pytest():
            message = MessageCheckBox(icon=QMessageBox.Question, parent=self)
            message.set_checkbox_text(_("Don't ask again."))
            message.set_checked(False)
            message.set_check_visible(True)
            message.setText(_('Are you sure you want to restart the kernel?'))
            message.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
            result = message.exec_()
            check = message.is_checked()
            if check:
                # Persist the "don't ask again" choice.
                self.set_conf('ask_before_restart', not check)
            do_restart = result == QMessageBox.Yes
        if not do_restart:
            return
        # For remote kernels we need to request the server for a restart
        if client.is_remote():
            client.restart_remote_kernel()
            return
        # Get new kernel
        try:
            kernel_handler = self.get_cached_kernel(km._kernel_spec)
        except Exception as e:
            client.show_kernel_error(e)
            return
        # Replace in all related clients; only the last replacement shuts the
        # old kernel down.
        for cl in self.get_related_clients(client):
            cl.replace_kernel(kernel_handler.copy(), shutdown_kernel=False)
        client.replace_kernel(kernel_handler, shutdown_kernel=True)
def reset_namespace(self):
"""Reset namespace of current client."""
client = self.get_current_client()
if client is not None:
self.sig_switch_to_plugin_requested.emit()
client.reset_namespace()
def interrupt_kernel(self):
"""Interrupt kernel of current client."""
client = self.get_current_client()
if client is not None:
self.sig_switch_to_plugin_requested.emit()
client.stop_button_click_handler()
# ---- For cells
    def run_cell(self, code, cell_name, filename, method='runcell'):
        """Run cell in current or dedicated client."""
        def norm(text):
            # Normalize backslashes so the path survives inside a magic line.
            return remove_backslashes(str(text))
        self.run_cell_filename = filename
        # Select client to execute code on it
        client = self.get_client_for_file(filename)
        if client is None:
            client = self.get_current_client()
        if client is not None:
            is_spyder_kernel = client.shellwidget.is_spyder_kernel
            if is_spyder_kernel:
                # Build e.g. "%runcell -n <name> <file>"; "-i" is used for
                # unnamed cells identified by their index.
                magic_arguments = []
                if isinstance(cell_name, int):
                    magic_arguments.append("-i")
                else:
                    magic_arguments.append("-n")
                magic_arguments.append(str(cell_name))
                magic_arguments.append(norm(filename))
                line = "%" + method + " " + shlex.join(magic_arguments)
            elif method == 'runcell':
                # Use copy of cell
                line = code.strip()
            elif method == 'debugcell':
                line = "%%debug\n" + code.strip()
            else:
                # Can not use custom function on non-spyder kernels
                client.shellwidget.append_html_message(
                    _("The console is not running a Spyder-kernel, so it "
                      "can't execute <b>{}</b>.<br><br>"
                      "Please use a Spyder-kernel for this.").format(method),
                    before_prompt=True
                )
                return
            try:
                self.execute_code(line)
            except AttributeError:
                pass
        else:
            # XXX: not sure it can really happen
            QMessageBox.warning(
                self,
                _('Warning'),
                _("No IPython console is currently available "
                  "to run <b>{}</b>.<br><br>Please open a new "
                  "one and try again.").format(osp.basename(filename)),
                QMessageBox.Ok
            )
# ---- For scripts
    def run_script(self, filename, wdir, args, post_mortem, current_client,
                   clear_variables, console_namespace, method=None):
        """Run script in current or dedicated client."""
        # Run Cython files in a dedicated console
        is_cython = osp.splitext(filename)[1] == '.pyx'
        if is_cython:
            current_client = False
        # Select client to execute code on it
        if current_client:
            client = self.get_current_client()
        else:
            client = self.get_client_for_file(filename)
            if client is None:
                # Create new client before running script
                client = self.create_client_for_file(
                    filename, is_cython=is_cython
                )
        if client is None:
            QMessageBox.warning(
                self,
                _('Warning'),
                _("No IPython console is currently available to run <b>%s</b>."
                  "<br><br>Please open a new one and try again."
                  ) % osp.basename(filename),
                QMessageBox.Ok
            )
            return
        def _run():
            # Freeze parameters for use in signal connect
            self._run_script(
                filename,
                wdir,
                args,
                post_mortem,
                clear_variables,
                console_namespace,
                method,
                client,
                current_client
            )
        # Defer the run until the kernel is ready to accept code.
        if client.shellwidget.spyder_kernel_ready:
            _run()
        else:
            client.shellwidget.sig_prompt_ready.connect(_run)
# ---- For working directory and path management
    def save_working_directory(self, dirname):
        """
        Save current working directory when changed by the Working Directory
        plugin.
        """
        self._current_working_directory = dirname
    def get_working_directory(self):
        """Get saved value of current working directory."""
        return self._current_working_directory
    def set_current_client_working_directory(self, directory, server_id=None):
        """Set current client working directory."""
        # `server_id` is unused here; presumably kept to match the caller's
        # signal signature — confirm before removing.
        shellwidget = self.get_current_shellwidget()
        if shellwidget is not None:
            shellwidget.set_cwd(directory)
    def on_working_directory_changed(self, dirname, server_id):
        """
        Notify that the working directory was changed in the current console
        to other plugins.
        """
        logger.debug(f"Changing working directory: {server_id} - {dirname}")
        if dirname:
            self.sig_current_directory_changed.emit(dirname, server_id)
    def update_path(self, new_path, prioritize):
        """Update path on consoles."""
        logger.debug("Update sys.path in all console clients")
        for client in self.clients:
            shell = client.shellwidget
            if shell is not None:
                shell.update_syspath(new_path, prioritize)
    def get_active_project_path(self):
        """Get the active project path."""
        return self.active_project_path
    def update_active_project_path(self, active_project_path):
        """
        Update the active project path attribute used to set the current
        working directory on the shells in case a project is active
        Parameters
        ----------
        active_project_path : str
            Root path of the active project if any.
        Returns
        -------
        None.
        """
        self.active_project_path = active_project_path
# ---- For execution
    def execute_code(self, lines, current_client=True, clear_variables=False,
                     shellwidget=None):
        """
        Execute code instructions.

        With `current_client=False`, `shellwidget` must be the dedicated
        shellwidget to run the code on.
        """
        if current_client:
            sw = self.get_current_shellwidget()
        else:
            sw = shellwidget
        if sw is not None:
            if not current_client:
                # Clear console and reset namespace for
                # dedicated clients.
                # See spyder-ide/spyder#5748.
                try:
                    sw.sig_prompt_ready.disconnect()
                except TypeError:
                    # Qt raises TypeError when there is nothing connected.
                    pass
                if clear_variables:
                    sw.reset_namespace(warning=False)
            # Needed to handle an error when kernel_client is none.
            # See spyder-ide/spyder#6308.
            try:
                sw.execute(str(lines))
            except AttributeError:
                pass
# ---- For error handling
    def go_to_error(self, text):
        """Go to error if relevant"""
        match = get_error_match(str(text))
        if match:
            fname, lnb = match.groups()
            if (
                "<ipython-input-" in fname
                and self.run_cell_filename is not None
            ):
                # Tracebacks from cell execution reference a pseudo file;
                # redirect to the real file that was run (set by run_cell).
                fname = self.run_cell_filename
            # For IPython 8+ tracebacks.
            # Fixes spyder-ide/spyder#20407
            if '~' in fname:
                fname = osp.expanduser(fname)
            # This is needed to fix issue spyder-ide/spyder#9217.
            try:
                self.sig_edit_goto_requested.emit(
                    osp.abspath(fname), int(lnb), '')
            except ValueError:
                pass
# ---- For documentation and help using the Help plugin
# ------------------------------------------------------------------------
    @Slot()
    def show_intro(self):
        """Show intro to IPython help"""
        # Imported here, presumably to defer the cost until requested.
        from IPython.core.usage import interactive_usage
        self.sig_render_rich_text_requested.emit(interactive_usage, False)
    @Slot()
    def show_guiref(self):
        """Show qtconsole help"""
        from qtconsole.usage import gui_reference
        self.sig_render_rich_text_requested.emit(gui_reference, True)
    @Slot()
    def show_quickref(self):
        """Show IPython Cheat Sheet"""
        from IPython.core.usage import quick_reference
        self.sig_render_plain_text_requested.emit(quick_reference)
# ---- For remote kernels
# -------------------------------------------------------------------------
    def create_ipyclient_for_server(self, server_id, kernel_spec=None):
        """Create a client connected to a kernel on a remote server."""
        jupyter_api = self._plugin._remote_client.get_jupyter_api(server_id)
        files_api = self._plugin._remote_client.get_file_api(server_id)()
        client = self.create_client_for_kernel(
            # The connection file will be supplied when connecting a remote
            # kernel to this client
            connection_file="",
            # We use the server name as hostname because for clients it's the
            # attribute used by the IPython console to set their tab name.
            hostname=jupyter_api.server_name,
            # These values are not necessary for the new remote development
            # architecture.
            sshkey=None,
            password=None,
            # We save the jupyter_api in the client to perform on it operations
            # related to this plugin.
            jupyter_api=jupyter_api,
            # We save the files_api in the client to get the remote machine
            # home directory.
            files_api=files_api,
            # This is necessary because it takes a while before getting a
            # response from the server with the kernel id that will be
            # associated to this client. So, if users could close it before
            # that then it'll not be possible to shutdown that kernel unless
            # the server is stopped as well.
            can_close=False,
        )
        client.start_remote_kernel(kernel_spec)
    def setup_remote_consoles_submenu(self, render=True):
        """Create the remote consoles submenu in the Consoles app one."""
        if self._remote_consoles_menu is None:
            self._remote_consoles_menu = self.create_menu(
                RemoteConsolesMenus.RemoteConsoles,
                _("New console in remote server")
            )
        # Rebuild the menu from scratch on every call.
        self._remote_consoles_menu.clear_actions()
        self.add_item_to_menu(
            self.get_action(
                RemoteClientActions.ManageConnections,
                self._plugin._remote_client.CONTEXT_NAME,
                self._plugin._remote_client.PLUGIN_NAME
            ),
            menu=self._remote_consoles_menu,
            section=RemoteConsolesMenuSections.ManagerSection,
        )
        # One "New console in <server>" action per configured server.
        for config_id in self._plugin._remote_client.get_config_ids():
            name = self._plugin._remote_client.get_server_name(config_id)
            action = self.create_action(
                name=config_id,
                text=f"New console in {name} server",
                icon=self.create_icon("ipython_console"),
                triggered=functools.partial(
                    self.create_ipyclient_for_server,
                    config_id,
                ),
                overwrite=True,
            )
            self.add_item_to_menu(
                action,
                menu=self._remote_consoles_menu,
                section=RemoteConsolesMenuSections.ConsolesSection,
            )
        self.add_item_to_menu(
            self._remote_consoles_menu,
            self.get_menu(IPythonConsoleWidgetMenus.TabsContextMenu),
            section=IPythonConsoleWidgetTabsContextMenuSections.Consoles,
            before=IPythonConsoleWidgetActions.ConnectToKernel,
        )
        # This is necessary to reposition the menu correctly when rebuilt
        if render:
            self._remote_consoles_menu.render()
    def setup_server_consoles_submenu(self, config_id: str):
        """Add remote kernel specs to the remote consoles submenu."""
        if self._remote_consoles_menu is None:
            self._remote_consoles_menu = self.create_menu(
                RemoteConsolesMenus.RemoteConsoles,
                _("New console in remote server")
            )
        # Drop stale kernel-spec actions for this server, i.e. ids of the
        # form "<config_id>_<spec_name>". The server's own default action
        # (id == config_id) is kept.
        for action in self._remote_consoles_menu.get_actions():
            action_id = getattr(action, "action_id", None)
            if (
                action_id is None
                or action_id == config_id
                or not action_id.startswith(config_id)
            ):
                continue
            self._remote_consoles_menu.remove_action(action_id)
        server_name = self._plugin._remote_client.get_server_name(config_id)
        # Fetch the specs asynchronously; the callback re-adds the actions.
        self.__get_remote_kernel_specs(config_id).connect(
            self.__add_kernels_specs_callback(config_id, server_name),
        )
def clear_server_consoles_submenu(self, config_id: str):
"""Clear the remote consoles submenu."""
if self._remote_consoles_menu is None:
return
for action in self._remote_consoles_menu.get_actions():
action_id = getattr(action, "action_id", None)
if (
action_id is None
or action_id == config_id
or not action_id.startswith(config_id)
):
continue
self._remote_consoles_menu.remove_action(action.action_id)
    @AsyncDispatcher(loop="ipythonconsole")
    async def __get_remote_kernel_specs(self, config_id: str):
        """
        Get kernel specs from remote Jupyter API.

        Returns a (kernel_specs, default_kernel_spec_name) tuple; the
        dispatcher wraps this coroutine into a connectable future.
        """
        async with self._plugin._remote_client.get_jupyter_api(
            config_id
        ) as jupyter_api:
            return (
                await jupyter_api.list_kernel_specs(),
                jupyter_api.manager.options.get("default_kernel_spec")
            )
    def __add_kernels_specs_callback(self, config_id: str, server_name: str):
        """
        Callback to add remote kernel specs.

        Returns a Qt slot that receives the future produced by
        __get_remote_kernel_specs and adds one menu action per spec.
        """
        @AsyncDispatcher.QtSlot
        def callback(future):
            try:
                result = future.result()
                # result is (kernel_specs, default_spec_name); skip when the
                # specs dict is empty/falsy.
                if result[0]:
                    self._add_remote_kernel_spec_action(
                        config_id, server_name, *result,
                    )
            except Exception:
                # Network/server failures shouldn't break menu setup.
                logger.exception("Failed to get remote kernel specs")
        return callback
    def _add_remote_kernel_spec_action(
        self,
        config_id: str,
        server_name: str,
        kernel_specs: dict,
        default_spec_name: str | None = None,
    ):
        """Add remote kernel spec actions to the remote consoles submenu."""
        default_spec_name = default_spec_name or kernel_specs['default']
        for spec_name, spec_info in kernel_specs['kernelspecs'].items():
            if spec_name == default_spec_name:
                # Skip the default kernel spec, as it is already handled by the
                # default action in the remote consoles menu.
                continue
            # Create an action for each kernel spec
            spec_display_name = (
                spec_info["spec"].get("display_name")
                or spec_info["name"]
            )
            # Action id "<config_id>_<spec_name>" is the pattern
            # setup/clear_server_consoles_submenu use to find these later.
            action = self.create_action(
                name=f"{config_id}_{spec_name}",
                text=f"{spec_display_name} ({server_name})",
                tip=(f"New console with {spec_display_name}"
                     f" at {server_name} server"),
                icon=self.create_icon("ipython_console"),
                triggered=functools.partial(
                    self.create_ipyclient_for_server,
                    config_id,
                    spec_name,
                ),
                overwrite=True,
            )
            self.add_item_to_menu(
                action,
                menu=self._remote_consoles_menu,
                section=RemoteConsolesMenuSections.ConsolesSection,
            )
        self._remote_consoles_menu.render()
| IPythonConsoleWidget |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 56132,
"end": 57601
} | class ____(Module):
r"""Applies the Softmin function to an n-dimensional input Tensor.
Rescales them so that the elements of the n-dimensional output Tensor
lie in the range `[0, 1]` and sum to 1.
Softmin is defined as:
.. math::
\text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)}
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Args:
dim (int): A dimension along which Softmin will be computed (so every slice
along dim will sum to 1).
Returns:
a Tensor of the same dimension and shape as the input, with
values in the range [0, 1]
Examples::
>>> m = nn.Softmin(dim=1)
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ["dim"]
dim: Optional[int]
def __init__(self, dim: Optional[int] = None) -> None:
super().__init__()
self.dim = dim
def __setstate__(self, state):
super().__setstate__(state)
if not hasattr(self, "dim"):
self.dim = None
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.softmin(input, self.dim, _stacklevel=5)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
return f"dim={self.dim}"
| Softmin |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/basic.py | {
"start": 1851,
"end": 2781
} | class ____(MetaflowCardComponent):
"""
The `DefaultCard` and the `BlankCard` use a JS framework that build the HTML dynamically from JSON.
The `DefaultComponent` is the base component that helps build the JSON when `render` is called.
The underlying JS framework consists of various types of objects.
These can be found in: "metaflow/plugins/cards/ui/types.ts".
The `type` attribute in a `DefaultComponent` corresponds to the type of component in the Javascript framework.
"""
type = None
def __init__(self, title=None, subtitle=None):
self._title = title
self._subtitle = subtitle
def render(self):
datadict = dict(
type=self.type,
)
if self._title is not None:
datadict["title"] = self._title
if self._subtitle is not None:
datadict["subtitle"] = self._subtitle
return datadict
| DefaultComponent |
python | python__mypy | mypy/nodes.py | {
"start": 85384,
"end": 85808
} | class ____(Expression):
"""Set comprehension (e.g. {x + 1 for x in a})"""
__slots__ = ("generator",)
__match_args__ = ("generator",)
generator: GeneratorExpr
def __init__(self, generator: GeneratorExpr) -> None:
super().__init__()
self.generator = generator
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_set_comprehension(self)
| SetComprehension |
python | run-llama__llama_index | llama-index-core/llama_index/core/memory/memory_blocks/fact.py | {
"start": 2312,
"end": 6280
} | class ____(BaseMemoryBlock[str]):
"""
A memory block that extracts key facts from conversation history using an LLM.
This block identifies and stores discrete facts disclosed during the conversation,
structuring them in XML format for easy parsing and retrieval.
"""
name: str = Field(
default="ExtractedFacts", description="The name of the memory block."
)
llm: LLM = Field(
default_factory=get_default_llm,
description="The LLM to use for fact extraction.",
)
facts: List[str] = Field(
default_factory=list,
description="List of extracted facts from the conversation.",
)
max_facts: int = Field(
default=50, description="The maximum number of facts to store."
)
fact_extraction_prompt_template: BasePromptTemplate = Field(
default=DEFAULT_FACT_EXTRACT_PROMPT,
description="Template for the fact extraction prompt.",
)
fact_condense_prompt_template: BasePromptTemplate = Field(
default=DEFAULT_FACT_CONDENSE_PROMPT,
description="Template for the fact condense prompt.",
)
@field_validator("fact_extraction_prompt_template", mode="before")
@classmethod
def validate_fact_extraction_prompt_template(
cls, v: Union[str, BasePromptTemplate]
) -> BasePromptTemplate:
if isinstance(v, str):
if "{{" in v and "}}" in v:
v = RichPromptTemplate(v)
else:
v = PromptTemplate(v)
return v
async def _aget(
self, messages: Optional[List[ChatMessage]] = None, **block_kwargs: Any
) -> str:
"""Return the current facts as formatted text."""
if not self.facts:
return ""
return "\n".join([f"<fact>{fact}</fact>" for fact in self.facts])
async def _aput(self, messages: List[ChatMessage]) -> None:
"""Extract facts from new messages and add them to the facts list."""
# Skip if no messages
if not messages:
return
# Format existing facts for the prompt
existing_facts_text = ""
if self.facts:
existing_facts_text = "\n".join(
[f"<fact>{fact}</fact>" for fact in self.facts]
)
# Create the prompt
prompt_messages = self.fact_extraction_prompt_template.format_messages(
existing_facts=existing_facts_text,
)
# Get the facts extraction
response = await self.llm.achat(messages=[*messages, *prompt_messages])
# Parse the XML response to extract facts
facts_text = response.message.content or ""
new_facts = self._parse_facts_xml(facts_text)
# Add new facts to the list, avoiding exact-match duplicates
for fact in new_facts:
if fact not in self.facts:
self.facts.append(fact)
# Condense the facts if they exceed the max_facts
if len(self.facts) > self.max_facts:
existing_facts_text = "\n".join(
[f"<fact>{fact}</fact>" for fact in self.facts]
)
prompt_messages = self.fact_condense_prompt_template.format_messages(
existing_facts=existing_facts_text,
max_facts=self.max_facts,
)
response = await self.llm.achat(messages=[*messages, *prompt_messages])
new_facts = self._parse_facts_xml(response.message.content or "")
self.facts = new_facts
def _parse_facts_xml(self, xml_text: str) -> List[str]:
"""Parse facts from XML format."""
facts = []
# Extract content between <fact> tags
pattern = r"<fact>(.*?)</fact>"
matches = re.findall(pattern, xml_text, re.DOTALL)
# Clean up extracted facts
for match in matches:
fact = match.strip()
if fact:
facts.append(fact)
return facts
| FactExtractionMemoryBlock |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 88239,
"end": 91089
} | class ____(system_info):
""" Usage of libflame for LAPACK operations
This requires libflame to be compiled with lapack wrappers:
./configure --enable-lapack2flame ...
Be aware that libflame 5.1.0 has some missing names in the shared library, so
if you have problems, try the static flame library.
"""
section = 'flame'
_lib_names = ['flame']
notfounderror = FlameNotFoundError
def check_embedded_lapack(self, info):
""" libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
s = textwrap.dedent("""\
void zungqr_();
int main(int argc, const char *argv[])
{
zungqr_();
return 0;
}""")
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
# Add the additional "extra" arguments
extra_args = info.get('extra_link_args', [])
try:
with open(src, 'w') as f:
f.write(s)
obj = c.compile([src], output_dir=tmpdir)
try:
c.link_executable(obj, out, libraries=info['libraries'],
library_dirs=info['library_dirs'],
extra_postargs=extra_args)
return True
except distutils.ccompiler.LinkError:
return False
finally:
shutil.rmtree(tmpdir)
def calc_info(self):
lib_dirs = self.get_lib_dirs()
flame_libs = self.get_libs('libraries', self._lib_names)
info = self.check_libs2(lib_dirs, flame_libs, [])
if info is None:
return
# Add the extra flag args to info
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
if self.check_embedded_lapack(info):
# check if the user has supplied all information required
self.set_info(**info)
else:
# Try and get the BLAS lib to see if we can get it to work
blas_info = get_info('blas_opt')
if not blas_info:
# since we already failed once, this ain't going to work either
return
# Now we need to merge the two dictionaries
for key in blas_info:
if isinstance(blas_info[key], list):
info[key] = info.get(key, []) + blas_info[key]
elif isinstance(blas_info[key], tuple):
info[key] = info.get(key, ()) + blas_info[key]
else:
info[key] = info.get(key, '') + blas_info[key]
# Now check again
if self.check_embedded_lapack(info):
self.set_info(**info)
| flame_info |
python | astropy__astropy | astropy/io/ascii/core.py | {
"start": 18004,
"end": 25539
} | class ____:
"""
Base table header reader.
"""
auto_format = "col{}"
""" format string for auto-generating column names """
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" regular expression for comment lines """
splitter_class: ClassVar[type[BaseSplitter]] = DefaultSplitter
""" Splitter class for splitting data lines into columns """
names = None
""" list of names corresponding to each data column """
write_comment = False
write_spacer_lines = ["ASCII_TABLE_WRITE_SPACER_LINE"]
def __init__(self):
self.splitter = self.splitter_class()
def _set_cols_from_names(self):
self.cols = [Column(name=x) for x in self.names]
def update_meta(self, lines, meta):
"""
Extract any table-level metadata, e.g. keywords, comments, column metadata, from
the table ``lines`` and update the OrderedDict ``meta`` in place. This base
method extracts comment lines and stores them in ``meta`` for output.
"""
if self.comment:
re_comment = re.compile(self.comment)
comment_lines = [x for x in lines if re_comment.match(x)]
else:
comment_lines = []
comment_lines = [
re.sub("^" + self.comment, "", x).strip() for x in comment_lines
]
if comment_lines:
meta.setdefault("table", {})["comments"] = comment_lines
def get_cols(self, lines):
"""Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
start_line = _get_line_index(self.start_line, self.process_lines(lines))
if start_line is None:
# No header line so auto-generate names from n_data_cols
# Get the data values from the first line of table data to determine n_data_cols
try:
first_data_vals = next(self.data.get_str_vals())
except StopIteration:
raise InconsistentTableError(
"No data lines found so cannot autogenerate column names"
)
n_data_cols = len(first_data_vals)
self.names = [self.auto_format.format(i) for i in range(1, n_data_cols + 1)]
else:
for i, line in enumerate(self.process_lines(lines)):
if i == start_line:
break
else: # No header line matching
raise ValueError("No header line found in table")
self.names = next(self.splitter([line]))
self._set_cols_from_names()
def process_lines(self, lines):
"""Generator to yield non-blank and non-comment lines."""
re_comment = re.compile(self.comment) if self.comment else None
# Yield non-comment lines
for line in lines:
if line.strip() and (not self.comment or not re_comment.match(line)):
yield line
def write_comments(self, lines, meta):
if self.write_comment not in (False, None):
for comment in meta.get("comments", []):
lines.append(self.write_comment + comment)
def write(self, lines: list[str]) -> None:
if self.start_line is not None:
for i, spacer_line in zip(
range(self.start_line), itertools.cycle(self.write_spacer_lines)
):
lines.append(spacer_line)
lines.append(self.splitter.join([x.info.name for x in self.cols]))
@property
def colnames(self) -> tuple[str, ...]:
"""Return the column names of the table."""
return tuple(
col.name if isinstance(col, Column) else col.info.name for col in self.cols
)
def remove_columns(self, names: list[str]) -> None:
"""
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
"""
colnames = self.colnames
for name in names:
if name not in colnames:
raise KeyError(f"Column {name} does not exist")
self.cols = [col for col in self.cols if col.name not in names]
def rename_column(self, name: str, new_name: str) -> None:
"""
Rename a column.
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
"""
try:
idx = self.colnames.index(name)
except ValueError:
raise KeyError(f"Column {name} does not exist")
col = self.cols[idx]
# For writing self.cols can contain cols that are not Column. Raise
# exception in that case.
if isinstance(col, Column):
col.name = new_name
else:
raise TypeError(f"got column type {type(col)} instead of required {Column}")
def get_type_map_key(self, col):
return col.raw_type
def get_col_type(self, col):
try:
type_map_key = self.get_type_map_key(col)
return self.col_type_map[type_map_key.lower()]
except KeyError:
raise ValueError(
f'Unknown data type ""{col.raw_type}"" for column "{col.name}"'
)
def check_column_names(
self, names: list[str], strict_names: bool, guessing: bool
) -> None:
"""
Check column names.
This must be done before applying the names transformation
so that guessing will fail appropriately if ``names`` is supplied.
For instance if the basic reader is given a table with no column header
row.
Parameters
----------
names : list
User-supplied list of column names
strict_names : bool
Whether to impose extra requirements on names
guessing : bool
True if this method is being called while guessing the table format
"""
if strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in self.colnames:
if (
_is_number(name)
or len(name) == 0
or name[0] in bads
or name[-1] in bads
):
raise InconsistentTableError(
f"Column name {name!r} does not meet strict name requirements"
)
# When guessing require at least two columns, except for ECSV which can
# reliably be guessed from the header requirements.
if (
guessing
and len(self.colnames) <= 1
and self.__class__.__name__ != "EcsvHeader"
):
raise ValueError(
"Table format guessing requires at least two columns, "
f"got {list(self.colnames)}"
)
if names is not None and len(names) != len(self.colnames):
raise InconsistentTableError(
f"Length of names argument ({len(names)}) does not match number "
f"of table columns ({len(self.colnames)})"
)
| BaseHeader |
python | pennersr__django-allauth | allauth/socialaccount/providers/feishu/views.py | {
"start": 410,
"end": 2373
} | class ____(OAuth2Adapter):
provider_id = "feishu"
authorization_url = "https://open.feishu.cn/open-apis/authen/v1/index"
access_token_url = (
"https://open.feishu.cn/open-apis/authen/v1/access_token" # nosec
)
app_access_token_url = (
"https://open.feishu.cn/open-apis/auth/v3/app_access_token/internal/" # nosec
)
user_info_url = "https://open.feishu.cn/open-apis/authen/v1/user_info"
@property
def authorize_url(self):
settings = self.get_provider().get_settings()
url = settings.get("AUTHORIZE_URL", self.authorization_url)
return url
def complete_login(self, request, app, token, **kwargs):
resp = (
get_adapter()
.get_requests_session()
.get(
self.user_info_url,
headers={
"Content-Type": "application/json",
"Authorization": "Bearer " + token.token,
},
)
)
resp.raise_for_status()
extra_data = resp.json()
if extra_data["code"] != 0:
raise OAuth2Error("Error retrieving code: %s" % resp.content)
extra_data = extra_data["data"]
return self.get_provider().sociallogin_from_response(request, extra_data)
def get_client(self, request, app):
callback_url = reverse(self.provider_id + "_callback")
protocol = self.redirect_uri_protocol or app_settings.DEFAULT_HTTP_PROTOCOL
callback_url = build_absolute_uri(request, callback_url, protocol=protocol)
client = FeishuOAuth2Client(
request,
app.client_id,
app.secret,
self.access_token_method,
self.access_token_url,
callback_url,
)
return client
oauth2_login = OAuth2LoginView.adapter_view(FeishuOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(FeishuOAuth2Adapter)
| FeishuOAuth2Adapter |
python | mkdocs__mkdocs | mkdocs/tests/config/config_options_legacy_tests.py | {
"start": 19813,
"end": 22036
} | class ____(TestCase):
def test_int_type(self):
class Schema:
option = c.ListOfItems(c.Type(int))
conf = self.get_config(Schema, {'option': [1, 2, 3]})
self.assertEqual(conf['option'], [1, 2, 3])
with self.expect_error(
option="Expected type: <class 'int'> but received: <class 'NoneType'>"
):
conf = self.get_config(Schema, {'option': [1, None, 3]})
def test_combined_float_type(self):
class Schema:
option = c.ListOfItems(c.Type((int, float)))
conf = self.get_config(Schema, {'option': [1.4, 2, 3]})
self.assertEqual(conf['option'], [1.4, 2, 3])
with self.expect_error(
option="Expected type: (<class 'int'>, <class 'float'>) but received: <class 'str'>"
):
self.get_config(Schema, {'option': ['a']})
def test_list_default(self):
class Schema:
option = c.ListOfItems(c.Type(int), default=[])
conf = self.get_config(Schema, {})
self.assertEqual(conf['option'], [])
conf = self.get_config(Schema, {'option': None})
self.assertEqual(conf['option'], [])
def test_none_without_default(self):
class Schema:
option = c.ListOfItems(c.Type(str))
with self.expect_error(option="Required configuration not provided."):
conf = self.get_config(Schema, {})
with self.expect_error(option="Required configuration not provided."):
conf = self.get_config(Schema, {'option': None})
conf = self.get_config(Schema, {'option': ['foo']})
self.assertEqual(conf['option'], ['foo'])
def test_string_not_a_list_of_strings(self):
class Schema:
option = c.ListOfItems(c.Type(str))
with self.expect_error(option="Expected a list of items, but a <class 'str'> was given."):
self.get_config(Schema, {'option': 'foo'})
def test_post_validation_error(self):
class Schema:
option = c.ListOfItems(c.IpAddress())
with self.expect_error(option="'asdf' is not a valid port"):
self.get_config(Schema, {'option': ["localhost:8000", "1.2.3.4:asdf"]})
| ListOfItemsTest |
python | numpy__numpy | numpy/_core/tests/test_deprecations.py | {
"start": 10444,
"end": 11450
} | class ____(_DeprecationTestCase):
# Deprecated in Numpy 1.26.0, 2023-09
def test_lib_functions_deprecation_call(self):
from numpy import row_stack
from numpy._core.numerictypes import maximum_sctype
from numpy.lib._npyio_impl import recfromcsv, recfromtxt
from numpy.lib._shape_base_impl import get_array_wrap
from numpy.lib._utils_impl import safe_eval
from numpy.lib.tests.test_io import TextIO
self.assert_deprecated(lambda: safe_eval("None"))
data_gen = lambda: TextIO('A,B\n0,1\n2,3')
kwargs = {'delimiter': ",", 'missing_values': "N/A", 'names': True}
self.assert_deprecated(lambda: recfromcsv(data_gen()))
self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs))
self.assert_deprecated(get_array_wrap)
self.assert_deprecated(lambda: maximum_sctype(int))
self.assert_deprecated(lambda: row_stack([[]]))
self.assert_deprecated(lambda: np.chararray)
| TestLibImports |
python | spyder-ide__spyder | spyder/app/tests/spyder-boilerplate/spyder_boilerplate/spyder/plugin.py | {
"start": 948,
"end": 1019
} | class ____:
ExampleAction = "example_action"
| SpyderBoilerplateActions |
python | pytorch__pytorch | test/mobile/model_test/nn_ops.py | {
"start": 4077,
"end": 5267
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.input1d = torch.randn(1, 4, 50)
self.module1d = nn.ModuleList(
[
nn.BatchNorm1d(4),
nn.InstanceNorm1d(4),
]
)
self.input2d = torch.randn(1, 4, 30, 10)
self.module2d = nn.ModuleList(
[
nn.BatchNorm2d(4),
nn.GroupNorm(4, 4),
nn.InstanceNorm2d(4),
nn.LayerNorm([4, 30, 10]),
nn.LocalResponseNorm(2),
]
)
self.input3d = torch.randn(1, 4, 10, 4, 4)
self.module3d = nn.ModuleList(
[
nn.BatchNorm3d(4),
nn.InstanceNorm3d(4),
nn.ChannelShuffle(2),
]
)
def forward(self):
return len(
(
[module(self.input1d) for i, module in enumerate(self.module1d)],
[module(self.input2d) for i, module in enumerate(self.module2d)],
[module(self.input3d) for i, module in enumerate(self.module3d)],
)
)
| NNNormalizationModule |
python | mkdocstrings__mkdocstrings | src/mkdocstrings/_internal/extension.py | {
"start": 1567,
"end": 11019
} | class ____(BlockProcessor):
"""Our "autodoc" Markdown block processor.
It has a [`test` method][mkdocstrings.AutoDocProcessor.test] that tells if a block matches a criterion,
and a [`run` method][mkdocstrings.AutoDocProcessor.run] that processes it.
It also has utility methods allowing to get handlers and their configuration easily, useful when processing
a matched block.
"""
regex = re.compile(r"^(?P<heading>#{1,6} *|)::: ?(?P<name>.+?) *$", flags=re.MULTILINE)
"""The regular expression to match our autodoc instructions."""
def __init__(
self,
md: Markdown,
*,
handlers: Handlers,
autorefs: AutorefsPlugin,
) -> None:
"""Initialize the object.
Arguments:
md: A `markdown.Markdown` instance.
handlers: The handlers container.
autorefs: The autorefs plugin instance.
"""
super().__init__(parser=md.parser)
self.md = md
"""The Markdown instance."""
self._handlers = handlers
self._autorefs = autorefs
self._updated_envs: set = set()
def test(self, parent: Element, block: str) -> bool: # noqa: ARG002
"""Match our autodoc instructions.
Arguments:
parent: The parent element in the XML tree.
block: The block to be tested.
Returns:
Whether this block should be processed or not.
"""
return bool(self.regex.search(block))
def run(self, parent: Element, blocks: MutableSequence[str]) -> None:
"""Run code on the matched blocks.
The identifier and configuration lines are retrieved from a matched block
and used to collect and render an object.
Arguments:
parent: The parent element in the XML tree.
blocks: The rest of the blocks to be processed.
"""
block = blocks.pop(0)
match = self.regex.search(block)
if match:
if match.start() > 0:
self.parser.parseBlocks(parent, [block[: match.start()]])
# removes the first line
block = block[match.end() :]
block, the_rest = self.detab(block)
if not block and blocks and blocks[0].startswith((" handler:", " options:")):
# YAML options were separated from the `:::` line by a blank line.
block = blocks.pop(0)
if match:
identifier = match["name"]
heading_level = match["heading"].count("#")
_logger.debug("Matched '::: %s'", identifier)
html, handler, _ = self._process_block(identifier, block, heading_level)
el = Element("div", {"class": "mkdocstrings"})
# The final HTML is inserted as opaque to subsequent processing, and only revealed at the end.
el.text = self.md.htmlStash.store(html)
if handler.outer_layer:
self._process_headings(handler, el)
parent.append(el)
if the_rest:
# This block contained unindented line(s) after the first indented
# line. Insert these lines as the first block of the master blocks
# list for future processing.
blocks.insert(0, the_rest)
def _process_block(
self,
identifier: str,
yaml_block: str,
heading_level: int = 0,
) -> tuple[str, BaseHandler, CollectorItem]:
"""Process an autodoc block.
Arguments:
identifier: The identifier of the object to collect and render.
yaml_block: The YAML configuration.
heading_level: Suggested level of the heading to insert (0 to ignore).
Raises:
PluginError: When something wrong happened during collection.
TemplateNotFound: When a template used for rendering could not be found.
Returns:
Rendered HTML, the handler that was used, and the collected item.
"""
local_config = yaml.safe_load(yaml_block) or {}
handler_name = self._handlers.get_handler_name(local_config)
_logger.debug("Using handler '%s'", handler_name)
handler = self._handlers.get_handler(handler_name)
local_options = local_config.get("options", {})
if heading_level:
# Heading level obtained from Markdown (`##`) takes precedence.
local_options["heading_level"] = heading_level
options = handler.get_options(local_options)
_logger.debug("Collecting data")
try:
data: CollectorItem = handler.collect(identifier, options)
except CollectionError as exception:
_logger.error("%s", exception) # noqa: TRY400
raise PluginError(f"Could not collect '{identifier}'") from exception
if handler_name not in self._updated_envs: # We haven't seen this handler before on this document.
_logger.debug("Updating handler's rendering env")
handler._update_env(self.md, config=self._handlers._tool_config)
self._updated_envs.add(handler_name)
_logger.debug("Rendering templates")
if "locale" in signature(handler.render).parameters:
render = partial(handler.render, locale=self._handlers._locale)
else:
render = handler.render # type: ignore[assignment]
try:
rendered = render(data, options)
except TemplateNotFound as exc:
_logger.error( # noqa: TRY400
"Template '%s' not found for '%s' handler and theme '%s'.",
exc.name,
handler_name,
self._handlers._theme,
)
raise
return rendered, handler, data
def _process_headings(self, handler: BaseHandler, element: Element) -> None:
# We're in the outer handler layer, as well as the outer extension layer.
#
# The "handler layer" tracks the nesting of the autodoc blocks, which can appear in docstrings.
#
# - Render ::: Object1 # Outer handler layer
# - Render Object1's docstring # Outer handler layer
# - Docstring renders ::: Object2 # Inner handler layers
# - etc. # Inner handler layers
#
# The "extension layer" tracks whether we're converting an autodoc instruction
# or nested content within it, like docstrings. Markdown conversion within Markdown conversion.
#
# - Render ::: Object1 # Outer extension layer
# - Render Object1's docstring # Inner extension layer
#
# The generated HTML was just stashed, and the `toc` extension won't be able to see headings.
# We need to duplicate the headings directly, just so `toc` can pick them up,
# otherwise they wouldn't appear in the final table of contents.
#
# These headings are generated by the `BaseHandler.do_heading` method (Jinja filter),
# which runs in the inner extension layer, and not in the outer one where we are now.
headings = handler.get_headings()
element.extend(headings)
# These duplicated headings will later be removed by our `_HeadingsPostProcessor` processor,
# which runs right after `toc` (see `MkdocstringsExtension.extendMarkdown`).
#
# If we were in an inner handler layer, we wouldn't do any of this
# and would just let headings bubble up to the outer handler layer.
if (page := self._autorefs.current_page) is None:
return
for heading in headings:
rendered_id = heading.attrib["id"]
skip_inventory = "data-skip-inventory" in heading.attrib
if skip_inventory:
_logger.debug(
"Skipping heading with id %r because data-skip-inventory is present",
rendered_id,
)
continue
# The title is registered to be used as tooltip by autorefs.
self._autorefs.register_anchor(page, rendered_id, title=heading.text, primary=True)
# Register all identifiers for this object
# both in the autorefs plugin and in the inventory.
aliases: tuple[str, ...]
aliases = handler.get_aliases(rendered_id)
for alias in aliases:
if alias != rendered_id:
self._autorefs.register_anchor(page, alias, rendered_id, primary=False)
if "data-role" in heading.attrib:
self._handlers.inventory.register(
name=rendered_id,
domain=handler.domain,
role=heading.attrib["data-role"],
priority=1, # Register with standard priority.
uri=f"{page.url}#{rendered_id}",
)
for alias in aliases:
if alias not in self._handlers.inventory:
self._handlers.inventory.register(
name=alias,
domain=handler.domain,
role=heading.attrib["data-role"],
priority=2, # Register with lower priority.
uri=f"{page.url}#{rendered_id}",
)
| AutoDocProcessor |
python | doocs__leetcode | lcci/02.03.Delete Middle Node/Solution.py | {
"start": 136,
"end": 252
} | class ____:
def deleteNode(self, node):
node.val = node.next.val
node.next = node.next.next
| Solution |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/memory_assert.py | {
"start": 349,
"end": 1870
} | class ____(InMemorySaver):
storage_for_copies: defaultdict[str, dict[str, dict[str, Checkpoint]]]
def __init__(
self,
*,
serde: SerializerProtocol | None = None,
put_sleep: float | None = None,
) -> None:
_, filename = tempfile.mkstemp()
super().__init__(serde=serde, factory=partial(PersistentDict, filename=filename))
self.storage_for_copies = defaultdict(lambda: defaultdict(dict))
self.put_sleep = put_sleep
self.stack.callback(os.remove, filename)
def put(
self,
config: dict,
checkpoint: Checkpoint,
metadata: CheckpointMetadata,
new_versions: ChannelVersions,
) -> None:
if self.put_sleep:
import time
time.sleep(self.put_sleep)
# assert checkpoint hasn't been modified since last written
thread_id = config["configurable"]["thread_id"]
checkpoint_ns = config["configurable"]["checkpoint_ns"]
if saved := super().get(config):
assert (
self.serde.loads_typed(
self.storage_for_copies[thread_id][checkpoint_ns][saved["id"]]
)
== saved
)
self.storage_for_copies[thread_id][checkpoint_ns][checkpoint["id"]] = (
self.serde.dumps_typed(copy_checkpoint(checkpoint))
)
# call super to write checkpoint
return super().put(config, checkpoint, metadata, new_versions)
| MemorySaverAssertImmutable |
python | cython__cython | tests/run/test_patma.py | {
"start": 93366,
"end": 94842
} | class ____(unittest.TestCase):
def test_mapping_pattern_checks_duplicate_key_1(self):
return # disabled
class Keys:
KEY = "a"
x = {"a": 0, "b": 1}
w = y = z = None
with self.assertRaises(ValueError):
match x:
case {Keys.KEY: y, "a": z}:
w = 0
self.assertIs(w, None)
self.assertIs(y, None)
self.assertIs(z, None)
def run_pyperf():
import pyperf
class PerfPatma(TestPatma):
def assertEqual(*_, **__):
pass
def assertIs(*_, **__):
pass
def assertRaises(*_, **__):
assert False, "this test should be a method of a different class!"
def run_perf(self, count):
tests = []
for attr in vars(TestPatma):
if attr.startswith("test_"):
tests.append(getattr(self, attr))
tests *= count
start = pyperf.perf_counter()
for test in tests:
test()
return pyperf.perf_counter() - start
runner = pyperf.Runner()
runner.bench_time_func("patma", PerfPatma().run_perf)
if __name__ == "__main__":
"""
# From inside environment using this Python, with pyperf installed:
sudo $(which pyperf) system tune && \
$(which python) -m test.test_patma --rigorous; \
sudo $(which pyperf) system reset
"""
run_pyperf()
| TestValueErrors |
python | run-llama__llama_index | llama-index-core/llama_index/core/types.py | {
"start": 4291,
"end": 4525
} | class ____(str, Enum):
"""Pydantic program mode."""
DEFAULT = "default"
OPENAI = "openai"
LLM = "llm"
FUNCTION = "function"
GUIDANCE = "guidance"
LM_FORMAT_ENFORCER = "lm-format-enforcer"
| PydanticProgramMode |
python | pdm-project__pdm | src/pdm/models/serializers.py | {
"start": 1390,
"end": 4645
} | class ____(hishel.BaseSerializer):
KNOWN_REQUEST_EXTENSIONS = ("timeout", "sni_hostname")
KNOWN_RESPONSE_EXTENSIONS = ("http_version", "reason_phrase")
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
implementation = _get_msgpack_implementation()
def dumps(self, response: Response, request: Request, metadata: Metadata) -> bytes:
from hishel._utils import normalized_url
response_dict = {
"status": response.status,
"headers": response.headers,
"content": response.content,
"extensions": {
key: value for key, value in response.extensions.items() if key in self.KNOWN_RESPONSE_EXTENSIONS
},
}
request_dict = {
"method": request.method.decode("ascii"),
"url": normalized_url(request.url),
"headers": request.headers,
"extensions": {
key: value for key, value in request.extensions.items() if key in self.KNOWN_REQUEST_EXTENSIONS
},
}
metadata_dict = {
"cache_key": metadata["cache_key"],
"number_of_uses": metadata["number_of_uses"],
"created_at": metadata["created_at"].strftime(self.DATETIME_FORMAT),
}
full_dict = {
"response": response_dict,
"request": request_dict,
"metadata": metadata_dict,
}
return cast(bytes, self.implementation.packb(full_dict, use_bin_type=True))
def loads(self, data: bytes) -> tuple[Response, Request, Metadata] | None:
from datetime import datetime
try:
full_dict = cast("dict[str, Any]", self.implementation.loads(data, raw=False))
except UnicodeDecodeError:
# For compatibility: loaded by json, while data was dumped by MsgPack
return None
except self.implementation.UnpackValueError:
if not data.strip().startswith(b"{"):
return None
# Dumped by json, but tried to load by MsgPack
try:
full_dict = cast("dict[str, Any]", json.loads(data, object_hook=Encoder.object_hook))
except json.JSONDecodeError:
return None
response_dict = full_dict["response"]
request_dict = full_dict["request"]
metadata_dict = full_dict["metadata"]
metadata_dict["created_at"] = datetime.strptime(metadata_dict["created_at"], self.DATETIME_FORMAT)
response = Response(
status=response_dict["status"],
headers=response_dict["headers"],
content=response_dict["content"],
extensions=response_dict["extensions"],
)
request = Request(
method=request_dict["method"],
url=request_dict["url"],
headers=request_dict["headers"],
extensions=request_dict["extensions"],
)
metadata = Metadata(
cache_key=metadata_dict["cache_key"],
created_at=metadata_dict["created_at"],
number_of_uses=metadata_dict["number_of_uses"],
)
return response, request, metadata
@property
def is_binary(self) -> bool:
return True
| MsgPackSerializer |
python | getsentry__sentry | tests/sentry/integrations/jira/test_csp.py | {
"start": 235,
"end": 2007
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.issue_key = "APP-123"
self.path = absolute_uri(f"extensions/jira/issue/{self.issue_key}/") + "?xdm_e=base_url"
def _split_csp_policy(self, policy):
csp = {}
for directive in policy.split("; "):
parts = directive.split(" ")
csp[parts[0]] = parts[1:]
return csp
def test_xframeoptions_path(self) -> None:
response = self.client.get(self.path)
assert "Content-Security-Policy-Report-Only" in response
assert "X-Frame-Options" not in response
ui_hook_url = absolute_uri("extensions/jira/ui-hook/")
with assume_test_silo_mode(SiloMode.CONTROL):
response = self.client.get(ui_hook_url)
assert "Content-Security-Policy-Report-Only" in response
assert "X-Frame-Options" not in response
def test_csp_frame_ancestors(self) -> None:
response = self.client.get(self.path)
csp = self._split_csp_policy(response["Content-Security-Policy-Report-Only"])
assert "base_url" in csp["frame-ancestors"]
assert "http://testserver" in csp["frame-ancestors"]
@override_settings(STATIC_FRONTEND_APP_URL="https://sentry.io/_static/dist/")
def test_csp_remote_style(self) -> None:
response = self.client.get(self.path)
assert "Content-Security-Policy-Report-Only" in response
csp = self._split_csp_policy(response["Content-Security-Policy-Report-Only"])
assert "https://sentry.io" in csp["style-src"]
@override_settings(CSP_REPORT_ONLY=False)
def test_csp_enforce(self) -> None:
response = self.client.get(self.path)
assert "Content-Security-Policy" in response
| JiraCSPTest |
python | getsentry__sentry | src/sentry/dynamic_sampling/rules/biases/boost_environments_bias.py | {
"start": 285,
"end": 1049
} | class ____(Bias):
def generate_rules(self, project: Project, base_sample_rate: float) -> list[PolymorphicRule]:
return [
{
"samplingValue": {
"type": "sampleRate",
"value": 1.0,
},
"type": "trace",
"condition": {
"op": "or",
"inner": [
{
"op": "glob",
"name": "trace.environment",
"value": ENVIRONMENT_GLOBS,
}
],
},
"id": RESERVED_IDS[RuleType.BOOST_ENVIRONMENTS_RULE],
}
]
| BoostEnvironmentsBias |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_aux/test_cleanup_pods.py | {
"start": 2392,
"end": 14954
} | class ____:
"""Tests cleanup of pods."""
def test_should_create_cronjob_for_enabled_cleanup(self):
docs = render_chart(
values={
"cleanup": {"enabled": True},
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert (
jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].name", docs[0])
== "airflow-cleanup-pods"
)
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].image", docs[0]).startswith(
"apache/airflow"
)
assert {"name": "config", "configMap": {"name": "release-name-config"}} in jmespath.search(
"spec.jobTemplate.spec.template.spec.volumes", docs[0]
)
assert {
"name": "config",
"mountPath": "/opt/airflow/airflow.cfg",
"subPath": "airflow.cfg",
"readOnly": True,
} in jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].volumeMounts", docs[0])
assert "successfulJobsHistoryLimit" not in docs[0]["spec"]
assert "failedJobsHistoryLimit" not in docs[0]["spec"]
def test_should_pass_validation_with_v1beta1_api(self):
render_chart(
values={"cleanup": {"enabled": True}},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
kubernetes_version="1.16.0",
) # checks that no validation exception is raised
def test_should_change_image_when_set_airflow_image(self):
docs = render_chart(
values={
"cleanup": {"enabled": True},
"images": {"airflow": {"repository": "airflow", "tag": "test"}},
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert (
jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].image", docs[0])
== "airflow:test"
)
def test_should_create_valid_affinity_tolerations_and_node_selector(self):
docs = render_chart(
values={
"cleanup": {
"enabled": True,
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
}
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert jmespath.search("kind", docs[0]) == "CronJob"
assert (
jmespath.search(
"spec.jobTemplate.spec.template.spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
== "foo"
)
assert (
jmespath.search(
"spec.jobTemplate.spec.template.spec.nodeSelector.diskType",
docs[0],
)
== "ssd"
)
assert (
jmespath.search(
"spec.jobTemplate.spec.template.spec.tolerations[0].key",
docs[0],
)
== "dynamic-pods"
)
def test_scheduler_name(self):
docs = render_chart(
values={"cleanup": {"enabled": True}, "schedulerName": "airflow-scheduler"},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert (
jmespath.search(
"spec.jobTemplate.spec.template.spec.schedulerName",
docs[0],
)
== "airflow-scheduler"
)
def test_default_command_and_args(self):
docs = render_chart(
values={"cleanup": {"enabled": True}}, show_only=["templates/cleanup/cleanup-cronjob.yaml"]
)
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].command", docs[0]) is None
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].args", docs[0]) == [
"bash",
"-c",
"exec airflow kubernetes cleanup-pods --namespace=default",
]
def test_should_add_extraEnvs(self):
docs = render_chart(
values={
"cleanup": {
"enabled": True,
"env": [{"name": "TEST_ENV_1", "value": "test_env_1"}],
},
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert {"name": "TEST_ENV_1", "value": "test_env_1"} in jmespath.search(
"spec.jobTemplate.spec.template.spec.containers[0].env", docs[0]
)
@pytest.mark.parametrize("command", [None, ["custom", "command"]])
@pytest.mark.parametrize("args", [None, ["custom", "args"]])
def test_command_and_args_overrides(self, command, args):
docs = render_chart(
values={"cleanup": {"enabled": True, "command": command, "args": args}},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert command == jmespath.search(
"spec.jobTemplate.spec.template.spec.containers[0].command", docs[0]
)
assert args == jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].args", docs[0])
def test_command_and_args_overrides_are_templated(self):
docs = render_chart(
values={
"cleanup": {
"enabled": True,
"command": ["{{ .Release.Name }}"],
"args": ["{{ .Release.Service }}"],
}
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].command", docs[0]) == [
"release-name"
]
assert jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].args", docs[0]) == ["Helm"]
def test_should_set_labels_to_jobs_from_cronjob(self):
docs = render_chart(
values={
"cleanup": {"enabled": True},
"labels": {"project": "airflow"},
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert jmespath.search("spec.jobTemplate.spec.template.metadata.labels", docs[0]) == {
"tier": "airflow",
"component": "airflow-cleanup-pods",
"release": "release-name",
"project": "airflow",
}
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"cleanup": {
"enabled": True,
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert "test_label" in jmespath.search("spec.jobTemplate.spec.template.metadata.labels", docs[0])
assert (
jmespath.search("spec.jobTemplate.spec.template.metadata.labels", docs[0])["test_label"]
== "test_label_value"
)
def test_should_add_component_specific_annotations(self):
docs = render_chart(
values={
"cleanup": {
"enabled": True,
"jobAnnotations": {"test_cronjob_annotation": "test_cronjob_annotation_value"},
"podAnnotations": {"test_pod_annotation": "test_pod_annotation_value"},
},
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert "test_cronjob_annotation" in jmespath.search("metadata.annotations", docs[0])
assert (
jmespath.search("metadata.annotations", docs[0])["test_cronjob_annotation"]
== "test_cronjob_annotation_value"
)
assert "test_pod_annotation" in jmespath.search(
"spec.jobTemplate.spec.template.metadata.annotations", docs[0]
)
assert (
jmespath.search("spec.jobTemplate.spec.template.metadata.annotations", docs[0])[
"test_pod_annotation"
]
== "test_pod_annotation_value"
)
def test_cleanup_resources_are_configurable(self):
resources = {
"requests": {
"cpu": "128m",
"memory": "256Mi",
},
"limits": {
"cpu": "256m",
"memory": "512Mi",
},
}
docs = render_chart(
values={
"cleanup": {
"enabled": True,
"resources": resources,
},
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert resources == jmespath.search(
"spec.jobTemplate.spec.template.spec.containers[0].resources", docs[0]
)
def test_should_set_job_history_limits(self):
docs = render_chart(
values={
"cleanup": {
"enabled": True,
"failedJobsHistoryLimit": 2,
"successfulJobsHistoryLimit": 4,
},
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert jmespath.search("spec.failedJobsHistoryLimit", docs[0]) == 2
assert jmespath.search("spec.successfulJobsHistoryLimit", docs[0]) == 4
def test_should_set_zero_job_history_limits(self):
docs = render_chart(
values={
"cleanup": {
"enabled": True,
"failedJobsHistoryLimit": 0,
"successfulJobsHistoryLimit": 0,
},
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert jmespath.search("spec.failedJobsHistoryLimit", docs[0]) == 0
assert jmespath.search("spec.successfulJobsHistoryLimit", docs[0]) == 0
def test_no_airflow_local_settings(self):
docs = render_chart(
values={
"cleanup": {"enabled": True},
"airflowLocalSettings": None,
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
volume_mounts = jmespath.search(
"spec.jobTemplate.spec.template.spec.containers[0].volumeMounts", docs[0]
)
assert "airflow_local_settings.py" not in str(volume_mounts)
def test_airflow_local_settings(self):
docs = render_chart(
values={
"cleanup": {"enabled": True},
"airflowLocalSettings": "# Well hello!",
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert {
"name": "config",
"mountPath": "/opt/airflow/config/airflow_local_settings.py",
"subPath": "airflow_local_settings.py",
"readOnly": True,
} in jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].volumeMounts", docs[0])
def test_global_volumes_and_volume_mounts(self):
docs = render_chart(
values={
"cleanup": {"enabled": True},
"volumes": [{"name": "test-volume", "emptyDir": {}}],
"volumeMounts": [{"name": "test-volume", "mountPath": "/test"}],
},
show_only=["templates/cleanup/cleanup-cronjob.yaml"],
)
assert {
"name": "test-volume",
"mountPath": "/test",
} in jmespath.search("spec.jobTemplate.spec.template.spec.containers[0].volumeMounts", docs[0])
assert {
"name": "test-volume",
"emptyDir": {},
} in jmespath.search("spec.jobTemplate.spec.template.spec.volumes", docs[0])
| TestCleanupPods |
python | networkx__networkx | networkx/classes/coreviews.py | {
"start": 2738,
"end": 4302
} | class ____(Mapping):
"""A read-only union of two atlases (dict-of-dict).
The two dict-of-dicts represent the inner dict of
an Adjacency: `G.succ[node]` and `G.pred[node]`.
The inner level of dict of both hold attribute key:value
pairs and is read-write. But the outer level is read-only.
See Also
========
UnionAdjacency: View into dict-of-dict-of-dict
UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict
"""
__slots__ = ("_succ", "_pred")
def __getstate__(self):
return {"_succ": self._succ, "_pred": self._pred}
def __setstate__(self, state):
self._succ = state["_succ"]
self._pred = state["_pred"]
def __init__(self, succ, pred):
self._succ = succ
self._pred = pred
def __len__(self):
return len(self._succ.keys() | self._pred.keys())
def __iter__(self):
return iter(set(self._succ.keys()) | set(self._pred.keys()))
def __getitem__(self, key):
try:
return self._succ[key]
except KeyError:
return self._pred[key]
def copy(self):
result = {nbr: dd.copy() for nbr, dd in self._succ.items()}
for nbr, dd in self._pred.items():
if nbr in result:
result[nbr].update(dd)
else:
result[nbr] = dd.copy()
return result
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})"
| UnionAtlas |
python | facebook__pyre-check | tools/incremental_test/batch.py | {
"start": 674,
"end": 747
} | class ____:
integers: Dict[str, int]
normals: Dict[str, str]
| Sample |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/multi_device_iterator_test.py | {
"start": 13697,
"end": 19219
} | class ____(test_base.DatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(OwnedMultiDeviceIteratorTest, self).setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(
max_buffer_size=[0, 1, 10], prefetch_buffer_size=[0, 1, 10])))
def testBasic(self, max_buffer_size, prefetch_buffer_size):
dataset = dataset_ops.Dataset.range(1000)
mdi = multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]],
max_buffer_size=max_buffer_size,
prefetch_buffer_size=prefetch_buffer_size)
for i, el in enumerate(mdi):
self.assertEqual([i * 2, i * 2 + 1], [el[0].numpy(), el[1].numpy()])
@combinations.generate(test_base.eager_only_combinations())
def testBasicFunction(self):
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
@def_function.function
def fn():
with ops.device(self._devices[0]):
dataset = dataset_ops.Dataset.range(10)
iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
for _ in range(5):
el0, el1 = next(iterator)
queue.enqueue(el0)
queue.enqueue(el1)
fn()
for i in range(10):
self.assertEqual(queue.dequeue().numpy(), i)
@combinations.generate(test_base.eager_only_combinations())
def testFunctionError(self):
# In this test we verify that a function that raises an error ends up
# properly deallocating the iterator resource.
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
queue.enqueue(0)
def init_fn(n):
return n
def next_fn(_):
ds = dataset_ops.Dataset.range(0)
return next(iter(ds))
def finalize_fn(n):
queue.enqueue(0)
return n
@def_function.function
def fn():
dataset = from_generator_op._GeneratorDataset(
1,
init_fn,
next_fn,
finalize_fn,
output_signature=tensor_spec.TensorSpec([], dtypes.int64))
iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
next(iterator)
with self.assertRaises(errors.OutOfRangeError):
fn()
self.assertEqual(queue.size().numpy(), 2)
@combinations.generate(test_base.eager_only_combinations())
def testMultipleInitializations(self):
dataset = dataset_ops.Dataset.range(1000)
for _ in range(5):
multi_device_iterator = (
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]]))
for i, el in enumerate(multi_device_iterator):
self.assertEqual([i * 2, i * 2 + 1], [el[0].numpy(), el[1].numpy()])
@combinations.generate(test_base.eager_only_combinations())
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(iterator):
trace_count[0] += 1
counter = np.int64(0)
for _ in range(5):
elem = next(iterator)
counter += elem[0]
counter += elem[1]
return counter
dataset = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(20)
for _ in range(10):
multi_device_iterator = (
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]]))
self.assertEqual(self.evaluate(f(multi_device_iterator)), 45)
multi_device_iterator2 = (
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset2, [self._devices[1], self._devices[2]]))
self.assertEqual(self.evaluate(f(multi_device_iterator2)), 45)
self.assertEqual(trace_count[0], 1)
@combinations.generate(test_base.eager_only_combinations())
def testMissingDevices(self):
dataset = dataset_ops.Dataset.range(1000)
with self.assertRaisesRegex(ValueError, "`devices` must be provided."):
multi_device_iterator_ops.OwnedMultiDeviceIterator(dataset)
@combinations.generate(test_base.eager_only_combinations())
def testMissingInput(self):
with self.assertRaisesRegex(
ValueError,
"When `dataset` is not provided, both `components` and `element_spec` "
"must be specified."):
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset=None, devices=[self._devices[1], self._devices[2]])
@combinations.generate(test_base.eager_only_combinations())
def testExtraElementSpecInput(self):
dataset = dataset_ops.Dataset.range(1000)
with self.assertRaisesRegex(
ValueError,
"When `dataset` is provided, `element_spec` and `components` must "
"not be specified."):
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, devices=[self._devices[1], self._devices[2]],
element_spec=dataset.element_spec)
@combinations.generate(test_base.graph_only_combinations())
def testGraphMode(self):
dataset = dataset_ops.Dataset.range(1000)
with self.assertRaisesRegex(
RuntimeError,
"OwnedMultiDeviceIterator is only supported inside of tf.function or "
"when eager execution is enabled."):
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, devices=[self._devices[1], self._devices[2]])
if __name__ == "__main__":
test.main()
| OwnedMultiDeviceIteratorTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor10.py | {
"start": 243,
"end": 515
} | class ____(Iterator[_T_co]):
def __new__(cls, __iterable: Iterable[_T]) -> "A[tuple[_T, _T]]": ...
def __next__(self) -> _T_co: ...
def func1(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T, _T]]:
for (a, _), (b, c) in A(A(iterable)):
yield a, b, c
| A |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 841325,
"end": 842058
} | class ____(sgqlc.types.relay.Connection):
"""A list of projects associated with the owner."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("ProjectEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Project"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| ProjectConnection |
python | aio-libs__aiohttp | examples/retry_middleware.py | {
"start": 2868,
"end": 8863
} | class ____:
"""Test server with stateful endpoints for retry testing."""
def __init__(self) -> None:
self.request_counters: dict[str, int] = {}
self.status_sequences: dict[str, list[int]] = {
"eventually-ok": [500, 503, 502, 200], # Fails 3 times, then succeeds
"always-error": [500, 500, 500, 500], # Always fails
"immediate-ok": [200], # Succeeds immediately
"flaky": [503, 200], # Fails once, then succeeds
}
async def handle_status(self, request: web.Request) -> web.Response:
"""Return the status code specified in the path."""
status = int(request.match_info["status"])
return web.Response(status=status, text=f"Status: {status}")
async def handle_status_sequence(self, request: web.Request) -> web.Response:
"""Return different status codes on sequential requests."""
path = request.path
# Initialize counter for this path if needed
if path not in self.request_counters:
self.request_counters[path] = 0
# Get the status sequence for this path
sequence_name = request.match_info["name"]
if sequence_name not in self.status_sequences:
return web.Response(status=404, text="Sequence not found")
sequence = self.status_sequences[sequence_name]
# Get the current status based on request count
count = self.request_counters[path]
if count < len(sequence):
status = sequence[count]
else:
# After sequence ends, always return the last status
status = sequence[-1]
# Increment counter for next request
self.request_counters[path] += 1
return web.Response(
status=status, text=f"Request #{count + 1}: Status {status}"
)
async def handle_delay(self, request: web.Request) -> web.Response:
"""Delay response by specified seconds."""
delay = float(request.match_info["delay"])
await asyncio.sleep(delay)
return web.json_response({"delay": delay, "message": "Response after delay"})
async def handle_reset(self, request: web.Request) -> web.Response:
"""Reset request counters."""
self.request_counters = {}
return web.Response(text="Counters reset")
async def run_test_server() -> web.AppRunner:
"""Run a simple test server."""
app = web.Application()
server = TestServer()
app.router.add_get("/status/{status}", server.handle_status)
app.router.add_get("/sequence/{name}", server.handle_status_sequence)
app.router.add_get("/delay/{delay}", server.handle_delay)
app.router.add_post("/reset", server.handle_reset)
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, "localhost", 8080)
await site.start()
return runner
async def run_tests() -> None:
"""Run all retry middleware tests."""
# Create retry middleware with custom settings
retry_middleware = RetryMiddleware(
max_retries=3,
retry_statuses=DEFAULT_RETRY_STATUSES,
initial_delay=0.5,
backoff_factor=2.0,
)
async with ClientSession(middlewares=(retry_middleware,)) as session:
# Reset counters before tests
await session.post("http://localhost:8080/reset")
# Test 1: Request that succeeds immediately
print("=== Test 1: Immediate success ===")
async with session.get("http://localhost:8080/sequence/immediate-ok") as resp:
text = await resp.text()
print(f"Final status: {resp.status}")
print(f"Response: {text}")
print("Success - no retries needed\n")
# Test 2: Request that eventually succeeds after retries
print("=== Test 2: Eventually succeeds (500->503->502->200) ===")
async with session.get("http://localhost:8080/sequence/eventually-ok") as resp:
text = await resp.text()
print(f"Final status: {resp.status}")
print(f"Response: {text}")
if resp.status == 200:
print("Success after retries!\n")
else:
print("Failed after retries\n")
# Test 3: Request that always fails
print("=== Test 3: Always fails (500->500->500->500) ===")
async with session.get("http://localhost:8080/sequence/always-error") as resp:
text = await resp.text()
print(f"Final status: {resp.status}")
print(f"Response: {text}")
print("Failed after exhausting all retries\n")
# Test 4: Flaky service (fails once then succeeds)
print("=== Test 4: Flaky service (503->200) ===")
await session.post("http://localhost:8080/reset") # Reset counters
async with session.get("http://localhost:8080/sequence/flaky") as resp:
text = await resp.text()
print(f"Final status: {resp.status}")
print(f"Response: {text}")
print("Success after one retry!\n")
# Test 5: Non-retryable status
print("=== Test 5: Non-retryable status (404) ===")
async with session.get("http://localhost:8080/status/404") as resp:
print(f"Final status: {resp.status}")
print("Failed immediately - not a retryable status\n")
# Test 6: Delayed response
print("=== Test 6: Testing with delay endpoint ===")
try:
async with session.get("http://localhost:8080/delay/0.5") as resp:
print(f"Status: {resp.status}")
data = await resp.json()
print(f"Response received after delay: {data}\n")
except asyncio.TimeoutError:
print("Request timed out\n")
async def main() -> None:
# Start test server
server = await run_test_server()
try:
await run_tests()
finally:
await server.cleanup()
if __name__ == "__main__":
asyncio.run(main())
| TestServer |
python | django__django | tests/model_fields/models.py | {
"start": 11996,
"end": 12172
} | class ____(json.JSONEncoder):
def default(self, o):
if isinstance(o, models.JSONNull):
return None
return super().default(o)
| JSONNullCustomEncoder |
python | facebookresearch__faiss | tests/test_fast_scan_ivf.py | {
"start": 21009,
"end": 28513
} | class ____(unittest.TestCase):
def subtest_accuracy(self, aq, st, by_residual, implem, metric_type='L2'):
"""
Compare IndexIVFAdditiveQuantizerFastScan with
IndexIVFAdditiveQuantizer
"""
nlist, d = 16, 8
ds = datasets.SyntheticDataset(d, 1000, 1000, 500, metric_type)
gt = ds.get_groundtruth(k=1)
if metric_type == 'L2':
metric = faiss.METRIC_L2
postfix1 = '_Nqint8'
postfix2 = f'_N{st}2x4'
else:
metric = faiss.METRIC_INNER_PRODUCT
postfix1 = postfix2 = ''
index = faiss.index_factory(d, f'IVF{nlist},{aq}3x4{postfix1}', metric)
index.by_residual = by_residual
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 16
Dref, Iref = index.search(ds.get_queries(), 1)
indexfs = faiss.index_factory(
d, f'IVF{nlist},{aq}3x4fs_32{postfix2}', metric)
indexfs.by_residual = by_residual
indexfs.train(ds.get_train())
indexfs.add(ds.get_database())
indexfs.nprobe = 16
indexfs.implem = implem
D1, I1 = indexfs.search(ds.get_queries(), 1)
nq = Iref.shape[0]
recall_ref = (Iref == gt).sum() / nq
recall1 = (I1 == gt).sum() / nq
assert abs(recall_ref - recall1) < 0.051
def xx_test_accuracy(self):
# generated programatically below
for metric in 'L2', 'IP':
for byr in True, False:
for implem in 0, 10, 11, 12, 13, 14, 15:
self.subtest_accuracy('RQ', 'rq', byr, implem, metric)
self.subtest_accuracy('LSQ', 'lsq', byr, implem, metric)
def subtest_rescale_accuracy(self, aq, st, by_residual, implem):
"""
we set norm_scale to 2 and compare it with IndexIVFAQ
"""
nlist, d = 16, 8
ds = datasets.SyntheticDataset(d, 1000, 1000, 500)
gt = ds.get_groundtruth(k=1)
metric = faiss.METRIC_L2
postfix1 = '_Nqint8'
postfix2 = f'_N{st}2x4'
index = faiss.index_factory(
d, f'IVF{nlist},{aq}3x4{postfix1}', metric)
index.by_residual = by_residual
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 16
Dref, Iref = index.search(ds.get_queries(), 1)
indexfs = faiss.index_factory(
d, f'IVF{nlist},{aq}3x4fs_32{postfix2}', metric)
indexfs.by_residual = by_residual
indexfs.norm_scale = 2
indexfs.train(ds.get_train())
indexfs.add(ds.get_database())
indexfs.nprobe = 16
indexfs.implem = implem
D1, I1 = indexfs.search(ds.get_queries(), 1)
nq = Iref.shape[0]
recall_ref = (Iref == gt).sum() / nq
recall1 = (I1 == gt).sum() / nq
assert abs(recall_ref - recall1) < 0.05
def xx_test_rescale_accuracy(self):
for byr in True, False:
for implem in 0, 10, 11, 12, 13, 14, 15:
self.subtest_accuracy('RQ', 'rq', byr, implem, 'L2')
self.subtest_accuracy('LSQ', 'lsq', byr, implem, 'L2')
def subtest_from_ivfaq(self, implem):
d = 8
ds = datasets.SyntheticDataset(d, 1000, 2000, 1000, metric='IP')
gt = ds.get_groundtruth(k=1)
index = faiss.index_factory(d, 'IVF16,RQ8x4', faiss.METRIC_INNER_PRODUCT)
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 16
Dref, Iref = index.search(ds.get_queries(), 1)
indexfs = faiss.IndexIVFAdditiveQuantizerFastScan(index)
D1, I1 = indexfs.search(ds.get_queries(), 1)
nq = Iref.shape[0]
recall_ref = (Iref == gt).sum() / nq
recall1 = (I1 == gt).sum() / nq
assert abs(recall_ref - recall1) < 0.02
def test_from_ivfaq(self):
for implem in 0, 1, 2:
self.subtest_from_ivfaq(implem)
def subtest_factory(self, aq, M, bbs, st, r='r'):
"""
Format: IVF{nlist},{AQ}{M}x4fs{r}_{bbs}_N{st}
nlist (int): number of inverted lists
AQ (str): `LSQ` or `RQ`
M (int): number of sub-quantizers
bbs (int): build block size
st (str): search type, `lsq2x4` or `rq2x4`
r (str): `r` or ``, by_residual or not
"""
AQ = faiss.AdditiveQuantizer
nlist, d = 128, 16
if bbs > 0:
index = faiss.index_factory(
d, f'IVF{nlist},{aq}{M}x4fs{r}_{bbs}_N{st}2x4')
else:
index = faiss.index_factory(
d, f'IVF{nlist},{aq}{M}x4fs{r}_N{st}2x4')
bbs = 32
assert index.nlist == nlist
assert index.bbs == bbs
q = faiss.downcast_Quantizer(index.aq)
assert q.M == M
if aq == 'LSQ':
assert isinstance(q, faiss.LocalSearchQuantizer)
if aq == 'RQ':
assert isinstance(q, faiss.ResidualQuantizer)
if st == 'lsq':
assert q.search_type == AQ.ST_norm_lsq2x4
if st == 'rq':
assert q.search_type == AQ.ST_norm_rq2x4
assert index.by_residual == (r == 'r')
def test_factory(self):
self.subtest_factory('LSQ', 16, 64, 'lsq')
self.subtest_factory('LSQ', 16, 64, 'rq')
self.subtest_factory('RQ', 16, 64, 'rq')
self.subtest_factory('RQ', 16, 64, 'lsq')
self.subtest_factory('LSQ', 64, 0, 'lsq')
self.subtest_factory('LSQ', 64, 0, 'lsq', r='')
def subtest_io(self, factory_str):
d = 8
ds = datasets.SyntheticDataset(d, 1000, 2000, 1000)
index = faiss.index_factory(d, factory_str)
index.train(ds.get_train())
index.add(ds.get_database())
D1, I1 = index.search(ds.get_queries(), 1)
fd, fname = tempfile.mkstemp()
os.close(fd)
try:
faiss.write_index(index, fname)
index2 = faiss.read_index(fname)
D2, I2 = index2.search(ds.get_queries(), 1)
np.testing.assert_array_equal(I1, I2)
finally:
if os.path.exists(fname):
os.unlink(fname)
def test_io(self):
self.subtest_io('IVF16,LSQ4x4fs_Nlsq2x4')
self.subtest_io('IVF16,LSQ4x4fs_Nrq2x4')
self.subtest_io('IVF16,RQ4x4fs_Nrq2x4')
self.subtest_io('IVF16,RQ4x4fs_Nlsq2x4')
# add more tests programatically
def add_TestIVFAQFastScan_subtest_accuracy(
aq, st, by_residual, implem, metric='L2'):
setattr(
TestIVFAQFastScan,
f"test_accuracy_{metric}_{aq}_implem{implem}_residual{by_residual}",
lambda self:
self.subtest_accuracy(aq, st, by_residual, implem, metric)
)
def add_TestIVFAQFastScan_subtest_rescale_accuracy(aq, st, by_residual, implem):
setattr(
TestIVFAQFastScan,
f"test_rescale_accuracy_{aq}_implem{implem}_residual{by_residual}",
lambda self:
self.subtest_rescale_accuracy(aq, st, by_residual, implem)
)
for byr in True, False:
for implem in 0, 10, 11, 12, 13, 14, 15:
for mt in 'L2', 'IP':
add_TestIVFAQFastScan_subtest_accuracy('RQ', 'rq', byr, implem, mt)
add_TestIVFAQFastScan_subtest_accuracy('LSQ', 'lsq', byr, implem, mt)
add_TestIVFAQFastScan_subtest_rescale_accuracy('LSQ', 'lsq', byr, implem)
add_TestIVFAQFastScan_subtest_rescale_accuracy('RQ', 'rq', byr, implem)
| TestIVFAQFastScan |
python | doocs__leetcode | solution/0900-0999/0942.DI String Match/Solution.py | {
"start": 0,
"end": 335
} | class ____:
def diStringMatch(self, s: str) -> List[int]:
low, high = 0, len(s)
ans = []
for c in s:
if c == "I":
ans.append(low)
low += 1
else:
ans.append(high)
high -= 1
ans.append(low)
return ans
| Solution |
python | pypa__warehouse | tests/unit/admin/views/test_projects.py | {
"start": 34888,
"end": 37253
} | class ____:
def test_delete_role(self, db_request):
project = ProjectFactory.create(name="foo")
user = UserFactory.create(username="bar")
role = RoleFactory.create(project=project, user=user)
UserFactory.create(username="admin")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect/")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST["username"] = user.username
db_request.matchdict["role_id"] = role.id
db_request.user = UserFactory.create()
views.delete_role(project, db_request)
assert db_request.session.flash.calls == [
pretend.call(
f"Removed '{role.user.username}' as '{role.role_name}' "
f"on '{project.name}'",
queue="success",
)
]
assert db_request.db.query(Role).all() == []
def test_delete_role_not_found(self, db_request):
project = ProjectFactory.create(name="foo")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect/")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.matchdict["role_id"] = uuid.uuid4()
db_request.user = UserFactory.create()
with pytest.raises(HTTPSeeOther):
views.delete_role(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("This role no longer exists", queue="error")
]
def test_delete_role_no_confirm(self, db_request):
project = ProjectFactory.create(name="foo")
user = UserFactory.create(username="bar")
role = RoleFactory.create(project=project, user=user)
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect/")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.matchdict["role_id"] = role.id
db_request.user = UserFactory.create()
with pytest.raises(HTTPSeeOther):
views.delete_role(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Confirm the request", queue="error")
]
| TestDeleteRole |
python | great-expectations__great_expectations | great_expectations/core/partitioners.py | {
"start": 1374,
"end": 1563
} | class ____(pydantic.BaseModel):
column_name: str
sort_ascending: bool = True
method_name: Literal["partition_on_column_value"] = "partition_on_column_value"
| PartitionerColumnValue |
python | pytorch__pytorch | test/dynamo/test_export.py | {
"start": 116746,
"end": 154074
} | class ____(torch.nn.Module):
def forward(self, pred, x):
arg0: "Sym(Eq(s26, {size}))"; arg1: "f32[s77, s27]";
arg0, arg1, = fx_pytree.tree_flatten_spec(([pred, x], {{}}), self._in_spec)
l_x_ = arg1
cos: "f32[s77, s27]" = l_x_.cos(); l_x_ = None
return pytree.tree_unflatten([cos], self._out_spec)
"""
true_guard_code = [
"cast_symbool_to_symint_guardless(L['pred']) == 1",
]
false_guard_code = [
"cast_symbool_to_symint_guardless(L['pred']) != 1",
]
test_symbool_guards(
f,
[3, 3, 4, 5],
[true_graph, true_graph, false_graph, false_graph],
[true_guard_code, true_guard_code, false_guard_code, false_guard_code],
# Outer shape env should have no guards in it because we never specialize on the outer symbool.
[[], [], [], []],
)
def test_input_global(self) -> None:
global bulbous_bouffant
bulbous_bouffant = torch.randn(3)
def f(y):
return bulbous_bouffant + y
torch._dynamo.export(f)(torch.randn(3))
def test_input_global_multiple_access(self) -> None:
global macademia
macademia = torch.randn(3)
def g(y):
global macademia
y = macademia + y
return y
def f(y):
global macademia
y = g(y)
return macademia + y
torch._dynamo.export(f)(torch.randn(3))
def test_input_nonlocal(self) -> None:
arglebargle = torch.randn(3)
def f(y):
return arglebargle + y
torch._dynamo.export(f)(torch.randn(3))
def test_input_unused_nonlocal_ok(self) -> None:
arglebargle = torch.randn(3)
def f(y):
x = arglebargle # noqa: F841
return y
torch._dynamo.export(f)(torch.randn(3))
def test_symbolic_tracing_within_fake_mode_with_constraints(self):
from torch._subclasses import fake_tensor
fake_mode = fake_tensor.FakeTensorMode()
class DynamicShapeSimpleModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b, c) -> torch.Tensor:
d = (torch.matmul(a, b) + c) / 2
d_s0 = d.shape[0]
d_s1 = d.shape[1]
d_s3 = d_s0 * d_s1
e = d.view(d_s3)
return torch.cat([e, e])
with fake_mode:
model = DynamicShapeSimpleModel()
inputs = (torch.randn(2, 4), torch.randn(4, 7), torch.randn(2, 7))
dim = torch.export.Dim("dim")
dynamic_shapes = ({0: dim}, None, {0: dim})
for aten_graph in [True, False]:
gm = torch._dynamo.export(
model,
dynamic_shapes=dynamic_shapes,
aten_graph=aten_graph,
)(*inputs).graph_module
# Since there are no parameters we can do this
inputs = (torch.randn(2, 4), torch.randn(4, 7), torch.randn(2, 7))
self.assertEqual(model(*inputs), gm(*inputs))
def test_symbolic_tracing_within_fake_mode_with_constraints_with_parameters(self):
from torch._subclasses import fake_tensor
fake_mode = fake_tensor.FakeTensorMode()
# TODO: Seems to choke if you don't make a fresh model and
# just try to export Linear directly...
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(2, 2)
def forward(self, x):
out = self.linear(x)
return out
with fake_mode:
model = Model()
inputs = (torch.randn(10, 2, 2),)
dynamic_shapes = ({0: torch.export.Dim("dim")},)
for aten_graph in [True, False]:
torch._dynamo.export(
model,
dynamic_shapes=dynamic_shapes,
aten_graph=aten_graph,
)(*inputs).graph_module
def test_capture_symbolic_tracing_within_fake_mode(self):
from torch._dynamo.output_graph import config
from torch._subclasses import fake_tensor
from torch.fx.experimental.symbolic_shapes import ShapeEnv
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(2, 2)
self.linear2 = torch.nn.Linear(2, 2)
def forward(self, x):
out = self.linear(x)
out = self.linear2(out)
return out
# User-instantiated FakeTensorMode
fake_mode = fake_tensor.FakeTensorMode(
allow_non_fake_inputs=False,
allow_fallback_kernels=True,
shape_env=ShapeEnv(
allow_scalar_outputs=config.capture_scalar_outputs,
allow_dynamic_output_shape_ops=config.capture_dynamic_output_shape_ops,
),
)
# Fakefy input+model before exporting it
with fake_mode:
x = torch.rand(5, 2, 2)
model = Model()
# Export the model with fake inputs and parameters
for aten_graph in [True, False]:
graph_module, _ = torch._dynamo.export(model, aten_graph=aten_graph)(x)
self.assertTrue(
isinstance(graph_module, torch.fx.GraphModule),
msg="test_capture_symbolic_tracing_within_fake_mode_aten_graph_"
+ str(aten_graph),
)
def test_cond_op_param_buffer_lifted(self):
class A(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer1 = torch.nn.Buffer(torch.zeros(6, 4))
def forward(self):
return self.buffer1.sum()
class B(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer2 = torch.nn.Buffer(torch.ones(6, 4))
def forward(self):
return self.buffer2.sum()
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = A()
self.b = B()
def forward(self, x):
def true_fn(x):
return x.cos() + self.a()
def false_fn(x):
return x.sin() + self.b()
return (cond(x.shape[0] > 4, true_fn, false_fn, [x]),)
gm, _ = torch._dynamo.export(M(), aten_graph=False)(torch.ones(6, 4))
self.assertEqual(gm(torch.ones(6, 4)), M()(torch.ones(6, 4)))
self.assertEqual(gm(torch.ones(3, 4)), M()(torch.ones(3, 4)))
def test_nested_cond_op_param_buffer_lifted(self):
class A(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer1 = torch.nn.Buffer(torch.zeros(6, 4))
def forward(self):
return self.buffer1.sum()
class B(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer2 = torch.nn.Buffer(torch.ones(6, 4))
def forward(self):
return self.buffer2.sum()
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = A()
self.b = B()
def forward(self, x):
def true_true_fn(x):
return x.cos() + self.a()
def true_false_fn(x):
return x.cos() + self.a() + 1
def true_fn(x):
return cond(x.shape[0] > 5, true_true_fn, true_false_fn, [x])
def false_fn(x):
return x.sin() + self.b()
return (cond(x.shape[0] > 4, true_fn, false_fn, [x]),)
gm, _ = torch._dynamo.export(M(), aten_graph=False)(torch.ones(6, 4))
self.assertEqual(gm(torch.ones(6, 4)), M()(torch.ones(6, 4)))
self.assertEqual(gm(torch.ones(5, 4)), M()(torch.ones(5, 4)))
self.assertEqual(gm(torch.ones(3, 4)), M()(torch.ones(3, 4)))
def test_map_cond_param_buffer_lifted(self):
from functorch.experimental.control_flow import cond, map
class A(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer1 = torch.nn.Buffer(torch.zeros(6, 4))
def forward(self):
return self.buffer1.sum()
class B(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer2 = torch.nn.Buffer(torch.ones(6, 4))
def forward(self):
return self.buffer2.sum()
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = A()
self.b = B()
def inner(self, x, pred):
def true_fn(x):
return x + x + self.a()
def false_fn(x):
return x * x + self.b()
return cond(pred, true_fn, false_fn, [x])
def forward(self, pred, xs):
def body(x, pred):
return self.inner(x, pred) + self.b()
return map(body, xs, pred)
mod = Module()
x = torch.randn(3, 2, 1)
pred_x = torch.tensor(True)
y = torch.randn(4, 3, 2)
pred_y = torch.tensor(False)
real_result = mod(pred_y, y)
out_graph, _ = torch._dynamo.export(mod)(pred_x, x)
self.assertEqual(real_result, out_graph(pred_y, y))
def test_cond_free_variables_overlapping(self):
from functorch.experimental.control_flow import cond
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, pred, x):
a = torch.ones(6, 4)
b = torch.ones(6, 4)
c = torch.ones(6, 4)
d = torch.ones(6, 4)
def true_fn(x):
return x + x + a.cos() + b.cos() + d.cos()
def false_fn(x):
return x * x + a.sin() + b.sin() + c.sin()
return cond(pred, true_fn, false_fn, [x])
mod = Module()
x = torch.ones(6, 4)
pred_x = torch.tensor(True)
out_graph, _ = torch._dynamo.export(mod)(pred_x, x)
self.assertExpectedInline(
out_graph.code.strip(),
"""\
def forward(self, pred, x):
arg0, arg1, = fx_pytree.tree_flatten_spec(([pred, x], {}), self._in_spec)
l_pred_ = arg0
l_x_ = arg1
a = torch.ones(6, 4)
b = torch.ones(6, 4)
c = torch.ones(6, 4)
d = torch.ones(6, 4)
cond_true_0 = self.cond_true_0
cond_false_0 = self.cond_false_0
cond = torch.ops.higher_order.cond(l_pred_, cond_true_0, cond_false_0, (a, b, l_x_, d, c)); l_pred_ = cond_true_0 = cond_false_0 = a = b = l_x_ = d = c = None
getitem = cond[0]; cond = None
return pytree.tree_unflatten([getitem], self._out_spec)""", # noqa: B950,E122
)
self.assertExpectedInline(
out_graph.cond_true_0.code.strip(),
"""\
def forward(self, a, b, l_x_, d_true_branch, c_false_branch):
a_1 = a
b_1 = b
l_x__1 = l_x_
add = l_x__1 + l_x__1; l_x__1 = None
cos = a_1.cos(); a_1 = None
add_1 = add + cos; add = cos = None
cos_1 = b_1.cos(); b_1 = None
add_2 = add_1 + cos_1; add_1 = cos_1 = None
cos_2 = d_true_branch.cos(); d_true_branch = None
add_3 = add_2 + cos_2; add_2 = cos_2 = None
return (add_3,)""",
)
self.assertExpectedInline(
out_graph.cond_false_0.code.strip(),
"""\
def forward(self, a, b, l_x_, d_true_branch, c_false_branch):
a_1 = a
b_1 = b
l_x__1 = l_x_
mul = l_x__1 * l_x__1; l_x__1 = None
sin = a_1.sin(); a_1 = None
add = mul + sin; mul = sin = None
sin_1 = b_1.sin(); b_1 = None
add_1 = add + sin_1; add = sin_1 = None
sin_2 = c_false_branch.sin(); c_false_branch = None
add_2 = add_1 + sin_2; add_1 = sin_2 = None
return (add_2,)""",
)
@unittest.skipIf(
common_utils.TEST_WITH_ASAN,
"Times out with ASAN, see https://github.com/pytorch/pytorch/issues/110416",
)
def test_retracibility(self):
class MyLinear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.randn(20, 98)
self.bias = torch.randn(20)
def forward(self, x):
return torch.nn.functional.linear(x, self.weight, self.bias)
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(16, 33, 3)
self.linear = MyLinear()
def forward(self, x):
a, b = x
a_conv = self.conv(a)
a_linear = self.linear(a_conv)
b_conv = self.conv(b)
b_linear = self.linear(b_conv)
return (
a_linear.cos() + b_linear.sin(),
a_linear.sin() + b_linear.cos(),
)
inp_container = (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50, 100))
gm, _ = torch._dynamo.export(Foo(), inp_container, aten_graph=True)
gm2, _ = torch._dynamo.export(gm, inp_container, aten_graph=True)
inp_test = (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50, 100))
self.assertTrue(torch.allclose(gm(inp_test)[0], gm2(inp_test)[0]))
self.assertTrue(torch.allclose(gm(inp_test)[1], gm2(inp_test)[1]))
def test_retracibility_dict_container_inp_out(self):
class MyLinear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.randn(20, 98)
self.bias = torch.randn(20)
def forward(self, x):
return torch.nn.functional.linear(x, self.weight, self.bias)
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(16, 33, 3)
self.linear = MyLinear()
def forward(self, x):
a1, a2 = x["a"]
b = x["b"]
a1_conv = self.conv(a1)
a1_linear = self.linear(a1_conv)
a2_conv = self.conv(a2)
a2_linear = self.linear(a2_conv)
b_conv = self.conv(b)
b_linear = self.linear(b_conv)
return {
"a": [
a1_linear.cos() + b_linear.sin(),
a1_linear.cos() + b_linear.sin(),
],
"b": a2_linear.sin() + b_linear.cos(),
}
inp_container = {
"a": (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50, 100)),
"b": torch.randn(20, 16, 50, 100),
}
gm, _ = torch._dynamo.export(Foo(), inp_container, aten_graph=True)
gm2, _ = torch._dynamo.export(gm, inp_container, aten_graph=True)
inp_test = {
"a": (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50, 100)),
"b": torch.randn(20, 16, 50, 100),
}
self.assertTrue(torch.allclose(gm(inp_test)["a"][0], gm2(inp_test)["a"][0]))
self.assertTrue(torch.allclose(gm(inp_test)["a"][1], gm2(inp_test)["a"][1]))
self.assertTrue(torch.allclose(gm(inp_test)["b"], gm2(inp_test)["b"]))
def test_retracibility_nested_list_out(self):
class MyLinear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.randn(20, 98)
self.bias = torch.randn(20)
def forward(self, x):
return torch.nn.functional.linear(x, self.weight, self.bias)
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(16, 33, 3)
self.linear = MyLinear()
def forward(self, x):
a1, a2 = x["a"]
b = x["b"]
a1_conv = self.conv(a1)
a1_linear = self.linear(a1_conv)
a2_conv = self.conv(a2)
a2_linear = self.linear(a2_conv)
b_conv = self.conv(b)
b_linear = self.linear(b_conv)
return [
[
a1_linear.cos() + b_linear.sin(),
a1_linear.cos() + b_linear.sin(),
],
[
a2_linear.sin() + b_linear.cos(),
a2_linear.sin() + b_linear.cos(),
],
]
inp_container = {
"a": (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50, 100)),
"b": torch.randn(20, 16, 50, 100),
}
gm, _ = torch._dynamo.export(Foo(), inp_container, aten_graph=True)
gm2, _ = torch._dynamo.export(gm, inp_container, aten_graph=True)
inp_test = {
"a": (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50, 100)),
"b": torch.randn(20, 16, 50, 100),
}
self.assertTrue(torch.allclose(gm(inp_test)[0][0], gm2(inp_test)[0][0]))
self.assertTrue(torch.allclose(gm(inp_test)[0][1], gm2(inp_test)[0][1]))
self.assertTrue(torch.allclose(gm(inp_test)[1][0], gm2(inp_test)[1][0]))
self.assertTrue(torch.allclose(gm(inp_test)[1][1], gm2(inp_test)[1][1]))
def test_fx_pytree(self):
def foo(args):
flat_args, spec = torch.utils._pytree.tree_flatten(args)
flat_args_fx = torch.fx._pytree.tree_flatten_spec(args, spec)
return flat_args_fx[0] + flat_args[0]
inp_container = (torch.randn(20, 16, 50, 100), torch.randn(20, 16, 50, 100))
gm, _ = torch._dynamo.export(foo, inp_container, aten_graph=True)
self.assertTrue(torch.allclose(foo(inp_container), gm(inp_container)))
@config.patch(suppress_errors=True)
@config.patch(verbose=True)
def test_export_with_map_zero_sized_tensor_suppress_errors(self):
from functorch.experimental.control_flow import map
class Module(torch.nn.Module):
def forward(self, xs):
def body(x):
return x + 1
return map(body, xs)
mod = Module()
xs = torch.randn(0, 2)
with self.assertRaises(
torch._dynamo.exc.Unsupported,
):
torch._dynamo.export(mod, xs)
def test_param_buffer_safe_from_mutation_simple(self):
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer1 = torch.nn.Buffer(torch.zeros(5, 5))
def forward(self, x):
self.buffer1.add_(1)
return x + self.buffer1
gm, _ = torch._dynamo.export(Module(), torch.ones(5, 5), aten_graph=False)
buffers = list(gm.named_buffers())
self.assertEqual(len(buffers), 1)
name, buffer = buffers[0]
self.assertEqual(name, "L__self___buffer1")
self.assertTrue(torch.allclose(buffer, torch.zeros(5)))
def test_param_buffer_safe_from_mutation_recurse(self):
class Child(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer2 = torch.nn.Buffer(torch.zeros(5))
def forward(self, x):
return x.sum() + self.buffer2.sum()
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer1 = torch.nn.Buffer(torch.zeros(5))
self.child = Child()
def forward(self, x):
self.buffer1.add_(1)
self.child.buffer2.add_(2)
return x.sum() + self.buffer1.sum() + self.child(x)
gm, _ = torch._dynamo.export(Module(), torch.ones(5), aten_graph=False)
for _, buffer in gm.named_buffers():
self.assertTrue(torch.allclose(buffer, torch.zeros(5)))
def test_predispatch_with_higher_order(self):
def f(x):
return cond(x.shape[0] > 4, lambda x: x + 5, lambda x: x - 3, [x])
gm, _ = torch._dynamo.export(f, aten_graph=True, pre_dispatch=True)(
torch.randn(4, 4)
)
inp1 = torch.randn(4, 4)
inp2 = torch.randn(6, 4)
self.assertTrue(torch.allclose(f(inp1), gm(inp1)))
self.assertTrue(torch.allclose(f(inp2), gm(inp2)))
def test_predispatch_with_higher_order_nested(self):
def f(x):
def true_fn(x):
return cond(x.shape[0] > 6, lambda x: x + 10, lambda x: x - 10, [x])
return cond(x.shape[0] > 4, true_fn, lambda x: x - 3, [x])
gm, _ = torch._dynamo.export(f, aten_graph=True, pre_dispatch=True)(
torch.randn(4, 4)
)
inp1 = torch.randn(4, 4)
inp2 = torch.randn(6, 4)
inp3 = torch.randn(8, 4)
self.assertTrue(torch.allclose(f(inp1), gm(inp1)))
self.assertTrue(torch.allclose(f(inp2), gm(inp2)))
self.assertTrue(torch.allclose(f(inp3), gm(inp3)))
def test_predispatch_with_for_out_dtype(self):
class M(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def forward(self, x):
return out_dtype(torch.ops.aten.mm.default, torch.int32, x, self.weight)
weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
m = M(weight)
x = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
gm, _ = torch._dynamo.export(m, x, aten_graph=True, pre_dispatch=True)
self.assertTrue(torch.allclose(m(x), gm(x)))
def test_predispatch_with_for_out_dtype_nested(self):
class M(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def true_fn(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
).sum()
def false_fn(self, x):
return out_dtype(
torch.ops.aten.mul.Tensor, torch.int32, x, self.weight
).sum()
def forward(self, x):
return cond(x.sum() != 0, self.true_fn, self.false_fn, [x])
weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
m = M(weight)
x = torch.ones((5, 5), dtype=torch.int8)
gm, _ = torch._dynamo.export(m, x, aten_graph=True, pre_dispatch=True)
self.assertTrue(torch.allclose(m(x), gm(x)))
y = torch.zeros((5, 5), dtype=torch.int8)
self.assertTrue(torch.allclose(m(y), gm(y)))
self.assertExpectedInline(
gm.true_graph_0.code.strip(),
"""\
def forward(self, arg0_1, arg1_1):
out_dtype = torch.ops.higher_order.out_dtype(torch.ops.aten.mm.default, torch.int32, arg1_1, arg0_1); arg1_1 = arg0_1 = None
sum_1 = torch.ops.aten.sum.default(out_dtype); out_dtype = None
return (sum_1,)""",
)
self.assertExpectedInline(
gm.false_graph_0.code.strip(),
"""\
def forward(self, arg0_1, arg1_1):
out_dtype = torch.ops.higher_order.out_dtype(torch.ops.aten.mul.Tensor, torch.int32, arg1_1, arg0_1); arg1_1 = arg0_1 = None
sum_1 = torch.ops.aten.sum.default(out_dtype); out_dtype = None
return (sum_1,)""",
)
def test_export_nn_module_stack_patched_module(self):
def forward(self, x, y):
return x * y
class Toplevel(torch.nn.Module):
def __init__(self, m):
super().__init__()
self.m = m
def forward(self, x, y):
return self.m(x, y)
class M(torch.nn.Module):
def forward(self, x, y):
return x + y
t = Toplevel(M())
t.m.forward = forward.__get__(t.m, M)
x, y = torch.rand(3), torch.rand(3)
gm, _ = torch._dynamo.export(t, x, y)
self.assertTrue(torch.allclose(forward(None, x, y), gm(x, y)))
for node in gm.graph.nodes:
if node.op == "call_function":
self.assertIn("nn_module_stack", node.meta)
def test_preserve_fx_node_metadata(self):
class Module1(torch.nn.Module):
def forward(self, x):
return torch.sin(x)
class Module2(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mod1 = Module1()
def forward(self, x):
x = torch.cos(x)
x = self.mod1(x)
x = torch.relu(x)
return x
def fn(x):
return torch.abs(x)
mod = Module2()
inp = torch.randn(3, 3)
gm, _ = torch._dynamo.export(mod)(inp)
# replace relu with fn
gm_edit = copy.deepcopy(gm)
for nd in gm_edit.graph.nodes:
if nd.target == torch.relu:
nd.target = fn
nd.meta.clear()
break
gm_edit.recompile()
gm2, _ = torch._dynamo.export(gm_edit)(inp)
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x):
arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
l_x_ = arg0
x = torch.cos(l_x_); l_x_ = None
x_1 = torch.sin(x); x = None
x_2 = torch.relu(x_1); x_1 = None
return pytree.tree_unflatten([x_2], self._out_spec)""",
)
def _constais_op(gm, target):
for nd in gm.graph.nodes:
if nd.target == target:
return True
return False
self.assertTrue(_constais_op(gm_edit, torch.cos))
self.assertTrue(_constais_op(gm_edit, torch.sin))
self.assertTrue(not _constais_op(gm_edit, torch.relu))
self.assertExpectedInline(
gm2.code.strip(),
"""\
def forward(self, x):
arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
l_x_ = arg0
x = torch.cos(l_x_); l_x_ = None
x_1 = torch.sin(x); x = None
x_2 = torch.abs(x_1); x_1 = None
return pytree.tree_unflatten([x_2], self._out_spec)""",
)
# check for other metadata
for op in (torch.sin, torch.cos):
nd1 = next(filter(lambda nd: nd.target == op, gm.graph.nodes))
nd2 = next(filter(lambda nd: nd.target == op, gm2.graph.nodes))
self.assertTrue(
("nn_module_stack" in nd1.meta) == ("nn_module_stack" in nd2.meta)
)
if "nn_module_stack" in nd1.meta:
self.assertEqual(
nd1.meta["nn_module_stack"], nd2.meta["nn_module_stack"]
)
self.assertEqual(nd1.meta["stack_trace"], nd2.meta["stack_trace"])
def test_preserve_fx_node_metadata_recompile(self):
def fn(x):
return torch.sin(x)
gm, _ = torch._dynamo.export(fn)(torch.randn(3, 3))
do_export = torch._dynamo.export(gm)
torch.compile(fn, backend="eager")(torch.randn(3, 3))
gm1, _ = do_export(torch.randn(3, 3))
gm2, _ = do_export(torch.randn(5, 3))
self.assertExpectedInline(
gm1.code.strip(),
"""\
def forward(self, x):
arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
l_x_ = arg0
sin = torch.sin(l_x_); l_x_ = None
return pytree.tree_unflatten([sin], self._out_spec)""",
)
self.assertExpectedInline(
gm2.code.strip(),
"""\
def forward(self, x):
arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
l_x_ = arg0
sin = torch.sin(l_x_); l_x_ = None
return pytree.tree_unflatten([sin], self._out_spec)""",
)
def test_preserve_fx_node_metadata_inline(self):
def f1(x):
return torch.sin(x)
gm, _ = torch._dynamo.export(f1)(torch.randn(3, 3))
def f2(x):
x = torch.cos(x)
return gm(x)
gm2, _ = torch._dynamo.export(f2)(torch.randn(3, 3))
self.assertExpectedInline(
gm2.code.strip(),
"""\
def forward(self, x):
arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
l_x_ = arg0
x = torch.cos(l_x_); l_x_ = None
sin = torch.sin(x); x = None
return pytree.tree_unflatten([sin], self._out_spec)""",
)
def test_preserve_fx_node_metadata_graph_break(self):
def fn(x):
x = torch.sin(x)
x = torch.abs(x)
return torch.cos(x)
def bad_fn(x):
torch._dynamo.graph_break()
return x
gm, _ = torch._dynamo.export(fn)(torch.randn(3, 3))
# replace abs with graph break
gm_edit = copy.deepcopy(gm)
for nd in gm_edit.graph.nodes:
if nd.target == torch.abs:
nd.target = bad_fn
nd.meta.clear()
break
gm_edit.recompile()
expected = [
"""x = torch.sin(l_x_)""",
"""cos = torch.cos(l_stack0_)""",
]
def test_backend(gm: torch.fx.GraphModule, example_inputs):
self.assertTrue(expected)
# Normalize output for dynamic and not
for nd in gm.graph.nodes:
if "example_value" in nd.meta:
del nd.meta["example_value"]
self.assertIn(expected[0], gm.print_readable(print_output=False))
expected.pop(0)
return gm.forward
torch._dynamo.reset()
opt_gm_edit = torch.compile(gm_edit, backend=test_backend)
opt_gm_edit(torch.randn(3, 3))
def test_torch_inference_mode_ctx(self):
@torch.inference_mode()
def fn(x):
return x + 1
gm, _ = torch._dynamo.export(fn, torch.rand(2, 2))
inp = torch.randn(2, 2)
out = gm(inp)
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x):
arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
l_args_0_ = arg0
_enter_inference_mode = torch.autograd.grad_mode._enter_inference_mode(True)
add = l_args_0_ + 1; l_args_0_ = None
_exit_inference_mode = torch.autograd.grad_mode._exit_inference_mode(_enter_inference_mode); _enter_inference_mode = _exit_inference_mode = None
return pytree.tree_unflatten([add], self._out_spec)""", # NOQA: B950
)
self.assertEqual(out.requires_grad, False)
with self.assertRaisesRegex(
RuntimeError,
"Setting requires_grad=True on inference tensor outside InferenceMode is not allowed.",
):
out.requires_grad = True
@torch.inference_mode(False)
def fn_no_inference(x):
return x + 1
gm_no_inference, _ = torch._dynamo.export(fn_no_inference, torch.rand(2, 2))
self.assertExpectedInline(
gm_no_inference.code.strip(),
"""\
def forward(self, x):
arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
l_args_0_ = arg0
_enter_inference_mode = torch.autograd.grad_mode._enter_inference_mode(False)
add = l_args_0_ + 1; l_args_0_ = None
_exit_inference_mode = torch.autograd.grad_mode._exit_inference_mode(_enter_inference_mode); _enter_inference_mode = _exit_inference_mode = None
return pytree.tree_unflatten([add], self._out_spec)""", # NOQA: B950
)
inp = torch.randn(2, 2)
out = gm_no_inference(inp)
self.assertEqual(out.requires_grad, False)
out.requires_grad = True
def fn(x):
with torch.inference_mode():
return x + 1
gm, _ = torch._dynamo.export(fn)(torch.rand(2, 2))
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x):
arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
l_x_ = arg0
_enter_inference_mode = torch.autograd.grad_mode._enter_inference_mode(True)
add = l_x_ + 1; l_x_ = None
_exit_inference_mode = torch.autograd.grad_mode._exit_inference_mode(_enter_inference_mode); _enter_inference_mode = _exit_inference_mode = None
return pytree.tree_unflatten([add], self._out_spec)""", # NOQA: B950
)
inp = torch.randn(2, 2, requires_grad=True)
out = gm(inp)
self.assertEqual(out.requires_grad, False)
def test_export_masking_with_no_grad(self):
def fn(x, b, y):
x = x.clone()
x[b] = y
return x
def fn_no_grad(x, b, y):
with torch.no_grad():
return fn(x, b, y)
def fn_inference_mode(x, b, y):
with torch.inference_mode():
return fn(x, b, y)
x = torch.randn(4, requires_grad=True)
b = torch.tensor([True, False, True, False])
y = torch.randn(2, requires_grad=True)
gm, _ = torch._dynamo.export(fn_no_grad)(x, b, y)
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x, b, y):
arg0, arg1, arg2, = fx_pytree.tree_flatten_spec(([x, b, y], {}), self._in_spec)
l_x_ = arg0
l_b_ = arg1
l_y_ = arg2
_set_grad_enabled = torch._C._set_grad_enabled(False); _set_grad_enabled = None
x = l_x_.clone(); l_x_ = None
x[l_b_] = l_y_; setitem = x; l_b_ = l_y_ = setitem = None
_set_grad_enabled_1 = torch._C._set_grad_enabled(True); _set_grad_enabled_1 = None
return pytree.tree_unflatten([x], self._out_spec)""",
)
gm, _ = torch._dynamo.export(fn_inference_mode)(x, b, y)
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x, b, y):
arg0, arg1, arg2, = fx_pytree.tree_flatten_spec(([x, b, y], {}), self._in_spec)
l_x_ = arg0
l_b_ = arg1
l_y_ = arg2
_enter_inference_mode = torch.autograd.grad_mode._enter_inference_mode(True)
x = l_x_.clone(); l_x_ = None
x[l_b_] = l_y_; setitem = x; l_b_ = l_y_ = setitem = None
_exit_inference_mode = torch.autograd.grad_mode._exit_inference_mode(_enter_inference_mode); _enter_inference_mode = _exit_inference_mode = None
return pytree.tree_unflatten([x], self._out_spec)""", # NOQA: B950
)
gm, _ = torch._dynamo.export(fn)(x, b, y)
def test_dynamo_list_index(self):
def fn(x, in_list):
return x + in_list.index(2)
inputs = (torch.ones(2, 2), [1, 2])
graph, _ = torch._dynamo.export(fn)(*inputs)
out = graph(*inputs)
self.assertEqual(out, torch.ones(2, 2) + 1)
def test_dynamo_enum_in_tuple(self):
class IntEnum(int, Enum):
X = 0
def fn(tensor):
return tensor[..., IntEnum.X]
tensor = torch.rand((5, 5))
graph, _ = torch._dynamo.export(fn)(tensor)
out = graph(tensor)
self.assertEqual(out, tensor[:, 0])
def test_subclass_parameters(self):
from torch.testing._internal.two_tensor import TwoTensor
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.p1 = torch.nn.Parameter(torch.ones(3, 4))
self.p2 = torch.nn.Parameter(
TwoTensor(torch.zeros(3, 4), torch.zeros(3, 4))
)
def forward(self, x):
return x + 2 * self.p1 + self.p2
m = M()
ref_x = torch.randn(3, 4)
ref_out = m(ref_x)
from torch._functorch._aot_autograd.subclass_parametrization import (
unwrap_tensor_subclass_parameters,
)
unwrap_tensor_subclass_parameters(m)
ref_x2 = ref_x.detach().clone()
ref_out2 = m(ref_x2)
self.assertEqual(ref_out2, ref_out)
x = ref_x.detach().clone()
graph, _ = torch._dynamo.export(m)(x)
out = graph(x)
self.assertEqual(ref_out, out)
def test_strict_fake_tensor_prop_real_tensors(self):
class Foo(torch.nn.Module):
def forward(self, x):
return bool(x.eq(0.1).any().item())
model = Foo()
inputs = (torch.randn(64),)
ref = model(*inputs)
with torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True):
ep = torch.export.export(model, inputs, strict=True)
res = ep.module()(*inputs)
self.assertEqual(ref, res)
| GraphModule |
python | RaRe-Technologies__gensim | gensim/examples/dmlcz/sources.py | {
"start": 8154,
"end": 12864
} | class ____(ArticleSource):
"""
Article source for articles in arxmliv format:
1) articles = directories starting with '#'
2) content is stored in tex.xml
3) metadata in special tags within tex.xml
Article URI is currently (a part of) the article's path on filesystem.
See the ArticleSource class for general info on sources.
"""
class ArxmlivContentHandler(xml.sax.handler.ContentHandler):
def __init__(self):
self.path = [''] # help structure for sax event parsing
self.tokens = [] # will contain tokens once parsing is finished
def startElement(self, name, attr):
# for math tokens, we only care about Math elements directly below <p>
if name == 'Math' and self.path[-1] == 'p' and attr.get('mode', '') == 'inline':
tex = attr.get('tex', '')
if tex and not tex.isdigit():
self.tokens.append('$%s$' % tex.encode('utf8'))
self.path.append(name)
def endElement(self, name):
self.path.pop()
def characters(self, text):
# for text, we only care about tokens directly within the <p> tag
if self.path[-1] == 'p':
tokens = [
token.encode('utf8') for token in utils.tokenize(text, errors='ignore') if not token.isdigit()
]
self.tokens.extend(tokens)
# endclass ArxmlivHandler
class ArxmlivErrorHandler(xml.sax.handler.ErrorHandler):
# Python2.5 implementation of xml.sax is broken -- character streams and
# byte encodings of InputSource are ignored, bad things sometimes happen
# in buffering of multi-byte files (such as utf8), characters get cut in
# the middle, resulting in invalid tokens...
# This is not really a problem with arxmliv xml files themselves, so ignore
# these errors silently.
def error(self, exception):
pass
warning = fatalError = error
# endclass ArxmlivErrorHandler
def __init__(self, sourceId, baseDir):
self.sourceId = sourceId
self.baseDir = os.path.normpath(baseDir)
def __str__(self):
return self.sourceId
def idFromDir(self, path):
assert len(path) > len(self.baseDir)
intId = path[1 + path.rfind('#'):]
pathId = path[1 + len(self.baseDir):]
return (intId, pathId)
def isArticle(self, path):
# in order to be valid, the article directory must start with '#'
if not os.path.basename(path).startswith('#'):
return False
# and contain the tex.xml file
if not os.path.exists(os.path.join(path, 'tex.xml')):
logger.warning('missing tex.xml in %s', path)
return False
return True
def findArticles(self):
dirTotal = artAccepted = 0
logger.info("looking for '%s' articles inside %s", self.sourceId, self.baseDir)
for root, dirs, files in os.walk(self.baseDir):
dirTotal += 1
root = os.path.normpath(root)
if self.isArticle(root):
artAccepted += 1
yield self.idFromDir(root)
logger.info('%i directories processed, found %i articles', dirTotal, artAccepted)
def getContent(self, uri):
"""
Return article content as a single large string.
"""
intId, pathId = uri
filename = os.path.join(self.baseDir, pathId, 'tex.xml')
return open(filename).read()
def getMeta(self, uri):
"""
Return article metadata as an attribute->value dictionary.
"""
# intId, pathId = uri
# filename = os.path.join(self.baseDir, pathId, 'tex.xml')
return {'language': 'eng'} # TODO maybe parse out some meta; but currently not needed for anything...
def tokenize(self, content):
"""
Parse tokens out of xml. There are two types of token: normal text and
mathematics. Both are returned interspersed in a single list, in the same
order as they appeared in the content.
The math tokens will be returned in the form $tex_expression$, ie. with
a dollar sign prefix and suffix.
"""
handler = ArxmlivSource.ArxmlivContentHandler()
xml.sax.parseString(content, handler, ArxmlivSource.ArxmlivErrorHandler())
return handler.tokens
def normalizeWord(self, word):
if word[0] == '$': # ignore math tokens
return word
wordU = unicode(word, 'utf8')
return wordU.lower().encode('utf8') # lowercase and then convert back to bytestring
# endclass ArxmlivSource
| ArxmlivSource |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.