language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
PrefectHQ__prefect
src/prefect/flows.py
{ "start": 74016, "end": 86353 }
class ____: @overload def __call__(self, __fn: Callable[P, R]) -> Flow[P, R]: ... @overload def __call__( self, __fn: None = None, *, name: Optional[str] = None, version: Optional[str] = None, flow_run_name: Optional[Union[Callable[[], str], str]] = None, retries: Optional[int] = None, retry_delay_seconds: Optional[Union[int, float]] = None, task_runner: None = None, description: Optional[str] = None, timeout_seconds: Union[int, float, None] = None, validate_parameters: bool = True, persist_result: Optional[bool] = None, result_storage: Optional[ResultStorage] = None, result_serializer: Optional[ResultSerializer] = None, cache_result_in_memory: bool = True, log_prints: Optional[bool] = None, on_completion: Optional[list[FlowStateHook[..., Any]]] = None, on_failure: Optional[list[FlowStateHook[..., Any]]] = None, on_cancellation: Optional[list[FlowStateHook[..., Any]]] = None, on_crashed: Optional[list[FlowStateHook[..., Any]]] = None, on_running: Optional[list[FlowStateHook[..., Any]]] = None, ) -> Callable[[Callable[P, R]], Flow[P, R]]: ... 
@overload def __call__( self, __fn: None = None, *, name: Optional[str] = None, version: Optional[str] = None, flow_run_name: Optional[Union[Callable[[], str], str]] = None, retries: Optional[int] = None, retry_delay_seconds: Optional[Union[int, float]] = None, task_runner: Optional[TaskRunner[PrefectFuture[Any]]] = None, description: Optional[str] = None, timeout_seconds: Union[int, float, None] = None, validate_parameters: bool = True, persist_result: Optional[bool] = None, result_storage: Optional[ResultStorage] = None, result_serializer: Optional[ResultSerializer] = None, cache_result_in_memory: bool = True, log_prints: Optional[bool] = None, on_completion: Optional[list[FlowStateHook[..., Any]]] = None, on_failure: Optional[list[FlowStateHook[..., Any]]] = None, on_cancellation: Optional[list[FlowStateHook[..., Any]]] = None, on_crashed: Optional[list[FlowStateHook[..., Any]]] = None, on_running: Optional[list[FlowStateHook[..., Any]]] = None, ) -> Callable[[Callable[P, R]], Flow[P, R]]: ... 
def __call__( self, __fn: Optional[Callable[P, R]] = None, *, name: Optional[str] = None, version: Optional[str] = None, flow_run_name: Optional[Union[Callable[[], str], str]] = None, retries: Optional[int] = None, retry_delay_seconds: Union[int, float, None] = None, task_runner: Optional[TaskRunner[PrefectFuture[Any]]] = None, description: Optional[str] = None, timeout_seconds: Union[int, float, None] = None, validate_parameters: bool = True, persist_result: Optional[bool] = None, result_storage: Optional[ResultStorage] = None, result_serializer: Optional[ResultSerializer] = None, cache_result_in_memory: bool = True, log_prints: Optional[bool] = None, on_completion: Optional[list[FlowStateHook[..., Any]]] = None, on_failure: Optional[list[FlowStateHook[..., Any]]] = None, on_cancellation: Optional[list[FlowStateHook[..., Any]]] = None, on_crashed: Optional[list[FlowStateHook[..., Any]]] = None, on_running: Optional[list[FlowStateHook[..., Any]]] = None, ) -> Union[Flow[P, R], Callable[[Callable[P, R]], Flow[P, R]]]: """ Decorator to designate a function as a Prefect workflow. This decorator may be used for asynchronous or synchronous functions. Flow parameters must be serializable by Pydantic. Args: name: An optional name for the flow; if not provided, the name will be inferred from the given function. version: An optional version string for the flow; if not provided, we will attempt to create a version string as a hash of the file containing the wrapped function; if the file cannot be located, the version will be null. flow_run_name: An optional name to distinguish runs of this flow; this name can be provided as a string template with the flow's parameters as variables, or a function that returns a string. retries: An optional number of times to retry on flow run failure. retry_delay_seconds: An optional number of seconds to wait before retrying the flow after failure. This is only applicable if `retries` is nonzero. 
task_runner: An optional task runner to use for task execution within the flow; if not provided, a `ConcurrentTaskRunner` will be instantiated. description: An optional string description for the flow; if not provided, the description will be pulled from the docstring for the decorated function. timeout_seconds: An optional number of seconds indicating a maximum runtime for the flow. If the flow exceeds this runtime, it will be marked as failed. Flow execution may continue until the next task is called. validate_parameters: By default, parameters passed to flows are validated by Pydantic. This will check that input values conform to the annotated types on the function. Where possible, values will be coerced into the correct type; for example, if a parameter is defined as `x: int` and "5" is passed, it will be resolved to `5`. If set to `False`, no validation will be performed on flow parameters. persist_result: An optional toggle indicating whether the result of this flow should be persisted to result storage. Defaults to `None`, which indicates that Prefect should choose whether the result should be persisted depending on the features being used. result_storage: An optional block to use to persist the result of this flow. This value will be used as the default for any tasks in this flow. If not provided, the local file system will be used unless called as a subflow, at which point the default will be loaded from the parent flow. result_serializer: An optional serializer to use to serialize the result of this flow for persistence. This value will be used as the default for any tasks in this flow. If not provided, the value of `PREFECT_RESULTS_DEFAULT_SERIALIZER` will be used unless called as a subflow, at which point the default will be loaded from the parent flow. cache_result_in_memory: An optional toggle indicating whether the cached result of a running the flow should be stored in memory. Defaults to `True`. 
log_prints: If set, `print` statements in the flow will be redirected to the Prefect logger for the flow run. Defaults to `None`, which indicates that the value from the parent flow should be used. If this is a parent flow, the default is pulled from the `PREFECT_LOGGING_LOG_PRINTS` setting. on_completion: An optional list of functions to call when the flow run is completed. Each function should accept three arguments: the flow, the flow run, and the final state of the flow run. on_failure: An optional list of functions to call when the flow run fails. Each function should accept three arguments: the flow, the flow run, and the final state of the flow run. on_cancellation: An optional list of functions to call when the flow run is cancelled. These functions will be passed the flow, flow run, and final state. on_crashed: An optional list of functions to call when the flow run crashes. Each function should accept three arguments: the flow, the flow run, and the final state of the flow run. on_running: An optional list of functions to call when the flow run is started. Each function should accept three arguments: the flow, the flow run, and the current state Returns: A callable `Flow` object which, when called, will run the flow and return its final state. 
Examples: Define a simple flow ```python from prefect import flow @flow def add(x, y): return x + y ``` Define an async flow ```python @flow async def add(x, y): return x + y ``` Define a flow with a version and description ```python @flow(version="first-flow", description="This flow is empty!") def my_flow(): pass ``` Define a flow with a custom name ```python @flow(name="The Ultimate Flow") def my_flow(): pass ``` Define a flow that submits its tasks to dask ```python from prefect_dask.task_runners import DaskTaskRunner @flow(task_runner=DaskTaskRunner) def my_flow(): pass ``` """ if __fn: return Flow( fn=__fn, name=name, version=version, flow_run_name=flow_run_name, task_runner=task_runner, description=description, timeout_seconds=timeout_seconds, validate_parameters=validate_parameters, retries=retries, retry_delay_seconds=retry_delay_seconds, persist_result=persist_result, result_storage=result_storage, result_serializer=result_serializer, cache_result_in_memory=cache_result_in_memory, log_prints=log_prints, on_completion=on_completion, on_failure=on_failure, on_cancellation=on_cancellation, on_crashed=on_crashed, on_running=on_running, ) else: return cast( Callable[[Callable[P, R]], Flow[P, R]], partial( flow, name=name, version=version, flow_run_name=flow_run_name, task_runner=task_runner, description=description, timeout_seconds=timeout_seconds, validate_parameters=validate_parameters, retries=retries, retry_delay_seconds=retry_delay_seconds, persist_result=persist_result, result_storage=result_storage, result_serializer=result_serializer, cache_result_in_memory=cache_result_in_memory, log_prints=log_prints, on_completion=on_completion, on_failure=on_failure, on_cancellation=on_cancellation, on_crashed=on_crashed, on_running=on_running, ), ) if not TYPE_CHECKING: # Add from_source so it is available on the flow function we all know and love from_source = staticmethod(Flow.from_source) else: # Mypy loses the plot somewhere along the line, so the annotation 
is reconstructed # manually here. @staticmethod def from_source( source: Union[str, Path, "RunnerStorage", ReadableDeploymentStorage], entrypoint: str, ) -> Union["Flow[..., Any]", Coroutine[Any, Any, "Flow[..., Any]"]]: ... flow: FlowDecorator = FlowDecorator()
FlowDecorator
python
dagster-io__dagster
python_modules/dagster/dagster/_core/launcher/base.py
{ "start": 483, "end": 797 }
class ____(NamedTuple): """Context available within a run launcher's launch_run call.""" dagster_run: DagsterRun workspace: Optional["BaseWorkspaceRequestContext"] @property def job_code_origin(self) -> Optional[JobPythonOrigin]: return self.dagster_run.job_code_origin
LaunchRunContext
python
pandas-dev__pandas
asv_bench/benchmarks/strftime.py
{ "start": 1669, "end": 3116 }
class ____: timeout = 1500 params = ([1000, 10000], ["D", "h"]) param_names = ["nobs", "freq"] def setup(self, nobs, freq): self.data = pd.DataFrame( { "p": pd.period_range(start="2000-01-01", periods=nobs, freq=freq), "r": [np.random.uniform()] * nobs, } ) self.data["i"] = self.data["p"] self.data.set_index("i", inplace=True) if freq == "D": self.default_fmt = "%Y-%m-%d" elif freq == "h": self.default_fmt = "%Y-%m-%d %H:00" def time_frame_period_to_str(self, nobs, freq): self.data["p"].astype(str) def time_frame_period_formatting_default(self, nobs, freq): self.data["p"].dt.strftime(date_format=None) def time_frame_period_formatting_default_explicit(self, nobs, freq): self.data["p"].dt.strftime(date_format=self.default_fmt) def time_frame_period_formatting_custom(self, nobs, freq): self.data["p"].dt.strftime(date_format="%Y-%m-%d --- %H:%M:%S") def time_frame_period_formatting_iso8601_strftime_Z(self, nobs, freq): self.data["p"].dt.strftime(date_format="%Y-%m-%dT%H:%M:%SZ") def time_frame_period_formatting_iso8601_strftime_offset(self, nobs, freq): """Not optimized yet as %z is not supported by `convert_strftime_format`""" self.data["p"].dt.strftime(date_format="%Y-%m-%dT%H:%M:%S%z")
PeriodStrftime
python
doocs__leetcode
lcof2/剑指 Offer II 055. 二叉搜索树迭代器/Solution.py
{ "start": 192, "end": 800 }
class ____: def __init__(self, root: TreeNode): def inorder(root): if root: inorder(root.left) self.vals.append(root.val) inorder(root.right) self.cur = 0 self.vals = [] inorder(root) def next(self) -> int: res = self.vals[self.cur] self.cur += 1 return res def hasNext(self) -> bool: return self.cur < len(self.vals) # Your BSTIterator object will be instantiated and called as such: # obj = BSTIterator(root) # param_1 = obj.next() # param_2 = obj.hasNext()
BSTIterator
python
crytic__slither
slither/solc_parsing/declarations/function.py
{ "start": 2148, "end": 74206 }
class ____(CallerContextExpression): # elems = [(type, name)] def __init__( self, function: Function, function_data: Dict, contract_parser: Optional["ContractSolc"], slither_parser: "SlitherCompilationUnitSolc", ) -> None: self._slither_parser: "SlitherCompilationUnitSolc" = slither_parser self._contract_parser = contract_parser self._function = function # Only present if compact AST if self.is_compact_ast: self._function.name = function_data["name"] else: self._function.name = function_data["attributes"][self.get_key()] if "id" in function_data: self._function.id = function_data["id"] self._functionNotParsed = function_data self._returnsNotParsed: List[dict] = [] self._params_was_analyzed = False self._content_was_analyzed = False self._counter_scope_local_variables = 0 # variable renamed will map the solc id # to the variable. It only works for compact format # Later if an expression provides the referencedDeclaration attr # we can retrieve the variable # It only matters if two variables have the same name in the function # which is only possible with solc > 0.5 self._variables_renamed: Dict[ int, Union[LocalVariableSolc, LocalVariableInitFromTupleSolc] ] = {} self._analyze_type() self._node_to_nodesolc: Dict[Node, NodeSolc] = {} self._node_to_yulobject: Dict[Node, YulBlock] = {} self._local_variables_parser: List[ Union[LocalVariableSolc, LocalVariableInitFromTupleSolc] ] = [] if "documentation" in function_data: function.has_documentation = True @property def underlying_function(self) -> Function: return self._function @property def contract_parser(self) -> Optional["ContractSolc"]: return self._contract_parser @property def slither_parser(self) -> "SlitherCompilationUnitSolc": return self._slither_parser @property def compilation_unit(self) -> "SlitherCompilationUnit": return self._function.compilation_unit ################################################################################### 
################################################################################### # region AST format ################################################################################### ################################################################################### def get_key(self) -> str: return self._slither_parser.get_key() def get_children(self, key: str) -> str: if self.is_compact_ast: return key return "children" @property def is_compact_ast(self): return self._slither_parser.is_compact_ast # endregion ################################################################################### ################################################################################### # region Variables ################################################################################### ################################################################################### @property def variables_renamed( self, ) -> Dict[int, Union[LocalVariableSolc, LocalVariableInitFromTupleSolc]]: return self._variables_renamed def _add_local_variable( self, local_var_parser: Union[LocalVariableSolc, LocalVariableInitFromTupleSolc] ) -> None: # If two local variables have the same name # We add a suffix to the new variable # This is done to prevent collision during SSA translation # Use of while in case of collision # In the worst case, the name will be really long if local_var_parser.underlying_variable.name: known_variables = [v.name for v in self._function.variables] while local_var_parser.underlying_variable.name in known_variables: local_var_parser.underlying_variable.name += ( f"_scope_{self._counter_scope_local_variables}" ) self._counter_scope_local_variables += 1 known_variables = [v.name for v in self._function.variables] if local_var_parser.reference_id is not None: self._variables_renamed[local_var_parser.reference_id] = local_var_parser self._function.variables_as_dict[ local_var_parser.underlying_variable.name ] = local_var_parser.underlying_variable 
self._local_variables_parser.append(local_var_parser) # endregion ################################################################################### ################################################################################### # region Analyses ################################################################################### ################################################################################### @property def function_not_parsed(self) -> Dict: return self._functionNotParsed def _analyze_type(self) -> None: """ Analyz the type of the function Myst be called in the constructor as the name might change according to the function's type For example both the fallback and the receiver will have an empty name :return: """ if self.is_compact_ast: attributes = self._functionNotParsed else: attributes = self._functionNotParsed["attributes"] if self._function.name == "": self._function.function_type = FunctionType.FALLBACK # 0.6.x introduced the receiver function # It has also an empty name, so we need to check the kind attribute if "kind" in attributes: if attributes["kind"] == "receive": self._function.function_type = FunctionType.RECEIVE else: self._function.function_type = FunctionType.NORMAL if isinstance(self._function, FunctionContract): if self._function.name == self._function.contract_declarer.name: self._function.function_type = FunctionType.CONSTRUCTOR def _analyze_attributes(self) -> None: if self.is_compact_ast: attributes = self._functionNotParsed else: attributes = self._functionNotParsed["attributes"] if "stateMutability" in attributes: if attributes["stateMutability"] == "payable": self._function.payable = True elif attributes["stateMutability"] == "pure": self._function.pure = True self._function.view = True elif attributes["stateMutability"] == "view": self._function.view = True if "constant" in attributes: self._function.view = attributes["constant"] if "isConstructor" in attributes and attributes["isConstructor"]: 
self._function.function_type = FunctionType.CONSTRUCTOR if "kind" in attributes: if attributes["kind"] == "constructor": self._function.function_type = FunctionType.CONSTRUCTOR if "visibility" in attributes: self._function.visibility = attributes["visibility"] # old solc elif "public" in attributes: if attributes["public"]: self._function.visibility = "public" else: self._function.visibility = "private" else: self._function.visibility = "public" if "payable" in attributes: self._function.payable = attributes["payable"] if "baseFunctions" in attributes: overrides_ids = attributes["baseFunctions"] if len(overrides_ids) > 0: for f_id in overrides_ids: funcs = self.slither_parser.functions_by_id[f_id] for f in funcs: # Do not consider leaf contracts as overrides. # B is A { function a() override {} } and C is A { function a() override {} } override A.a(), not each other. if ( f.contract == self._function.contract or f.contract in self._function.contract.inheritance ): self._function.overrides.append(f) f.overridden_by.append(self._function) # Attaches reference to override specifier e.g. 
X is referenced by `function a() override(X)` if "overrides" in attributes and isinstance(attributes["overrides"], dict): for override in attributes["overrides"].get("overrides", []): refId = override["referencedDeclaration"] overridden_contract = self.slither_parser.contracts_by_id.get(refId, None) if overridden_contract: overridden_contract.add_reference_from_raw_source( override["src"], self.compilation_unit ) if "virtual" in attributes: self._function.is_virtual = attributes["virtual"] def analyze_params(self) -> None: # Can be re-analyzed due to inheritance if self._params_was_analyzed: return self._params_was_analyzed = True self._analyze_attributes() if self.is_compact_ast: params = self._functionNotParsed["parameters"] returns = self._functionNotParsed["returnParameters"] else: children = self._functionNotParsed[self.get_children("children")] # It uses to be # params = children[0] # returns = children[1] # But from Solidity 0.6.3 to 0.6.10 (included) # Comment above a function might be added in the children child_iter = iter( [child for child in children if child[self.get_key()] == "ParameterList"] ) params = next(child_iter) returns = next(child_iter) if params: self._parse_params(params) if returns: self._parse_returns(returns) def analyze_content(self) -> None: if self._content_was_analyzed: return self._content_was_analyzed = True if self.is_compact_ast: body = self._functionNotParsed.get("body", None) return_params = self._functionNotParsed.get("returnParameters", None) if body and body[self.get_key()] == "Block": self._function.is_implemented = True self._parse_cfg(body) for modifier in self._functionNotParsed["modifiers"]: self._parse_modifier(modifier) else: children = self._functionNotParsed[self.get_children("children")] return_params = children[1] self._function.is_implemented = False for child in children[2:]: if child[self.get_key()] == "Block": self._function.is_implemented = True self._parse_cfg(child) # Parse modifier after parsing all the 
block # In the case a local variable is used in the modifier for child in children[2:]: if child[self.get_key()] == "ModifierInvocation": self._parse_modifier(child) for local_var_parser in self._local_variables_parser: local_var_parser.analyze(self) for node_parser in self._node_to_nodesolc.values(): node_parser.analyze_expressions(self) for yul_parser in self._node_to_yulobject.values(): yul_parser.analyze_expressions() self._rewrite_ternary_as_if_else() self._remove_alone_endif() if return_params: self._fix_implicit_return(return_params) if self._function.entry_point: self._update_reachability(self._function.entry_point) # endregion ################################################################################### ################################################################################### # region Nodes ################################################################################### ################################################################################### def _new_node( self, node_type: NodeType, src: Union[str, Source], scope: Union[Scope, "Function"] ) -> NodeSolc: node = self._function.new_node(node_type, src, scope) node_parser = NodeSolc(node) self._node_to_nodesolc[node] = node_parser return node_parser def _new_yul_block( self, src: Union[str, Dict], father_scope: Union[Scope, Function] ) -> YulBlock: scope = Scope(False, True, father_scope) node = self._function.new_node(NodeType.ASSEMBLY, src, scope) contract = None if isinstance(self._function, FunctionContract): contract = self._function.contract yul_object = YulBlock( contract, node, [self._function.name, f"asm_{len(self._node_to_yulobject)}"], scope, ) self._node_to_yulobject[node] = yul_object return yul_object # endregion ################################################################################### ################################################################################### # region Parsing function 
################################################################################### ################################################################################### def _parse_if(self, if_statement: Dict, node: NodeSolc, scope: Scope) -> NodeSolc: # IfStatement = 'if' '(' Expression ')' Statement ( 'else' Statement )? falseStatement = None if self.is_compact_ast: condition = if_statement["condition"] # Note: check if the expression could be directly # parsed here condition_node = self._new_node(NodeType.IF, condition["src"], scope) condition_node.add_unparsed_expression(condition) link_underlying_nodes(node, condition_node) true_scope = Scope(scope.is_checked, False, scope) trueStatement = self._parse_statement( if_statement["trueBody"], condition_node, true_scope ) if "falseBody" in if_statement and if_statement["falseBody"]: false_scope = Scope(scope.is_checked, False, scope) falseStatement = self._parse_statement( if_statement["falseBody"], condition_node, false_scope ) else: children = if_statement[self.get_children("children")] condition = children[0] # Note: check if the expression could be directly # parsed here condition_node = self._new_node(NodeType.IF, condition["src"], scope) condition_node.add_unparsed_expression(condition) link_underlying_nodes(node, condition_node) true_scope = Scope(scope.is_checked, False, scope) trueStatement = self._parse_statement(children[1], condition_node, true_scope) if len(children) == 3: false_scope = Scope(scope.is_checked, False, scope) falseStatement = self._parse_statement(children[2], condition_node, false_scope) endIf_node = self._new_node(NodeType.ENDIF, if_statement["src"], scope) link_underlying_nodes(trueStatement, endIf_node) if falseStatement: link_underlying_nodes(falseStatement, endIf_node) else: link_underlying_nodes(condition_node, endIf_node) return endIf_node def _parse_while(self, whilte_statement: Dict, node: NodeSolc, scope: Scope) -> NodeSolc: # WhileStatement = 'while' '(' Expression ')' Statement 
node_startWhile = self._new_node(NodeType.STARTLOOP, whilte_statement["src"], scope) body_scope = Scope(scope.is_checked, False, scope) if self.is_compact_ast: node_condition = self._new_node( NodeType.IFLOOP, whilte_statement["condition"]["src"], scope ) node_condition.add_unparsed_expression(whilte_statement["condition"]) statement = self._parse_statement(whilte_statement["body"], node_condition, body_scope) else: children = whilte_statement[self.get_children("children")] expression = children[0] node_condition = self._new_node(NodeType.IFLOOP, expression["src"], scope) node_condition.add_unparsed_expression(expression) statement = self._parse_statement(children[1], node_condition, body_scope) node_endWhile = self._new_node(NodeType.ENDLOOP, whilte_statement["src"], scope) link_underlying_nodes(node, node_startWhile) link_underlying_nodes(node_startWhile, node_condition) link_underlying_nodes(statement, node_condition) link_underlying_nodes(node_condition, node_endWhile) return node_endWhile def _parse_for_compact_ast( self, statement: Dict ) -> Tuple[Optional[Dict], Optional[Dict], Optional[Dict], Dict]: body = statement["body"] init_expression = statement.get("initializationExpression", None) condition = statement.get("condition", None) loop_expression = statement.get("loopExpression", None) return init_expression, condition, loop_expression, body def _parse_for_legacy_ast( self, statement: Dict ) -> Tuple[Optional[Dict], Optional[Dict], Optional[Dict], Dict]: # if we're using an old version of solc (anything below and including 0.4.11) or if the user # explicitly enabled compact ast, we might need to make some best-effort guesses children = statement[self.get_children("children")] # there should always be at least one, and never more than 4, children assert 1 <= len(children) <= 4 # the last element of the children array must be the body, since it's mandatory # however, it might be a single expression body = children[-1] if len(children) == 4: # handle the 
first trivial case - if there are four children we know exactly what they are pre, cond, post = children[0], children[1], children[2] elif len(children) == 1: # handle the second trivial case - if there is only one child we know there are no expressions pre, cond, post = None, None, None else: attributes = statement.get("attributes", None) def has_hint(key): return key in attributes and not attributes[key] if attributes and any( map( has_hint, ["condition", "initializationExpression", "loopExpression"], ) ): # if we have attribute hints, rely on those if len(children) == 2: # we're missing two expressions, find the one we have if not has_hint("initializationExpression"): pre, cond, post = children[0], None, None elif not has_hint("condition"): pre, cond, post = None, children[0], None else: # if not has_hint('loopExpression'): pre, cond, post = None, None, children[0] else: # len(children) == 3 # we're missing one expression, figure out what it is if has_hint("initializationExpression"): pre, cond, post = None, children[0], children[1] elif has_hint("condition"): pre, cond, post = children[0], None, children[1] else: # if has_hint('loopExpression'): pre, cond, post = children[0], children[1], None else: # we don't have attribute hints, and it's impossible to be 100% accurate here # let's just try our best first_type = children[0][self.get_key()] second_type = children[1][self.get_key()] # VariableDefinitionStatement is used by solc 0.4.0-0.4.6 # it's changed in 0.4.7 to VariableDeclarationStatement if first_type in ["VariableDefinitionStatement", "VariableDeclarationStatement"]: # only the pre statement can be a variable declaration if len(children) == 2: # only one child apart from body, it must be pre pre, cond, post = children[0], None, None else: # more than one child, figure out which one is the cond if second_type == "ExpressionStatement": # only the post can be an expression statement pre, cond, post = children[0], None, children[1] else: # similarly, the 
post cannot be anything other than an expression statement pre, cond, post = children[0], children[1], None elif first_type == "ExpressionStatement": # the first element can either be pre or post if len(children) == 2: # this is entirely ambiguous, so apply a very dumb heuristic: # if the statement is closer to the start of the body, it's probably the post # otherwise, it's probably the pre # this will work in all cases where the formatting isn't completely borked node_len = int(children[0]["src"].split(":")[1]) node_start = int(children[0]["src"].split(":")[0]) node_end = node_start + node_len for_start = int(statement["src"].split(":")[0]) + 3 # trim off the 'for' body_start = int(body["src"].split(":")[0]) dist_start = node_start - for_start dist_end = body_start - node_end if dist_start > dist_end: pre, cond, post = None, None, children[0] else: pre, cond, post = children[0], None, None else: # more than one child, we must be the pre pre, cond, post = children[0], children[1], None else: # the first element must be the cond if len(children) == 2: pre, cond, post = None, children[0], None else: pre, cond, post = None, children[0], children[1] return pre, cond, post, body def _parse_for(self, statement: Dict, node: NodeSolc, scope: Scope) -> NodeSolc: # ForStatement = 'for' '(' (SimpleStatement)? ';' (Expression)? ';' (ExpressionStatement)? 
')' Statement if self.is_compact_ast: pre, cond, post, body = self._parse_for_compact_ast(statement) else: pre, cond, post, body = self._parse_for_legacy_ast(statement) node_startLoop = self._new_node(NodeType.STARTLOOP, statement["src"], scope) node_endLoop = self._new_node(NodeType.ENDLOOP, statement["src"], scope) last_scope = scope if pre: pre_scope = Scope(scope.is_checked, False, last_scope) last_scope = pre_scope node_init_expression = self._parse_statement(pre, node, pre_scope) link_underlying_nodes(node_init_expression, node_startLoop) else: link_underlying_nodes(node, node_startLoop) if cond: cond_scope = Scope(scope.is_checked, False, last_scope) last_scope = cond_scope node_condition = self._new_node(NodeType.IFLOOP, cond["src"], cond_scope) node_condition.add_unparsed_expression(cond) link_underlying_nodes(node_startLoop, node_condition) node_beforeBody = node_condition else: node_condition = None node_beforeBody = node_startLoop body_scope = Scope(scope.is_checked, False, last_scope) last_scope = body_scope node_body = self._parse_statement(body, node_beforeBody, body_scope) if post: node_loopexpression = self._parse_statement(post, node_body, last_scope) link_underlying_nodes(node_loopexpression, node_beforeBody) else: # node_loopexpression = None link_underlying_nodes(node_body, node_beforeBody) if node_condition: link_underlying_nodes(node_condition, node_endLoop) else: link_underlying_nodes( node_startLoop, node_endLoop ) # this is an infinite loop but we can't break our cfg return node_endLoop def _parse_dowhile(self, do_while_statement: Dict, node: NodeSolc, scope: Scope) -> NodeSolc: node_startDoWhile = self._new_node(NodeType.STARTLOOP, do_while_statement["src"], scope) condition_scope = Scope(scope.is_checked, False, scope) if self.is_compact_ast: node_condition = self._new_node( NodeType.IFLOOP, do_while_statement["condition"]["src"], condition_scope ) node_condition.add_unparsed_expression(do_while_statement["condition"]) statement = 
self._parse_statement( do_while_statement["body"], node_condition, condition_scope ) else: children = do_while_statement[self.get_children("children")] # same order in the AST as while expression = children[0] node_condition = self._new_node(NodeType.IFLOOP, expression["src"], condition_scope) node_condition.add_unparsed_expression(expression) statement = self._parse_statement(children[1], node_condition, condition_scope) body_scope = Scope(scope.is_checked, False, condition_scope) node_endDoWhile = self._new_node(NodeType.ENDLOOP, do_while_statement["src"], body_scope) link_underlying_nodes(node, node_startDoWhile) # empty block, loop from the start to the condition if not node_condition.underlying_node.sons: link_underlying_nodes(node_startDoWhile, node_condition) else: link_nodes( node_startDoWhile.underlying_node, node_condition.underlying_node.sons[0], ) link_underlying_nodes(statement, node_condition) link_underlying_nodes(node_condition, node_endDoWhile) return node_endDoWhile def _construct_try_expression(self, externalCall: Dict, parameters_list: Dict) -> Dict: # if the parameters are more than 1 we make the leftHandSide of the Assignment node # a TupleExpression otherwise an Identifier # case when there isn't returns(...) # e.g. 
external call that doesn't have any return variable if not parameters_list: return externalCall ret: Dict = {"nodeType": "Assignment", "operator": "=", "src": parameters_list["src"]} parameters = parameters_list.get("parameters", None) # if the name is "" it means the return variable is not used if len(parameters) == 1: if parameters[0]["name"] != "": self._add_param(parameters[0]) ret["typeDescriptions"] = { "typeString": parameters[0]["typeName"]["typeDescriptions"]["typeString"] } leftHandSide = { "name": parameters[0]["name"], "nodeType": "Identifier", "src": parameters[0]["src"], "referencedDeclaration": parameters[0]["id"], "typeDescriptions": parameters[0]["typeDescriptions"], } else: # we don't need an Assignment so we return only the external call return externalCall else: ret["typeDescriptions"] = {"typeString": "tuple()"} leftHandSide = { "components": [], "nodeType": "TupleExpression", "src": parameters_list["src"], } for i, p in enumerate(parameters): if p["name"] == "": continue new_statement = { "nodeType": "VariableDefinitionStatement", "src": p["src"], "declarations": [p], } self._add_param_init_tuple(new_statement, i) ident = { "name": p["name"], "nodeType": "Identifier", "src": p["src"], "referencedDeclaration": p["id"], "typeDescriptions": p["typeDescriptions"], } leftHandSide["components"].append(ident) ret["leftHandSide"] = leftHandSide ret["rightHandSide"] = externalCall return ret def _parse_try_catch(self, statement: Dict, node: NodeSolc, scope: Scope) -> NodeSolc: externalCall = statement.get("externalCall", None) if externalCall is None: raise ParsingError(f"Try/Catch not correctly parsed by Slither {statement}") catch_scope = Scope(scope.is_checked, False, scope) new_node = self._new_node(NodeType.TRY, statement["src"], catch_scope) clauses = statement.get("clauses", []) # the first clause is the try scope returned_variables = clauses[0].get("parameters", None) constructed_try_expression = self._construct_try_expression( externalCall, 
returned_variables ) new_node.add_unparsed_expression(constructed_try_expression) link_underlying_nodes(node, new_node) node = new_node for index, clause in enumerate(clauses): # clauses after the first one are related to catch cases # we set the parameters (e.g. data in this case. catch(string memory data) ...) # to be initialized so they are not reported by the uninitialized-local-variables detector if index >= 1: self._parse_catch(clause, node, catch_scope, True) else: # the parameters for the try scope were already added in _construct_try_expression self._parse_catch(clause, node, catch_scope, False) return node def _parse_catch( self, statement: Dict, node: NodeSolc, scope: Scope, add_param: bool ) -> NodeSolc: block = statement.get("block", None) if block is None: raise ParsingError(f"Catch not correctly parsed by Slither {statement}") try_scope = Scope(scope.is_checked, False, scope) try_node = self._new_node(NodeType.CATCH, statement["src"], try_scope) link_underlying_nodes(node, try_node) if add_param: if self.is_compact_ast: params = statement.get("parameters", None) else: params = statement[self.get_children("children")] if params: for param in params.get("parameters", []): assert param[self.get_key()] == "VariableDeclaration" self._add_param(param, True) return self._parse_statement(block, try_node, try_scope) def _parse_variable_definition(self, statement: Dict, node: NodeSolc, scope: Scope) -> NodeSolc: try: local_var = LocalVariable() local_var.set_function(self._function) local_var.set_offset(statement["src"], self._function.compilation_unit) local_var_parser = LocalVariableSolc(local_var, statement) self._add_local_variable(local_var_parser) # local_var.analyze(self) new_node = self._new_node(NodeType.VARIABLE, statement["src"], scope) new_node.underlying_node.add_variable_declaration(local_var) link_underlying_nodes(node, new_node) return new_node except MultipleVariablesDeclaration: # Custom handling of var (a,b) = .. 
style declaration if self.is_compact_ast: variables = statement["declarations"] count = len(variables) if ( statement["initialValue"]["nodeType"] == "TupleExpression" and len(statement["initialValue"]["components"]) == count ): inits = statement["initialValue"]["components"] i = 0 new_node = node for variable in variables: if variable is None: continue init = inits[i] src = variable["src"] i = i + 1 new_statement = { "nodeType": "VariableDefinitionStatement", "src": src, "declarations": [variable], "initialValue": init, } new_node = self._parse_variable_definition(new_statement, new_node, scope) else: # If we have # var (a, b) = f() # we can split in multiple declarations, without init # Then we craft one expression that does the assignment variables = [] i = 0 new_node = node for variable in statement["declarations"]: if variable: src = variable["src"] # Create a fake statement to be consistent new_statement = { "nodeType": "VariableDefinitionStatement", "src": src, "declarations": [variable], } variables.append(variable) new_node = self._parse_variable_definition_init_tuple( new_statement, i, new_node, scope ) else: variables.append(None) i = i + 1 var_identifiers = [] # craft of the expression doing the assignement for v in variables: if v is not None: identifier = { "nodeType": "Identifier", "src": v["src"], "name": v["name"], "referencedDeclaration": v["id"], "typeDescriptions": { "typeString": v["typeDescriptions"]["typeString"] }, } var_identifiers.append(identifier) else: var_identifiers.append(None) tuple_expression = { "nodeType": "TupleExpression", "src": statement["src"], "components": var_identifiers, } expression = { "nodeType": "Assignment", "src": statement["src"], "operator": "=", "type": "tuple()", "leftHandSide": tuple_expression, "rightHandSide": statement["initialValue"], "typeDescriptions": {"typeString": "tuple()"}, } node = new_node new_node = self._new_node(NodeType.EXPRESSION, statement["src"], scope) 
new_node.add_unparsed_expression(expression) link_underlying_nodes(node, new_node) else: count = 0 children = statement[self.get_children("children")] child = children[0] while child[self.get_key()] == "VariableDeclaration": count = count + 1 child = children[count] assert len(children) == (count + 1) tuple_vars = children[count] variables_declaration = children[0:count] i = 0 new_node = node if tuple_vars[self.get_key()] == "TupleExpression": assert len(tuple_vars[self.get_children("children")]) == count for variable in variables_declaration: init = tuple_vars[self.get_children("children")][i] src = variable["src"] i = i + 1 # Create a fake statement to be consistent new_statement = { self.get_key(): "VariableDefinitionStatement", "src": src, self.get_children("children"): [variable, init], } new_node = self._parse_variable_definition(new_statement, new_node, scope) else: # If we have # var (a, b) = f() # we can split in multiple declarations, without init # Then we craft one expression that does the assignment assert tuple_vars[self.get_key()] in ["FunctionCall", "Conditional"] variables = [] for variable in variables_declaration: src = variable["src"] # Create a fake statement to be consistent new_statement = { self.get_key(): "VariableDefinitionStatement", "src": src, self.get_children("children"): [variable], } variables.append(variable) new_node = self._parse_variable_definition_init_tuple( new_statement, i, new_node, scope ) i = i + 1 var_identifiers = [] # craft of the expression doing the assignement for v in variables: identifier = { self.get_key(): "Identifier", "src": v["src"], "attributes": { "value": v["attributes"][self.get_key()], "type": v["attributes"]["type"], }, } var_identifiers.append(identifier) expression = { self.get_key(): "Assignment", "src": statement["src"], "attributes": {"operator": "=", "type": "tuple()"}, self.get_children("children"): [ { self.get_key(): "TupleExpression", "src": statement["src"], self.get_children("children"): 
                        var_identifiers,
                    },
                    tuple_vars,
                ],
            }
        # NOTE(review): the enclosing `def` for the lines above precedes this chunk
        # (tail of the variable-definition parser); tokens left unchanged.
        node = new_node
        new_node = self._new_node(NodeType.EXPRESSION, statement["src"], scope)
        new_node.add_unparsed_expression(expression)
        link_underlying_nodes(node, new_node)
        return new_node

    def _parse_variable_definition_init_tuple(
        self, statement: Dict, index: int, node: NodeSolc, scope
    ) -> NodeSolc:
        """Create a VARIABLE node for one component of a tuple-initialized declaration.

        `index` is the position of this variable inside the initializing tuple.
        Returns the new NodeSolc, linked after `node`.
        """
        local_var = LocalVariableInitFromTuple()
        local_var.set_function(self._function)
        local_var.set_offset(statement["src"], self._function.compilation_unit)

        local_var_parser = LocalVariableInitFromTupleSolc(local_var, statement, index)

        self._add_local_variable(local_var_parser)

        new_node = self._new_node(NodeType.VARIABLE, statement["src"], scope)
        new_node.underlying_node.add_variable_declaration(local_var)
        link_underlying_nodes(node, new_node)
        return new_node

    def _parse_statement(
        self, statement: Dict, node: NodeSolc, scope: Union[Scope, Function]
    ) -> NodeSolc:
        """Dispatch one solc AST statement to the matching parser and extend the CFG.

        `node` is the current CFG tail; the returned NodeSolc is the new tail.
        Raises ParsingError on an unknown statement kind.
        """
        # Statement = IfStatement | WhileStatement | ForStatement | Block | InlineAssemblyStatement |
        #             ( DoWhileStatement | PlaceholderStatement | Continue | Break | Return |
        #               Throw | EmitStatement | SimpleStatement ) ';'
        # SimpleStatement = VariableDefinition | ExpressionStatement

        name = statement[self.get_key()]
        # SimpleStatement = VariableDefinition | ExpressionStatement
        if name == "IfStatement":
            node = self._parse_if(statement, node, scope)
        elif name == "WhileStatement":
            node = self._parse_while(statement, node, scope)
        elif name == "ForStatement":
            node = self._parse_for(statement, node, scope)
        elif name == "Block":
            node = self._parse_block(statement, node, scope)
        elif name == "UncheckedBlock":
            node = self._parse_unchecked_block(statement, node, scope)
        elif name == "InlineAssembly":
            # Added with solc 0.6 - the yul code is an AST
            if "AST" in statement and not self.compilation_unit.core.skip_assembly:
                self._function.contains_assembly = True
                yul_object = self._new_yul_block(statement["src"], scope)
                entrypoint = yul_object.entrypoint
                exitpoint = yul_object.convert(statement["AST"])

                # technically, entrypoint and exitpoint are YulNodes and we should be returning a NodeSolc here
                # but they both expose an underlying_node so oh well
                link_underlying_nodes(node, entrypoint)
                end_assembly = self._new_node(NodeType.ENDASSEMBLY, statement["src"], scope)
                link_underlying_nodes(exitpoint, end_assembly)
                node = end_assembly
            else:
                # Legacy path: keep the assembly text opaque on an ASSEMBLY node.
                asm_node = self._new_node(NodeType.ASSEMBLY, statement["src"], scope)
                self._function.contains_assembly = True
                # Added with solc 0.4.12
                if "operations" in statement:
                    asm_node.underlying_node.add_inline_asm(statement["operations"])
                link_underlying_nodes(node, asm_node)
                end_assembly = self._new_node(NodeType.ENDASSEMBLY, statement["src"], scope)
                link_underlying_nodes(asm_node, end_assembly)
                node = end_assembly
        elif name == "DoWhileStatement":
            node = self._parse_dowhile(statement, node, scope)
        # For Continue / Break / Return / Throw
        # The is fixed later
        elif name == "Continue":
            continue_node = self._new_node(NodeType.CONTINUE, statement["src"], scope)
            link_underlying_nodes(node, continue_node)
            node = continue_node
        elif name == "Break":
            break_node = self._new_node(NodeType.BREAK, statement["src"], scope)
            link_underlying_nodes(node, break_node)
            node = break_node
        elif name == "Return":
            return_node = self._new_node(NodeType.RETURN, statement["src"], scope)
            link_underlying_nodes(node, return_node)
            if self.is_compact_ast:
                if statement.get("expression", None):
                    return_node.add_unparsed_expression(statement["expression"])
            else:
                if (
                    self.get_children("children") in statement
                    and statement[self.get_children("children")]
                ):
                    assert len(statement[self.get_children("children")]) == 1
                    expression = statement[self.get_children("children")][0]
                    return_node.add_unparsed_expression(expression)
            node = return_node
        elif name == "Throw":
            throw_node = self._new_node(NodeType.THROW, statement["src"], scope)
            link_underlying_nodes(node, throw_node)
            node = throw_node
        elif name == "EmitStatement":
            # expression = parse_expression(statement[self.get_children('children')][0], self)
            if self.is_compact_ast:
                expression = statement["eventCall"]
            else:
                expression = statement[self.get_children("children")][0]
            new_node = self._new_node(NodeType.EXPRESSION, statement["src"], scope)
            new_node.add_unparsed_expression(expression)
            link_underlying_nodes(node, new_node)
            node = new_node
        elif name in ["VariableDefinitionStatement", "VariableDeclarationStatement"]:
            node = self._parse_variable_definition(statement, node, scope)
        elif name == "ExpressionStatement":
            # assert len(statement[self.get_children('expression')]) == 1
            # assert not 'attributes' in statement
            # expression = parse_expression(statement[self.get_children('children')][0], self)
            if self.is_compact_ast:
                expression = statement[self.get_children("expression")]
            else:
                expression = statement[self.get_children("expression")][0]
            new_node = self._new_node(NodeType.EXPRESSION, statement["src"], scope)
            new_node.add_unparsed_expression(expression)
            link_underlying_nodes(node, new_node)
            node = new_node
        elif name == "TryStatement":
            node = self._parse_try_catch(statement, node, scope)
        # elif name == 'TryCatchClause':
        #     self._parse_catch(statement, node)
        elif name == "RevertStatement":
            if self.is_compact_ast:
                expression = statement[self.get_children("errorCall")]
            else:
                expression = statement[self.get_children("errorCall")][0]
            new_node = self._new_node(NodeType.EXPRESSION, statement["src"], scope)
            new_node.add_unparsed_expression(expression)
            link_underlying_nodes(node, new_node)
            node = new_node
        else:
            raise ParsingError(f"Statement not parsed {name}")

        return node

    def _parse_block(self, block: Dict, node: NodeSolc, scope: Scope) -> NodeSolc:
        """Parse a `Block` statement: a new child Scope inheriting is_checked.

        Return:
            Node (the CFG tail after the block's statements)
        """
        assert block[self.get_key()] == "Block"

        if self.is_compact_ast:
            statements = block["statements"]
        else:
            statements = block[self.get_children("children")]

        new_scope = Scope(scope.is_checked, False, scope)
        for statement in statements:
            node = self._parse_statement(statement, node, new_scope)
        return node

    def _parse_unchecked_block(self, block: Dict, node: NodeSolc, scope):
        """Parse an `UncheckedBlock`: same as a block, but the new Scope is unchecked.

        Return:
            Node
        """
        assert block[self.get_key()] == "UncheckedBlock"

        if self.is_compact_ast:
            statements = block["statements"]
        else:
            statements = block[self.get_children("children")]

        # is_checked=False: arithmetic inside this scope is unchecked
        new_scope = Scope(False, False, scope)
        for statement in statements:
            node = self._parse_statement(statement, node, new_scope)
        return node

    def _update_reachability(self, node: Node) -> None:
        """Mark `node` and everything reachable from it via sons as reachable."""
        worklist = [node]
        while worklist:
            current = worklist.pop()
            # fix point
            if not current.is_reachable:
                current.set_is_reachable(True)
                worklist.extend(current.sons)

    def _parse_cfg(self, cfg: Dict) -> None:
        """Build the function's CFG from its body `Block`, then clean up edges."""
        assert cfg[self.get_key()] == "Block"

        node = self._new_node(NodeType.ENTRYPOINT, cfg["src"], self.underlying_function)
        self._function.entry_point = node.underlying_node

        if self.is_compact_ast:
            statements = cfg["statements"]
        else:
            statements = cfg[self.get_children("children")]

        if not statements:
            self._function.is_empty = True
        else:
            self._function.is_empty = False
            self._parse_block(cfg, node, self.underlying_function)
            self._remove_incorrect_edges()
            self._remove_alone_endif()

    # endregion
    ###################################################################################
    ###################################################################################
    # region Loops
    ###################################################################################
    ###################################################################################

    def _find_end_loop(self, node: Node, visited: List[Node], counter: int) -> Optional[Node]:
        """DFS over sons for the ENDLOOP matching this loop depth; None if not found."""
        # counter allows to explore nested loop
        if node in visited:
            return None

        if node.type == NodeType.ENDLOOP:
            if counter == 0:
                return node
            counter -= 1

        # nested loop
        if node.type == NodeType.STARTLOOP:
            counter += 1

        # visited is copied (not mutated) so sibling branches explore independently
        visited = visited + [node]
        for son in node.sons:
            ret = self._find_end_loop(son, visited, counter)
            if ret:
                return ret

        return None

    def _find_if_loop(self, node: Node, visited: List[Node], skip_if_loop: int) -> Optional[Node]:
        """Walk fathers to find the loop header a `continue` should jump to."""
        if node in visited:
            return None

        # If skip_if_loop is not 0 it means we are in a nested situation
        # and we have to skip the closer n loop headers.
        # If in the fathers there is an EXPRESSION node it's a for loop
        # and it's the index increment expression
        if node.type == NodeType.IFLOOP:
            if skip_if_loop == 0:
                for father in node.fathers:
                    if father.type == NodeType.EXPRESSION:
                        return father
                return node

        # skip_if_loop works as explained above.
        # This handle when a for loop doesn't have a condition
        # e.g. for (;;) {}
        # and decrement skip_if_loop since we are out of the nested loop
        if node.type == NodeType.STARTLOOP:
            if skip_if_loop == 0:
                return node
            skip_if_loop -= 1

        # If we find an ENDLOOP means we are in a nested loop
        # we increment skip_if_loop counter
        if node.type == NodeType.ENDLOOP:
            skip_if_loop += 1

        visited = visited + [node]
        for father in node.fathers:
            ret = self._find_if_loop(father, visited, skip_if_loop)
            if ret:
                return ret

        return None

    def _fix_break_node(self, node: Node) -> None:
        """Rewire a BREAK node so its single son is the enclosing loop's ENDLOOP."""
        end_node = self._find_end_loop(node, [], 0)

        if not end_node:
            # If there is not end condition on the loop
            # The exploration will reach a STARTLOOP before reaching the endloop
            # We start with -1 as counter to catch this corner case
            end_node = self._find_end_loop(node, [], -1)
            if not end_node:
                raise ParsingError(f"Break in no-loop context {node.function}")

        for son in node.sons:
            son.remove_father(node)
        node.set_sons([end_node])
        end_node.add_father(node)

    def _fix_continue_node(self, node: Node) -> None:
        """Rewire a CONTINUE node so its single son is the loop header (or for-increment)."""
        if_loop_node = self._find_if_loop(node, [], 0)

        if not if_loop_node:
            raise ParsingError(f"Continue in no-loop context {node.node_id}")

        for son in node.sons:
            son.remove_father(node)
        node.set_sons([if_loop_node])
        if_loop_node.add_father(node)

    # endregion
    ###################################################################################
    ###################################################################################
    # region Try-Catch
    ###################################################################################
    ###################################################################################

    def _fix_try(self, node: Node) -> None:
        """Link every CATCH branch of a TRY node to its non-catch successor (if any)."""
        end_node = next((son for son in node.sons if son.type != NodeType.CATCH), None)
        if end_node:
            for son in node.sons:
                if son.type == NodeType.CATCH:
                    self._fix_catch(son, end_node, set())

    def _fix_catch(self, node: Node, end_node: Node, visited: Set[Node]) -> None:
        """Recursively link leaf nodes of a catch branch to `end_node`.

        `visited` guards against cycles inside the catch body.
        """
        if not node.sons:
            link_nodes(node, end_node)
        else:
            for son in node.sons:
                if son != end_node and son not in visited:
                    visited.add(son)
                    self._fix_catch(son, end_node, visited)

    # endregion
    ###################################################################################
    ###################################################################################
    # region Params, Returns, Modifiers
    ###################################################################################
    ###################################################################################

    def _add_param(self, param: Dict, initialized: bool = False) -> LocalVariableSolc:
        """Create, analyze, and register a LocalVariable from a parameter declaration."""
        local_var = LocalVariable()
        local_var.set_function(self._function)
        local_var.set_offset(param["src"], self._function.compilation_unit)

        local_var_parser = LocalVariableSolc(local_var, param)
        local_var_parser.analyze(self)

        if initialized:
            local_var.initialized = True

        # see https://solidity.readthedocs.io/en/v0.4.24/types.html?highlight=storage%20location#data-location
        if local_var.location == "default":
            local_var.set_location("memory")

        self._add_local_variable(local_var_parser)
        return local_var_parser

    def _add_param_init_tuple(self, statement: Dict, index: int) -> LocalVariableInitFromTupleSolc:
        """Register a local variable initialized from tuple component `index`."""
        local_var = LocalVariableInitFromTuple()
        local_var.set_function(self._function)
        local_var.set_offset(statement["src"], self._function.compilation_unit)

        local_var_parser = LocalVariableInitFromTupleSolc(local_var, statement, index)

        self._add_local_variable(local_var_parser)
        return local_var_parser

    def _parse_params(self, params: Dict):
        """Parse the function's input ParameterList and register each parameter."""
        assert params[self.get_key()] == "ParameterList"

        self._function.parameters_src().set_offset(params["src"], self._function.compilation_unit)

        if self.is_compact_ast:
            params = params["parameters"]
        else:
            params = params[self.get_children("children")]

        for param in params:
            assert param[self.get_key()] == "VariableDeclaration"
            local_var = self._add_param(param)
            self._function.add_parameters(local_var.underlying_variable)

    def _parse_returns(self, returns: Dict):
        """Parse the return ParameterList; raw entries kept in _returnsNotParsed
        for the implicit-return fix-up."""
        assert returns[self.get_key()] == "ParameterList"

        self._function.returns_src().set_offset(returns["src"], self._function.compilation_unit)

        if self.is_compact_ast:
            self._returnsNotParsed = returns["parameters"]
        else:
            self._returnsNotParsed = returns[self.get_children("children")]

        for ret in self._returnsNotParsed:
            assert ret[self.get_key()] == "VariableDeclaration"
            local_var = self._add_param(ret)
            self._function.add_return(local_var.underlying_variable)

    def _parse_modifier(self, modifier: Dict):
        """Register a modifier invocation, inserting its call node after the latest
        entry point (entry point, previous modifier, or previous base-constructor call)."""
        m = parse_expression(modifier, self)
        # self._expression_modifiers.append(m)

        # Do not parse modifier nodes for interfaces
        if not self._function.is_implemented:
            return

        for m in ExportValues(m).result():
            if isinstance(m, Function):
                node_parser = self._new_node(
                    NodeType.EXPRESSION, modifier["src"], self.underlying_function
                )
                node_parser.add_unparsed_expression(modifier)
                # The latest entry point is the entry point, or the latest modifier call
                if self._function.modifiers:
                    latest_entry_point = self._function.modifiers_statements[-1].nodes[-1]
                else:
                    latest_entry_point = self._function.entry_point
                insert_node(latest_entry_point, node_parser.underlying_node)
                self._function.add_modifier(
                    ModifierStatements(
                        modifier=m,
                        entry_point=latest_entry_point,
                        nodes=[latest_entry_point, node_parser.underlying_node],
                    )
                )
            elif isinstance(m, Contract):
                node_parser = self._new_node(
                    NodeType.EXPRESSION, modifier["src"], self.underlying_function
                )
                node_parser.add_unparsed_expression(modifier)
                # The latest entry point is the entry point, or the latest constructor call
                if self._function.explicit_base_constructor_calls_statements:
                    latest_entry_point = self._function.explicit_base_constructor_calls_statements[
                        -1
                    ].nodes[-1]
                else:
                    latest_entry_point = self._function.entry_point
                insert_node(latest_entry_point, node_parser.underlying_node)
                self._function.add_explicit_base_constructor_calls_statements(
                    ModifierStatements(
                        modifier=m,
                        entry_point=latest_entry_point,
                        nodes=[latest_entry_point, node_parser.underlying_node],
                    )
                )

    def _fix_implicit_return(self, return_params: Dict) -> None:
        """
        Creates an artificial return node iff a function has a named return variable
        declared in its signature.
        Finds all leaf nodes in the CFG which are not return nodes, and links them to
        the artificial return node.
        """
        does_not_have_return_params = len(self.underlying_function.returns) == 0
        does_not_have_named_returns = all(
            ret.name == "" for ret in self.underlying_function.returns
        )
        not_implemented = not self._function.is_implemented
        if does_not_have_return_params or does_not_have_named_returns or not_implemented:
            return

        return_node = self._new_node(
            NodeType.RETURN, return_params["src"], self.underlying_function
        )
        # Leaf solc nodes that don't already return/throw flow into the implicit return
        for node, node_solc in self._node_to_nodesolc.items():
            if len(node.sons) == 0 and node.type not in [NodeType.RETURN, NodeType.THROW]:
                link_underlying_nodes(node_solc, return_node)

        # Same treatment for leaves inside inline-assembly (Yul) blocks
        for _, yul_block in self._node_to_yulobject.items():
            for yul_node in yul_block.nodes:
                node = yul_node.underlying_node
                if len(node.sons) == 0 and node.type not in [NodeType.RETURN, NodeType.THROW]:
                    link_underlying_nodes(yul_node, return_node)

        if self.is_compact_ast:
            self._add_return_exp_compact(return_node, return_params)
        else:
            self._add_return_exp_legacy(return_node, return_params)

        return_node.analyze_expressions(self)

    def _add_return_exp_compact(self, return_node: NodeSolc, return_params: Dict) -> None:
        """Attach the implicit return expression (compact AST shape) to `return_node`.

        Single named return -> synthetic Identifier node; multiple returns ->
        synthetic TupleExpression of Identifiers for the named ones.
        """
        if len(self.underlying_function.returns) == 1:
            return_arg = self.underlying_function.returns[0]
            if return_arg.name != "":
                (refId, refSrc, refType) = next(
                    (ret["id"], ret["src"], ret["typeDescriptions"])
                    for ret in self._returnsNotParsed
                    if ret["name"] == return_arg.name
                )
                return_node.add_unparsed_expression(
                    {
                        "name": return_arg.name,
                        "nodeType": "Identifier",
                        "overloadedDeclarations": [],
                        "referencedDeclaration": refId,
                        "src": refSrc,
                        "typeDescriptions": refType,
                    }
                )
        else:
            expression = {
                "components": [],
                "isConstant": False,
                "isInlineArray": False,
                "isLValue": False,
                "isPure": False,
                "lValueRequested": False,
                "nodeType": "TupleExpression",
                "src": return_params["src"],
                "typeDescriptions": {},
            }
            type_ids = []
            type_strs = []
            for return_arg in self.underlying_function.returns:
                # For each named return variable, we add an identifier to the tuple.
                if return_arg.name != "":
                    (refId, refSrc, refType) = next(
                        (ret["id"], ret["src"], ret["typeDescriptions"])
                        for ret in self._returnsNotParsed
                        if ret["name"] == return_arg.name
                    )
                    type_ids.append(refType["typeIdentifier"])
                    type_strs.append(refType["typeString"])
                    expression["components"].append(
                        {
                            "name": return_arg.name,
                            "nodeType": "Identifier",
                            "overloadedDeclarations": [],
                            "referencedDeclaration": refId,
                            "src": refSrc,
                            "typeDescriptions": refType,
                        }
                    )
            expression["typeDescriptions"]["typeIdentifier"] = (
                "t_tuple$_" + "_$_".join(type_ids) + "_$"
            )
            expression["typeDescriptions"]["typeString"] = "tuple(" + ",".join(type_strs) + ")"
            return_node.add_unparsed_expression(expression)

    def _add_return_exp_legacy(self, return_node: NodeSolc, return_params: Dict) -> None:
        """Attach the implicit return expression (legacy AST shape) to `return_node`."""
        if len(self.underlying_function.returns) == 1:
            return_arg = self.underlying_function.returns[0]
            if return_arg.name != "":
                (refSrc, refType) = next(
                    (ret["src"], ret["attributes"]["type"])
                    for ret in self._returnsNotParsed
                    if ret["attributes"]["name"] == return_arg.name
                )
                return_node.add_unparsed_expression(
                    {
                        "attributes": {"type": refType, "value": return_arg.name},
                        "name": "Identifier",
                        "src": refSrc,
                    }
                )
        else:
            expression = {
                "children": [],
                "name": "TupleExpression",
                "src": return_params["src"],
            }
            for return_arg in self.underlying_function.returns:
                # For each named return variable, we add an identifier to the tuple.
                if return_arg.name != "":
                    (refSrc, refType) = next(
                        (ret["src"], ret["attributes"]["type"])
                        for ret in self._returnsNotParsed
                        if ret["attributes"]["name"] == return_arg.name
                    )
                    expression["children"].append(
                        {
                            "attributes": {"type": refType, "value": return_arg.name},
                            "name": "Identifier",
                            "src": refSrc,
                        }
                    )
            return_node.add_unparsed_expression(expression)

    # endregion
    ###################################################################################
    ###################################################################################
    # region Edges
    ###################################################################################
    ###################################################################################

    def _remove_incorrect_edges(self):
        """Fix CFG edges created naively during parsing: detach successors of
        RETURN/THROW, retarget BREAK/CONTINUE, link CATCH branches, prune
        STARTLOOP->ENDLOOP shortcuts."""
        for node in self._node_to_nodesolc:
            if node.type in [NodeType.RETURN, NodeType.THROW]:
                for son in node.sons:
                    son.remove_father(node)
                node.set_sons([])
            if node.type in [NodeType.BREAK]:
                self._fix_break_node(node)
            if node.type in [NodeType.CONTINUE]:
                self._fix_continue_node(node)
            if node.type in [NodeType.TRY]:
                self._fix_try(node)

        # this step needs to happen after all of the break statements are fixed
        # really, we should be passing some sort of context down so the break statement doesn't
        # need to be fixed out-of-band in the first place
        for node in self._node_to_nodesolc:
            if node.type in [NodeType.STARTLOOP]:
                # can we prune? only if after pruning, we have at least one son that isn't itself
                if (
                    len([son for son in node.sons if son.type != NodeType.ENDLOOP and son != node])
                    == 0
                ):
                    continue

                new_sons = []
                for son in node.sons:
                    if son.type != NodeType.ENDLOOP:
                        new_sons.append(son)
                        continue
                    son.remove_father(node)
                node.set_sons(new_sons)

    def _remove_alone_endif(self) -> None:
        """
        Can occur on:
        if(..){
            return
        }
        else{
            return
        }

        Iterate until a fix point to remove the ENDIF node
        creates on the following pattern
        if(){
            return
        }
        else if(){
            return
        }
        """
        prev_nodes = []
        while set(prev_nodes) != set(self._node_to_nodesolc.keys()):
            prev_nodes = self._node_to_nodesolc.keys()
            to_remove: List[Node] = []
            for node in self._node_to_nodesolc:
                if node.type == NodeType.ENDIF and not node.fathers:
                    for son in node.sons:
                        son.remove_father(node)
                    node.set_sons([])
                    to_remove.append(node)
            self._function.nodes = [n for n in self._function.nodes if n not in to_remove]
            for remove in to_remove:
                if remove in self._node_to_nodesolc:
                    del self._node_to_nodesolc[remove]

    # endregion
    ###################################################################################
    ###################################################################################
    # region Ternary
    ###################################################################################
    ###################################################################################

    def _rewrite_ternary_as_if_else(self) -> bool:
        """Repeatedly split nodes containing a ternary into IF/EXPRESSION/ENDIF
        structures. Returns True iff at least one node was rewritten."""
        ternary_found = True
        updated = False
        while ternary_found:
            ternary_found = False
            for node in self._node_to_nodesolc:
                has_cond = HasConditional(node.expression)
                if has_cond.result():
                    st = SplitTernaryExpression(node.expression)
                    condition = st.condition
                    if not condition:
                        raise ParsingError(
                            f"Incorrect ternary conversion {node.expression} {node.source_mapping}"
                        )
                    true_expr = st.true_expression
                    false_expr = st.false_expression
                    self._split_ternary_node(node, condition, true_expr, false_expr)
                    ternary_found = True
                    updated = True
                    # restart: _split_ternary_node mutates the node collection
                    break
        return updated

    def _split_ternary_node(
        self,
        node: Node,
        condition: "Expression",
        true_expr: "Expression",
        false_expr: "Expression",
    ):
        """Replace `node` with an IF on `condition`, true/false EXPRESSION branches,
        and an ENDIF join, rewiring all father/son edges; `node` is removed."""
        condition_node = self._new_node(NodeType.IF, node.source_mapping, node.scope)
        condition_node.underlying_node.add_expression(condition)
        condition_node.analyze_expressions(self)

        if node.type == NodeType.VARIABLE:
            condition_node.underlying_node.add_variable_declaration(node.variable_declaration)

        true_node_parser = self._new_node(NodeType.EXPRESSION, node.source_mapping, node.scope)
        if node.type == NodeType.VARIABLE:
            assert isinstance(true_expr, AssignmentOperation)
            # true_expr = true_expr.expression_right
        elif node.type == NodeType.RETURN:
            true_node_parser.underlying_node.type = NodeType.RETURN
        true_node_parser.underlying_node.add_expression(true_expr)
        true_node_parser.analyze_expressions(self)

        false_node_parser = self._new_node(NodeType.EXPRESSION, node.source_mapping, node.scope)
        if node.type == NodeType.VARIABLE:
            assert isinstance(false_expr, AssignmentOperation)
        elif node.type == NodeType.RETURN:
            false_node_parser.underlying_node.type = NodeType.RETURN
            # false_expr = false_expr.expression_right
        false_node_parser.underlying_node.add_expression(false_expr)
        false_node_parser.analyze_expressions(self)

        endif_node = self._new_node(NodeType.ENDIF, node.source_mapping, node.scope)

        # Redirect incoming edges to the IF, outgoing edges to the ENDIF
        for father in node.fathers:
            father.replace_son(node, condition_node.underlying_node)
            condition_node.underlying_node.add_father(father)
        for son in node.sons:
            son.remove_father(node)
            son.add_father(endif_node.underlying_node)
            endif_node.underlying_node.add_son(son)

        link_underlying_nodes(condition_node, true_node_parser)
        link_underlying_nodes(condition_node, false_node_parser)

        # A branch that returns/throws must not fall through to the ENDIF join
        if true_node_parser.underlying_node.type not in [
            NodeType.THROW,
            NodeType.RETURN,
        ]:
            link_underlying_nodes(true_node_parser, endif_node)
        if false_node_parser.underlying_node.type not in [
            NodeType.THROW,
            NodeType.RETURN,
        ]:
            link_underlying_nodes(false_node_parser, endif_node)

        self._function.nodes = [n for n in self._function.nodes if n.node_id != node.node_id]
        del self._node_to_nodesolc[node]

    # endregion
FunctionSolc
python
wandb__wandb
wandb/vendor/pygments/lexers/matlab.py
{ "start": 7475, "end": 26640 }
class ____(RegexLexer): """ For GNU Octave source code. .. versionadded:: 1.5 """ name = 'Octave' aliases = ['octave'] filenames = ['*.m'] mimetypes = ['text/octave'] # These lists are generated automatically. # Run the following in bash shell: # # First dump all of the Octave manual into a plain text file: # # $ info octave --subnodes -o octave-manual # # Now grep through it: # for i in \ # "Built-in Function" "Command" "Function File" \ # "Loadable Function" "Mapping Function"; # do # perl -e '@name = qw('"$i"'); # print lc($name[0]),"_kw = [\n"'; # # perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \ # octave-manual | sort | uniq ; # echo "]" ; # echo; # done # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011) builtin_kw = ( "addlistener", "addpath", "addproperty", "all", "and", "any", "argnames", "argv", "assignin", "atexit", "autoload", "available_graphics_toolkits", "beep_on_error", "bitand", "bitmax", "bitor", "bitshift", "bitxor", "cat", "cell", "cellstr", "char", "class", "clc", "columns", "command_line_path", "completion_append_char", "completion_matches", "complex", "confirm_recursive_rmdir", "cputime", "crash_dumps_octave_core", "ctranspose", "cumprod", "cumsum", "debug_on_error", "debug_on_interrupt", "debug_on_warning", "default_save_options", "dellistener", "diag", "diff", "disp", "doc_cache_file", "do_string_escapes", "double", "drawnow", "e", "echo_executing_commands", "eps", "eq", "errno", "errno_list", "error", "eval", "evalin", "exec", "exist", "exit", "eye", "false", "fclear", "fclose", "fcntl", "fdisp", "feof", "ferror", "feval", "fflush", "fgetl", "fgets", "fieldnames", "file_in_loadpath", "file_in_path", "filemarker", "filesep", "find_dir_in_path", "fixed_point_format", "fnmatch", "fopen", "fork", "formula", "fprintf", "fputs", "fread", "freport", "frewind", "fscanf", "fseek", "fskipl", "ftell", "functions", "fwrite", "ge", "genpath", "get", "getegid", "getenv", "geteuid", "getgid", "getpgrp", "getpid", "getppid", 
"getuid", "glob", "gt", "gui_mode", "history_control", "history_file", "history_size", "history_timestamp_format_string", "home", "horzcat", "hypot", "ifelse", "ignore_function_time_stamp", "inferiorto", "info_file", "info_program", "inline", "input", "intmax", "intmin", "ipermute", "is_absolute_filename", "isargout", "isbool", "iscell", "iscellstr", "ischar", "iscomplex", "isempty", "isfield", "isfloat", "isglobal", "ishandle", "isieee", "isindex", "isinteger", "islogical", "ismatrix", "ismethod", "isnull", "isnumeric", "isobject", "isreal", "is_rooted_relative_filename", "issorted", "isstruct", "isvarname", "kbhit", "keyboard", "kill", "lasterr", "lasterror", "lastwarn", "ldivide", "le", "length", "link", "linspace", "logical", "lstat", "lt", "make_absolute_filename", "makeinfo_program", "max_recursion_depth", "merge", "methods", "mfilename", "minus", "mislocked", "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock", "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes", "munlock", "nargin", "nargout", "native_float_format", "ndims", "ne", "nfields", "nnz", "norm", "not", "numel", "nzmax", "octave_config_info", "octave_core_file_limit", "octave_core_file_name", "octave_core_file_options", "ones", "or", "output_max_field_width", "output_precision", "page_output_immediately", "page_screen_output", "path", "pathsep", "pause", "pclose", "permute", "pi", "pipe", "plus", "popen", "power", "print_empty_dimensions", "printf", "print_struct_array_contents", "prod", "program_invocation_name", "program_name", "putenv", "puts", "pwd", "quit", "rats", "rdivide", "readdir", "readlink", "read_readline_init_file", "realmax", "realmin", "rehash", "rename", "repelems", "re_read_readline_init_file", "reset", "reshape", "resize", "restoredefaultpath", "rethrow", "rmdir", "rmfield", "rmpath", "rows", "save_header_format_string", "save_precision", "saving_history", "scanf", "set", "setenv", "shell_cmd", "sighup_dumps_octave_core", "sigterm_dumps_octave_core", "silent_functions", "single", 
"size", "size_equal", "sizemax", "sizeof", "sleep", "source", "sparse_auto_mutate", "split_long_rows", "sprintf", "squeeze", "sscanf", "stat", "stderr", "stdin", "stdout", "strcmp", "strcmpi", "string_fill_char", "strncmp", "strncmpi", "struct", "struct_levels_to_print", "strvcat", "subsasgn", "subsref", "sum", "sumsq", "superiorto", "suppress_verbose_help_message", "symlink", "system", "tic", "tilde_expand", "times", "tmpfile", "tmpnam", "toc", "toupper", "transpose", "true", "typeinfo", "umask", "uminus", "uname", "undo_string_escapes", "unlink", "uplus", "upper", "usage", "usleep", "vec", "vectorize", "vertcat", "waitpid", "warning", "warranty", "whos_line_format", "yes_or_no", "zeros", "inf", "Inf", "nan", "NaN") command_kw = ("close", "load", "who", "whos") function_kw = ( "accumarray", "accumdim", "acosd", "acotd", "acscd", "addtodate", "allchild", "ancestor", "anova", "arch_fit", "arch_rnd", "arch_test", "area", "arma_rnd", "arrayfun", "ascii", "asctime", "asecd", "asind", "assert", "atand", "autoreg_matrix", "autumn", "axes", "axis", "bar", "barh", "bartlett", "bartlett_test", "beep", "betacdf", "betainv", "betapdf", "betarnd", "bicgstab", "bicubic", "binary", "binocdf", "binoinv", "binopdf", "binornd", "bitcmp", "bitget", "bitset", "blackman", "blanks", "blkdiag", "bone", "box", "brighten", "calendar", "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf", "cauchy_rnd", "caxis", "celldisp", "center", "cgs", "chisquare_test_homogeneity", "chisquare_test_independence", "circshift", "cla", "clabel", "clf", "clock", "cloglog", "closereq", "colon", "colorbar", "colormap", "colperm", "comet", "common_size", "commutation_matrix", "compan", "compare_versions", "compass", "computer", "cond", "condest", "contour", "contourc", "contourf", "contrast", "conv", "convhull", "cool", "copper", "copyfile", "cor", "corrcoef", "cor_test", "cosd", "cotd", "cov", "cplxpair", "cross", "cscd", "cstrcat", "csvread", "csvwrite", "ctime", "cumtrapz", "curl", "cut", "cylinder", "date", 
"datenum", "datestr", "datetick", "datevec", "dblquad", "deal", "deblank", "deconv", "delaunay", "delaunayn", "delete", "demo", "detrend", "diffpara", "diffuse", "dir", "discrete_cdf", "discrete_inv", "discrete_pdf", "discrete_rnd", "display", "divergence", "dlmwrite", "dos", "dsearch", "dsearchn", "duplication_matrix", "durbinlevinson", "ellipsoid", "empirical_cdf", "empirical_inv", "empirical_pdf", "empirical_rnd", "eomday", "errorbar", "etime", "etreeplot", "example", "expcdf", "expinv", "expm", "exppdf", "exprnd", "ezcontour", "ezcontourf", "ezmesh", "ezmeshc", "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor", "factorial", "fail", "fcdf", "feather", "fftconv", "fftfilt", "fftshift", "figure", "fileattrib", "fileparts", "fill", "findall", "findobj", "findstr", "finv", "flag", "flipdim", "fliplr", "flipud", "fpdf", "fplot", "fractdiff", "freqz", "freqz_plot", "frnd", "fsolve", "f_test_regression", "ftp", "fullfile", "fzero", "gamcdf", "gaminv", "gampdf", "gamrnd", "gca", "gcbf", "gcbo", "gcf", "genvarname", "geocdf", "geoinv", "geopdf", "geornd", "getfield", "ginput", "glpk", "gls", "gplot", "gradient", "graphics_toolkit", "gray", "grid", "griddata", "griddatan", "gtext", "gunzip", "gzip", "hadamard", "hamming", "hankel", "hanning", "hggroup", "hidden", "hilb", "hist", "histc", "hold", "hot", "hotelling_test", "housh", "hsv", "hurst", "hygecdf", "hygeinv", "hygepdf", "hygernd", "idivide", "ifftshift", "image", "imagesc", "imfinfo", "imread", "imshow", "imwrite", "index", "info", "inpolygon", "inputname", "interpft", "interpn", "intersect", "invhilb", "iqr", "isa", "isdefinite", "isdir", "is_duplicate_entry", "isequal", "isequalwithequalnans", "isfigure", "ishermitian", "ishghandle", "is_leap_year", "isletter", "ismac", "ismember", "ispc", "isprime", "isprop", "isscalar", "issquare", "isstrprop", "issymmetric", "isunix", "is_valid_file_id", "isvector", "jet", "kendall", "kolmogorov_smirnov_cdf", "kolmogorov_smirnov_test", "kruskal_wallis_test", "krylov", 
"kurtosis", "laplace_cdf", "laplace_inv", "laplace_pdf", "laplace_rnd", "legend", "legendre", "license", "line", "linkprop", "list_primes", "loadaudio", "loadobj", "logistic_cdf", "logistic_inv", "logistic_pdf", "logistic_rnd", "logit", "loglog", "loglogerr", "logm", "logncdf", "logninv", "lognpdf", "lognrnd", "logspace", "lookfor", "ls_command", "lsqnonneg", "magic", "mahalanobis", "manova", "matlabroot", "mcnemar_test", "mean", "meansq", "median", "menu", "mesh", "meshc", "meshgrid", "meshz", "mexext", "mget", "mkpp", "mode", "moment", "movefile", "mpoles", "mput", "namelengthmax", "nargchk", "nargoutchk", "nbincdf", "nbininv", "nbinpdf", "nbinrnd", "nchoosek", "ndgrid", "newplot", "news", "nonzeros", "normcdf", "normest", "norminv", "normpdf", "normrnd", "now", "nthroot", "null", "ocean", "ols", "onenormest", "optimget", "optimset", "orderfields", "orient", "orth", "pack", "pareto", "parseparams", "pascal", "patch", "pathdef", "pcg", "pchip", "pcolor", "pcr", "peaks", "periodogram", "perl", "perms", "pie", "pink", "planerot", "playaudio", "plot", "plotmatrix", "plotyy", "poisscdf", "poissinv", "poisspdf", "poissrnd", "polar", "poly", "polyaffine", "polyarea", "polyderiv", "polyfit", "polygcd", "polyint", "polyout", "polyreduce", "polyval", "polyvalm", "postpad", "powerset", "ppder", "ppint", "ppjumps", "ppplot", "ppval", "pqpnonneg", "prepad", "primes", "print", "print_usage", "prism", "probit", "qp", "qqplot", "quadcc", "quadgk", "quadl", "quadv", "quiver", "qzhess", "rainbow", "randi", "range", "rank", "ranks", "rat", "reallog", "realpow", "realsqrt", "record", "rectangle_lw", "rectangle_sw", "rectint", "refresh", "refreshdata", "regexptranslate", "repmat", "residue", "ribbon", "rindex", "roots", "rose", "rosser", "rotdim", "rref", "run", "run_count", "rundemos", "run_test", "runtests", "saveas", "saveaudio", "saveobj", "savepath", "scatter", "secd", "semilogx", "semilogxerr", "semilogy", "semilogyerr", "setaudio", "setdiff", "setfield", "setxor", "shading", 
"shift", "shiftdim", "sign_test", "sinc", "sind", "sinetone", "sinewave", "skewness", "slice", "sombrero", "sortrows", "spaugment", "spconvert", "spdiags", "spearman", "spectral_adf", "spectral_xdf", "specular", "speed", "spencer", "speye", "spfun", "sphere", "spinmap", "spline", "spones", "sprand", "sprandn", "sprandsym", "spring", "spstats", "spy", "sqp", "stairs", "statistics", "std", "stdnormal_cdf", "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd", "stem", "stft", "strcat", "strchr", "strjust", "strmatch", "strread", "strsplit", "strtok", "strtrim", "strtrunc", "structfun", "studentize", "subplot", "subsindex", "subspace", "substr", "substruct", "summer", "surf", "surface", "surfc", "surfl", "surfnorm", "svds", "swapbytes", "sylvester_matrix", "symvar", "synthesis", "table", "tand", "tar", "tcdf", "tempdir", "tempname", "test", "text", "textread", "textscan", "tinv", "title", "toeplitz", "tpdf", "trace", "trapz", "treelayout", "treeplot", "triangle_lw", "triangle_sw", "tril", "trimesh", "triplequad", "triplot", "trisurf", "triu", "trnd", "tsearchn", "t_test", "t_test_regression", "type", "unidcdf", "unidinv", "unidpdf", "unidrnd", "unifcdf", "unifinv", "unifpdf", "unifrnd", "union", "unique", "unix", "unmkpp", "unpack", "untabify", "untar", "unwrap", "unzip", "u_test", "validatestring", "vander", "var", "var_test", "vech", "ver", "version", "view", "voronoi", "voronoin", "waitforbuttonpress", "wavread", "wavwrite", "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday", "welch_test", "what", "white", "whitebg", "wienrnd", "wilcoxon_test", "wilkinson", "winter", "xlabel", "xlim", "ylabel", "yulewalker", "zip", "zlabel", "z_test") loadable_kw = ( "airy", "amd", "balance", "besselh", "besseli", "besselj", "besselk", "bessely", "bitpack", "bsxfun", "builtin", "ccolamd", "cellfun", "cellslices", "chol", "choldelete", "cholinsert", "cholinv", "cholshift", "cholupdate", "colamd", "colloc", "convhulln", "convn", "csymamd", "cummax", "cummin", "daspk", "daspk_options", 
"dasrt", "dasrt_options", "dassl", "dassl_options", "dbclear", "dbdown", "dbstack", "dbstatus", "dbstop", "dbtype", "dbup", "dbwhere", "det", "dlmread", "dmperm", "dot", "eig", "eigs", "endgrent", "endpwent", "etree", "fft", "fftn", "fftw", "filter", "find", "full", "gcd", "getgrent", "getgrgid", "getgrnam", "getpwent", "getpwnam", "getpwuid", "getrusage", "givens", "gmtime", "gnuplot_binary", "hess", "ifft", "ifftn", "inv", "isdebugmode", "issparse", "kron", "localtime", "lookup", "lsode", "lsode_options", "lu", "luinc", "luupdate", "matrix_type", "max", "min", "mktime", "pinv", "qr", "qrdelete", "qrinsert", "qrshift", "qrupdate", "quad", "quad_options", "qz", "rand", "rande", "randg", "randn", "randp", "randperm", "rcond", "regexp", "regexpi", "regexprep", "schur", "setgrent", "setpwent", "sort", "spalloc", "sparse", "spparms", "sprank", "sqrtm", "strfind", "strftime", "strptime", "strrep", "svd", "svd_driver", "syl", "symamd", "symbfact", "symrcm", "time", "tsearch", "typecast", "urlread", "urlwrite") mapping_kw = ( "abs", "acos", "acosh", "acot", "acoth", "acsc", "acsch", "angle", "arg", "asec", "asech", "asin", "asinh", "atan", "atanh", "beta", "betainc", "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos", "cosh", "cot", "coth", "csc", "csch", "erf", "erfc", "erfcx", "erfinv", "exp", "finite", "fix", "floor", "fmod", "gamma", "gammainc", "gammaln", "imag", "isalnum", "isalpha", "isascii", "iscntrl", "isdigit", "isfinite", "isgraph", "isinf", "islower", "isna", "isnan", "isprint", "ispunct", "isspace", "isupper", "isxdigit", "lcm", "lgamma", "log", "lower", "mod", "real", "rem", "round", "roundb", "sec", "sech", "sign", "sin", "sinh", "sqrt", "tan", "tanh", "toascii", "tolower", "xor") builtin_consts = ( "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA", "OCTAVE_HOME", "OCTAVE_VERSION", "PAGER", "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET", "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO", "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE", "WCOREDUMP", 
"WEXITSTATUS", "WIFCONTINUED", "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG", "WSTOPSIG", "WTERMSIG", "WUNTRACED") tokens = { 'root': [ # We should look into multiline comments (r'[%#].*$', Comment), (r'^\s*function', Keyword, 'deffunc'), # from 'iskeyword' on hg changeset 8cc154f45e37 (words(( '__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else', 'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef', 'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties', 'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods', 'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try', 'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\b'), Keyword), (words(builtin_kw + command_kw + function_kw + loadable_kw + mapping_kw, suffix=r'\b'), Name.Builtin), (words(builtin_consts, suffix=r'\b'), Name.Constant), # operators in Octave but not Matlab: (r'-=|!=|!|/=|--', Operator), # operators: (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), # operators in Octave but not Matlab requiring escape for re: (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*', Operator), # operators requiring escape for re: (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), # punctuation: (r'[\[\](){}:@.,]', Punctuation), (r'=|:|;', Punctuation), (r'"[^"]*"', String), (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), (r'\d+[eEf][+-]?[0-9]+', Number.Float), (r'\d+', Number.Integer), # quote can be transpose, instead of string: # (not great, but handles common cases...) 
(r'(?<=[\w)\].])\'+', Operator), (r'(?<![\w)\].])\'', String, 'string'), (r'[a-zA-Z_]\w*', Name), (r'.', Text), ], 'string': [ (r"[^']*'", String, '#pop'), ], 'deffunc': [ (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', bygroups(Whitespace, Text, Whitespace, Punctuation, Whitespace, Name.Function, Punctuation, Text, Punctuation, Whitespace), '#pop'), # function with no args (r'(\s*)([a-zA-Z_]\w*)', bygroups(Text, Name.Function), '#pop'), ], }
OctaveLexer
python
allegroai__clearml
clearml/backend_api/services/v2_13/models.py
{ "start": 88554, "end": 89435 }
class ____(Request): """ Gets model information :param task: Task id :type task: str """ _service = "models" _action = "get_by_task_id" _version = "2.13" _schema = { "definitions": {}, "properties": {"task": {"description": "Task id", "type": ["string", "null"]}}, "type": "object", } def __init__(self, task: Optional[str] = None, **kwargs: Any) -> None: super(GetByTaskIdRequest, self).__init__(**kwargs) self.task = task @schema_property("task") def task(self) -> Optional[str]: return self._property_task @task.setter def task(self, value: Optional[str]) -> None: if value is None: self._property_task = None return self.assert_isinstance(value, "task", six.string_types) self._property_task = value
GetByTaskIdRequest
python
getsentry__sentry
src/sentry/models/files/abstractfile.py
{ "start": 6659, "end": 13501 }
class ____(Model, _Parent[BlobIndexType, BlobType]): __relocation_scope__ = RelocationScope.Excluded name = models.TextField() type = models.CharField(max_length=64) timestamp = models.DateTimeField(default=timezone.now, db_index=True) headers = LegacyTextJSONField(default=dict) size = WrappingU32IntegerField(null=True) checksum = models.CharField(max_length=40, null=True, db_index=True) class Meta: abstract = True blobs: models.ManyToManyField @abc.abstractmethod def _blob_index_records(self) -> Sequence[BlobIndexType]: ... @abc.abstractmethod def _create_blob_index(self, blob: BlobType, offset: int) -> BlobIndexType: ... @abc.abstractmethod def _create_blob_from_file(self, contents: ContentFile, logger: Any) -> BlobType: ... @abc.abstractmethod def _get_blobs_by_id(self, blob_ids: Sequence[int]) -> models.QuerySet[BlobType]: ... @abc.abstractmethod def _delete_unreferenced_blob_task(self) -> Task[Any, Any]: ... def _get_chunked_blob(self, mode=None, prefetch=False, prefetch_to=None, delete=True): return ChunkedFileBlobIndexWrapper( self._blob_index_records(), mode=mode, prefetch=prefetch, prefetch_to=prefetch_to, delete=delete, ) @sentry_sdk.tracing.trace def getfile(self, mode=None, prefetch=False): """Returns a file object. By default the file is fetched on demand but if prefetch is enabled the file is fully prefetched into a tempfile before reading can happen. """ impl = self._get_chunked_blob(mode, prefetch) return FileObj(impl, self.name) @sentry_sdk.tracing.trace def save_to(self, path) -> None: """Fetches the file and emplaces it at a certain location. The write is done atomically to a tempfile first and then moved over. If the directory does not exist it is created. """ path = os.path.abspath(path) base = os.path.dirname(path) os.makedirs(base, exist_ok=True) f = None try: f = self._get_chunked_blob( prefetch=True, prefetch_to=base, delete=False ).detach_tempfile() # pre-emptively check if the file already exists. 
# this can happen as a race condition if two processes/threads # are trying to cache the same file and both try to write # at the same time, overwriting each other. Normally this is fine, # but can cause an issue if another process has opened the file # for reading, then the file that was being read gets clobbered. # I don't know if this affects normal filesystems, but it # definitely has an issue if the filesystem is NFS. if not os.path.exists(path): os.rename(f.name, path) f.close() f = None finally: if f is not None: f.close() try: os.remove(f.name) except Exception: pass @sentry_sdk.tracing.trace def putfile(self, fileobj, blob_size=DEFAULT_BLOB_SIZE, commit=True, logger=nooplogger): """ Save a fileobj into a number of chunks. Returns a list of `FileBlobIndex` items. >>> indexes = file.putfile(fileobj) """ results = [] offset = 0 checksum = sha1(b"") while True: contents = fileobj.read(blob_size) if not contents: break checksum.update(contents) blob_fileobj = ContentFile(contents) blob = self._create_blob_from_file(blob_fileobj, logger=logger) results.append(self._create_blob_index(blob=blob, offset=offset)) offset += blob.size self.size = offset self.checksum = checksum.hexdigest() metrics.distribution("filestore.file-size", offset, unit="byte") if commit: self.save() return results @sentry_sdk.tracing.trace def assemble_from_file_blob_ids(self, file_blob_ids, checksum): """ This creates a file, from file blobs and returns a temp file with the contents. 
""" tf = tempfile.NamedTemporaryFile() # All file tables are on the same connection and this lets us # bypass generics with transaction.atomic(using=router.db_for_write(type(self))): try: file_blobs_qs = self._get_blobs_by_id(blob_ids=file_blob_ids) # Ensure blobs are in the order and duplication as provided blobs_by_id = {blob.id: blob for blob in file_blobs_qs} file_blobs = [blobs_by_id[blob_id] for blob_id in file_blob_ids] except Exception: # Most likely a `KeyError` like `SENTRY-11QP` because an `id` in # `file_blob_ids` does suddenly not exist anymore logger.exception("`FileBlob` disappeared during `assemble_file`") raise new_checksum = sha1(b"") offset = 0 for blob in file_blobs: try: self._create_blob_index(blob=blob, offset=offset) except IntegrityError: # Most likely a `ForeignKeyViolation` like `SENTRY-11P5`, because # the blob we want to link does not exist anymore logger.exception("`FileBlob` disappeared trying to link `FileBlobIndex`") raise with blob.getfile() as blobfile: for chunk in blobfile.chunks(): new_checksum.update(chunk) tf.write(chunk) offset += blob.size self.size = offset self.checksum = new_checksum.hexdigest() if checksum != self.checksum: tf.close() raise AssembleChecksumMismatch("Checksum mismatch") metrics.distribution("filestore.file-size", offset, unit="byte") self.save() tf.flush() tf.seek(0) return tf @sentry_sdk.tracing.trace def delete(self, *args, **kwargs): blob_ids = [blob.id for blob in self.blobs.all()] ret = super().delete(*args, **kwargs) # Wait to delete blobs. This helps prevent # races around frequently used blobs in debug images and release files. transaction.on_commit( lambda: self._delete_unreferenced_blob_task().apply_async( kwargs={"blob_ids": blob_ids}, countdown=60 * 5 ), using=router.db_for_write(type(self)), ) return ret
AbstractFile
python
google__flatbuffers
tests/monster_test_generated.py
{ "start": 383, "end": 460 }
class ____(object): None_ = -1 Human = 0 Dwarf = 1 Elf = 2
Race
python
eventlet__eventlet
eventlet/hubs/timer.py
{ "start": 2554, "end": 3185 }
class ____(Timer): def __init__(self, *args, **kwargs): self.greenlet = greenlet.getcurrent() Timer.__init__(self, *args, **kwargs) @property def pending(self): if self.greenlet is None or self.greenlet.dead: return False return not self.called def __call__(self, *args): if not self.called: self.called = True if self.greenlet is not None and self.greenlet.dead: return cb, args, kw = self.tpl cb(*args, **kw) def cancel(self): self.greenlet = None Timer.cancel(self)
LocalTimer
python
pytorch__pytorch
test/test_license.py
{ "start": 534, "end": 2085 }
class ____(TestCase): @unittest.skipIf(not create_bundled, "can only be run in a source tree") def test_license_for_wheel(self): current = io.StringIO() create_bundled("third_party", current) with open(license_file) as fid: src_tree = fid.read() if not src_tree == current.getvalue(): raise AssertionError( f'the contents of "{license_file}" do not ' "match the current state of the third_party files. Use " '"python third_party/build_bundled.py" to regenerate it' ) @unittest.skipIf(len(distinfo) == 0, "no installation in site-package to test") def test_distinfo_license(self): """If run when pytorch is installed via a wheel, the license will be in site-package/torch-*dist-info/LICENSE. Make sure it contains the third party bundle of licenses""" if len(distinfo) > 1: raise AssertionError( 'Found too many "torch-*dist-info" directories ' f'in "{site_packages}, expected only one' ) # setuptools renamed *dist-info/LICENSE to *dist-info/licenses/LICENSE since 77.0 license_file = os.path.join(distinfo[0], "licenses", "LICENSE") if not os.path.exists(license_file): license_file = os.path.join(distinfo[0], "LICENSE") with open(license_file) as fid: txt = fid.read() self.assertTrue(starting_txt in txt) if __name__ == "__main__": run_tests()
TestLicense
python
wandb__wandb
wandb/vendor/pygments/lexers/asm.py
{ "start": 20797, "end": 21105 }
class ____(ObjdumpLexer): """ For the output of 'objdump -d -M intel'. .. versionadded:: 2.0 """ name = 'objdump-nasm' aliases = ['objdump-nasm'] filenames = ['*.objdump-intel'] mimetypes = ['text/x-nasm-objdump'] tokens = _objdump_lexer_tokens(NasmLexer)
NasmObjdumpLexer
python
scikit-learn__scikit-learn
sklearn/utils/tests/test_estimator_checks.py
{ "start": 7790, "end": 8431 }
class ____(BaseEstimator): def fit(self, X, y, sample_weight=None): # Convert data X, y = validate_data( self, X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True ) # Function is only called after we verify that pandas is installed from pandas import Series if isinstance(sample_weight, Series): raise ValueError( "Estimator does not accept 'sample_weight'of type pandas.Series" ) return self def predict(self, X): X = check_array(X) return np.ones(X.shape[0])
NoSampleWeightPandasSeriesType
python
facebookresearch__faiss
tests/test_local_search_quantizer.py
{ "start": 19385, "end": 21457 }
class ____(unittest.TestCase): def eval_index_accuracy(self, factory_key): ds = datasets.SyntheticDataset(32, 1000, 1000, 100) index = faiss.index_factory(ds.d, factory_key) index.train(ds.get_train()) index.add(ds.get_database()) inters = [] for nprobe in 1, 2, 4, 8, 16: index.nprobe = nprobe D, I = index.search(ds.get_queries(), 10) inter = faiss.eval_intersection(I, ds.get_groundtruth(10)) inters.append(inter) inters = np.array(inters) self.assertTrue(np.all(inters[1:] >= inters[:-1])) # do a little I/O test index2 = faiss.deserialize_index(faiss.serialize_index(index)) D2, I2 = index2.search(ds.get_queries(), 10) np.testing.assert_array_equal(I2, I) np.testing.assert_array_equal(D2, D) return inter def test_index_accuracy(self): self.eval_index_accuracy("IVF32,PLSQ2x2x5_Nqint8") @unittest.skipIf(platform.system() == 'Windows', 'Does not work on Windows-2022+.') def test_index_accuracy2(self): """check that the error is in the same ballpark as LSQ.""" inter1 = self.eval_index_accuracy("IVF32,PLSQ2x2x5_Nqint8") inter2 = self.eval_index_accuracy("IVF32,LSQ4x5_Nqint8") self.assertGreaterEqual(inter1 * 1.1, inter2) def test_factory(self): AQ = faiss.AdditiveQuantizer ns, Msub, nbits = 2, 4, 8 index = faiss.index_factory(64, f"IVF32,PLSQ{ns}x{Msub}x{nbits}_Nqint8") assert isinstance(index, faiss.IndexIVFProductLocalSearchQuantizer) self.assertEqual(index.nlist, 32) self.assertEqual(index.plsq.nsplits, ns) self.assertEqual(index.plsq.subquantizer(0).M, Msub) self.assertEqual(index.plsq.subquantizer(0).nbits.at(0), nbits) self.assertEqual(index.plsq.search_type, AQ.ST_norm_qint8) code_size = (ns * Msub * nbits + 7) // 8 + 1 self.assertEqual(index.plsq.code_size, code_size)
TestIndexIVFProductLocalSearchQuantizer
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/sensors/test_bedrock.py
{ "start": 7858, "end": 10152 }
class ____: SENSOR = BedrockIngestionJobSensor def setup_method(self): self.default_op_kwargs = dict( task_id="test_bedrock_knowledge_base_active_sensor", knowledge_base_id="knowledge_base_id", data_source_id="data_source_id", ingestion_job_id="ingestion_job_id", poke_interval=5, max_retries=1, ) self.sensor = self.SENSOR(**self.default_op_kwargs, aws_conn_id=None) def test_base_aws_op_attributes(self): op = self.SENSOR(**self.default_op_kwargs) assert op.hook.aws_conn_id == "aws_default" assert op.hook._region_name is None assert op.hook._verify is None assert op.hook._config is None op = self.SENSOR( **self.default_op_kwargs, aws_conn_id="aws-test-custom-conn", region_name="eu-west-1", verify=False, botocore_config={"read_timeout": 42}, ) assert op.hook.aws_conn_id == "aws-test-custom-conn" assert op.hook._region_name == "eu-west-1" assert op.hook._verify is False assert op.hook._config is not None assert op.hook._config.read_timeout == 42 @pytest.mark.parametrize("state", SENSOR.SUCCESS_STATES) @mock.patch.object(BedrockAgentHook, "conn") def test_poke_success_states(self, mock_conn, state): mock_conn.get_ingestion_job.return_value = {"ingestionJob": {"status": state}} assert self.sensor.poke({}) is True @pytest.mark.parametrize("state", SENSOR.INTERMEDIATE_STATES) @mock.patch.object(BedrockAgentHook, "conn") def test_poke_intermediate_states(self, mock_conn, state): mock_conn.get_ingestion_job.return_value = {"ingestionJob": {"status": state}} assert self.sensor.poke({}) is False @pytest.mark.parametrize("state", SENSOR.FAILURE_STATES) @mock.patch.object(BedrockAgentHook, "conn") def test_poke_failure_states(self, mock_conn, state): mock_conn.get_ingestion_job.return_value = {"ingestionJob": {"status": state}} sensor = self.SENSOR(**self.default_op_kwargs, aws_conn_id=None) with pytest.raises(AirflowException, match=sensor.FAILURE_MESSAGE): sensor.poke({})
TestBedrockIngestionJobSensor
python
apache__airflow
providers/google/tests/unit/google/common/links/test_storage.py
{ "start": 910, "end": 1549 }
class ____: def test_storage_link(self): assert StorageLink.name == "GCS Storage" assert StorageLink.key == "storage_conf" assert ( StorageLink.format_str == "https://console.cloud.google.com/storage/browser/{uri};tab=objects?project={project_id}" ) def test_storage_link_format(self): link = StorageLink() url = link._format_link(uri="test-bucket/test-folder", project_id="test-id") expected_url = "https://console.cloud.google.com/storage/browser/test-bucket/test-folder;tab=objects?project=test-id" assert url == expected_url
TestStorageLink
python
huggingface__transformers
src/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py
{ "start": 2874, "end": 12155 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen2_5_VLTextModel`]. It is used to instantiate a Qwen2-VL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of Qwen2-VL-7B-Instruct [Qwen/Qwen2-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 152064): Vocabulary size of the Qwen2_5_VL model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Qwen2_5_VLModel`] hidden_size (`int`, *optional*, defaults to 8192): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 29568): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 80): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 64): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`. 
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 32768): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. use_sliding_window (`bool`, *optional*, defaults to `False`): Whether to use sliding window attention. sliding_window (`int`, *optional*, defaults to 4096): Sliding window attention (SWA) window size. If not specified, will default to `4096`. max_window_layers (`int`, *optional*, defaults to 80): The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any additional layer afterwards will use SWA (Sliding Window Attention). layer_types (`list`, *optional*): Attention pattern for each layer. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. bos_token_id (`int`, *optional*, defaults to 151643): The id of the _beginning-of-stream_ token. eos_token_id (`int`, *optional*, defaults to 151645): The id of the _end-of-stream_ token. 
pad_token_id (`int`, *optional*): The id of the _padding_ token. ```python >>> from transformers import Qwen2_5_VLTextModel, Qwen2_5_VLConfig >>> # Initializing a Qwen2_5_VL style configuration >>> configuration = Qwen2_5_VLConfig() >>> # Initializing a model from the Qwen2-VL-7B style configuration >>> model = Qwen2_5_VLTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen2_5_vl_text" base_config_key = "text_config" keys_to_ignore_at_inference = ["past_key_values"] default_theta = 1000000.0 # Default tensor parallel plan for base model `Qwen2_5_VL` base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: Optional[int] = 152064, hidden_size: Optional[int] = 8192, intermediate_size: Optional[int] = 29568, num_hidden_layers: Optional[int] = 80, num_attention_heads: Optional[int] = 64, num_key_value_heads: Optional[int] = 8, hidden_act: Optional[str] = "silu", max_position_embeddings: Optional[int] = 32768, initializer_range: Optional[float] = 0.02, rms_norm_eps: Optional[int] = 1e-05, use_cache: Optional[bool] = True, tie_word_embeddings: Optional[bool] = False, use_sliding_window: Optional[bool] = False, sliding_window: Optional[int] = 4096, max_window_layers: Optional[int] = 80, layer_types: Optional[list[str]] = None, attention_dropout: Optional[float] = 0.0, rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None, bos_token_id: Optional[int] = 151643, eos_token_id: Optional[int] = 151645, pad_token_id: Optional[int] = 
None, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.use_sliding_window = use_sliding_window self.sliding_window = sliding_window if self.use_sliding_window else None self.max_window_layers = max_window_layers # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_dropout = attention_dropout self.layer_types = layer_types if self.layer_types is None: self.layer_types = [ "sliding_attention" if self.sliding_window is not None and i >= self.max_window_layers else "full_attention" for i in range(self.num_hidden_layers) ] layer_type_validation(self.layer_types, self.num_hidden_layers) self.rope_parameters = rope_parameters super().__init__( tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, ignore_keys_at_rope_validation={"mrope"}, **kwargs, ) def convert_rope_params_to_dict(self, ignore_keys_at_rope_validation: Optional[set] = None, **kwargs): rope_scaling = kwargs.pop("rope_scaling", None) self.rope_parameters = rope_scaling or self.rope_parameters self.rope_parameters = self.rope_parameters if self.rope_parameters is not None else {} # Standardize and validate the correctness of rotary position embeddings parameters self.rope_parameters.setdefault("rope_theta", kwargs.pop("rope_theta", self.default_theta)) if self.rope_parameters.get("rope_type", self.rope_parameters.get("type")) == "mrope": self.rope_parameters["rope_type"] = "default" self.standardize_rope_params() self.validate_rope(ignore_keys=ignore_keys_at_rope_validation) return kwargs
Qwen2_5_VLTextConfig
python
apache__airflow
providers/neo4j/src/airflow/providers/neo4j/operators/neo4j.py
{ "start": 1247, "end": 2344 }
class ____(BaseOperator): """ Executes sql code in a specific Neo4j database. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:Neo4jOperator` :param sql: the sql code to be executed. Can receive a str representing a sql statement :param neo4j_conn_id: Reference to :ref:`Neo4j connection id <howto/connection:neo4j>`. :param parameters: the parameters to send to Neo4j driver session """ template_fields: Sequence[str] = ("sql", "parameters") def __init__( self, *, sql: str, neo4j_conn_id: str = "neo4j_default", parameters: dict[str, Any] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.neo4j_conn_id = neo4j_conn_id self.sql = sql self.parameters = parameters def execute(self, context: Context) -> None: self.log.info("Executing: %s", self.sql) hook = Neo4jHook(conn_id=self.neo4j_conn_id) hook.run(self.sql, self.parameters)
Neo4jOperator
python
doocs__leetcode
solution/1700-1799/1763.Longest Nice Substring/Solution.py
{ "start": 0, "end": 432 }
class ____: def longestNiceSubstring(self, s: str) -> str: n = len(s) ans = '' for i in range(n): ss = set() for j in range(i, n): ss.add(s[j]) if ( all(c.lower() in ss and c.upper() in ss for c in ss) and len(ans) < j - i + 1 ): ans = s[i : j + 1] return ans
Solution
python
redis__redis-py
redis/multidb/circuit.py
{ "start": 3250, "end": 3856 }
class ____(BaseCircuitBreaker): def __init__(self, cb: pybreaker.CircuitBreaker): """ Initialize a PBCircuitBreakerAdapter instance. This adapter wraps pybreaker's CircuitBreaker implementation to make it compatible with our CircuitBreaker interface. Args: cb: A pybreaker CircuitBreaker instance to be adapted. """ super().__init__(cb) def on_state_changed(self, cb: Callable[["CircuitBreaker", State, State], None]): listener = PBListener(cb, self.database) self._cb.add_listener(listener)
PBCircuitBreakerAdapter
python
zarr-developers__zarr-python
src/zarr/errors.py
{ "start": 1344, "end": 1518 }
class ____(NodeNotFoundError): """ Raised when a group isn't found at a certain path. """ _msg = "No group found in store {!r} at path {!r}"
GroupNotFoundError
python
kubernetes-client__python
kubernetes/client/models/v1_pod_template.py
{ "start": 383, "end": 6637 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'kind': 'str', 'metadata': 'V1ObjectMeta', 'template': 'V1PodTemplateSpec' } attribute_map = { 'api_version': 'apiVersion', 'kind': 'kind', 'metadata': 'metadata', 'template': 'template' } def __init__(self, api_version=None, kind=None, metadata=None, template=None, local_vars_configuration=None): # noqa: E501 """V1PodTemplate - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._kind = None self._metadata = None self._template = None self.discriminator = None if api_version is not None: self.api_version = api_version if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata if template is not None: self.template = template @property def api_version(self): """Gets the api_version of this V1PodTemplate. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1PodTemplate. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1PodTemplate. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1PodTemplate. # noqa: E501 :type: str """ self._api_version = api_version @property def kind(self): """Gets the kind of this V1PodTemplate. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1PodTemplate. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1PodTemplate. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1PodTemplate. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1PodTemplate. # noqa: E501 :return: The metadata of this V1PodTemplate. # noqa: E501 :rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1PodTemplate. :param metadata: The metadata of this V1PodTemplate. # noqa: E501 :type: V1ObjectMeta """ self._metadata = metadata @property def template(self): """Gets the template of this V1PodTemplate. # noqa: E501 :return: The template of this V1PodTemplate. # noqa: E501 :rtype: V1PodTemplateSpec """ return self._template @template.setter def template(self, template): """Sets the template of this V1PodTemplate. :param template: The template of this V1PodTemplate. 
# noqa: E501 :type: V1PodTemplateSpec """ self._template = template def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1PodTemplate): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1PodTemplate): return True return self.to_dict() != other.to_dict()
V1PodTemplate
python
Textualize__textual
docs/examples/how-to/layout03.py
{ "start": 354, "end": 530 }
class ____(Placeholder): DEFAULT_CSS = """ ColumnsContainer { width: 1fr; height: 1fr; border: solid white; } """ # (1)!
ColumnsContainer
python
python-openxml__python-docx
src/docx/parts/document.py
{ "start": 803, "end": 6417 }
class ____(StoryPart): """Main document part of a WordprocessingML (WML) package, aka a .docx file. Acts as broker to other parts such as image, core properties, and style parts. It also acts as a convenient delegate when a mid-document object needs a service involving a remote ancestor. The `Parented.part` property inherited by many content objects provides access to this part object for that purpose. """ def add_footer_part(self): """Return (footer_part, rId) pair for newly-created footer part.""" footer_part = FooterPart.new(self.package) rId = self.relate_to(footer_part, RT.FOOTER) return footer_part, rId def add_header_part(self): """Return (header_part, rId) pair for newly-created header part.""" header_part = HeaderPart.new(self.package) rId = self.relate_to(header_part, RT.HEADER) return header_part, rId @property def comments(self) -> Comments: """|Comments| object providing access to the comments added to this document.""" return self._comments_part.comments @property def core_properties(self) -> CoreProperties: """A |CoreProperties| object providing read/write access to the core properties of this document.""" return self.package.core_properties @property def document(self): """A |Document| object providing access to the content of this document.""" return Document(self._element, self) def drop_header_part(self, rId: str) -> None: """Remove related header part identified by `rId`.""" self.drop_rel(rId) def footer_part(self, rId: str): """Return |FooterPart| related by `rId`.""" return self.related_parts[rId] def get_style(self, style_id: str | None, style_type: WD_STYLE_TYPE) -> BaseStyle: """Return the style in this document matching `style_id`. Returns the default style for `style_type` if `style_id` is |None| or does not match a defined style of `style_type`. """ return self.styles.get_by_id(style_id, style_type) def get_style_id(self, style_or_name, style_type): """Return the style_id (|str|) of the style of `style_type` matching `style_or_name`. 
Returns |None| if the style resolves to the default style for `style_type` or if `style_or_name` is itself |None|. Raises if `style_or_name` is a style of the wrong type or names a style not present in the document. """ return self.styles.get_style_id(style_or_name, style_type) def header_part(self, rId: str): """Return |HeaderPart| related by `rId`.""" return self.related_parts[rId] @lazyproperty def inline_shapes(self): """The |InlineShapes| instance containing the inline shapes in the document.""" return InlineShapes(self._element.body, self) @lazyproperty def numbering_part(self) -> NumberingPart: """A |NumberingPart| object providing access to the numbering definitions for this document. Creates an empty numbering part if one is not present. """ try: return cast(NumberingPart, self.part_related_by(RT.NUMBERING)) except KeyError: numbering_part = NumberingPart.new() self.relate_to(numbering_part, RT.NUMBERING) return numbering_part def save(self, path_or_stream: str | IO[bytes]): """Save this document to `path_or_stream`, which can be either a path to a filesystem location (a string) or a file-like object.""" self.package.save(path_or_stream) @property def settings(self) -> Settings: """A |Settings| object providing access to the settings in the settings part of this document.""" return self._settings_part.settings @property def styles(self): """A |Styles| object providing access to the styles in the styles part of this document.""" return self._styles_part.styles @property def _comments_part(self) -> CommentsPart: """A |CommentsPart| object providing access to the comments added to this document. Creates a default comments part if one is not present. 
""" try: return cast(CommentsPart, self.part_related_by(RT.COMMENTS)) except KeyError: assert self.package is not None comments_part = CommentsPart.default(self.package) self.relate_to(comments_part, RT.COMMENTS) return comments_part @property def _settings_part(self) -> SettingsPart: """A |SettingsPart| object providing access to the document-level settings for this document. Creates a default settings part if one is not present. """ try: return cast(SettingsPart, self.part_related_by(RT.SETTINGS)) except KeyError: settings_part = SettingsPart.default(self.package) self.relate_to(settings_part, RT.SETTINGS) return settings_part @property def _styles_part(self) -> StylesPart: """Instance of |StylesPart| for this document. Creates an empty styles part if one is not present. """ try: return cast(StylesPart, self.part_related_by(RT.STYLES)) except KeyError: package = self.package assert package is not None styles_part = StylesPart.default(package) self.relate_to(styles_part, RT.STYLES) return styles_part
DocumentPart
python
pypa__hatch
tests/backend/metadata/test_core.py
{ "start": 57432, "end": 60911 }
class ____: def test_unknown(self, isolation): metadata = ProjectMetadata( str(isolation), PluginManager(), {"project": {"name": "foo"}, "tool": {"hatch": {"metadata": {"hooks": {"foo": {}}}}}}, ) with pytest.raises(ValueError, match="Unknown metadata hook: foo"): _ = metadata.core def test_custom(self, temp_dir, helpers): classifiers = [ "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.9", "Framework :: Foo", "Development Status :: 4 - Beta", "Private :: Do Not Upload", ] metadata = ProjectMetadata( str(temp_dir), PluginManager(), { "project": {"name": "foo", "classifiers": classifiers, "dynamic": ["version", "description"]}, "tool": {"hatch": {"version": {"path": "a/b"}, "metadata": {"hooks": {"custom": {}}}}}, }, ) file_path = temp_dir / "a" / "b" file_path.ensure_parent_dir_exists() file_path.write_text('__version__ = "0.0.1"') file_path = temp_dir / DEFAULT_BUILD_SCRIPT file_path.write_text( helpers.dedent( """ from hatchling.metadata.plugin.interface import MetadataHookInterface class CustomHook(MetadataHookInterface): def update(self, metadata): metadata['description'] = metadata['name'] + 'bar' metadata['version'] = metadata['version'] + 'rc0' def get_known_classifiers(self): return ['Framework :: Foo'] """ ) ) assert "custom" in metadata.hatch.metadata.hooks assert metadata.core.name == "foo" assert metadata.core.description == "foobar" assert metadata.core.version == "0.0.1rc0" assert metadata.core.classifiers == [ "Private :: Do Not Upload", "Development Status :: 4 - Beta", "Framework :: Foo", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.11", ] def test_custom_missing_dynamic(self, temp_dir, helpers): metadata = ProjectMetadata( str(temp_dir), PluginManager(), { "project": {"name": "foo", "dynamic": ["version"]}, "tool": {"hatch": {"version": {"path": "a/b"}, "metadata": {"hooks": {"custom": {}}}}}, }, ) file_path = temp_dir / "a" / "b" 
file_path.ensure_parent_dir_exists() file_path.write_text('__version__ = "0.0.1"') file_path = temp_dir / DEFAULT_BUILD_SCRIPT file_path.write_text( helpers.dedent( """ from hatchling.metadata.plugin.interface import MetadataHookInterface class CustomHook(MetadataHookInterface): def update(self, metadata): metadata['description'] = metadata['name'] + 'bar' """ ) ) with pytest.raises( ValueError, match="The field `description` was set dynamically and therefore must be listed in `project.dynamic`", ): _ = metadata.core
TestHook
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/selectable.py
{ "start": 233374, "end": 238075 }
class ____( roles.InElementRole, Generative, GroupedElement, ColumnElement[_T] ): """Represent a scalar subquery. A :class:`_sql.ScalarSelect` is created by invoking the :meth:`_sql.SelectBase.scalar_subquery` method. The object then participates in other SQL expressions as a SQL column expression within the :class:`_sql.ColumnElement` hierarchy. .. seealso:: :meth:`_sql.SelectBase.scalar_subquery` :ref:`tutorial_scalar_subquery` - in the 2.0 tutorial """ _traverse_internals: _TraverseInternalsType = [ ("element", InternalTraversal.dp_clauseelement), ("type", InternalTraversal.dp_type), ] _from_objects: List[FromClause] = [] _is_from_container = True if not TYPE_CHECKING: _is_implicitly_boolean = False inherit_cache = True element: SelectBase def __init__(self, element: SelectBase) -> None: self.element = element self.type = element._scalar_type() self._propagate_attrs = element._propagate_attrs def __getattr__(self, attr: str) -> Any: return getattr(self.element, attr) def __getstate__(self) -> Dict[str, Any]: return {"element": self.element, "type": self.type} def __setstate__(self, state: Dict[str, Any]) -> None: self.element = state["element"] self.type = state["type"] @property def columns(self) -> NoReturn: raise exc.InvalidRequestError( "Scalar Select expression has no " "columns; use this object directly " "within a column-level expression." ) c = columns @_generative def where(self, crit: _ColumnExpressionArgument[bool]) -> Self: """Apply a WHERE clause to the SELECT statement referred to by this :class:`_expression.ScalarSelect`. 
""" self.element = cast("Select[Unpack[TupleAny]]", self.element).where( crit ) return self def self_group(self, against: Optional[OperatorType] = None) -> Self: return self def _ungroup(self) -> Self: return self @_generative def correlate( self, *fromclauses: Union[Literal[None, False], _FromClauseArgument], ) -> Self: r"""Return a new :class:`_expression.ScalarSelect` which will correlate the given FROM clauses to that of an enclosing :class:`_expression.Select`. This method is mirrored from the :meth:`_sql.Select.correlate` method of the underlying :class:`_sql.Select`. The method applies the :meth:_sql.Select.correlate` method, then returns a new :class:`_sql.ScalarSelect` against that statement. .. versionadded:: 1.4 Previously, the :meth:`_sql.ScalarSelect.correlate` method was only available from :class:`_sql.Select`. :param \*fromclauses: a list of one or more :class:`_expression.FromClause` constructs, or other compatible constructs (i.e. ORM-mapped classes) to become part of the correlate collection. .. seealso:: :meth:`_expression.ScalarSelect.correlate_except` :ref:`tutorial_scalar_subquery` - in the 2.0 tutorial """ self.element = cast( "Select[Unpack[TupleAny]]", self.element ).correlate(*fromclauses) return self @_generative def correlate_except( self, *fromclauses: Union[Literal[None, False], _FromClauseArgument], ) -> Self: r"""Return a new :class:`_expression.ScalarSelect` which will omit the given FROM clauses from the auto-correlation process. This method is mirrored from the :meth:`_sql.Select.correlate_except` method of the underlying :class:`_sql.Select`. The method applies the :meth:_sql.Select.correlate_except` method, then returns a new :class:`_sql.ScalarSelect` against that statement. .. versionadded:: 1.4 Previously, the :meth:`_sql.ScalarSelect.correlate_except` method was only available from :class:`_sql.Select`. 
:param \*fromclauses: a list of one or more :class:`_expression.FromClause` constructs, or other compatible constructs (i.e. ORM-mapped classes) to become part of the correlate-exception collection. .. seealso:: :meth:`_expression.ScalarSelect.correlate` :ref:`tutorial_scalar_subquery` - in the 2.0 tutorial """ self.element = cast( "Select[Unpack[TupleAny]]", self.element ).correlate_except(*fromclauses) return self
ScalarSelect
python
mlflow__mlflow
tests/sagemaker/mock/__init__.py
{ "start": 38645, "end": 39151 }
class ____: """ Object representing a model entry in the models list returned by SageMaker's "ListModels" API: https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListModels.html. """ def __init__(self, model, arn): self.model = model self.arn = arn @property def response_object(self): return { "ModelArn": self.arn, "ModelName": self.model.model_name, "CreationTime": self.model.creation_time, }
ModelSummary
python
walkccc__LeetCode
solutions/2545. Sort the Students by Their Kth Score/2545.py
{ "start": 0, "end": 142 }
class ____: def sortTheStudents(self, score: list[list[int]], k: int) -> list[list[int]]: return sorted(score, key=lambda x: -x[k])
Solution
python
spyder-ide__spyder
spyder/api/fonts.py
{ "start": 1150, "end": 1823 }
class ____: """Mixin to get the different Spyder font types from our config system.""" @classmethod def get_font( cls, font_type: str, font_size_delta: Optional[int] = 0 ) -> QFont: """ Get a font type as a QFont object. Parameters ---------- font_type: str A Spyder font type. This must be one of the `SpyderFontType` enum values. font_size_delta: int, optional Small increase or decrease over the default font size. The default is 0. """ return get_font(option=font_type, font_size_delta=font_size_delta)
SpyderFontsMixin
python
walkccc__LeetCode
solutions/140. Word Break II/140.py
{ "start": 0, "end": 585 }
class ____: def wordBreak(self, s: str, wordDict: list[str]) -> list[str]: wordSet = set(wordDict) @functools.lru_cache(None) def wordBreak(s: str) -> list[str]: ans = [] # 1 <= len(prefix) < len(s) for i in range(1, len(s)): prefix = s[:i] suffix = s[i:] if prefix in wordSet: for word in wordBreak(suffix): ans.append(prefix + ' ' + word) # `wordSet` contains the whole string s, so don't add any space. if s in wordSet: ans.append(s) return ans return wordBreak(s)
Solution
python
pytorch__pytorch
test/dynamo/test_misc.py
{ "start": 4631, "end": 5152 }
class ____: setup = False def __setattr__(self, key, value): assert torch.compiler.is_dynamo_compiling() or UserDefineSetAttr.setup super().__setattr__(f"pfx_{key}", value) def __getattr__(self, key, c=1): assert torch.compiler.is_dynamo_compiling() or UserDefineSetAttr.setup # c is added to force a guard on __defaults__ and checks the source for __getattr__ if c: return self.__dict__[f"pfx_{key}"] else: return None
UserDefineSetAttr
python
wandb__wandb
wandb/vendor/pygments/lexers/templates.py
{ "start": 11556, "end": 14781 }
class ____(RegexLexer): """ Generic `django <http://www.djangoproject.com/documentation/templates/>`_ and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer. It just highlights django/jinja code between the preprocessor directives, other data is left untouched by the lexer. """ name = 'Django/Jinja' aliases = ['django', 'jinja'] mimetypes = ['application/x-django-templating', 'application/x-jinja'] flags = re.M | re.S tokens = { 'root': [ (r'[^{]+', Other), (r'\{\{', Comment.Preproc, 'var'), # jinja/django comments (r'\{[*#].*?[*#]\}', Comment), # django comments (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)' r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})', bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc, Comment, Comment.Preproc, Text, Keyword, Text, Comment.Preproc)), # raw jinja blocks (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)' r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})', bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc, Text, Comment.Preproc, Text, Keyword, Text, Comment.Preproc)), # filter blocks (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)', bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function), 'block'), (r'(\{%)(-?\s*)([a-zA-Z_]\w*)', bygroups(Comment.Preproc, Text, Keyword), 'block'), (r'\{', Other) ], 'varnames': [ (r'(\|)(\s*)([a-zA-Z_]\w*)', bygroups(Operator, Text, Name.Function)), (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)', bygroups(Keyword, Text, Keyword, Text, Name.Function)), (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo), (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|' r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b', Keyword), (r'(loop|block|super|forloop)\b', Name.Builtin), (r'[a-zA-Z_][\w-]*', Name.Variable), (r'\.\w+', Name.Variable), (r':?"(\\\\|\\"|[^"])*"', String.Double), (r":?'(\\\\|\\'|[^'])*'", String.Single), (r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator), (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" r"0[xX][0-9a-fA-F]+[Ll]?", Number), ], 'var': [ (r'\s+', Text), (r'(-?)(\}\})', 
bygroups(Text, Comment.Preproc), '#pop'), include('varnames') ], 'block': [ (r'\s+', Text), (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'), include('varnames'), (r'.', Punctuation) ] } def analyse_text(text): rv = 0.0 if re.search(r'\{%\s*(block|extends)', text) is not None: rv += 0.4 if re.search(r'\{%\s*if\s*.*?%\}', text) is not None: rv += 0.1 if re.search(r'\{\{.*?\}\}', text) is not None: rv += 0.1 return rv
DjangoLexer
python
apache__airflow
airflow-core/src/airflow/ti_deps/deps/valid_state_dep.py
{ "start": 978, "end": 2517 }
class ____(BaseTIDep): """ Ensures that the task instance's state is in a given set of valid states. :param valid_states: A list of valid states that a task instance can have to meet this dependency. :return: whether or not the task instance's state is valid """ NAME = "Task Instance State" IGNORABLE = True def __init__(self, valid_states): super().__init__() if not valid_states: raise AirflowException("ValidStatesDep received an empty set of valid states.") self._valid_states = valid_states def __eq__(self, other: object) -> bool: """Check if two task instance dependencies are equal by comparing their types and valid states.""" if not isinstance(other, ValidStateDep): return NotImplemented return self._valid_states == other._valid_states def __hash__(self): """Compute the hash value based on the type of the task instance dependency and its valid states.""" return hash((type(self), tuple(self._valid_states))) @provide_session def _get_dep_statuses(self, ti, session, dep_context): if dep_context.ignore_ti_state: yield self._passing_status(reason="Context specified that state should be ignored.") return if ti.state in self._valid_states: yield self._passing_status(reason=f"Task state {ti.state} was valid.") return yield self._failing_status(reason=f"Task is in the '{ti.state}' state.")
ValidStateDep
python
ray-project__ray
python/ray/serve/tests/test_model_composition.py
{ "start": 5677, "end": 7818 }
class ____: def __init__(self, child, parent): self._child = child self._parent = parent async def __call__(self, *args): # Check that the grandparent and parent are talking to the same child. assert await self._child.remote() == await self._parent.remote() return "ok" def test_pass_handle_to_multiple(serve_instance): child = Child.bind() parent = Parent.bind(child) grandparent = GrandParent.bind(child, parent) handle = serve.run(grandparent) assert handle.remote().result() == "ok" def test_run_non_json_serializable_args(serve_instance): # Test that we can capture and bind non-json-serializable arguments. arr1 = array.array("d", [1.0, 2.0, 3.0]) arr2 = array.array("d", [2.0, 3.0, 4.0]) arr3 = array.array("d", [3.0, 4.0, 5.0]) @serve.deployment class A: def __init__(self, arr1, *, arr2): self.arr1 = arr1 self.arr2 = arr2 self.arr3 = arr3 def __call__(self, *args): return self.arr1, self.arr2, self.arr3 handle = serve.run(A.bind(arr1, arr2=arr2)) ret1, ret2, ret3 = handle.remote().result() assert all( [ ret1 == arr1, ret2 == arr2, ret3 == arr3, ] ) @serve.deployment def func(): return 1 def test_single_functional_node_base_case(serve_instance): # Base case should work handle = serve.run(func.bind()) url = get_application_url() assert handle.remote().result() == 1 assert httpx.get(url).text == "1" def test_unsupported_remote(): @serve.deployment class Actor: def ping(self): return "hello" with pytest.raises( AttributeError, match=r"\'Application\' object has no attribute \'remote\'" ): _ = Actor.bind().remote() @serve.deployment def func(): return 1 with pytest.raises( AttributeError, match=r"\'Application\' object has no attribute \'remote\'" ): _ = func.bind().remote() if __name__ == "__main__": sys.exit(pytest.main(["-v", "-s", __file__]))
GrandParent
python
django-import-export__django-import-export
tests/core/migrations/0015_withpositiveintegerfields.py
{ "start": 92, "end": 794 }
class ____(migrations.Migration): dependencies = [ ("core", "0014_bookwithchapternumbers"), ] operations = [ migrations.CreateModel( name="WithPositiveIntegerFields", fields=[ ( "id", models.BigAutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("big", models.PositiveBigIntegerField(null=True)), ("small", models.PositiveSmallIntegerField(null=True)), ], ), ]
Migration
python
networkx__networkx
networkx/classes/reportviews.py
{ "start": 32785, "end": 33687 }
class ____(OutMultiEdgeDataView): """An EdgeDataView for inward edges of MultiDiGraph; See EdgeDataView""" __slots__ = () def __iter__(self): return ( self._report(nbr, n, k, dd) for n, nbrs in self._nodes_nbrs() for nbr, kd in nbrs.items() for k, dd in kd.items() ) def __contains__(self, e): u, v = e[:2] if self._nbunch is not None and v not in self._nbunch: return False # this edge doesn't end in nbunch try: kdict = self._adjdict[v][u] except KeyError: return False if self.keys is True: k = e[2] dd = kdict[k] return e == self._report(u, v, k, dd) return any(e == self._report(u, v, k, dd) for k, dd in kdict.items()) # EdgeViews have set operations and no data reported
InMultiEdgeDataView
python
kamyu104__LeetCode-Solutions
Python/minimum-array-end.py
{ "start": 51, "end": 423 }
class ____(object): def minEnd(self, n, x): """ :type n: int :type x: int :rtype: int """ n -= 1 base_n = base_x = 1 while base_n <= n: if (x&base_x) == 0: if n&base_n: x |= base_x base_n <<= 1 base_x <<= 1 return x
Solution
python
scipy__scipy
scipy/stats/tests/test_continuous.py
{ "start": 88065, "end": 88409 }
class ____(ContinuousDistribution): _variable = _RealParameter('x', domain=_RealInterval(endpoints=(-np.inf, np.inf))) def _pdf_formula(self, x, *args, **kwargs): return (0.4 * 1/(1.1 * np.sqrt(2*np.pi)) * np.exp(-0.5*((x+0.25)/1.1)**2) + 0.6 * 1/(0.9 * np.sqrt(2*np.pi)) * np.exp(-0.5*((x-0.5)/0.9)**2))
MixedDist
python
dagster-io__dagster
examples/docs_snippets/docs_snippets_tests/snippet_checks/guides/components/integrations/test_sigma_utils.py
{ "start": 267, "end": 1787 }
class ____(SigmaOrganization): async def build_organization_data( self, sigma_filter=None, fetch_column_data: bool = True, fetch_lineage_data: bool = True, ) -> SigmaOrganizationData: """Returns mock Sigma organization data.""" # Create mock workbook workbook = SigmaWorkbook( properties={ "workbookId": str(uuid.uuid4()), "name": "Sample_Workbook", "url": f"{self.base_url}/workbook/sample", "path": "My Documents", "createdAt": "2024-01-01T00:00:00Z", "updatedAt": "2024-01-01T00:00:00Z", "latestVersion": 1, }, lineage=[], datasets=set(), direct_table_deps=set(), owner_email=None, materialization_schedules=None, ) # Create mock dataset dataset = SigmaDataset( properties={ "datasetId": str(uuid.uuid4()), "name": "Orders_Dataset", "url": f"{self.base_url}/dataset/orders", "description": "Sample orders dataset", "createdAt": "2024-01-01T00:00:00Z", "updatedAt": "2024-01-01T00:00:00Z", }, columns=set(), inputs=set(), ) return SigmaOrganizationData( workbooks=[workbook], datasets=[dataset], tables=[], )
MockSigmaOrganization
python
doocs__leetcode
solution/0200-0299/0280.Wiggle Sort/Solution.py
{ "start": 0, "end": 378 }
class ____: def wiggleSort(self, nums: List[int]) -> None: """ Do not return anything, modify nums in-place instead. """ for i in range(1, len(nums)): if (i % 2 == 1 and nums[i] < nums[i - 1]) or ( i % 2 == 0 and nums[i] > nums[i - 1] ): nums[i], nums[i - 1] = nums[i - 1], nums[i]
Solution
python
skorch-dev__skorch
skorch/tests/test_toy.py
{ "start": 94, "end": 3326 }
class ____: @pytest.fixture def module_cls(self): from skorch.toy import MLPModule return MLPModule def test_one_hidden(self, module_cls): module = module_cls() parameters = list(module.named_parameters()) # 2 linear * (weight + bias) assert len(parameters) == 4 # 2 linear, 1 relu, 1 dropout assert len(module.sequential) == 4 assert isinstance(module.sequential[0], nn.Linear) assert isinstance(module.sequential[1], nn.ReLU) assert isinstance(module.sequential[2], nn.Dropout) assert isinstance(module.sequential[3], nn.Linear) def test_two_hidden(self, module_cls): module = module_cls(num_hidden=2) parameters = list(module.named_parameters()) # 3 linear * (weight + bias) assert len(parameters) == 6 # 3 linear, 2 relu, 2 dropout assert len(module.sequential) == 7 assert isinstance(module.sequential[0], nn.Linear) assert isinstance(module.sequential[1], nn.ReLU) assert isinstance(module.sequential[2], nn.Dropout) assert isinstance(module.sequential[3], nn.Linear) assert isinstance(module.sequential[4], nn.ReLU) assert isinstance(module.sequential[5], nn.Dropout) assert isinstance(module.sequential[6], nn.Linear) @pytest.mark.parametrize('num_hidden', [0, 1, 2, 5, 10]) def test_many_hidden(self, module_cls, num_hidden): module = module_cls(num_hidden=num_hidden) parameters = list(module.named_parameters()) assert len(parameters) == 2 * (num_hidden + 1) assert len(module.sequential) == (3 * num_hidden) + 1 def test_output_nonlin(self, module_cls): module = module_cls(output_nonlin=nn.Sigmoid()) # 2 linear, 1 relu, 1 dropout, 1 sigmoid assert len(module.sequential) == 5 assert isinstance(module.sequential[0], nn.Linear) assert isinstance(module.sequential[1], nn.ReLU) assert isinstance(module.sequential[2], nn.Dropout) assert isinstance(module.sequential[3], nn.Linear) assert isinstance(module.sequential[4], nn.Sigmoid) def test_output_squeezed(self, module_cls): X = torch.zeros((5, 20)).float() module = module_cls(output_units=1) y = module(X) assert y.dim() == 2 module 
= module_cls(squeeze_output=True, output_units=1) y = module(X) assert y.dim() == 1 def test_dropout(self, module_cls): module = module_cls(dropout=0.567) assert np.isclose(module.sequential[2].p, 0.567) def test_make_classifier(self): from skorch.toy import make_classifier module = make_classifier()() assert isinstance(module.sequential[-1], nn.Softmax) def test_make_binary_classifier(self): from skorch.toy import make_binary_classifier module = make_binary_classifier()() assert isinstance(module.sequential[-1], nn.Linear) assert module.squeeze_output is True def test_make_regressor(self): from skorch.toy import make_regressor module = make_regressor()() assert module.sequential[-1].out_features == 1
TestMLPModule
python
PrefectHQ__prefect
tests/utilities/schema_tools/test_hydration.py
{ "start": 6651, "end": 9199 }
class ____: @pytest.mark.parametrize( "input_object, expected_output", [ # Valid Jinja template ( {"param": {"__prefect_kind": "jinja", "template": "Hello {{ name }}"}}, {"param": ValidJinja("Hello {{ name }}")}, ), # Jinja template with syntax error ( {"param": {"__prefect_kind": "jinja", "template": "Hello {{ name"}}, { "param": InvalidJinja( "unexpected end of template, expected 'end of print statement'." ) }, ), # Security error in Jinja template ( { "param": { "__prefect_kind": "jinja", "template": """ {% for i in range(1) %} Level 1 {% for j in range(1) %} Level 2 {% for k in range(1) %} Level 3 {% endfor %} {% endfor %} {% endfor %} """, } }, { "param": InvalidJinja( "Contains nested for loops at a depth of 3. Templates can nest for loops no more than 2 loops deep." ) }, ), # Missing template ( {"param": {"__prefect_kind": "jinja"}}, {"param": TemplateNotFound()}, ), ], ) def test_hydrate_with_jinja_prefect_kind(self, input_object, expected_output): assert hydrate(input_object) == expected_output def test_render_jinja(self): values = {"param": {"__prefect_kind": "jinja", "template": "Hello {{ name }}"}} ctx = HydrationContext(render_jinja=False, jinja_context={"name": "world"}) assert hydrate(values, ctx) == {"param": ValidJinja("Hello {{ name }}")} # render ctx = HydrationContext(render_jinja=True, jinja_context={"name": "world"}) assert hydrate(values, ctx) == {"param": "Hello world"} # render with no jinja_context ctx = HydrationContext(render_jinja=True, jinja_context={}) assert hydrate(values, ctx) == {"param": "Hello "}
TestHydrateWithJinjaPrefectKind
python
sympy__sympy
sympy/polys/domains/modularinteger.py
{ "start": 406, "end": 6042 }
class ____(PicklableWithSlots, DomainElement): """A class representing a modular integer. """ mod, dom, sym, _parent = None, None, None, None __slots__ = ('val',) def parent(self): return self._parent def __init__(self, val): if isinstance(val, self.__class__): self.val = val.val % self.mod else: self.val = self.dom.convert(val) % self.mod def modulus(self): return self.mod def __hash__(self): return hash((self.val, self.mod)) def __repr__(self): return "%s(%s)" % (self.__class__.__name__, self.val) def __str__(self): return "%s mod %s" % (self.val, self.mod) def __int__(self): return int(self.val) def to_int(self): sympy_deprecation_warning( """ModularInteger.to_int() is deprecated. Use int(a) or K = GF(p) and K.to_int(a) instead of a.to_int(). """, deprecated_since_version="1.13", active_deprecations_target="modularinteger-to-int", ) if self.sym: if self.val <= self.mod // 2: return self.val else: return self.val - self.mod else: return self.val def __pos__(self): return self def __neg__(self): return self.__class__(-self.val) @classmethod def _get_val(cls, other): if isinstance(other, cls): return other.val else: try: return cls.dom.convert(other) except CoercionFailed: return None def __add__(self, other): val = self._get_val(other) if val is not None: return self.__class__(self.val + val) else: return NotImplemented def __radd__(self, other): return self.__add__(other) def __sub__(self, other): val = self._get_val(other) if val is not None: return self.__class__(self.val - val) else: return NotImplemented def __rsub__(self, other): return (-self).__add__(other) def __mul__(self, other): val = self._get_val(other) if val is not None: return self.__class__(self.val * val) else: return NotImplemented def __rmul__(self, other): return self.__mul__(other) def __truediv__(self, other): val = self._get_val(other) if val is not None: return self.__class__(self.val * self._invert(val)) else: return NotImplemented def __rtruediv__(self, other): return 
self.invert().__mul__(other) def __mod__(self, other): val = self._get_val(other) if val is not None: return self.__class__(self.val % val) else: return NotImplemented def __rmod__(self, other): val = self._get_val(other) if val is not None: return self.__class__(val % self.val) else: return NotImplemented def __pow__(self, exp): if not exp: return self.__class__(self.dom.one) if exp < 0: val, exp = self.invert().val, -exp else: val = self.val return self.__class__(pow(val, int(exp), self.mod)) def _compare(self, other, op): val = self._get_val(other) if val is None: return NotImplemented return op(self.val, val % self.mod) def _compare_deprecated(self, other, op): val = self._get_val(other) if val is None: return NotImplemented sympy_deprecation_warning( """Ordered comparisons with modular integers are deprecated. Use e.g. int(a) < int(b) instead of a < b. """, deprecated_since_version="1.13", active_deprecations_target="modularinteger-compare", stacklevel=4, ) return op(self.val, val % self.mod) def __eq__(self, other): return self._compare(other, operator.eq) def __ne__(self, other): return self._compare(other, operator.ne) def __lt__(self, other): return self._compare_deprecated(other, operator.lt) def __le__(self, other): return self._compare_deprecated(other, operator.le) def __gt__(self, other): return self._compare_deprecated(other, operator.gt) def __ge__(self, other): return self._compare_deprecated(other, operator.ge) def __bool__(self): return bool(self.val) @classmethod def _invert(cls, value): return cls.dom.invert(value, cls.mod) def invert(self): return self.__class__(self._invert(self.val)) _modular_integer_cache: dict[tuple[Any, Any, Any], type[ModularInteger]] = {} def ModularIntegerFactory(_mod, _dom, _sym, parent): """Create custom class for specific integer modulus.""" try: _mod = _dom.convert(_mod) except CoercionFailed: ok = False else: ok = True if not ok or _mod < 1: raise ValueError("modulus must be a positive integer, got %s" % _mod) key 
= _mod, _dom, _sym try: cls = _modular_integer_cache[key] except KeyError: class cls(ModularInteger): mod, dom, sym = _mod, _dom, _sym _parent = parent if _sym: cls.__name__ = "SymmetricModularIntegerMod%s" % _mod else: cls.__name__ = "ModularIntegerMod%s" % _mod _modular_integer_cache[key] = cls return cls
ModularInteger
python
kamyu104__LeetCode-Solutions
Python/longest-ideal-subsequence.py
{ "start": 34, "end": 357 }
class ____(object): def longestIdealString(self, s, k): """ :type s: str :type k: int :rtype: int """ dp = [0]*26 for c in s: x = ord(c)-ord('a') dp[x] = max(dp[i] for i in xrange(max(x-k, 0), min(x+k+1, 26)))+1 return max(dp)
Solution
python
ansible__ansible
test/units/executor/module_common/test_module_common.py
{ "start": 1954, "end": 2790 }
class ____: def test_slurp_nonexistent(self, mocker): mocker.patch('os.path.exists', side_effect=lambda x: False) with pytest.raises(ansible.errors.AnsibleError): amc._slurp('no_file') def test_slurp_file(self, mocker): mocker.patch('os.path.exists', side_effect=lambda x: True) m = mocker.mock_open(read_data='This is a test') mocker.patch('builtins.open', m) assert amc._slurp('some_file') == 'This is a test' def test_slurp_file_with_newlines(self, mocker): mocker.patch('os.path.exists', side_effect=lambda x: True) m = mocker.mock_open(read_data='#!/usr/bin/python\ndef test(args):\nprint("hi")\n') mocker.patch('builtins.open', m) assert amc._slurp('some_file') == '#!/usr/bin/python\ndef test(args):\nprint("hi")\n'
TestSlurp
python
walkccc__LeetCode
solutions/1848. Minimum Distance to the Target Element/1848.py
{ "start": 0, "end": 227 }
class ____: def getMinDistance(self, nums: list[int], target: int, start: int) -> int: ans = math.inf for i, num in enumerate(nums): if num == target: ans = min(ans, abs(i - start)) return ans
Solution
python
keras-team__keras
keras/src/ops/math.py
{ "start": 27382, "end": 31780 }
class ____(Operation): def __init__( self, sequence_length, sequence_stride, fft_length, length=None, window="hann", center=True, *, name=None, ): super().__init__(name=name) self.sequence_length = sequence_length self.sequence_stride = sequence_stride self.fft_length = fft_length self.length = length self.window = window self.center = center def compute_output_spec(self, x): if not isinstance(x, (tuple, list)) or len(x) != 2: raise ValueError( "Input `x` should be a tuple of two tensors - real and " f"imaginary. Received: x={x}" ) real, imag = x # Both real and imaginary parts should have the same shape. if real.shape != imag.shape: raise ValueError( "Input `x` should be a tuple of two tensors - real and " "imaginary. Both the real and imaginary parts should have the " f"same shape. Received: x[0].shape = {real.shape}, " f"x[1].shape = {imag.shape}" ) if len(real.shape) < 2: raise ValueError( f"Input should have rank >= 2. " f"Received: input.shape = {real.shape}" ) if real.shape[-2] is not None: output_size = ( real.shape[-2] - 1 ) * self.sequence_stride + self.fft_length if self.length is not None: output_size = self.length elif self.center: output_size = output_size - (self.fft_length // 2) * 2 else: output_size = None new_shape = real.shape[:-2] + (output_size,) return KerasTensor(shape=new_shape, dtype=real.dtype) def call(self, x): return backend.math.istft( x, sequence_length=self.sequence_length, sequence_stride=self.sequence_stride, fft_length=self.fft_length, length=self.length, window=self.window, center=self.center, ) @keras_export("keras.ops.istft") def istft( x, sequence_length, sequence_stride, fft_length, length=None, window="hann", center=True, ): """Inverse Short-Time Fourier Transform along the last axis of the input. To reconstruct an original waveform, the parameters should be the same in `stft`. Args: x: Tuple of the real and imaginary parts of the input tensor. Both tensors in the tuple should be of floating type. 
sequence_length: An integer representing the sequence length. sequence_stride: An integer representing the sequence hop size. fft_length: An integer representing the size of the FFT that produced `stft`. Should be of type `int32`. length: An integer representing the output is clipped to exactly length. If not specified, no padding or clipping take place. Defaults to `None`. window: A string, a tensor of the window or `None`. If `window` is a string, available values are `"hann"` and `"hamming"`. If `window` is a tensor, it will be used directly as the window and its length must be `sequence_length`. If `window` is `None`, no windowing is used. Defaults to `"hann"`. center: Whether `x` was padded on both sides so that the t-th sequence is centered at time `t * sequence_stride`. Defaults to `True`. Returns: A tensor containing the inverse Short-Time Fourier Transform along the last axis of `x`. Example: >>> x = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0]) >>> istft(stft(x, 1, 1, 1), 1, 1, 1) array([0.0, 1.0, 2.0, 3.0, 4.0]) """ if any_symbolic_tensors(x): return ISTFT( sequence_length=sequence_length, sequence_stride=sequence_stride, fft_length=fft_length, window=window, center=center, ).symbolic_call(x) return backend.math.istft( x, sequence_length=sequence_length, sequence_stride=sequence_stride, fft_length=fft_length, length=length, window=window, center=center, )
ISTFT
python
django__django
tests/lookup/models.py
{ "start": 1248, "end": 1373 }
class ____(models.Transform): lookup_name = "nulled" template = "NULL" @NulledTextField.register_lookup
NulledTransform
python
realpython__materials
python-double-underscore/point.py
{ "start": 0, "end": 467 }
class ____: def __init__(self, x, y): self.x = x self.y = y @property def x(self): return self._x @x.setter def x(self, value): self._x = _validate(value) @property def y(self): return self._y @y.setter def y(self, value): self._y = _validate(value) def _validate(value): if not isinstance(value, int | float): raise ValueError("number expected") return value
Point
python
walkccc__LeetCode
solutions/2012. Sum of Beauty in the Array/2012.py
{ "start": 0, "end": 476 }
class ____: def sumOfBeauties(self, nums: list[int]) -> int: n = len(nums) ans = 0 minOfRight = [0] * (n - 1) + [nums[-1]] for i in range(n - 2, 1, -1): minOfRight[i] = min(nums[i], minOfRight[i + 1]) maxOfLeft = nums[0] for i in range(1, n - 1): if maxOfLeft < nums[i] < minOfRight[i + 1]: ans += 2 elif nums[i - 1] < nums[i] < nums[i + 1]: ans += 1 maxOfLeft = max(maxOfLeft, nums[i]) return ans
Solution
python
PyCQA__pylint
tests/functional/a/arguments_renamed.py
{ "start": 1422, "end": 1637 }
class ____(Parent): def test(self, var): # [arguments-renamed] return var + 1 def kwargs_test(self, *, var1, kw2): #[arguments-differ] print(f"keyword parameters are {var1} and {kw2}.")
Child2
python
pypa__warehouse
tests/unit/manage/test_forms.py
{ "start": 14980, "end": 16323 }
class ____: def test_validate(self): fake_webauthn = object() user_id = (pretend.stub(),) user_service = pretend.stub( get_webauthn_by_label=pretend.call_recorder(lambda *a: fake_webauthn) ) form = forms.DeleteWebAuthnForm( formdata=MultiDict({"label": "fake label"}), user_service=user_service, user_id=user_id, ) assert form.user_service is user_service assert form.user_id is user_id assert form.validate(), str(form.errors) assert form.webauthn is fake_webauthn def test_validate_label_missing(self): form = forms.DeleteWebAuthnForm( user_service=pretend.stub(), user_id=pretend.stub() ) assert not form.validate() assert form.label.errors.pop() == "Specify a device name" def test_validate_label_not_in_use(self): user_service = pretend.stub( get_webauthn_by_label=pretend.call_recorder(lambda *a: None) ) form = forms.DeleteWebAuthnForm( formdata=MultiDict({"label": "fake label"}), user_service=user_service, user_id=pretend.stub(), ) assert not form.validate() assert form.label.errors.pop() == "No WebAuthn key with given label"
TestDeleteWebAuthnForm
python
spack__spack
lib/spack/spack/detection/test.py
{ "start": 719, "end": 950 }
class ____(NamedTuple): """Data structure to model assertions on detection tests""" #: Spec to be detected spec: str #: Attributes expected in the external spec extra_attributes: Dict[str, str]
ExpectedTestResult
python
coleifer__peewee
tests/manytomany.py
{ "start": 1344, "end": 1447 }
class ____(TestModel): name = TextField(unique=True) LogoColorDeferred = DeferredThroughModel()
Color
python
ray-project__ray
python/ray/train/v2/_internal/execution/checkpoint/checkpoint_manager.py
{ "start": 1208, "end": 2346 }
class ____(BaseModel): # Increment version if the schema changes version: int = 0 checkpoint_results: List[_TrainingResultState] latest_checkpoint_result: Optional[_TrainingResultState] def _get_training_result_from_state( state: _TrainingResultState, storage_context: StorageContext, ) -> _TrainingResult: """Get a TrainingResult object from a Pydantic state object.""" return _TrainingResult( checkpoint=Checkpoint( path=storage_context.build_checkpoint_path_from_name( state.checkpoint_dir_name ), filesystem=storage_context.storage_filesystem, ), metrics=state.metrics, ) def _get_state_from_training_result( training_result: _TrainingResult, storage_context: StorageContext, ) -> _TrainingResultState: """Get a Pydantic state object from a TrainingResult object.""" return _TrainingResultState( checkpoint_dir_name=storage_context.extract_checkpoint_dir_name_from_path( training_result.checkpoint.path ), metrics=training_result.metrics, )
_CheckpointManagerState
python
huggingface__transformers
src/transformers/models/instructblip/modeling_instructblip.py
{ "start": 45987, "end": 62469 }
class ____(InstructBlipPreTrainedModel, GenerationMixin): config: InstructBlipConfig main_input_name = "pixel_values" _can_compile_fullgraph = True _keep_in_fp32_modules = ["query_tokens"] # TODO @ArthurZucker I don't know why this is required for FP8 def __init__(self, config: InstructBlipConfig): super().__init__(config) self.vision_model = InstructBlipVisionModel._from_config(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.qformer = InstructBlipQFormerModel._from_config(config.qformer_config) self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) if config.use_decoder_only_language_model: language_model = AutoModelForCausalLM.from_config(config.text_config) else: language_model = AutoModelForSeq2SeqLM.from_config(config.text_config) if language_model._no_split_modules is not None: self._no_split_modules.extend(language_model._no_split_modules) if language_model._keep_in_fp32_modules is not None: self._keep_in_fp32_modules.extend(language_model._keep_in_fp32_modules) self.language_model = language_model # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.language_model.get_output_embeddings() def get_encoder(self, modality=None): if modality is None: return self.language_model.get_encoder() else: return super().get_encoder(modality=modality) def get_decoder(self): return self.language_model.get_decoder() # Copied from transformers.models.instructblip.modeling_instructblip.InstructBlipModel._preprocess_accelerate def _preprocess_accelerate(self): r""" Some pre-processing hacks to make the 
model `accelerate` compatible. Check https://github.com/huggingface/transformers/pull/21707 for more details. """ hf_device_map = self.hf_device_map if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1: # warn users about unexpected behavior when using multi-GPU + InstructBLIP + `accelerate`. logger.warning( "The `language_model` is not in the `hf_device_map` dictionary and you are running your script" " in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`." " Please pass a `device_map` that contains `language_model` to remove this warning." " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for" " more details on creating a `device_map` for large models.", ) if hasattr(self.language_model, "_hf_hook"): self.language_model._hf_hook.io_same_device = True # For `generate` compatibility def get_image_features( self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = False, ): """ Encodes images into continuous embeddings that can be forwarded to the language model. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. 
""" # step 1: forward the images through the vision encoder, # to get image embeddings of shape (batch_size, seq_len, hidden_size) vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, ) image_embeds = vision_outputs[0] # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) # difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer( input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True, ) query_output = query_outputs[0][:, : query_tokens.size(1), :] # step 3: use the language model, conditioned on the query outputs and the prompt language_model_inputs = self.language_projection(query_output) if return_dict: return language_model_inputs, vision_outputs, query_outputs return language_model_inputs def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor): """ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`. 
""" if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) else: special_image_mask = input_ids == self.config.image_token_id special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) return special_image_mask @can_return_tuple @auto_docstring def forward( self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor] = None, input_ids: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, interpolate_pos_encoding: bool = False, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, InstructBlipForConditionalGenerationModelOutput]: r""" qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided to serve as text prompt, which the Q-Former model will encode. Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for details. [What are input IDs?](../glossary#input-ids) qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. 
Causal mask will also be used by default. Only relevant in case an encoder-decoder language model (like T5) is used. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration >>> import torch >>> from PIL import Image >>> import requests >>> model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b") >>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b") >>> device = "cuda" if torch.cuda.is_available() else "cpu" >>> model.to(device) # doctest: +IGNORE_RESULT >>> url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") >>> prompt = "What is unusual about this image?" >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device) >>> outputs = model.generate( ... **inputs, ... do_sample=False, ... num_beams=5, ... max_length=256, ... min_length=1, ... top_p=0.9, ... repetition_penalty=1.5, ... length_penalty=1.0, ... temperature=1, ... ) >>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip() >>> print(generated_text) The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV, which is parked in the middle of a busy city street. This is an unconventional approach to ironing clothes, as it requires the man to balance himself and his ironing equipment on top of the vehicle while navigating through traffic. Additionally, the presence of taxis and other vehicles in the scene further emphasizes the unusual nature of this situation. 
```""" language_model_inputs, vision_outputs, query_outputs = self.get_image_features( pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, ) if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs) if self.config.use_decoder_only_language_model: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs, ) logits = outputs[0] loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs ) else: kwargs["return_dict"] = True outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=labels, **kwargs, ) loss = outputs.loss logits = outputs.logits return InstructBlipForConditionalGenerationModelOutput( loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs, ) @torch.no_grad() def generate( self, pixel_values: torch.FloatTensor, qformer_input_ids: Optional[torch.LongTensor] = None, qformer_attention_mask: Optional[torch.LongTensor] = None, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, interpolate_pos_encoding: bool = False, **generate_kwargs, ) -> torch.LongTensor: """ Overrides `generate` function to be able to use the model as a conditional generator. 
Args: pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)): Input images to be processed. qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): The sequence used as a prompt to be fed to the Q-Former module. qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): Mask to avoid performing attention on padding token indices. input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): The sequence used as a prompt for the generation. attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): Mask to avoid performing attention on padding token indices. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Embedded representation of the inputs. Should be float, not int tokens. interpolate_pos_encoding (`bool`, *optional*, defaults to `False`): Whether to interpolate the positional encoding of the image embeddings. Returns: captions (list): A list of strings of length batch_size * num_captions. 
""" if hasattr(self, "hf_device_map"): # preprocess for `accelerate` self._preprocess_accelerate() batch_size = pixel_values.shape[0] language_model_inputs, vision_outputs, query_outputs = self.get_image_features( pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, ) if inputs_embeds is None: if input_ids is None: image_tokens = [self.config.image_token_index] * self.config.num_query_tokens start_tokens = image_tokens + [self.config.text_config.bos_token_id] input_ids = torch.tensor([start_tokens], dtype=torch.long, device=pixel_values.device) input_ids = input_ids.repeat(batch_size, 1) inputs_embeds = self.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs) inputs = {"inputs_embeds": inputs_embeds, "attention_mask": attention_mask} if not self.language_model.config.is_encoder_decoder: inputs["input_ids"] = input_ids outputs = self.language_model.generate(**inputs, **generate_kwargs) return outputs __all__ = [ "InstructBlipQFormerModel", "InstructBlipPreTrainedModel", "InstructBlipModel", "InstructBlipForConditionalGeneration", "InstructBlipVisionModel", ]
InstructBlipForConditionalGeneration
python
pypa__warehouse
tests/unit/accounts/test_views.py
{ "start": 143913, "end": 146844 }
class ____: @pytest.mark.parametrize("next_route", [None, "/manage/accounts", "/projects/"]) def test_reauth(self, monkeypatch, pyramid_request, pyramid_services, next_route): user_service = pretend.stub(get_password_timestamp=lambda uid: 0) response = pretend.stub() monkeypatch.setattr(views, "HTTPSeeOther", lambda url: response) pyramid_services.register_service(user_service, IUserService, None) pyramid_request.route_path = lambda *args, **kwargs: pretend.stub() pyramid_request.session.record_auth_timestamp = pretend.call_recorder( lambda *args: None ) pyramid_request.session.record_password_timestamp = lambda ts: None pyramid_request.user = pretend.stub(id=pretend.stub, username=pretend.stub()) pyramid_request.matched_route = pretend.stub(name=pretend.stub()) pyramid_request.matchdict = {"foo": "bar"} pyramid_request.GET = pretend.stub(mixed=lambda: {"baz": "bar"}) form_obj = pretend.stub( next_route=pretend.stub(data=next_route), next_route_matchdict=pretend.stub(data="{}"), next_route_query=pretend.stub(data="{}"), validate=lambda: True, ) form_class = pretend.call_recorder(lambda d, **kw: form_obj) if next_route is not None: pyramid_request.method = "POST" pyramid_request.POST["next_route"] = next_route pyramid_request.POST["next_route_matchdict"] = "{}" pyramid_request.POST["next_route_query"] = "{}" _ = views.reauthenticate(pyramid_request, _form_class=form_class) assert pyramid_request.session.record_auth_timestamp.calls == ( [pretend.call()] if next_route is not None else [] ) assert form_class.calls == [ pretend.call( pyramid_request.POST, request=pyramid_request, username=pyramid_request.user.username, next_route=pyramid_request.matched_route.name, next_route_matchdict=json.dumps(pyramid_request.matchdict), next_route_query=json.dumps(pyramid_request.GET.mixed()), action="reauthenticate", user_service=user_service, check_password_metrics_tags=[ "method:reauth", "auth_method:reauthenticate_form", ], ) ] def test_reauth_no_user(self, monkeypatch, 
pyramid_request): pyramid_request.user = None pyramid_request.route_path = pretend.call_recorder(lambda a: "/the-redirect") result = views.reauthenticate(pyramid_request) assert isinstance(result, HTTPSeeOther) assert pyramid_request.route_path.calls == [pretend.call("accounts.login")] assert result.headers["Location"] == "/the-redirect"
TestReAuthentication
python
django-haystack__django-haystack
haystack/inputs.py
{ "start": 933, "end": 1191 }
class ____(BaseInput): """ An input type for sanitizing user/untrusted input. """ input_type_name = "clean" def prepare(self, query_obj): query_string = super().prepare(query_obj) return query_obj.clean(query_string)
Clean
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/model_shaping.py
{ "start": 380, "end": 3606 }
class ____: __dict__: Dict[str, Any] = {} def _is_dataclass_instance(obj) -> bool: ... def fields(obj): ... def asdict(obj: RecordSchema, *, dict_factory: Any = dict) -> Dict[str, Any]: """Return the fields of a RecordSchema instance as a new dictionary mapping field names to field values. """ return _asdict_inner(obj, dict_factory) def _asdict_inner(obj: Any, dict_factory: Any) -> Any: meta = getattr(obj, RecordSchema._META_PROP, {}) if _is_dataclass_instance(obj): result = [] for f in fields(obj): value = _asdict_inner(getattr(obj, f.name), dict_factory) field_meta = meta.get(f.name) if value is not None or (field_meta and field_meta.include_none): name_override = field_meta and field_meta.name result.append((name_override or f.name, value)) return dict_factory(result) elif isinstance(obj, (list, tuple)): return type(obj)(cast(List[Any], (_asdict_inner(v, dict_factory) for v in obj))) elif isinstance(obj, DictRecord): result = [] # item access in dict is already by serialized name for k, v in obj.items(): value = _asdict_inner(v, dict_factory) field_meta = meta.get(k) if v is not None or (field_meta and field_meta.include_none): result.append((k, value)) return dict_factory(result) elif isinstance(obj, MutableRecord): obj = obj.__dict__ result = [] for k, v in obj.items(): value = _asdict_inner(v, dict_factory) field_meta = meta.get(k) if v is not None or (field_meta and field_meta.include_none): name_override = field_meta and field_meta.name result.append((name_override or k, value)) return dict_factory(result) else: return obj def asdict_test(obj): return asdict(obj) def obscure_test(obj): return type(obj)(_test_source()) def shape_multi_sink(obj): _rce(obj.foo) _rce(obj.bar) _rce(obj) _sql(obj.bar) def shape_multi_source(): if 1 > 2: return { "a": _user_controlled(), "a": {"b": _user_controlled()}, "a": {"b": {"c": _user_controlled()}}, } else: return { "a": {"b": _cookies()}, } def tito_shaping(parameters: Dict[str, Any]) -> Dict[str, Any]: return { "foo": 
parameters.get("foo"), "bar": parameters.get("bar"), "to_string": str(parameters), } def test_tito_shaping() -> None: obj = tito_shaping({"foo": _test_source(), "bar": {}}) _test_sink(obj["foo"]) # True Positive _test_sink(obj["bar"]) # TODO(T163123131): False Positive in model shaping _test_sink(obj["to_string"]) # True Positive obj = tito_shaping({"foo": {"source": _test_source(), "benign": ""}, "bar": {}}) _test_sink(obj["foo"]["source"]) # True Positive _test_sink(obj["foo"]["benign"]) # TODO(T163123131): False Positive in model shaping _test_sink(obj["bar"]) # TODO(T163123131): False Positive in model shaping _test_sink(obj["to_string"]) # True Positive
MutableRecord
python
tensorflow__tensorflow
tensorflow/compiler/tests/xla_device_gpu_test.py
{ "start": 1056, "end": 1796 }
class ____(test.TestCase): def __init__(self, method_name="runTest"): super(XlaDeviceGpuTest, self).__init__(method_name) context.context().enable_xla_devices() def testCopiesToAndFromGpuWork(self): """Tests that copies between GPU and XLA devices work.""" if not config.list_physical_devices("GPU"): return with session_lib.Session() as sess: x = array_ops.placeholder(dtypes.float32, [2]) with ops.device("GPU"): y = x * 2 with ops.device("device:XLA_CPU:0"): z = y * y with ops.device("GPU"): w = y + z result = sess.run(w, {x: [1.5, 0.5]}) self.assertAllClose(result, [12., 2.], rtol=1e-3) if __name__ == "__main__": test.main()
XlaDeviceGpuTest
python
pytorch__pytorch
test/test_transformers.py
{ "start": 221571, "end": 229655 }
class ____(NNTestCase): def run_test( self, device, make_q, make_kv, attn_bias=None, forw_tolerances: Optional[Tolerances] = None, grad_tolerances: Optional[Tolerances] = None, backend=None, causal_variant=None, ): if backend is not None: torch._dynamo.reset() query, key, value = make_q(), make_kv(), make_kv() query_prototype, key_prototype, value_prototype = query_key_value_clones(query, key, value) realized = attn_bias._materialize(device) if attn_bias is not None else None pytorch_output = scaled_dot_product_attention( query, key, value, attn_mask=realized, dropout_p=0.0, is_causal=False ) sdpa_op = ( torch.compile(scaled_dot_product_attention, backend=backend) if backend is not None else scaled_dot_product_attention ) sdpa_output = sdpa_op( query_prototype, key_prototype, value_prototype, attn_mask=attn_bias, dropout_p=0.0, is_causal=False, scale=None, ) dOut = torch.randn_like(pytorch_output) pytorch_output.backward(dOut) sdpa_output.backward(dOut) # Use default assert_close tolerances for dtypes if forw_tolerances is None: forw_tolerances = Tolerances(atol=None, rtol=None) if grad_tolerances is None: grad_tolerances = Tolerances(atol=None, rtol=None) torch.testing.assert_close(pytorch_output, sdpa_output, rtol=forw_tolerances.rtol, atol=forw_tolerances.atol) torch.testing.assert_close(query.grad, query_prototype.grad, rtol=grad_tolerances.rtol, atol=grad_tolerances.atol) torch.testing.assert_close(key.grad, key_prototype.grad, rtol=grad_tolerances.rtol, atol=grad_tolerances.atol) torch.testing.assert_close(value.grad, value_prototype.grad, rtol=grad_tolerances.rtol, atol=grad_tolerances.atol) @parametrize("causal_variant", [CausalVariant.UPPER_LEFT, CausalVariant.LOWER_RIGHT]) @parametrize( "shape", [(16, 16, 128, 128, 16), (16, 16, 128, 256, 32), (16, 16, 256, 128, 32), (1, 1, 23, 56, 15)], ) def test_causal_variants(self, device, causal_variant: CausalVariant, shape: list[tuple[int]]): make_tensor = partial( torch.rand, device=device, dtype=torch.float16, 
requires_grad=True ) bsz, num_heads, seq_len_q, seq_len_kv, head_dim = shape make_q_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_q, head_dim)) make_kv_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_kv, head_dim)) if causal_variant == CausalVariant.LOWER_RIGHT and seq_len_q > seq_len_kv: self.skipTest( "Lower right causal mask will produce NaNs in the output when seq_len_q > seq_len_kv!" ) forw_tol = Tolerances(1e-3, 1e-3) grad_tol = Tolerances(5e-3, 5e-3) if causal_variant == CausalVariant.UPPER_LEFT: attn_bias = causal_upper_left(seq_len_q, seq_len_kv) else: attn_bias = causal_lower_right(seq_len_q, seq_len_kv) with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION, SDPBackend.FLASH_ATTENTION, SDPBackend.MATH, SDPBackend.CUDNN_ATTENTION]): self.run_test(device, make_q_tensor, make_kv_tensor, attn_bias, forw_tol, grad_tol, backend=None) @parametrize("causal_variant", [CausalVariant.UPPER_LEFT, CausalVariant.LOWER_RIGHT]) @parametrize( "shape", [(16, 16, 128, 128, 16), (16, 16, 128, 256, 32), (16, 16, 256, 128, 32), (1, 1, 23, 56, 15)], ) @unittest.skipIf(IS_WINDOWS, "torch.compile is not supported on windows") @skipIfTorchDynamo("This function already calls torch.compile.") def test_causal_variants_compile(self, device, causal_variant: CausalVariant, shape: list[tuple[int]]): cnts = CompileCounterWithBackend("aot_eager") make_tensor = partial( torch.rand, device=device, dtype=torch.float16, requires_grad=True ) bsz, num_heads, seq_len_q, seq_len_kv, head_dim = shape make_q_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_q, head_dim)) make_kv_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_kv, head_dim)) if causal_variant == CausalVariant.LOWER_RIGHT and seq_len_q > seq_len_kv: self.skipTest( "Lower right causal mask will produce NaNs in the output when seq_len_q > seq_len_kv!" 
) forw_tol = Tolerances(1e-3, 1e-3) grad_tol = Tolerances(5e-3, 5e-3) if causal_variant == CausalVariant.UPPER_LEFT: attn_bias = causal_upper_left(seq_len_q, seq_len_kv) else: attn_bias = causal_lower_right(seq_len_q, seq_len_kv) with sdpa_kernel(backends=[SDPBackend.EFFICIENT_ATTENTION, SDPBackend.FLASH_ATTENTION, SDPBackend.MATH, SDPBackend.CUDNN_ATTENTION]): self.run_test(device, make_q_tensor, make_kv_tensor, attn_bias, forw_tol, grad_tol, backend=cnts) self.assertEqual(cnts.frame_count, 1, "Compiled graph should have 1 frame!") @parametrize("shape", [(16, 16, 128, 128, 16), (16, 16, 128, 256, 32), (16, 16, 256, 128, 32), (1, 1, 23, 56, 15)]) def test_is_causal_equals_upper_left(self, device, shape: list[tuple[int]]): make_tensor = partial( torch.rand, device=device, dtype=torch.float16, requires_grad=True ) bsz, num_heads, seq_len_q, seq_len_kv, head_dim = shape make_q_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_q, head_dim)) make_kv_tensor = partial(make_tensor, SdpaShape(bsz, num_heads, seq_len_kv, head_dim)) forw_tol = Tolerances(1e-3, 1e-3) query = make_q_tensor() key = make_kv_tensor() value = make_kv_tensor() attn_bias = causal_upper_left(seq_len_q, seq_len_kv) out_attn_bias = scaled_dot_product_attention(query, key, value, attn_mask=attn_bias, dropout_p=0.0) out_is_causal = scaled_dot_product_attention(query, key, value, is_causal=True, dropout_p=0.0) torch.testing.assert_close(out_attn_bias, out_is_causal, rtol=forw_tol.rtol, atol=forw_tol.atol) def test_is_causal_and_mask_fails(self, device): make_tensor = partial( torch.rand, device=device, dtype=torch.float16, requires_grad=True ) make_q_tensor = partial(make_tensor, SdpaShape(16, 16, 128, 16)) make_kv_tensor = partial(make_tensor, SdpaShape(16, 16, 128, 16)) query = make_q_tensor() key = make_kv_tensor() value = make_kv_tensor() attn_bias = causal_upper_left(128, 128) with self.assertRaisesRegex(ValueError, "CausalBias should not be used with causal=True"): 
scaled_dot_product_attention(query, key, value, attn_mask=attn_bias, is_causal=True, dropout_p=0.0) if NOTEST_CPU: device_types = ("cuda", "mps") else: device_types = ("cpu", "cuda", "mps") if TEST_XPU: device_types += ("xpu", ) instantiate_device_type_tests(TestTransformers, globals(), only_for=device_types) instantiate_device_type_tests(TestSDPAFailureModes, globals(), only_for=device_types, allow_mps=True) instantiate_device_type_tests(TestSDPA, globals(), only_for=device_types, allow_mps=True, allow_xpu=True) instantiate_device_type_tests(TestSDPACudaOnly, globals(), only_for=("cuda")) instantiate_device_type_tests(TestSDPACpuOnly, globals(), only_for=("cpu")) instantiate_device_type_tests(TestAttnBias, globals(), only_for=device_types, allow_xpu=True) instantiate_device_type_tests(TestSDPAXpuOnly, globals(), only_for="xpu", allow_xpu=True) if __name__ == '__main__': run_tests()
TestAttnBias
python
tensorflow__tensorflow
tensorflow/python/saved_model/nested_structure_coder.py
{ "start": 5369, "end": 5986 }
class ____: """Codec for dicts.""" def can_encode(self, pyobj): return isinstance(pyobj, collections_abc.Mapping) def do_encode(self, dict_value, encode_fn): encoded_dict = struct_pb2.StructuredValue() encoded_dict.dict_value.CopyFrom(struct_pb2.DictValue()) for key, value in dict_value.items(): encoded_dict.dict_value.fields[key].CopyFrom(encode_fn(value)) return encoded_dict def can_decode(self, value): return value.HasField("dict_value") def do_decode(self, value, decode_fn): return {key: decode_fn(val) for key, val in value.dict_value.fields.items()}
_DictCodec
python
kamyu104__LeetCode-Solutions
Python/divide-array-into-equal-pairs.py
{ "start": 63, "end": 276 }
class ____(object): def divideArray(self, nums): """ :type nums: List[int] :rtype: bool """ return all(cnt%2 == 0 for cnt in collections.Counter(nums).itervalues())
Solution
python
spyder-ide__spyder
spyder/utils/installers.py
{ "start": 460, "end": 977 }
class ____(object): """ Base class for installer error; do not use directly. Exit Spyder with code 1. """ logger = logging.getLogger('Installer') logger.setLevel(logging.DEBUG) def __init__(self, msg): if not running_installer_test(): # Don't do anything return msg = self._msg(msg) self.logger.error(msg + '\n', stack_info=True) raise SystemExit(1) def _msg(self, msg): raise NotImplementedError()
SpyderInstallerError
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/types.py
{ "start": 7634, "end": 7902 }
class ____(Enum): """Represents the sync mode for a given Airbyte stream.""" SAME_AS_SOURCE = "source" DESTINATION_DEFAULT = "destination" @deprecated(breaking_version="2.0", additional_warn_text=MANAGED_ELEMENTS_DEPRECATION_MSG)
AirbyteDestinationNamespace
python
sqlalchemy__sqlalchemy
test/orm/inheritance/test_relationship.py
{ "start": 5001, "end": 11952 }
class ____(fixtures.MappedTest): run_setup_mappers = "once" @classmethod def define_tables(cls, metadata): Table( "people", metadata, Column( "person_id", Integer, primary_key=True, test_needs_autoincrement=True, ), Column("name", String(50)), Column("type", String(30)), ) Table( "engineers", metadata, Column( "person_id", Integer, ForeignKey("people.person_id"), primary_key=True, ), Column("primary_language", String(50)), Column("reports_to_id", Integer, ForeignKey("managers.person_id")), ) Table( "managers", metadata, Column( "person_id", Integer, ForeignKey("people.person_id"), primary_key=True, ), ) @classmethod def setup_mappers(cls): engineers = cls.tables.engineers managers = cls.tables.managers people = cls.tables.people cls.mapper_registry.map_imperatively( Person, people, polymorphic_on=people.c.type, polymorphic_identity="person", ) cls.mapper_registry.map_imperatively( Manager, managers, inherits=Person, polymorphic_identity="manager" ) cls.mapper_registry.map_imperatively( Engineer, engineers, inherits=Person, polymorphic_identity="engineer", properties={ "reports_to": relationship( Manager, primaryjoin=( managers.c.person_id == engineers.c.reports_to_id ), backref="engineers", ) }, ) def test_has(self): m1 = Manager(name="dogbert") e1 = Engineer(name="dilbert", primary_language="java", reports_to=m1) sess = fixture_session() sess.add(m1) sess.add(e1) sess.flush() sess.expunge_all() eq_( sess.query(Engineer) .filter(Engineer.reports_to.has(Manager.name == "dogbert")) .first(), Engineer(name="dilbert"), ) def test_join(self): m1 = Manager(name="dogbert") e1 = Engineer(name="dilbert", primary_language="java", reports_to=m1) sess = fixture_session() sess.add(m1) sess.add(e1) sess.flush() sess.expunge_all() ma = aliased(Manager) eq_( sess.query(Engineer) .join(ma, Engineer.reports_to) .filter(ma.name == "dogbert") .first(), Engineer(name="dilbert"), ) @testing.combinations((True,), (False,), argnames="autoalias") def test_filter_aliasing(self, autoalias): 
m1 = Manager(name="dogbert") m2 = Manager(name="foo") e1 = Engineer(name="wally", primary_language="java", reports_to=m1) e2 = Engineer(name="dilbert", primary_language="c++", reports_to=m2) e3 = Engineer(name="etc", primary_language="c++") sess = fixture_session() sess.add_all([m1, m2, e1, e2, e3]) sess.flush() sess.expunge_all() if autoalias: # filter aliasing applied to Engineer doesn't whack Manager with _aliased_join_warning(r"Engineer\(engineers\)"): eq_( sess.query(Manager) .join(Manager.engineers) .filter(Manager.name == "dogbert") .all(), [m1], ) with _aliased_join_warning(r"Engineer\(engineers\)"): eq_( sess.query(Manager) .join(Manager.engineers) .filter(Engineer.name == "dilbert") .all(), [m2], ) with _aliased_join_warning(r"Engineer\(engineers\)"): eq_( sess.query(Manager, Engineer) .join(Manager.engineers) .order_by(Manager.name.desc()) .all(), [(m2, e2), (m1, e1)], ) else: eng = aliased(Engineer, flat=True) eq_( sess.query(Manager) .join(Manager.engineers.of_type(eng)) .filter(Manager.name == "dogbert") .all(), [m1], ) eq_( sess.query(Manager) .join(Manager.engineers.of_type(eng)) .filter(eng.name == "dilbert") .all(), [m2], ) eq_( sess.query(Manager, eng) .join(Manager.engineers.of_type(eng)) .order_by(Manager.name.desc()) .all(), [(m2, e2), (m1, e1)], ) @testing.combinations((True,), (False,), argnames="autoalias") def test_relationship_compare(self, autoalias): m1 = Manager(name="dogbert") m2 = Manager(name="foo") e1 = Engineer(name="dilbert", primary_language="java", reports_to=m1) e2 = Engineer(name="wally", primary_language="c++", reports_to=m2) e3 = Engineer(name="etc", primary_language="c++") sess = fixture_session() sess.add(m1) sess.add(m2) sess.add(e1) sess.add(e2) sess.add(e3) sess.flush() sess.expunge_all() if autoalias: with _aliased_join_warning(r"Engineer\(engineers\)"): eq_( sess.query(Manager) .join(Manager.engineers) .filter(Engineer.reports_to == None) .all(), [], ) with _aliased_join_warning(r"Engineer\(engineers\)"): eq_( 
sess.query(Manager) .join(Manager.engineers) .filter(Engineer.reports_to == m1) .all(), [m1], ) else: eng = aliased(Engineer, flat=True) eq_( sess.query(Manager) .join(Manager.engineers.of_type(eng)) .filter(eng.reports_to == None) .all(), [], ) eq_( sess.query(Manager) .join(Manager.engineers.of_type(eng)) .filter(eng.reports_to == m1) .all(), [m1], )
SelfReferentialJ2JTest
python
gawel__pyquery
tests/test_pyquery.py
{ "start": 651, "end": 927 }
class ____(TestCase): def test_unicode(self): xml = pq("<html><p>é</p></html>") self.assertEqual(type(xml.html()), str) self.assertEqual(str(xml), '<html><p>é</p></html>') self.assertEqual(str(xml('p:contains("é")')), '<p>é</p>')
TestUnicode
python
getsentry__sentry
src/sentry/organizations/services/organization_actions/model.py
{ "start": 200, "end": 331 }
class ____: organization: Organization org_member: OrganizationMember team: Team | None
OrganizationAndMemberCreationResult
python
cherrypy__cherrypy
cherrypy/tutorial/tut04_complex_site.py
{ "start": 186, "end": 620 }
class ____: """Home page app.""" @cherrypy.expose def index(self): """Produce HTTP response body of home page app index URI.""" return """ <p>Hi, this is the home page! Check out the other fun stuff on this site:</p> <ul> <li><a href="/joke/">A silly joke</a></li> <li><a href="/links/">Useful links</a></li> </ul>"""
HomePage
python
MongoEngine__mongoengine
mongoengine/connection.py
{ "start": 930, "end": 17882 }
class ____(Exception): """Error raised when the database connection can't be established or when a connection with a requested alias can't be retrieved. """ pass def _check_db_name(name): """Check if a database name is valid. This functionality is copied from pymongo Database class constructor. """ if not isinstance(name, str): raise TypeError("name must be an instance of %s" % str) elif name != "$external": _check_name(name) def _get_connection_settings( db=None, name=None, host=None, port=None, read_preference=READ_PREFERENCE, username=None, password=None, authentication_source=None, authentication_mechanism=None, authmechanismproperties=None, **kwargs, ): """Get the connection settings as a dict :param db: the name of the database to use, for compatibility with connect :param name: the name of the specific database to use :param host: the host name of the: program: `mongod` instance to connect to :param port: the port that the: program: `mongod` instance is running on :param read_preference: The read preference for the collection :param username: username to authenticate with :param password: password to authenticate with :param authentication_source: database to authenticate against :param authentication_mechanism: database authentication mechanisms. By default, use SCRAM-SHA-1 with MongoDB 3.0 and later, MONGODB-CR (MongoDB Challenge Response protocol) for older servers. :param mongo_client_class: using alternative connection client other than pymongo.MongoClient, e.g. mongomock, montydb, that provides pymongo alike interface but not necessarily for connecting to a real mongo instance. :param kwargs: ad-hoc parameters to be passed into the pymongo driver, for example maxpoolsize, tz_aware, etc. See the documentation for pymongo's `MongoClient` for a full list. 
""" conn_settings = { "name": name or db or DEFAULT_DATABASE_NAME, "host": host or DEFAULT_HOST, "port": port or DEFAULT_PORT, "read_preference": read_preference, "username": username, "password": password, "authentication_source": authentication_source, "authentication_mechanism": authentication_mechanism, "authmechanismproperties": authmechanismproperties, } _check_db_name(conn_settings["name"]) conn_host = conn_settings["host"] # Host can be a list or a string, so if string, force to a list. if isinstance(conn_host, str): conn_host = [conn_host] resolved_hosts = [] for entity in conn_host: # Reject old mongomock integration # To be removed in a few versions after 0.27.0 if entity.startswith("mongomock://") or kwargs.get("is_mock"): raise Exception( "Use of mongomock:// URI or 'is_mock' were removed in favor of 'mongo_client_class=mongomock.MongoClient'. " "Check the CHANGELOG for more info" ) # Handle URI style connections, only updating connection params which # were explicitly specified in the URI. if "://" in entity: uri_dict = uri_parser.parse_uri(entity) resolved_hosts.append(entity) database = uri_dict.get("database") if database: conn_settings["name"] = database for param in ("read_preference", "username", "password"): if uri_dict.get(param): conn_settings[param] = uri_dict[param] uri_options = uri_dict[ "options" ] # uri_options is a _CaseInsensitiveDictionary if "replicaset" in uri_options: conn_settings["replicaSet"] = uri_options["replicaset"] if "authsource" in uri_options: conn_settings["authentication_source"] = uri_options["authsource"] if "authmechanism" in uri_options: conn_settings["authentication_mechanism"] = uri_options["authmechanism"] if "readpreference" in uri_options: read_preferences = ( ReadPreference.NEAREST, ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ) # Starting with PyMongo v3.5, the "readpreference" option is # returned as a string (e.g. 
"secondaryPreferred") and not an # int (e.g. 3). # TODO simplify the code below once we drop support for # PyMongo v3.4. read_pf_mode = uri_options["readpreference"] if isinstance(read_pf_mode, str): read_pf_mode = read_pf_mode.lower() for preference in read_preferences: if ( preference.name.lower() == read_pf_mode or preference.mode == read_pf_mode ): ReadPrefClass = preference.__class__ break if "readpreferencetags" in uri_options: conn_settings["read_preference"] = ReadPrefClass( tag_sets=uri_options["readpreferencetags"] ) else: conn_settings["read_preference"] = ReadPrefClass() if "authmechanismproperties" in uri_options: conn_settings["authmechanismproperties"] = uri_options[ "authmechanismproperties" ] if "uuidrepresentation" in uri_options: REV_UUID_REPRESENTATIONS = { v: k for k, v in _UUID_REPRESENTATIONS.items() } conn_settings["uuidrepresentation"] = REV_UUID_REPRESENTATIONS[ uri_options["uuidrepresentation"] ] else: resolved_hosts.append(entity) conn_settings["host"] = resolved_hosts # Deprecated parameters that should not be passed on kwargs.pop("slaves", None) kwargs.pop("is_slave", None) keys = { key.lower() for key in kwargs.keys() } # pymongo options are case insensitive if "uuidrepresentation" not in keys and "uuidrepresentation" not in conn_settings: warnings.warn( "No uuidRepresentation is specified! Falling back to " "'pythonLegacy' which is the default for pymongo 3.x. " "For compatibility with other MongoDB drivers this should be " "specified as 'standard' or '{java,csharp}Legacy' to work with " "older drivers in those languages. 
This will be changed to " "'unspecified' in a future release.", DeprecationWarning, stacklevel=3, ) kwargs["uuidRepresentation"] = "pythonLegacy" conn_settings.update(kwargs) return conn_settings def register_connection( alias, db=None, name=None, host=None, port=None, read_preference=READ_PREFERENCE, username=None, password=None, authentication_source=None, authentication_mechanism=None, authmechanismproperties=None, **kwargs, ): """Register the connection settings. :param alias: the name that will be used to refer to this connection throughout MongoEngine :param db: the name of the database to use, for compatibility with connect :param name: the name of the specific database to use :param host: the host name of the: program: `mongod` instance to connect to :param port: the port that the: program: `mongod` instance is running on :param read_preference: The read preference for the collection :param username: username to authenticate with :param password: password to authenticate with :param authentication_source: database to authenticate against :param authentication_mechanism: database authentication mechanisms. By default, use SCRAM-SHA-1 with MongoDB 3.0 and later, MONGODB-CR (MongoDB Challenge Response protocol) for older servers. :param mongo_client_class: using alternative connection client other than pymongo.MongoClient, e.g. mongomock, montydb, that provides pymongo alike interface but not necessarily for connecting to a real mongo instance. :param kwargs: ad-hoc parameters to be passed into the pymongo driver, for example maxpoolsize, tz_aware, etc. See the documentation for pymongo's `MongoClient` for a full list. 
""" conn_settings = _get_connection_settings( db=db, name=name, host=host, port=port, read_preference=read_preference, username=username, password=password, authentication_source=authentication_source, authentication_mechanism=authentication_mechanism, authmechanismproperties=authmechanismproperties, **kwargs, ) _connection_settings[alias] = conn_settings def disconnect(alias=DEFAULT_CONNECTION_NAME): """Close the connection with a given alias.""" from mongoengine import Document from mongoengine.base.common import _get_documents_by_db connection = _connections.pop(alias, None) if connection: # MongoEngine may share the same MongoClient across multiple aliases # if connection settings are the same so we only close # the client if we're removing the final reference. # Important to use 'is' instead of '==' because clients connected to the same cluster # will compare equal even with different options if all(connection is not c for c in _connections.values()): connection.close() if alias in _dbs: # Detach all cached collections in Documents for doc_cls in _get_documents_by_db(alias, DEFAULT_CONNECTION_NAME): if issubclass(doc_cls, Document): # Skip EmbeddedDocument doc_cls._disconnect() del _dbs[alias] if alias in _connection_settings: del _connection_settings[alias] def disconnect_all(): """Close all registered database.""" for alias in list(_connections.keys()): disconnect(alias) def get_connection(alias=DEFAULT_CONNECTION_NAME, reconnect=False): """Return a connection with a given alias.""" # Connect to the database if not already connected if reconnect: disconnect(alias) # If the requested alias already exists in the _connections list, return # it immediately. if alias in _connections: return _connections[alias] # Validate that the requested alias exists in the _connection_settings. # Raise ConnectionFailure if it doesn't. 
if alias not in _connection_settings: if alias == DEFAULT_CONNECTION_NAME: msg = "You have not defined a default connection" else: msg = 'Connection with alias "%s" has not been defined' % alias raise ConnectionFailure(msg) def _clean_settings(settings_dict): if PYMONGO_VERSION < (4,): irrelevant_fields_set = { "name", "username", "password", "authentication_source", "authentication_mechanism", "authmechanismproperties", } rename_fields = {} else: irrelevant_fields_set = {"name"} rename_fields = { "authentication_source": "authSource", "authentication_mechanism": "authMechanism", } return { rename_fields.get(k, k): v for k, v in settings_dict.items() if k not in irrelevant_fields_set and v is not None } raw_conn_settings = _connection_settings[alias].copy() # Retrieve a copy of the connection settings associated with the requested # alias and remove the database name and authentication info (we don't # care about them at this point). conn_settings = _clean_settings(raw_conn_settings) if DriverInfo is not None: conn_settings.setdefault( "driver", DriverInfo("MongoEngine", mongoengine.__version__) ) # Determine if we should use PyMongo's or mongomock's MongoClient. if "mongo_client_class" in conn_settings: mongo_client_class = conn_settings.pop("mongo_client_class") else: mongo_client_class = MongoClient # Re-use existing connection if one is suitable. existing_connection = _find_existing_connection(raw_conn_settings) if existing_connection: connection = existing_connection else: connection = _create_connection( alias=alias, mongo_client_class=mongo_client_class, **conn_settings ) _connections[alias] = connection return _connections[alias] def _create_connection(alias, mongo_client_class, **connection_settings): """ Create the new connection for this alias. Raise ConnectionFailure if it can't be established. 
""" try: return mongo_client_class(**connection_settings) except Exception as e: raise ConnectionFailure(f"Cannot connect to database {alias} :\n{e}") def _find_existing_connection(connection_settings): """ Check if an existing connection could be reused Iterate over all of the connection settings and if an existing connection with the same parameters is suitable, return it :param connection_settings: the settings of the new connection :return: An existing connection or None """ connection_settings_bis = ( (db_alias, settings.copy()) for db_alias, settings in _connection_settings.items() ) def _clean_settings(settings_dict): # Only remove the name but it's important to # keep the username/password/authentication_source/authentication_mechanism # to identify if the connection could be shared (cfr https://github.com/MongoEngine/mongoengine/issues/2047) return {k: v for k, v in settings_dict.items() if k != "name"} cleaned_conn_settings = _clean_settings(connection_settings) for db_alias, connection_settings in connection_settings_bis: db_conn_settings = _clean_settings(connection_settings) if cleaned_conn_settings == db_conn_settings and _connections.get(db_alias): return _connections[db_alias] def get_db(alias=DEFAULT_CONNECTION_NAME, reconnect=False): if reconnect: disconnect(alias) if alias not in _dbs: conn = get_connection(alias) conn_settings = _connection_settings[alias] db = conn[conn_settings["name"]] # Authenticate if necessary if ( PYMONGO_VERSION < (4,) and conn_settings["username"] and ( conn_settings["password"] or conn_settings["authentication_mechanism"] == "MONGODB-X509" ) and conn_settings["authmechanismproperties"] is None ): auth_kwargs = {"source": conn_settings["authentication_source"]} if conn_settings["authentication_mechanism"] is not None: auth_kwargs["mechanism"] = conn_settings["authentication_mechanism"] db.authenticate( conn_settings["username"], conn_settings["password"], **auth_kwargs ) _dbs[alias] = db return _dbs[alias] def 
connect(db=None, alias=DEFAULT_CONNECTION_NAME, **kwargs): """Connect to the database specified by the 'db' argument. Connection settings may be provided here as well if the database is not running on the default port on localhost. If authentication is needed, provide username and password arguments as well. Multiple databases are supported by using aliases. Provide a separate `alias` to connect to a different instance of: program: `mongod`. In order to replace a connection identified by a given alias, you'll need to call ``disconnect`` first See the docstring for `register_connection` for more details about all supported kwargs. """ if alias in _connections: prev_conn_setting = _connection_settings[alias] new_conn_settings = _get_connection_settings(db, **kwargs) if new_conn_settings != prev_conn_setting: err_msg = ( "A different connection with alias `{}` was already " "registered. Use disconnect() first" ).format(alias) raise ConnectionFailure(err_msg) else: register_connection(alias, db, **kwargs) return get_connection(alias) # Support old naming convention _get_connection = get_connection _get_db = get_db
ConnectionFailure
python
getsentry__sentry
src/sentry/analytics/events/issue_deleted.py
{ "start": 70, "end": 312 }
class ____(analytics.Event): group_id: int delete_type: str organization_id: int project_id: int user_id: int | None = None default_user_id: int | str | None = None analytics.register(IssueDeletedEvent)
IssueDeletedEvent
python
pytest-dev__pytest-django
pytest_django/live_server_helper.py
{ "start": 61, "end": 2873 }
class ____: """The liveserver fixture This is the object that the ``live_server`` fixture returns. The ``live_server`` fixture handles creation and stopping. """ def __init__(self, addr: str, *, start: bool = True) -> None: from django.db import connections from django.test.testcases import LiveServerThread from django.test.utils import modify_settings liveserver_kwargs: dict[str, Any] = {} connections_override = {} for conn in connections.all(): # If using in-memory sqlite databases, pass the connections to # the server thread. if conn.vendor == "sqlite" and conn.is_in_memory_db(): connections_override[conn.alias] = conn liveserver_kwargs["connections_override"] = connections_override from django.conf import settings if "django.contrib.staticfiles" in settings.INSTALLED_APPS: from django.contrib.staticfiles.handlers import StaticFilesHandler liveserver_kwargs["static_handler"] = StaticFilesHandler else: from django.test.testcases import _StaticFilesHandler liveserver_kwargs["static_handler"] = _StaticFilesHandler try: host, port = addr.split(":") except ValueError: host = addr else: liveserver_kwargs["port"] = int(port) self.thread = LiveServerThread(host, **liveserver_kwargs) self._live_server_modified_settings = modify_settings( ALLOWED_HOSTS={"append": host}, ) # `_live_server_modified_settings` is enabled and disabled by # `_live_server_helper`. self.thread.daemon = True if start: self.start() def start(self) -> None: """Start the server""" for conn in self.thread.connections_override.values(): # Explicitly enable thread-shareability for this connection. conn.inc_thread_sharing() self.thread.start() self.thread.is_ready.wait() if self.thread.error: error = self.thread.error self.stop() raise error def stop(self) -> None: """Stop the server""" # Terminate the live server's thread. self.thread.terminate() # Restore shared connections' non-shareability. 
for conn in self.thread.connections_override.values(): conn.dec_thread_sharing() @property def url(self) -> str: return f"http://{self.thread.host}:{self.thread.port}" def __str__(self) -> str: return self.url def __add__(self, other: str) -> str: return f"{self}{other}" def __repr__(self) -> str: return f"<LiveServer listening at {self.url}>"
LiveServer
python
crytic__slither
slither/vyper_parsing/ast/types.py
{ "start": 1988, "end": 2060 }
class ____(ASTNode): value: ASTNode attr: str @dataclass
Attribute
python
nedbat__coveragepy
coverage/misc.py
{ "start": 6292, "end": 11291 }
class ____: """A sentinel object to use for unusual default-value needs. Construct with a string that will be used as the repr, for display in help and Sphinx output. """ def __init__(self, display_as: str) -> None: self.display_as = display_as def __repr__(self) -> str: return self.display_as def substitute_variables(text: str, variables: Mapping[str, str]) -> str: """Substitute ``${VAR}`` variables in `text` with their values. Variables in the text can take a number of shell-inspired forms:: $VAR ${VAR} ${VAR?} strict: an error if VAR isn't defined. ${VAR-missing} defaulted: "missing" if VAR isn't defined. $$ just a dollar sign. `variables` is a dictionary of variable values. Returns the resulting text with values substituted. """ dollar_pattern = r"""(?x) # Use extended regex syntax \$ # A dollar sign, (?: # then (?P<dollar> \$ ) | # a dollar sign, or (?P<word1> \w+ ) | # a plain word, or \{ # a {-wrapped (?P<word2> \w+ ) # word, (?: # either (?P<strict> \? ) | # with a strict marker -(?P<defval> [^}]* ) # or a default value )? # maybe. } ) """ dollar_groups = ("dollar", "word1", "word2") def dollar_replace(match: re.Match[str]) -> str: """Called for each $replacement.""" # Only one of the dollar_groups will have matched, just get its text. word = next(g for g in match.group(*dollar_groups) if g) # pragma: always breaks if word == "$": return "$" elif word in variables: return variables[word] elif match["strict"]: msg = f"Variable {word} is undefined: {text!r}" raise CoverageException(msg) else: return match["defval"] text = re.sub(dollar_pattern, dollar_replace, text) return text def format_local_datetime(dt: datetime.datetime) -> str: """Return a string with local timezone representing the date.""" return dt.astimezone().strftime("%Y-%m-%d %H:%M %z") def import_local_file(modname: str, modfile: str | None = None) -> ModuleType: """Import a local file as a module. 
Opens a file in the current directory named `modname`.py, imports it as `modname`, and returns the module object. `modfile` is the file to import if it isn't in the current directory. """ if modfile is None: modfile = modname + ".py" spec = importlib.util.spec_from_file_location(modname, modfile) assert spec is not None mod = importlib.util.module_from_spec(spec) sys.modules[modname] = mod assert spec.loader is not None spec.loader.exec_module(mod) return mod @functools.cache def _human_key(s: str) -> tuple[list[str | int], str]: """Turn a string into a list of string and number chunks. "z23a" -> (["z", 23, "a"], "z23a") The original string is appended as a last value to ensure the key is unique enough so that "x1y" and "x001y" can be distinguished. """ def tryint(s: str) -> str | int: """If `s` is a number, return an int, else `s` unchanged.""" try: return int(s) except ValueError: return s return ([tryint(c) for c in re.split(r"(\d+)", s)], s) def human_sorted(strings: Iterable[str]) -> list[str]: """Sort the given iterable of strings the way that humans expect. Numeric components in the strings are sorted as numbers. Returns the sorted list. """ return sorted(strings, key=_human_key) SortableItem = TypeVar("SortableItem", bound=Sequence[Any]) def human_sorted_items( items: Iterable[SortableItem], reverse: bool = False, ) -> list[SortableItem]: """Sort (string, ...) items the way humans expect. The elements of `items` can be any tuple/list. They'll be sorted by the first element (a string), with ties broken by the remaining elements. Returns the sorted list of items. """ return sorted(items, key=lambda item: (_human_key(item[0]), *item[1:]), reverse=reverse) def plural(n: int, thing: str = "", things: str = "") -> str: """Pluralize a word. If n is 1, return thing. Otherwise return things, or thing+s. """ if n == 1: return thing else: return things or (thing + "s") def stdout_link(text: str, url: str) -> str: """Format text+url as a clickable link for stdout. 
If attached to a terminal, use escape sequences. Otherwise, just return the text. """ if hasattr(sys.stdout, "isatty") and sys.stdout.isatty(): return f"\033]8;;{url}\a{text}\033]8;;\a" else: return text
DefaultValue
python
run-llama__llama_index
llama-index-core/llama_index/core/node_parser/file/simple_file.py
{ "start": 765, "end": 3489 }
class ____(NodeParser): """ Simple file node parser. Splits a document loaded from a file into Nodes using logic based on the file type automatically detects the NodeParser to use based on file type Args: include_metadata (bool): whether to include metadata in nodes include_prev_next_rel (bool): whether to include prev/next relationships """ @classmethod def from_defaults( cls, include_metadata: bool = True, include_prev_next_rel: bool = True, callback_manager: Optional[CallbackManager] = None, ) -> "SimpleFileNodeParser": callback_manager = callback_manager or CallbackManager([]) return cls( include_metadata=include_metadata, include_prev_next_rel=include_prev_next_rel, callback_manager=callback_manager, ) @classmethod def class_name(cls) -> str: """Get class name.""" return "SimpleFileNodeParser" def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any, ) -> List[BaseNode]: """ Parse document into nodes. Args: nodes (Sequence[BaseNode]): nodes to parse """ all_nodes: List[BaseNode] = [] documents_with_progress = get_tqdm_iterable( nodes, show_progress, "Parsing documents into nodes" ) for document in documents_with_progress: # Try to get extension from metadata, or extract from file_path ext = document.metadata.get("extension") if ext is None and "file_path" in document.metadata: # Extract extension from file_path _, ext = os.path.splitext(document.metadata["file_path"]) ext = ext.lower() if ext and ext in FILE_NODE_PARSERS: parser = FILE_NODE_PARSERS[ext]( include_metadata=self.include_metadata, include_prev_next_rel=self.include_prev_next_rel, callback_manager=self.callback_manager, ) nodes = parser.get_nodes_from_documents([document], show_progress) all_nodes.extend(nodes) else: # What to do when file type isn't supported yet? all_nodes.extend( # build node from document build_nodes_from_splits( [document.get_content(metadata_mode=MetadataMode.NONE)], document, id_func=self.id_func, ) ) return all_nodes
SimpleFileNodeParser
python
dagster-io__dagster
python_modules/dagster/dagster/_utils/test/mysql_instance.py
{ "start": 8283, "end": 8575 }
class ____(Exception): def __init__(self, message, subprocess_error): super().__init__(check.opt_str_param(message, "message")) self.subprocess_error = check.inst_param( subprocess_error, "subprocess_error", subprocess.CalledProcessError )
MySQLDockerError
python
Pylons__pyramid
tests/test_csrf.py
{ "start": 6518, "end": 7138 }
class ____(unittest.TestCase): def setUp(self): self.config = testing.setUp() def _callFUT(self, *args, **kwargs): from pyramid.csrf import get_csrf_token return get_csrf_token(*args, **kwargs) def test_no_override_csrf_utility_registered(self): request = testing.DummyRequest() self._callFUT(request) def test_success(self): self.config.set_csrf_storage_policy(DummyCSRF()) request = testing.DummyRequest() csrf_token = self._callFUT(request) self.assertEqual(csrf_token, '02821185e4c94269bdc38e6eeae0a2f8')
Test_get_csrf_token
python
spack__spack
lib/spack/spack/traverse.py
{ "start": 3095, "end": 3683 }
class ____: """A visitor that traverses all edges once.""" def __init__(self, visitor, key=id, visited=None): self.visitor = visitor self.visited = set() if visited is None else visited self.key = key def accept(self, item): return self.visitor.accept(item) def neighbors(self, item): # Covering edges means: drop dependencies of visited nodes. key = self.key(item.edge.spec) if key in self.visited: return [] self.visited.add(key) return self.visitor.neighbors(item)
CoverEdgesVisitor
python
doocs__leetcode
solution/2100-2199/2182.Construct String With Repeat Limit/Solution.py
{ "start": 0, "end": 696 }
class ____: def repeatLimitedString(self, s: str, repeatLimit: int) -> str: cnt = [0] * 26 for c in s: cnt[ord(c) - ord("a")] += 1 ans = [] j = 24 for i in range(25, -1, -1): j = min(i - 1, j) while 1: x = min(repeatLimit, cnt[i]) cnt[i] -= x ans.append(ascii_lowercase[i] * x) if cnt[i] == 0: break while j >= 0 and cnt[j] == 0: j -= 1 if j < 0: break cnt[j] -= 1 ans.append(ascii_lowercase[j]) return "".join(ans)
Solution
python
run-llama__llama_index
llama-index-packs/llama-index-packs-mixture-of-agents/llama_index/packs/mixture_of_agents/base.py
{ "start": 730, "end": 782 }
class ____(Event): result: str
GenerateResultEvent
python
openai__openai-python
src/openai/types/beta/code_interpreter_tool_param.py
{ "start": 224, "end": 389 }
class ____(TypedDict, total=False): type: Required[Literal["code_interpreter"]] """The type of tool being defined: `code_interpreter`"""
CodeInterpreterToolParam
python
sqlalchemy__sqlalchemy
test/perf/compiled_extensions/collections_.py
{ "start": 3269, "end": 9674 }
class ____(Case): @staticmethod def set_fn(): return set @staticmethod def python(): from sqlalchemy.util import _collections_cy py_coll = load_uncompiled_module(_collections_cy) assert not py_coll._is_compiled() return py_coll.IdentitySet @staticmethod def cython(): from sqlalchemy.util import _collections_cy assert _collections_cy._is_compiled() return _collections_cy.IdentitySet IMPLEMENTATIONS = { "set": set_fn.__func__, "python": python.__func__, "cython": cython.__func__, } NUMBER = 10 def init_objects(self): self.val1 = list(range(10)) self.val2 = list(wrap(token_urlsafe(4 * 2048), 4)) self.imp_1 = self.impl(self.val1) self.imp_2 = self.impl(self.val2) @classmethod def update_results(cls, results): cls._divide_results(results, "python", "set", "py / set") cls._divide_results(results, "cython", "python", "cy / py") cls._divide_results(results, "cython", "set", "cy / set") @test_case(number=2_500_000) def init_empty(self): self.impl() @test_case(number=2_500) def init(self): self.impl(self.val1) self.impl(self.val2) @test_case(number=5_000) def init_from_impl(self): self.impl(self.imp_2) @test_case(number=100) def add(self): ii = self.impl() x = 25_000 for i in range(x): ii.add(str(i % (x / 2))) @test_case def contains(self): ii = self.impl(self.val2) for _ in range(1_000): for x in self.val1 + self.val2: x in ii @test_case(number=200) def remove(self): v = [str(i) for i in range(7500)] ii = self.impl(v) for x in v[:5000]: ii.remove(x) @test_case(number=200) def discard(self): v = [str(i) for i in range(7500)] ii = self.impl(v) for x in v[:5000]: ii.discard(x) @test_case def pop(self): for x in range(50_000): ii = self.impl(self.val1) for x in self.val1: ii.pop() @test_case def clear(self): i, v = self.impl, self.val1 for _ in range(125_000): ii = i(v) ii.clear() @test_case(number=2_500_000) def eq(self): self.imp_1 == self.imp_1 self.imp_1 == self.imp_2 self.imp_1 == self.val2 @test_case(number=2_500_000) def ne(self): self.imp_1 != self.imp_1 self.imp_1 != 
self.imp_2 self.imp_1 != self.val2 @test_case(number=20_000) def issubset(self): self.imp_1.issubset(self.imp_1) self.imp_1.issubset(self.imp_2) self.imp_1.issubset(self.val1) self.imp_1.issubset(self.val2) @test_case(number=50_000) def le(self): self.imp_1 <= self.imp_1 self.imp_1 <= self.imp_2 self.imp_2 <= self.imp_1 self.imp_2 <= self.imp_2 @test_case(number=2_500_000) def lt(self): self.imp_1 < self.imp_1 self.imp_1 < self.imp_2 self.imp_2 < self.imp_1 self.imp_2 < self.imp_2 @test_case(number=20_000) def issuperset(self): self.imp_1.issuperset(self.imp_1) self.imp_1.issuperset(self.imp_2) self.imp_1.issubset(self.val1) self.imp_1.issubset(self.val2) @test_case(number=50_000) def ge(self): self.imp_1 >= self.imp_1 self.imp_1 >= self.imp_2 self.imp_2 >= self.imp_1 self.imp_2 >= self.imp_2 @test_case(number=2_500_000) def gt(self): self.imp_1 > self.imp_1 self.imp_2 > self.imp_2 self.imp_2 > self.imp_1 self.imp_2 > self.imp_2 @test_case(number=10_000) def union(self): self.imp_1.union(self.imp_2) @test_case(number=10_000) def or_test(self): self.imp_1 | self.imp_2 @test_case def update(self): ii = self.impl(self.val1) for _ in range(1_000): ii.update(self.imp_2) @test_case def ior(self): ii = self.impl(self.val1) for _ in range(1_000): ii |= self.imp_2 @test_case def difference(self): for _ in range(2_500): self.imp_1.difference(self.imp_2) self.imp_1.difference(self.val2) @test_case(number=250_000) def sub(self): self.imp_1 - self.imp_2 @test_case def difference_update(self): ii = self.impl(self.val1) for _ in range(2_500): ii.difference_update(self.imp_2) ii.difference_update(self.val2) @test_case def isub(self): ii = self.impl(self.val1) for _ in range(250_000): ii -= self.imp_2 @test_case(number=20_000) def intersection(self): self.imp_1.intersection(self.imp_2) self.imp_1.intersection(self.val2) @test_case(number=250_000) def and_test(self): self.imp_1 & self.imp_2 @test_case def intersection_up(self): ii = self.impl(self.val1) for _ in range(2_500): 
ii.intersection_update(self.imp_2) ii.intersection_update(self.val2) @test_case def iand(self): ii = self.impl(self.val1) for _ in range(250_000): ii &= self.imp_2 @test_case(number=2_500) def symmetric_diff(self): self.imp_1.symmetric_difference(self.imp_2) self.imp_1.symmetric_difference(self.val2) @test_case(number=2_500) def xor(self): self.imp_1 ^ self.imp_2 @test_case def symmetric_diff_up(self): ii = self.impl(self.val1) for _ in range(125): ii.symmetric_difference_update(self.imp_2) ii.symmetric_difference_update(self.val2) @test_case def ixor(self): ii = self.impl(self.val1) for _ in range(250): ii ^= self.imp_2 @test_case(number=25_000) def copy(self): self.imp_1.copy() self.imp_2.copy() @test_case(number=2_500_000) def len(self): len(self.imp_1) len(self.imp_2) @test_case(number=25_000) def iter(self): list(self.imp_1) list(self.imp_2) @test_case(number=10_000) def repr(self): str(self.imp_1) str(self.imp_2)
IdentitySet
python
gevent__gevent
src/gevent/tests/test__event.py
{ "start": 11099, "end": 11172 }
class ____(TestEvent_SetThenClear): N = 1000
TestEvent_SetThenClear1000
python
graphql-python__graphene
graphene/tests/issues/test_356.py
{ "start": 129, "end": 180 }
class ____(graphene.ObjectType): pass
SomeTypeOne
python
pytorch__pytorch
test/test_tensor_creation_ops.py
{ "start": 154551, "end": 177317 }
class ____(TestCase): exact_dtype = True # TODO: add torch.complex64, torch.complex128 @dtypes(torch.float, torch.double) def test_normal(self, device, dtype): def helper(self, device, dtype, ptype, t_transform, std_transform): q = torch.empty(100, 100, dtype=dtype, device=device) q.normal_() self.assertEqual(t_transform(q).mean(), 0, atol=0.2, rtol=0) self.assertEqual(t_transform(q).std(), std_transform(1), atol=0.2, rtol=0) q.normal_(2, 3) self.assertEqual(t_transform(q).mean(), 2, atol=0.3, rtol=0) self.assertEqual(t_transform(q).std(), std_transform(3), atol=0.3, rtol=0) q = torch.empty(100, 100, dtype=dtype, device=device) q_row1 = q[0:1].clone() q[99:100].normal_() self.assertEqual(t_transform(q[99:100]).mean(), 0, atol=0.2, rtol=0) self.assertEqual(t_transform(q[99:100]).std(), std_transform(1), atol=0.2, rtol=0) self.assertEqual(t_transform(q[0:1]).clone(), t_transform(q_row1)) mean = torch.empty(100, 100, dtype=dtype, device=device) mean[:50].fill_(ptype(0)) mean[50:].fill_(ptype(1)) std = torch.empty(100, 100, dtype=torch.float, device=device) std[:, :50] = 4 std[:, 50:] = 1 r = torch.normal(mean) self.assertEqual(r.dtype, dtype) self.assertEqual(str(r.device), device) self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0) self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0) self.assertEqual(t_transform(r).std(), std_transform(1), atol=0.2, rtol=0) r.fill_(42) r = torch.normal(mean, 3) self.assertEqual(r.dtype, dtype) self.assertEqual(str(r.device), device) self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0) self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0) self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.2, rtol=0) r.fill_(42) torch.normal(mean, 3, out=r) self.assertEqual(r.dtype, dtype) self.assertEqual(str(r.device), device) self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0) self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0) 
self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.2, rtol=0) r.fill_(42) r = torch.normal(2, std) self.assertFalse(r.dtype.is_complex) self.assertEqual(str(r.device), device) self.assertEqual(r.mean(), 2, atol=0.2, rtol=0) self.assertEqual(r[:, :50].std(), 4, atol=0.3, rtol=0) self.assertEqual(r[:, 50:].std(), 1, atol=0.2, rtol=0) r.fill_(42) torch.normal(2, std, out=r) self.assertFalse(r.dtype.is_complex) self.assertEqual(str(r.device), device) self.assertEqual(r.mean(), 2, atol=0.2, rtol=0) self.assertEqual(r[:, :50].std(), 4, atol=0.3, rtol=0) self.assertEqual(r[:, 50:].std(), 1, atol=0.2, rtol=0) r.fill_(42) r = torch.normal(mean, std) self.assertEqual(r.dtype, dtype) self.assertEqual(str(r.device), device) self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0) self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0) self.assertEqual(t_transform(r[:, :50]).std(), std_transform(4), atol=0.3, rtol=0) self.assertEqual(t_transform(r[:, 50:]).std(), std_transform(1), atol=0.2, rtol=0) r.fill_(42) torch.normal(mean, std, out=r) self.assertEqual(r.dtype, dtype) self.assertEqual(str(r.device), device) self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0) self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0) self.assertEqual(t_transform(r[:, :50]).std(), std_transform(4), atol=0.3, rtol=0) self.assertEqual(t_transform(r[:, 50:]).std(), std_transform(1), atol=0.2, rtol=0) # test empty mean/std out = torch.normal(mean=torch.empty((0, 2)), std=torch.empty((0, 1))) self.assertEqual(out.size(), torch.Size([0, 2])) r.fill_(42) r = torch.normal(2, 3, (100, 100), dtype=dtype, device=device) self.assertEqual(r.dtype, dtype) self.assertEqual(str(r.device), device) self.assertEqual(t_transform(r).mean(), 2, atol=0.3, rtol=0) self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.3, rtol=0) r.fill_(42) torch.normal(2, 3, (100, 100), dtype=dtype, device=device, out=r) self.assertEqual(r.dtype, dtype) 
self.assertEqual(str(r.device), device) self.assertEqual(t_transform(r).mean(), 2, atol=0.3, rtol=0) self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.3, rtol=0) # float std 0 with float mean r.fill_(42) torch.normal(2, 0, (10, 10), dtype=dtype, device=device, out=r) self.assertEqual(r.dtype, dtype) self.assertEqual(str(r.device), device) self.assertTrue(r.eq(2).all()) # float std 0 with tensor mean r.fill_(42) mean_rand = torch.randn(10, 10, dtype=dtype, device=device) torch.normal(mean_rand, 0, out=r) self.assertEqual(r.dtype, dtype) self.assertEqual(str(r.device), device) self.assertEqual(mean_rand, r, atol=0, rtol=0) # tensor std 0 with float mean r.fill_(42) std_zeros = torch.zeros(10, 10, dtype=dtype, device=device) torch.normal(2, std_zeros, out=r) self.assertEqual(r.dtype, dtype) self.assertEqual(str(r.device), device) self.assertTrue(r.eq(2).all()) # tensor std 0 with tensor mean r.fill_(42) torch.normal(mean_rand, std_zeros, out=r) self.assertEqual(r.dtype, dtype) self.assertEqual(str(r.device), device) self.assertEqual(mean_rand, r, atol=0, rtol=0) if dtype.is_complex: helper(self, device, dtype, lambda x: complex(x, x), lambda t: torch.real(t).to(torch.float), lambda mean: mean / math.sqrt(2)) helper(self, device, dtype, lambda x: complex(x, x), lambda t: torch.imag(t).to(torch.float), lambda mean: mean / math.sqrt(2)) self.assertRaisesRegex( RuntimeError, "normal expects standard deviation to be non-complex", lambda: torch.normal(0, torch.empty(100, 100, dtype=dtype, device=device))) out = torch.empty(100, 100, dtype=dtype, device=device) self.assertRaisesRegex( RuntimeError, "normal expects standard deviation to be non-complex", lambda: torch.normal(0, torch.empty(100, 100, dtype=dtype, device=device), out=out)) else: helper(self, device, dtype, lambda x: x, lambda t: t, lambda mean: mean) # Ensure that normal raises appropriate error when `std` < 0 def test_normal_std_error(self, device): a = torch.tensor(0, dtype=torch.float32, 
device=device) std = torch.tensor(-1, dtype=torch.float32, device=device) for input in [0, a]: with self.assertRaisesRegex(RuntimeError, r'normal expects std >= 0.0, but found std'): torch.normal(input, -1, (10,)) with self.assertRaisesRegex(RuntimeError, r'normal expects all elements of std >= 0.0'): torch.normal(input, std) # https://github.com/pytorch/pytorch/issues/126834 @xfailIfTorchDynamo @dtypes(torch.float, torch.double, torch.half) @dtypesIfCUDA(torch.float, torch.double, torch.half, torch.bfloat16) def test_uniform_from_to(self, device, dtype): size = 2000 alpha = 0.1 float_min = torch.finfo(torch.float).min float_max = torch.finfo(torch.float).max double_min = torch.finfo(torch.double).min double_max = torch.finfo(torch.double).max if dtype == torch.bfloat16: min_val = -3.389531389251535e+38 max_val = 3.389531389251535e+38 else: min_val = torch.finfo(dtype).min max_val = torch.finfo(dtype).max values = [double_min, float_min, -42, 0, 42, float_max, double_max] for from_ in values: for to_ in values: t = torch.empty(size, dtype=dtype, device=device) if not (min_val <= from_ <= max_val) or not (min_val <= to_ <= max_val): pass elif to_ < from_: self.assertRaisesRegex( RuntimeError, "uniform_ expects to return", lambda: t.uniform_(from_, to_) ) elif to_ - from_ > max_val: self.assertRaisesRegex( RuntimeError, "uniform_ expects to-from", lambda: t.uniform_(from_, to_) ) else: t.uniform_(from_, to_) range_ = to_ - from_ if dtype != torch.bfloat16 and not ( dtype == torch.half and device == 'cpu') and not torch.isnan(t).all(): delta = alpha * range_ double_t = t.to(torch.double) if range_ == 0: self.assertTrue(double_t.min() == from_) self.assertTrue(double_t.max() == to_) elif dtype == torch.half: self.assertTrue(from_ <= double_t.min() <= (from_ + delta)) self.assertTrue((to_ - delta) <= double_t.max() <= to_) else: self.assertTrue(from_ <= double_t.min() <= (from_ + delta)) self.assertTrue((to_ - delta) <= double_t.max() < to_) def 
test_random_neg_values(self, device): SIZE = 10 signed_dtypes = [torch.double, torch.float, torch.long, torch.int, torch.short] for dtype in signed_dtypes: res = torch.rand(SIZE, SIZE).to(device=device, dtype=dtype) res.random_(-10, -1) self.assertLessEqual(res.max().item(), 9) self.assertGreaterEqual(res.min().item(), -10) # TODO: this test should be updated @onlyCPU def test_randint_inference(self, device): size = (2, 1) for args in [(3,), (1, 3)]: # (low,) and (low, high) self.assertIs(torch.int64, torch.randint(*args, size=size).dtype) self.assertIs(torch.int64, torch.randint(*args, size=size, layout=torch.strided).dtype) self.assertIs(torch.int64, torch.randint(*args, size=size, generator=torch.default_generator).dtype) self.assertIs(torch.float32, torch.randint(*args, size=size, dtype=torch.float32).dtype) out = torch.empty(size, dtype=torch.float32) self.assertIs(torch.float32, torch.randint(*args, size=size, out=out).dtype) self.assertIs(torch.float32, torch.randint(*args, size=size, out=out, dtype=torch.float32).dtype) out = torch.empty(size, dtype=torch.int64) self.assertIs(torch.int64, torch.randint(*args, size=size, out=out).dtype) self.assertIs(torch.int64, torch.randint(*args, size=size, out=out, dtype=torch.int64).dtype) self.assertRaisesRegex(RuntimeError, "random_ expects 'from' to be less than 'to', but got from=0 >= to=0", lambda: torch.randint(0, size=size)) self.assertRaisesRegex(RuntimeError, "random_ expects 'from' to be less than 'to', but got from=-1 >= to=-2", lambda: torch.randint(-1, -2, size=size)) self.assertRaisesRegex(TypeError, r"randint\(\): argument 'high' \(position 1\) must be int, not float", lambda: torch.randint(.5, size=size)) self.assertRaisesRegex(RuntimeError, "from is out of bounds for", lambda: torch.randint(-32769, 0, size=size, dtype=torch.int16)) self.assertRaisesRegex(RuntimeError, "from is out of bounds for", lambda: torch.randint(-1, 1, size=size, dtype=torch.uint32)) # TODO: this test should be updated @onlyCPU 
def test_randint(self, device): SIZE = 100 def seed(generator): if generator is None: torch.manual_seed(123456) else: generator.manual_seed(123456) return generator for generator in (None, torch.Generator()): generator = seed(generator) res1 = torch.randint(0, 6, (SIZE, SIZE), generator=generator) res2 = torch.empty((), dtype=torch.int64) generator = seed(generator) torch.randint(0, 6, (SIZE, SIZE), generator=generator, out=res2) generator = seed(generator) res3 = torch.randint(6, (SIZE, SIZE), generator=generator) res4 = torch.empty((), dtype=torch.int64) generator = seed(generator) torch.randint(6, (SIZE, SIZE), out=res4, generator=generator) self.assertEqual(res1, res2) self.assertEqual(res1, res3) self.assertEqual(res1, res4) self.assertEqual(res2, res3) self.assertEqual(res2, res4) self.assertEqual(res3, res4) self.assertTrue((res1 < 6).all().item()) self.assertTrue((res1 >= 0).all().item()) @unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "For fb compatibility random not changed in fbcode") def test_randint_distribution(self, device): size = 1_000_000 n_max = int(0.75 * 2 ** 32) n_bins = 8 def bin(index, max_size): return index // (max_size // n_bins) res = torch.randint(n_max, (size,), device=device) # histogram implemented for float only bins = bin(res, n_max).float().cpu() hist, _ = bins.histogram(8, range=(0, n_bins)) expected_bin = res.shape[0] / 8 expected_error = math.sqrt(expected_bin) / expected_bin * 3 error = (hist - expected_bin).abs().max() / expected_bin self.assertTrue(error < expected_error) @dtypes(torch.half, torch.float, torch.bfloat16, torch.double, torch.complex32, torch.complex64, torch.complex128) def test_randn(self, device, dtype): SIZE = 100 for size in [0, SIZE]: torch.manual_seed(123456) res1 = torch.randn(size, size, dtype=dtype, device=device) res2 = torch.tensor([], dtype=dtype, device=device) torch.manual_seed(123456) torch.randn(size, size, out=res2) self.assertEqual(res1, res2) @dtypes(torch.float, torch.double, torch.complex32, 
torch.complex64, torch.complex128) def test_rand(self, device, dtype): SIZE = 100 for size in [0, SIZE]: torch.manual_seed(123456) res1 = torch.rand(size, size, dtype=dtype, device=device) res2 = torch.tensor([], dtype=dtype, device=device) torch.manual_seed(123456) torch.rand(size, size, out=res2) self.assertEqual(res1, res2) def test_randperm(self, device): if device == 'cpu' or device == 'meta': rng_device = None else: # TODO: This won't actually work for non-CUDA device # see https://github.com/pytorch/pytorch/issues/54282 rng_device = [device] # Test core functionality. On CUDA, different value of n has different # code path for n in (5, 100, 50000, 100000): # Ensure both integer and floating-point numbers are tested. Half follows an execution path that is # different from others on CUDA. for dtype in (torch.long, torch.half, torch.float, torch.bfloat16): if n > 2049 and dtype == torch.half: # Large n for torch.half will raise an exception, do not test here. continue if dtype == torch.bfloat16 and device != 'cpu': continue if n > 256 and dtype == torch.bfloat16: continue with torch.random.fork_rng(devices=rng_device): res1 = torch.randperm(n, dtype=dtype, device=device) res2 = torch.empty(0, dtype=dtype, device=device) torch.randperm(n, out=res2, dtype=dtype, device=device) self.assertEqual(res1, res2, atol=0, rtol=0) self.assertEqual(res1.sort().values.long(), torch.arange(n, device=device)) # Default type is long for n in (100, 10000): self.assertEqual(torch.randperm(n, device=device).dtype, torch.long) # randperm of 0 elements is an empty tensor res1 = torch.randperm(0) res2 = torch.tensor(5, dtype=dtype, device=device) torch.randperm(0, out=res2) self.assertEqual(res1.numel(), 0) self.assertEqual(res2.numel(), 0) # Test exceptions when n is too large for a floating point type for dtype, small_n, large_n in ((torch.uint8, 2**8, 2**8 + 1), (torch.half, 2**11 + 1, 2**11 + 2), (torch.float, 2**24 + 1, 2**24 + 2), (torch.double, 2**25, # 2**53 + 1 is too large 
to run 2**53 + 2)): res = torch.empty(0, dtype=dtype, device=device) torch.randperm(small_n, out=res) # No exception expected self.assertRaises(RuntimeError, lambda: torch.randperm(large_n, out=res, device=device)) # Test non-contiguous tensors for n in (4, 5, 6, 10, 20): non_contiguous_tensor = torch.zeros((2, 3), dtype=torch.long, device=device).t() self.assertFalse(non_contiguous_tensor.is_contiguous()) with torch.random.fork_rng(devices=rng_device): res = torch.randperm(n, dtype=torch.long, device=device) torch.randperm(n, out=non_contiguous_tensor) self.assertEqual(non_contiguous_tensor, res) self.assertEqual(res.sort().values.long(), torch.arange(n, device=device)) @largeTensorTest("10GB", "cpu") @largeTensorTest("40GB", "cuda") @slowTest def test_randperm_large(self, device): # Test even distribution where rand32 might produce skewed "uniform" distribution # n_items is chosen to not evenly divide 2**32 and be sufficiently large # to easily detect skew def decile(index, collection_size): return index // (collection_size // 10) n_items = 700_000_000 shuffled = torch.randperm(n_items, device=device) interval = 1_000_000 shuffled_interval = shuffled[:interval] # histogram implemented for float only deciles = decile(shuffled_interval, shuffled.shape[0]).float().cpu() hist, _ = deciles.histogram(10, range=(0, 10)) expected_bin = shuffled_interval.shape[0] / 10 expected_error = math.sqrt(expected_bin) / expected_bin * 3 error = (hist - expected_bin).abs().max() / expected_bin self.assertTrue(error < expected_error, f"error {error} > {expected_error}") # Test exceptions when device and generator types are incompatible @onlyCUDA @unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Produces inconsistent errors when run in fbcode.") def test_randperm_device_compatibility(self, device): cuda_gen = torch.Generator(device='cuda') cpu_gen = torch.Generator(device='cpu') # n=0 is a special case that we don't need to use generator, thus no error even if # device and generator don't 
match torch.randperm(0, device='cuda:0', generator=torch.Generator(device='cuda:1')) if torch.cuda.device_count() > 1: torch.randperm(0, device='cuda:1', generator=torch.Generator(device='cuda:0')) torch.randperm(0, device='cuda', generator=torch.Generator(device='cpu')) torch.randperm(0, device='cpu', generator=torch.Generator(device='cuda')) for n in (1, 3, 100, 30000): torch.randperm(n, device='cuda', generator=torch.Generator(device='cuda:0')) torch.randperm(n, device='cuda:0', generator=torch.Generator(device='cuda')) # For cuda:0 to match cuda:1, we are making consistent device type matching # behavior just like torch.randint. Longer term, generator should ignore # device ordinal, since it's not used anyway. torch.randint(low=0, high=n + 1, size=(1,), device="cuda:0", generator=torch.Generator(device='cuda:1')) torch.randperm(n, device='cuda:0', generator=torch.Generator(device='cuda:1')) if torch.cuda.device_count() > 1: torch.randint(low=0, high=n + 1, size=(1,), device="cuda:1", generator=torch.Generator(device='cuda:0')) torch.randperm(n, device='cuda:1', generator=torch.Generator(device='cuda:0')) regex = 'Expected a .* device type for generator but found .*' cuda_t = torch.tensor(n, device='cuda') self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cuda', generator=cpu_gen)) self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cuda', generator=cpu_gen, out=cuda_t)) cpu_t = torch.tensor(n, device='cpu') self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cpu', generator=cuda_gen)) self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cpu', generator=cuda_gen, out=cpu_t)) self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, generator=cuda_gen)) # implicitly on CPU # Class for testing *like ops, like torch.ones_like
TestRandomTensorCreation
python
kamyu104__LeetCode-Solutions
Python/length-of-longest-subarray-with-at-most-k-frequency.py
{ "start": 93, "end": 569 }
class ____(object): def maxSubarrayLength(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ cnt = collections.Counter() result = left = 0 for right in xrange(len(nums)): cnt[nums[right]] += 1 while not (cnt[nums[right]] <= k): cnt[nums[left]] -= 1 left += 1 result = max(result, right-left+1) return result
Solution
python
milvus-io__pymilvus
pymilvus/client/search_iterator.py
{ "start": 751, "end": 7627 }
class ____: # for compatibility, track the number of total results left _left_res_cnt = None def __init__( self, connection: Connections, collection_name: str, data: Union[List, utils.SparseMatrixInputType], batch_size: int = 1000, limit: Optional[int] = UNLIMITED, filter: Optional[str] = None, output_fields: Optional[List[str]] = None, search_params: Optional[Dict] = None, timeout: Optional[float] = None, partition_names: Optional[List[str]] = None, anns_field: Optional[str] = None, round_decimal: Optional[int] = -1, external_filter_func: Optional[Callable[[Hits], Union[Hits, List[Hit]]]] = None, **kwargs, ): self._check_params(batch_size, data, kwargs) # for compatibility, support limit, deprecate in future if limit != UNLIMITED: self._left_res_cnt = limit self._conn = connection self._set_up_collection_id(collection_name) kwargs[COLLECTION_ID] = self._collection_id self._params = { "collection_name": collection_name, "data": data, "anns_field": anns_field, "param": deepcopy(search_params), "limit": batch_size, "expression": filter, "partition_names": partition_names, "output_fields": output_fields, "timeout": timeout, "round_decimal": round_decimal, ITERATOR_FIELD: True, ITER_SEARCH_V2_KEY: True, ITER_SEARCH_BATCH_SIZE_KEY: batch_size, GUARANTEE_TIMESTAMP: 0, **kwargs, } self._external_filter_func = external_filter_func self._cache = [] self._batch_size = batch_size self._probe_for_compability(self._params) def _set_up_collection_id(self, collection_name: str): res = self._conn.describe_collection(collection_name) self._collection_id = res[COLLECTION_ID] def _check_token_exists(self, token: Union[str, None]): if token is None or token == "": raise ServerVersionIncompatibleException( message=ExceptionsMessage.SearchIteratorV2FallbackWarning ) # this detects whether the server supports search_iterator_v2 and is for compatibility only # if the server holds iterator states, this implementation needs to be reconsidered def _probe_for_compability(self, params: Dict): 
dummy_params = deepcopy(params) dummy_batch_size = 1 dummy_params["limit"] = dummy_batch_size dummy_params[ITER_SEARCH_BATCH_SIZE_KEY] = dummy_batch_size iter_info = self._conn.search(**dummy_params).get_search_iterator_v2_results_info() self._check_token_exists(iter_info.token) # internal next function, do not use this outside of this class def _next(self): res = self._conn.search(**self._params) iter_info = res.get_search_iterator_v2_results_info() self._check_token_exists(iter_info.token) self._params[ITER_SEARCH_LAST_BOUND_KEY] = iter_info.last_bound # patch token and guarantee timestamp for the first next() call if ITER_SEARCH_ID_KEY not in self._params: # the token should not change during the lifetime of the iterator self._params[ITER_SEARCH_ID_KEY] = iter_info.token if self._params[GUARANTEE_TIMESTAMP] <= 0: if res.get_session_ts() > 0: self._params[GUARANTEE_TIMESTAMP] = res.get_session_ts() else: logger.warning( "failed to set up mvccTs from milvus server, use client-side ts instead" ) self._params[GUARANTEE_TIMESTAMP] = fall_back_to_latest_session_ts() return res def next(self): if self._left_res_cnt is not None and self._left_res_cnt <= 0: return None if self._external_filter_func is None: # return SearchPage for compability return self._wrap_return_res(self._next()[0]) # the length of the results should be `batch_size` if no limit is set, # otherwise it should be the number of results left if less than `batch_size` target_len = ( self._batch_size if self._left_res_cnt is None else min(self._batch_size, self._left_res_cnt) ) while True: hits = self._next()[0] # no more results from server if len(hits) == 0: break # apply external filter if self._external_filter_func is not None: hits = self._external_filter_func(hits) self._cache.extend(hits) if len(self._cache) >= target_len: break # if the number of elements in cache is less than or equal to target_len, # return all results we could possibly return # if the number of elements in cache is more than 
target_len, # return target_len results and keep the rest for next call ret = self._cache[:target_len] del self._cache[:target_len] # return SearchPage for compability return self._wrap_return_res(ret) def close(self): pass def _check_params( self, batch_size: int, data: Union[List, utils.SparseMatrixInputType], kwargs: Dict, ): # metric_type can be empty, deduced at server side # anns_field can be empty, deduced at server side # check batch size if batch_size < 0: raise ParamError(message="batch size cannot be less than zero") if batch_size > MAX_BATCH_SIZE: raise ParamError(message=f"batch size cannot be larger than {MAX_BATCH_SIZE}") # check offset if kwargs.get(OFFSET, 0) != 0: raise ParamError(message="Offset is not supported for search_iterator_v2") # check num queries, heavy to check at server side rows = entity_helper.get_input_num_rows(data) if rows > 1: raise ParamError( message="search_iterator_v2 does not support processing multiple vectors simultaneously" ) if rows == 0: raise ParamError(message="The vector data for search cannot be empty") def _wrap_return_res(self, res: Hits) -> SearchPage: if len(res) == 0: return SearchPage(None) if self._left_res_cnt is None: return SearchPage(res) # When we have a limit, ensure we don't return more results than requested cur_len = len(res) if cur_len > self._left_res_cnt: res = res[: self._left_res_cnt] self._left_res_cnt -= cur_len return SearchPage(res)
SearchIteratorV2
python
doocs__leetcode
solution/1400-1499/1443.Minimum Time to Collect All Apples in a Tree/Solution.py
{ "start": 0, "end": 563 }
class ____: def minTime(self, n: int, edges: List[List[int]], hasApple: List[bool]) -> int: def dfs(u, cost): if vis[u]: return 0 vis[u] = True nxt_cost = 0 for v in g[u]: nxt_cost += dfs(v, 2) if not hasApple[u] and nxt_cost == 0: return 0 return cost + nxt_cost g = defaultdict(list) for u, v in edges: g[u].append(v) g[v].append(u) vis = [False] * n return dfs(0, 0)
Solution
python
getsentry__sentry
src/sentry/issues/producer.py
{ "start": 831, "end": 4517 }
class ____(ValueEqualityEnum): """ Defines the type of payload that is being sent to Kafka. Messages without PayloadTypes default to OCCURRENCE. When adding new types, existing tests must pass without modifying the payload_type or the payload for backwards compatibility. """ OCCURRENCE = "occurrence" STATUS_CHANGE = "status_change" def _get_occurrence_producer() -> KafkaProducer: return get_arroyo_producer( "sentry.issues.producer", Topic.INGEST_OCCURRENCES, exclude_config_keys=["compression.type", "message.max.bytes"], ) _occurrence_producer = SingletonProducer( _get_occurrence_producer, max_futures=settings.SENTRY_ISSUE_PLATFORM_FUTURES_MAX_LIMIT ) def produce_occurrence_to_kafka( payload_type: PayloadType = PayloadType.OCCURRENCE, occurrence: IssueOccurrence | None = None, status_change: StatusChangeMessage | None = None, event_data: dict[str, Any] | None = None, is_buffered_spans: bool | None = False, ) -> None: if payload_type == PayloadType.OCCURRENCE: payload_data = _prepare_occurrence_message(occurrence, event_data, is_buffered_spans) elif payload_type == PayloadType.STATUS_CHANGE: payload_data = _prepare_status_change_message(status_change) else: raise NotImplementedError(f"Unknown payload type: {payload_type}") if payload_data is None: return partition_key = None if occurrence and occurrence.fingerprint: partition_key = f"{occurrence.fingerprint[0]}-{occurrence.project_id}".encode() elif status_change and status_change.fingerprint: partition_key = f"{status_change.fingerprint[0]}-{status_change.project_id}".encode() payload = KafkaPayload(partition_key, json.dumps(payload_data).encode("utf-8"), []) if settings.SENTRY_EVENTSTREAM != "sentry.eventstream.kafka.KafkaEventStream": # If we're not running Kafka then we're just in dev. 
# Skip producing to Kafka and just process the message directly process_message(Message(Value(payload=payload, committable={}))) return try: topic = get_topic_definition(Topic.INGEST_OCCURRENCES)["real_topic_name"] _occurrence_producer.produce(ArroyoTopic(topic), payload) except KafkaException: logger.exception( "Failed to send occurrence to issue platform", extra={ "id": payload_data["id"], "type": payload_data["type"], "issue_title": payload_data["issue_title"], }, ) def _prepare_occurrence_message( occurrence: IssueOccurrence | None, event_data: dict[str, Any] | None, is_buffered_spans: bool | None = False, ) -> MutableMapping[str, Any] | None: if not occurrence: raise ValueError("occurrence must be provided") if event_data and occurrence.event_id != event_data["event_id"]: raise ValueError("Event id on occurrence and event_data must be the same") payload_data = cast(MutableMapping[str, Any], occurrence.to_dict()) payload_data["payload_type"] = PayloadType.OCCURRENCE.value if event_data: payload_data["event"] = event_data if is_buffered_spans: payload_data["is_buffered_spans"] = True return payload_data def _prepare_status_change_message( status_change: StatusChangeMessage | None, ) -> MutableMapping[str, Any] | None: if not status_change: raise ValueError("status_change must be provided") payload_data = cast(MutableMapping[str, Any], status_change.to_dict()) payload_data["payload_type"] = PayloadType.STATUS_CHANGE.value return payload_data
PayloadType
python
faif__python-patterns
patterns/behavioral/specification.py
{ "start": 536, "end": 1015 }
class ____(Specification): @abstractmethod def is_satisfied_by(self, candidate): pass def and_specification(self, candidate: "Specification") -> "AndSpecification": return AndSpecification(self, candidate) def or_specification(self, candidate: "Specification") -> "OrSpecification": return OrSpecification(self, candidate) def not_specification(self) -> "NotSpecification": return NotSpecification(self)
CompositeSpecification
python
facelessuser__pymdown-extensions
pymdownx/magiclink.py
{ "start": 31849, "end": 32469 }
class ____(InlineProcessor): """Return a link Element given an auto link `<http://example/com>`.""" def handleMatch(self, m, data): """Return link optionally without protocol.""" el = etree.Element("a") el.set('href', self.unescape(m.group(1))) el.text = md_util.AtomicString(m.group(1)) if self.config['hide_protocol']: el.text = md_util.AtomicString(el.text[el.text.find("://") + 3:]) if self.config.get('repo_url_shortener', False): el.set('magiclink', str(MAGIC_AUTO_LINK)) return el, m.start(0), m.end(0)
MagiclinkAutoPattern
python
huggingface__transformers
src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py
{ "start": 44364, "end": 45320 }
class ____(nn.Module): def __init__(self, config: Qwen3OmniMoeVisionEncoderConfig, use_postshuffle_norm=False) -> None: super().__init__() self.hidden_size = config.hidden_size * (config.spatial_merge_size**2) self.use_postshuffle_norm = use_postshuffle_norm self.ln_q = nn.LayerNorm(self.hidden_size if use_postshuffle_norm else config.hidden_size, eps=1e-6) self.mlp = nn.ModuleList( [ nn.Linear(self.hidden_size, self.hidden_size), nn.GELU(), nn.Linear(self.hidden_size, config.out_hidden_size), ] ) def forward(self, hidden: torch.Tensor) -> torch.Tensor: hidden = self.ln_q(hidden.view(-1, self.hidden_size) if self.use_postshuffle_norm else hidden).view( -1, self.hidden_size ) for layer in self.mlp: hidden = layer(hidden) return hidden
Qwen3OmniMoeVisionPatchMerger
python
keras-team__keras
guides/custom_train_step_in_jax.py
{ "start": 5500, "end": 8945 }
class ____(keras.Model): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.loss_tracker = keras.metrics.Mean(name="loss") self.mae_metric = keras.metrics.MeanAbsoluteError(name="mae") self.loss_fn = keras.losses.MeanSquaredError() def compute_loss_and_updates( self, trainable_variables, non_trainable_variables, x, y, training=False, ): y_pred, non_trainable_variables = self.stateless_call( trainable_variables, non_trainable_variables, x, training=training, ) loss = self.loss_fn(y, y_pred) return loss, (y_pred, non_trainable_variables) def train_step(self, state, data): ( trainable_variables, non_trainable_variables, optimizer_variables, metrics_variables, ) = state x, y = data # Get the gradient function. grad_fn = jax.value_and_grad( self.compute_loss_and_updates, has_aux=True ) # Compute the gradients. (loss, (y_pred, non_trainable_variables)), grads = grad_fn( trainable_variables, non_trainable_variables, x, y, training=True, ) # Update trainable variables and optimizer variables. ( trainable_variables, optimizer_variables, ) = self.optimizer.stateless_apply( optimizer_variables, grads, trainable_variables ) # Update metrics. loss_tracker_vars = metrics_variables[ : len(self.loss_tracker.variables) ] mae_metric_vars = metrics_variables[len(self.loss_tracker.variables) :] loss_tracker_vars = self.loss_tracker.stateless_update_state( loss_tracker_vars, loss ) mae_metric_vars = self.mae_metric.stateless_update_state( mae_metric_vars, y, y_pred ) logs = {} logs[self.loss_tracker.name] = self.loss_tracker.stateless_result( loss_tracker_vars ) logs[self.mae_metric.name] = self.mae_metric.stateless_result( mae_metric_vars ) new_metrics_vars = loss_tracker_vars + mae_metric_vars # Return metric logs and updated state variables. 
state = ( trainable_variables, non_trainable_variables, optimizer_variables, new_metrics_vars, ) return logs, state @property def metrics(self): # We list our `Metric` objects here so that `reset_states()` can be # called automatically at the start of each epoch # or at the start of `evaluate()`. return [self.loss_tracker, self.mae_metric] # Construct an instance of CustomModel inputs = keras.Input(shape=(32,)) outputs = keras.layers.Dense(1)(inputs) model = CustomModel(inputs, outputs) # We don't pass a loss or metrics here. model.compile(optimizer="adam") # Just use `fit` as usual -- you can use callbacks, etc. x = np.random.random((1000, 32)) y = np.random.random((1000, 1)) model.fit(x, y, epochs=5) """ ## Providing your own evaluation step What if you want to do the same for calls to `model.evaluate()`? Then you would override `test_step` in exactly the same way. Here's what it looks like: """
CustomModel
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/math_ops/basic_gpu_test.py
{ "start": 6289, "end": 7944 }
class ____(test.TestCase): def _GetGradientArgs(self, xs, ys): return self.evaluate(broadcast_gradient_args(xs, ys)) def testBroadcast(self): r0, r1 = self._GetGradientArgs([2, 3, 5], [1]) self.assertAllEqual(r0, []) self.assertAllEqual(r1, [0, 1, 2]) _GRAD_TOL = {dtypes.float32: 1e-3} def _compareGpu(self, x, y, np_func, tf_func): np_ans = np_func(x, y) with self.cached_session(): inx = ops.convert_to_tensor(x) iny = ops.convert_to_tensor(y) out = tf_func(inx, iny) tf_gpu = self.evaluate(out) self.assertAllClose(np_ans, tf_gpu) self.assertShapeEqual(np_ans, out) # TODO(zhifengc/ke): make gradient checker work on GPU. def testGradient(self): x1 = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape( [1, 3, 2]) x2 = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape( [1, 3, 2]) def div_x1(x1): return math_ops.truediv(x1, x2) * math_ops.cast(1.1, dtype=x1.dtype) def div_x2(x2): return math_ops.truediv(x1, x2) * math_ops.cast(1.1, dtype=x2.dtype) with self.cached_session(): err = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient( div_x1, [x1])) self.assertLess(err, self._GRAD_TOL[dtypes.as_dtype(x1.dtype)]) err = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient( div_x2, [x2])) self.assertLess(err, self._GRAD_TOL[dtypes.as_dtype(x2.dtype)]) self._compareGpu(x1, x2, np.true_divide, math_ops.truediv) self._compareGpu(x1, x2 + 0.1, np.floor_divide, math_ops.floordiv)
BroadcastSimpleTest
python
huggingface__transformers
src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py
{ "start": 788, "end": 6068 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`MobileNetV2Model`]. It is used to instantiate a MobileNetV2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MobileNetV2 [google/mobilenet_v2_1.0_224](https://huggingface.co/google/mobilenet_v2_1.0_224) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. depth_multiplier (`float`, *optional*, defaults to 1.0): Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32 channels. This is sometimes also called "alpha" or "width multiplier". depth_divisible_by (`int`, *optional*, defaults to 8): The number of channels in each layer will always be a multiple of this number. min_depth (`int`, *optional*, defaults to 8): All layers will have at least this many channels. expand_ratio (`float`, *optional*, defaults to 6.0): The number of output channels of the first layer in each block is input channels times expansion ratio. output_stride (`int`, *optional*, defaults to 32): The ratio between the spatial resolution of the input and output feature maps. By default the model reduces the input dimensions by a factor of 32. If `output_stride` is 8 or 16, the model uses dilated convolutions on the depthwise layers instead of regular convolutions, so that the feature maps never become more than 8x or 16x smaller than the input image. first_layer_is_expansion (`bool`, *optional*, defaults to `True`): True if the very first convolution layer is also the expansion layer for the first expansion block. 
finegrained_output (`bool`, *optional*, defaults to `True`): If true, the number of output channels in the final convolution layer will stay large (1280) even if `depth_multiplier` is less than 1. hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`): The non-linear activation function (function or string) in the Transformer encoder and convolution layers. tf_padding (`bool`, *optional*, defaults to `True`): Whether to use TensorFlow padding rules on the convolution layers. classifier_dropout_prob (`float`, *optional*, defaults to 0.8): The dropout ratio for attached classifiers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 0.001): The epsilon used by the layer normalization layers. semantic_loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function of the semantic segmentation model. 
Example: ```python >>> from transformers import MobileNetV2Config, MobileNetV2Model >>> # Initializing a "mobilenet_v2_1.0_224" style configuration >>> configuration = MobileNetV2Config() >>> # Initializing a model from the "mobilenet_v2_1.0_224" style configuration >>> model = MobileNetV2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mobilenet_v2" def __init__( self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6.0, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs, ): super().__init__(**kwargs) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero.") self.num_channels = num_channels self.image_size = image_size self.depth_multiplier = depth_multiplier self.depth_divisible_by = depth_divisible_by self.min_depth = min_depth self.expand_ratio = expand_ratio self.output_stride = output_stride self.first_layer_is_expansion = first_layer_is_expansion self.finegrained_output = finegrained_output self.hidden_act = hidden_act self.tf_padding = tf_padding self.classifier_dropout_prob = classifier_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.semantic_loss_ignore_index = semantic_loss_ignore_index __all__ = ["MobileNetV2Config"]
MobileNetV2Config
python
keon__algorithms
tests/test_graph.py
{ "start": 5560, "end": 6520 }
class ____(unittest.TestCase): def test_all_pairs_shortest_path(self): graph = [[0, 0.1, 0.101, 0.142, 0.277], [0.465, 0, 0.191, 0.192, 0.587], [0.245, 0.554, 0, 0.333, 0.931], [1.032, 0.668, 0.656, 0, 0.151], [0.867, 0.119, 0.352, 0.398, 0]] result = all_pairs_shortest_path(graph) self.assertEqual(result, [ [0, 0.1, 0.101, 0.142, 0.277], [0.436, 0, 0.191, 0.192, 0.34299999999999997], [0.245, 0.345, 0, 0.333, 0.484], [0.706, 0.27, 0.46099999999999997, 0, 0.151], [0.5549999999999999, 0.119, 0.31, 0.311, 0], ])
TestAll_Pairs_Shortest_Path
python
django__django
tests/model_formsets/models.py
{ "start": 5777, "end": 5939 }
class ____(models.Model): uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) name = models.CharField(max_length=255)
UUIDPKParent
python
scrapy__scrapy
tests/test_loader.py
{ "start": 15805, "end": 19497 }
class ____: response = HtmlResponse( url="", encoding="utf-8", body=b""" <html> <body> <header> <div id="id">marta</div> <p>paragraph</p> </header> <footer class="footer"> <a href="http://www.scrapy.org">homepage</a> <img src="/images/logo.png" width="244" height="65" alt="Scrapy"> </footer> </body> </html> """, ) def test_nested_xpath(self): l = NestedItemLoader(response=self.response) nl = l.nested_xpath("//header") nl.add_xpath("name", "div/text()") nl.add_css("name_div", "#id") nl.add_value("name_value", nl.selector.xpath('div[@id = "id"]/text()').getall()) assert l.get_output_value("name") == ["marta"] assert l.get_output_value("name_div") == ['<div id="id">marta</div>'] assert l.get_output_value("name_value") == ["marta"] assert l.get_output_value("name") == nl.get_output_value("name") assert l.get_output_value("name_div") == nl.get_output_value("name_div") assert l.get_output_value("name_value") == nl.get_output_value("name_value") def test_nested_css(self): l = NestedItemLoader(response=self.response) nl = l.nested_css("header") nl.add_xpath("name", "div/text()") nl.add_css("name_div", "#id") nl.add_value("name_value", nl.selector.xpath('div[@id = "id"]/text()').getall()) assert l.get_output_value("name") == ["marta"] assert l.get_output_value("name_div") == ['<div id="id">marta</div>'] assert l.get_output_value("name_value") == ["marta"] assert l.get_output_value("name") == nl.get_output_value("name") assert l.get_output_value("name_div") == nl.get_output_value("name_div") assert l.get_output_value("name_value") == nl.get_output_value("name_value") def test_nested_replace(self): l = NestedItemLoader(response=self.response) nl1 = l.nested_xpath("//footer") nl2 = nl1.nested_xpath("a") l.add_xpath("url", "//footer/a/@href") assert l.get_output_value("url") == ["http://www.scrapy.org"] nl1.replace_xpath("url", "img/@src") assert l.get_output_value("url") == ["/images/logo.png"] nl2.replace_xpath("url", "@href") assert l.get_output_value("url") == 
["http://www.scrapy.org"] def test_nested_ordering(self): l = NestedItemLoader(response=self.response) nl1 = l.nested_xpath("//footer") nl2 = nl1.nested_xpath("a") nl1.add_xpath("url", "img/@src") l.add_xpath("url", "//footer/a/@href") nl2.add_xpath("url", "text()") l.add_xpath("url", "//footer/a/@href") assert l.get_output_value("url") == [ "/images/logo.png", "http://www.scrapy.org", "homepage", "http://www.scrapy.org", ] def test_nested_load_item(self): l = NestedItemLoader(response=self.response) nl1 = l.nested_xpath("//footer") nl2 = nl1.nested_xpath("img") l.add_xpath("name", "//header/div/text()") nl1.add_xpath("url", "a/@href") nl2.add_xpath("image", "@src") item = l.load_item() assert item is l.item assert item is nl1.item assert item is nl2.item assert item["name"] == ["marta"] assert item["url"] == ["http://www.scrapy.org"] assert item["image"] == ["/images/logo.png"] # Functions as processors def function_processor_strip(iterable): return [x.strip() for x in iterable] def function_processor_upper(iterable): return [x.upper() for x in iterable]
TestSubselectorLoader
python
django__django
django/contrib/gis/geos/prototypes/coordseq.py
{ "start": 1731, "end": 3124 }
class ____(GEOSFuncFactory): restype = CS_PTR @staticmethod def errcheck(result, func, cargs): if not result: raise GEOSException( "Error encountered checking Coordinate Sequence returned from GEOS " 'C function "%s".' % func.__name__ ) return result # ## Coordinate Sequence ctypes prototypes ## # Coordinate Sequence constructors & cloning. cs_clone = CsOutput("GEOSCoordSeq_clone", argtypes=[CS_PTR]) create_cs = CsOutput("GEOSCoordSeq_create", argtypes=[c_uint, c_uint]) get_cs = CsOutput("GEOSGeom_getCoordSeq", argtypes=[GEOM_PTR]) # Getting, setting ordinate cs_getordinate = CsOperation("GEOSCoordSeq_getOrdinate", ordinate=True, get=True) cs_setordinate = CsOperation("GEOSCoordSeq_setOrdinate", ordinate=True) # For getting, x, y, z cs_getx = CsOperation("GEOSCoordSeq_getX", get=True) cs_gety = CsOperation("GEOSCoordSeq_getY", get=True) cs_getz = CsOperation("GEOSCoordSeq_getZ", get=True) # For setting, x, y, z cs_setx = CsOperation("GEOSCoordSeq_setX") cs_sety = CsOperation("GEOSCoordSeq_setY") cs_setz = CsOperation("GEOSCoordSeq_setZ") # These routines return size & dimensions. cs_getsize = CsInt("GEOSCoordSeq_getSize") cs_getdims = CsInt("GEOSCoordSeq_getDimensions") cs_is_ccw = GEOSFuncFactory( "GEOSCoordSeq_isCCW", restype=c_int, argtypes=[CS_PTR, POINTER(c_byte)] )
CsOutput
python
PyCQA__pydocstyle
src/tests/test_decorators.py
{ "start": 5499, "end": 7535 }
class ____: """Unit test for Method class.""" def makeMethod(self, name='someMethodName'): """Return a simple method instance.""" children = [] dunder_all = ['ClassName'] source = textwrap.dedent("""\ class ClassName: def %s(self): """ % (name)) module = parser.Module('module_name', source, 0, 1, [], 'Docstring for module', [], None, dunder_all, None, None, '') cls = parser.Class('ClassName', source, 0, 1, [], 'Docstring for class', children, [], module, '') return parser.Method(name, source, 0, 1, [], 'Docstring for method', children, [], cls, '') def test_is_public_normal(self): """Test that methods are normally public, even if decorated.""" method = self.makeMethod('methodName') method.decorators = [parser.Decorator('some_decorator', [])] assert method.is_public def test_is_public_setter(self): """Test that setter methods are considered private.""" method = self.makeMethod('methodName') method.decorators = [ parser.Decorator('some_decorator', []), parser.Decorator('methodName.setter', []), ] assert not method.is_public def test_is_public_deleter(self): """Test that deleter methods are also considered private.""" method = self.makeMethod('methodName') method.decorators = [ parser.Decorator('methodName.deleter', []), parser.Decorator('another_decorator', []), ] assert not method.is_public def test_is_public_trick(self): """Test that common prefix does not necessarily indicate private.""" method = self.makeMethod("foo") method.decorators = [ parser.Decorator('foobar', []), parser.Decorator('foobar.baz', []), ] assert method.is_public
TestMethod