Dataset schema (each row below lists its fields in this column order):

  language    stringclasses   1 value
  repo        stringclasses   346 values
  path        stringlengths   6 to 201
  class_span  dict
  source      stringlengths   21 to 2.38M
  target      stringlengths   1 to 96
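In each row, `source` holds a Python class definition with its name masked as `____`, `target` is the class name to recover, `class_span` gives the character offsets of the class within the file at `path`, and `repo` identifies the GitHub project. A minimal sketch of iterating over such rows with the Hugging Face `datasets` library follows; the dataset identifier is a hypothetical placeholder, since the dump does not name one.

    # A minimal sketch, assuming these rows are published as a Hugging Face dataset;
    # "org/python-class-name-prediction" is a hypothetical identifier, not from the dump.
    from datasets import load_dataset

    ds = load_dataset("org/python-class-name-prediction", split="train", streaming=True)

    for row in ds.take(3):
        span = row["class_span"]  # {"start": ..., "end": ...} character offsets
        print(row["repo"], row["path"], span["start"], span["end"])
        # `source` is the masked class body; `target` is the original class name.
        print(row["target"], "<-", row["source"][:80])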
python
getsentry__sentry
src/sentry/incidents/grouptype.py
{ "start": 2440, "end": 6147 }
class ____(StrEnum): CRASH_FREE_SESSIONS = "percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate" CRASH_FREE_USERS = "percentage(users_crashed, users) AS _crash_rate_alert_aggregate" # Define all possible alert type values as a literal type MetricAlertType = Literal[ "num_errors", "users_experiencing_errors", "throughput", "trans_duration", "apdex", "failure_rate", "lcp", "fid", "cls", "crash_free_sessions", "crash_free_users", "trace_item_throughput", "trace_item_duration", "trace_item_apdex", "trace_item_failure_rate", "trace_item_lcp", "custom_transactions", "eap_metrics", ] AggregateIdentifier = Literal[ "count()", "count_unique(user)", "transaction.duration", "apdex", "failure_rate()", "measurements.lcp", "measurements.fid", "measurements.cls", "percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate", "percentage(users_crashed, users) AS _crash_rate_alert_aggregate", "count(span.duration)", "span.duration", ] ALERT_TYPE_IDENTIFIERS: dict[Dataset, dict[MetricAlertType, AggregateIdentifier]] = { Dataset.Events: {"num_errors": "count()", "users_experiencing_errors": "count_unique(user)"}, Dataset.Transactions: { "throughput": "count()", "trans_duration": "transaction.duration", "apdex": "apdex", "failure_rate": "failure_rate()", "lcp": "measurements.lcp", "fid": "measurements.fid", "cls": "measurements.cls", }, Dataset.PerformanceMetrics: { "throughput": "count()", "trans_duration": "transaction.duration", "apdex": "apdex", "failure_rate": "failure_rate()", "lcp": "measurements.lcp", "fid": "measurements.fid", "cls": "measurements.cls", }, Dataset.Sessions: { "crash_free_sessions": SessionsAggregate.CRASH_FREE_SESSIONS.value, "crash_free_users": SessionsAggregate.CRASH_FREE_USERS.value, }, Dataset.Metrics: { "crash_free_sessions": SessionsAggregate.CRASH_FREE_SESSIONS.value, "crash_free_users": SessionsAggregate.CRASH_FREE_USERS.value, }, Dataset.EventsAnalyticsPlatform: { "trace_item_throughput": "count(span.duration)", "trace_item_duration": "span.duration", "trace_item_apdex": "apdex", "trace_item_failure_rate": "failure_rate()", "trace_item_lcp": "measurements.lcp", }, } def get_alert_type_from_aggregate_dataset( aggregate: str, dataset: Dataset, organization: Organization | None = None ) -> MetricAlertType: """ Given an aggregate and dataset object, will return the corresponding wizard alert type e.g. {'aggregate': 'count()', 'dataset': Dataset.ERRORS} will yield 'num_errors' This function is used to format the aggregate value for anomaly detection issues. """ identifier_for_dataset = ALERT_TYPE_IDENTIFIERS.get(dataset, {}) # Find matching alert type entry matching_alert_type: MetricAlertType | None = None for alert_type, identifier in identifier_for_dataset.items(): if identifier in aggregate: matching_alert_type = alert_type break # Special handling for EventsAnalyticsPlatform dataset if dataset == Dataset.EventsAnalyticsPlatform: if organization and features.has( "organizations:discover-saved-queries-deprecation", organization ): return matching_alert_type if matching_alert_type else "eap_metrics" return "eap_metrics" return matching_alert_type if matching_alert_type else "custom_transactions"
SessionsAggregate
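The original class text can be recovered by substituting `target` back into the `____` placeholder, and `class_span` can be used to slice the class out of the file at `path`. A small sketch, assuming `row` is a dict shaped like the record above (the names `restore_class` and `slice_from_file` are illustrative, not from the dataset):

    def restore_class(row: dict) -> str:
        # Put the class name back in place of the mask to recover the original definition.
        return row["source"].replace("____", row["target"], 1)

    def slice_from_file(row: dict, file_text: str) -> str:
        # class_span start/end are character offsets into the file at `path`,
        # e.g. {"start": 2440, "end": 6147} for the row above.
        span = row["class_span"]
        return file_text[span["start"]:span["end"]]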
python
pytorch__pytorch
test/distributed/tensor/test_dtensor_compile.py
{ "start": 3144, "end": 5040 }
class ____(nn.Module):
    def __init__(self, device):
        super().__init__()
        self.mlp_0 = MLPModule(device)
        self.mlp_1 = MLPModule(device)

    def forward(self, input):
        return self.mlp_1(self.mlp_0(input))


def extract_graph(fx_g, _, graph_cell):
    graph_cell[0] = fx_g.code
    return fx_g


# Make a custom compiler that runs aot autograd but extracts the fw graph
fw_graph_cell = [None]
bw_graph_cell = [None]
fw_compiler = functools.partial(extract_graph, graph_cell=fw_graph_cell)
bw_compiler = functools.partial(extract_graph, graph_cell=bw_graph_cell)

from functorch.compile import min_cut_rematerialization_partition
from torch._dynamo.backends.common import aot_autograd

aot_eager_graph = aot_autograd(
    fw_compiler=fw_compiler,
    bw_compiler=bw_compiler,
    partition_fn=min_cut_rematerialization_partition,
)

device_type = (
    acc.type if (acc := torch.accelerator.current_accelerator(True)) else "cpu"
)


def _apply_sharding(mod: nn.Module, shard_dim: int, device_mesh: DeviceMesh):
    """
    Shards on the given dimension if possible, else replicate

    Args:
        mod: (nn.Module) Module to shard or replicate
        shard_dim: (int) Dimension to shard on if possible
        device_mesh: (DeviceMesh) 1D Device Mesh

    Returns:
        Sharded DTensor
    """

    def shard_module_params(name, module, device_mesh):
        for name, param in module.named_parameters():
            placement = Replicate()
            if shard_dim < len(param.size()):
                placement = Shard(shard_dim)
            dist_param = torch.nn.Parameter(
                distribute_tensor(param, device_mesh, [placement])
            )
            name = name.split(".")[-1]
            module.register_parameter(name, dist_param)

    sharded_mod = distribute_module(mod, device_mesh, shard_module_params)
    return sharded_mod
SimpleModel
python
bokeh__bokeh
src/bokeh/models/tickers.py
{ "start": 10264, "end": 11425 }
class ____(BasicTicker):
    ''' Generate nice lat/lon ticks from underlying WebMercator coordinates.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    dimension = Nullable(Enum(LatLon), help="""
    Specify whether to generate ticks for Latitude or Longitude.

    Projected coordinates are not separable, computing Latitude and Longitude
    tick locations from Web Mercator requires considering coordinates from both
    dimensions together. Use this property to specify which result should be
    returned.

    Typically, if the ticker is for an x-axis, then dimension should be
    ``"lon"`` and if the ticker is for a y-axis, then the dimension should be
    ``"lat"``.

    In order to prevent hard to debug errors, there is no default value for
    dimension. Using an un-configured ``MercatorTicker`` will result in a
    validation error and a JavaScript console error.
    """)

    @error(MISSING_MERCATOR_DIMENSION)
    def _check_missing_dimension(self):
        if self.dimension is None:
            return str(self)
MercatorTicker
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_set.py
{ "start": 51015, "end": 51256 }
class ____(_TestSubsets, __TestCase):
    left = set([1])
    right = set([2])
    name = "neither empty, neither contains"
    cases = "!="

#==============================================================================
TestSubsetNonOverlap
python
vyperlang__vyper
vyper/venom/analysis/mem_ssa.py
{ "start": 1961, "end": 2281 }
class ____(MemoryAccess):
    """Represents a definition of memory state"""

    def __init__(self, id: int, store_inst: IRInstruction, loc: MemoryLocation):
        super().__init__(id)
        self.store_inst = store_inst
        self.loc = loc

    @property
    def inst(self):
        return self.store_inst
MemoryDef
python
huggingface__transformers
src/transformers/models/got_ocr2/modular_got_ocr2.py
{ "start": 15232, "end": 19715 }
class ____(LlavaForConditionalGeneration): @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, GotOcr2CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, GotOcr2ForConditionalGeneration, TextStreamer >>> model = GotOcr2ForConditionalGeneration.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf").to("cuda") >>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf") >>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(image, return_tensors="pt", color="green").to("cuda") >>> # Generate >>> streamer = TextStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True) >>> generate_ids = model.generate( ... **inputs, ... do_sample=False, ... tokenizer = processor.tokenizer, ... stop_strings='<|im_end|>', ... streamer=streamer, ... max_new_tokens=4096, ... ) "You should keep in mind what features from the module should be used, especially when you're planning to sell a template." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs ) return GotOcr2CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) __all__ = [ "GotOcr2VisionConfig", "GotOcr2Config", "GotOcr2PreTrainedModel", "GotOcr2Model", "GotOcr2ForConditionalGeneration", ]
GotOcr2ForConditionalGeneration
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 629428, "end": 630074 }
class ____(sgqlc.types.relay.Connection):
    """
    See source code for more info.
    """

    __schema__ = graphql_schema
    __field_names__ = ("edges", "nodes", "page_info", "total_count")
    edges = sgqlc.types.Field(
        sgqlc.types.list_of("SponsorsActivityEdge"), graphql_name="edges"
    )
    nodes = sgqlc.types.Field(
        sgqlc.types.list_of("SponsorsActivity"), graphql_name="nodes"
    )
    page_info = sgqlc.types.Field(
        sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
    )
    total_count = sgqlc.types.Field(
        sgqlc.types.non_null(Int), graphql_name="totalCount"
    )
SponsorsActivityConnection
python
python-attrs__attrs
typing-examples/baseline.py
{ "start": 857, "end": 1066 }
class ____:
    without_alias: int
    _with_alias: int = attrs.field(alias="_with_alias")


attrs.fields(AliasExample).without_alias.alias
attrs.fields(AliasExample)._with_alias.alias


@attrs.define
AliasExample
python
walkccc__LeetCode
solutions/298. Binary Tree Longest Consecutive Sequence/298.py
{ "start": 0, "end": 534 }
class ____:
    def longestConsecutive(self, root: TreeNode | None) -> int:
        if not root:
            return 0

        def dfs(root: TreeNode | None, target: int, length: int, maxLength: int) -> int:
            if not root:
                return maxLength
            if root.val == target:
                length += 1
                maxLength = max(maxLength, length)
            else:
                length = 1
            return max(dfs(root.left, root.val + 1, length, maxLength),
                       dfs(root.right, root.val + 1, length, maxLength))

        return dfs(root, root.val, 0, 0)
Solution
python
tensorflow__tensorflow
tensorflow/python/util/dispatch.py
{ "start": 5556, "end": 6864 }
class ____(object):
    """Abstract base class for TensorFlow global operator dispatchers."""

    NOT_SUPPORTED = OpDispatcher.NOT_SUPPORTED

    def handle(self, op, args, kwargs):
        """Handle the specified operation with the specified arguments."""

    def register(self):
        """Register this dispatcher as a handler for all ops."""
        _GLOBAL_DISPATCHERS.append(self)


def dispatch(op, args, kwargs):
    """Returns the result from the first successful dispatcher for a given op.

    Calls the `handle` method of each `OpDispatcher` that has been registered
    to handle `op`, and returns the value from the first successful handler.

    Args:
        op: Python function: the operation to dispatch for.
        args: The arguments to the operation.
        kwargs: The keyword arguments to the operation.

    Returns:
        The result of the operation, or `NOT_SUPPORTED` if no registered
        dispatcher can handle the given arguments.
    """
    for dispatcher in getattr(op, FALLBACK_DISPATCH_ATTR):
        result = dispatcher.handle(args, kwargs)
        if result is not OpDispatcher.NOT_SUPPORTED:
            return result
    for dispatcher in _GLOBAL_DISPATCHERS:
        result = dispatcher.handle(op, args, kwargs)
        if result is not OpDispatcher.NOT_SUPPORTED:
            return result
    return OpDispatcher.NOT_SUPPORTED
GlobalOpDispatcher
python
getsentry__sentry
src/sentry/models/debugfile.py
{ "start": 12495, "end": 14235 }
class ____:
    def __init__(
        self,
        file_format: str,
        arch: str,
        debug_id: str,
        path: str,
        code_id: str | None = None,
        name: str | None = None,
        data: Any | None = None,
    ):
        self.file_format = file_format
        self.arch = arch
        self.debug_id = debug_id
        # TODO(flub): should this use normalize_debug_id()?
        self.code_id = code_id
        self.path = path
        self.data = data

        if name is not None:
            self.name = os.path.basename(name)
        elif path is not None:
            self.name = os.path.basename(path)

    @classmethod
    def from_object(
        cls,
        obj: Object,
        path: str,
        name: str | None = None,
        debug_id: str | None = None,
    ) -> DifMeta:
        if debug_id is not None:
            try:
                debug_id = normalize_debug_id(debug_id)
            except SymbolicError:
                debug_id = None

        # Only allow overrides in the debug_id's age if the rest of the debug id
        # matches with what we determine from the object file. We generally
        # trust the server more than the client.
        obj_id = obj.debug_id
        if obj_id and debug_id and obj_id[:36] == debug_id[:36]:
            obj_id = debug_id

        return cls(
            file_format=obj.file_format,
            arch=obj.arch,
            debug_id=obj_id,
            code_id=obj.code_id,
            path=path,
            # TODO: Extract the object name from the object
            name=name,
            data={"type": obj.kind, "features": list(obj.features)},
        )

    @property
    def basename(self) -> str:
        return os.path.basename(self.path)
DifMeta
python
wandb__wandb
wandb/sdk/launch/builder/context_manager.py
{ "start": 592, "end": 9618 }
class ____: """Creates a build context for a container image from job source code. The dockerfile and build context may be specified by the job itself. If not, the behavior for creating the build context is as follows: - If a Dockerfile.wandb is found adjacent to the entrypoint, the directory containing the entrypoint is used as the build context and Dockerfile.wandb is used as the Dockerfile. - If `override_dockerfile` is set on the LaunchProject, the directory containing the Dockerfile is used as the build context and the Dockerfile is used as the Dockerfile. `override_dockerfile` can be set in a launch spec via the `-D` flag to `wandb launch` or in the `overrides` section of the launch drawer. - If no dockerfile is set, a Dockerfile is generated from the job's requirements and entrypoint. """ def __init__(self, launch_project: LaunchProject): """Initialize a BuildContextManager. Arguments: launch_project: The launch project. """ self._launch_project = launch_project assert self._launch_project.project_dir is not None self._directory = tempfile.mkdtemp() def _generate_dockerfile(self, builder_type: str) -> str: """Generate a Dockerfile for the container image. Arguments: builder_type: The type of builder to use. One of "docker" or "kaniko". Returns: The contents of the Dockerfile. """ launch_project = self._launch_project entry_point = ( launch_project.override_entrypoint or launch_project.get_job_entry_point() ) # get python versions truncated to major.minor to ensure image availability if launch_project.python_version: spl = launch_project.python_version.split(".")[:2] py_version, py_major = (".".join(spl), spl[0]) else: py_version, py_major = get_current_python_version() python_build_image = ( f"python:{py_version}" # use full python image for package installation ) requirements_section = get_requirements_section( launch_project, self._directory, builder_type ) # ----- stage 2: base ----- python_base_setup = get_base_setup(launch_project, py_version, py_major) # set up user info username, userid = get_docker_user(launch_project, launch_project.resource) user_setup = get_user_setup(username, userid, launch_project.resource) workdir = f"/home/{username}" assert entry_point is not None entrypoint_section = get_entrypoint_setup(entry_point) dockerfile_contents = DOCKERFILE_TEMPLATE.format( py_build_image=python_build_image, requirements_section=requirements_section, base_setup=python_base_setup, uid=userid, user_setup=user_setup, workdir=workdir, entrypoint_section=entrypoint_section, ) return dockerfile_contents def create_build_context(self, builder_type: str) -> Tuple[str, str]: """Create the build context for the container image. Returns: A pair of str: the path to the build context locally and the image tag computed from the Dockerfile. """ entrypoint = ( self._launch_project.get_job_entry_point() or self._launch_project.override_entrypoint ) assert entrypoint is not None assert entrypoint.name is not None assert self._launch_project.project_dir is not None # we use that as the build context. build_context_root_dir = self._launch_project.project_dir job_build_context = self._launch_project.job_build_context if job_build_context: full_path = os.path.join(build_context_root_dir, job_build_context) if not os.path.exists(full_path): raise LaunchError(f"Build context does not exist at {full_path}") build_context_root_dir = full_path # This is the case where the user specifies a Dockerfile to use. # We use the directory containing the Dockerfile as the build context. 
override_dockerfile = self._launch_project.override_dockerfile if override_dockerfile: full_path = os.path.join( build_context_root_dir, override_dockerfile, ) if not os.path.exists(full_path): raise LaunchError(f"Dockerfile does not exist at {full_path}") shutil.copytree( build_context_root_dir, self._directory, symlinks=True, dirs_exist_ok=True, ignore=shutil.ignore_patterns("fsmonitor--daemon.ipc"), ) shutil.copy( full_path, os.path.join(self._directory, _WANDB_DOCKERFILE_NAME), ) return self._directory, image_tag_from_dockerfile_and_source( self._launch_project, open(full_path).read() ) # If the job specifies a Dockerfile, we use that as the Dockerfile. job_dockerfile = self._launch_project.job_dockerfile if job_dockerfile: dockerfile_path = os.path.join(build_context_root_dir, job_dockerfile) if not os.path.exists(dockerfile_path): raise LaunchError(f"Dockerfile does not exist at {dockerfile_path}") shutil.copytree( build_context_root_dir, self._directory, symlinks=True, dirs_exist_ok=True, ignore=shutil.ignore_patterns("fsmonitor--daemon.ipc"), ) shutil.copy( dockerfile_path, os.path.join(self._directory, _WANDB_DOCKERFILE_NAME), ) return self._directory, image_tag_from_dockerfile_and_source( self._launch_project, open(dockerfile_path).read() ) # This is the case where we find Dockerfile.wandb adjacent to the # entrypoint. We use the entrypoint directory as the build context. entrypoint_dir = os.path.dirname(entrypoint.name) if entrypoint_dir: path = os.path.join( build_context_root_dir, entrypoint_dir, _WANDB_DOCKERFILE_NAME, ) else: path = os.path.join(build_context_root_dir, _WANDB_DOCKERFILE_NAME) if os.path.exists( path ): # We found a Dockerfile.wandb adjacent to the entrypoint. shutil.copytree( os.path.dirname(path), self._directory, symlinks=True, dirs_exist_ok=True, ignore=shutil.ignore_patterns("fsmonitor--daemon.ipc"), ) # TODO: remove this once we make things more explicit for users if entrypoint_dir: new_path = os.path.basename(entrypoint.name) entrypoint = self._launch_project.get_job_entry_point() if entrypoint is not None: entrypoint.update_entrypoint_path(new_path) with open(path) as f: docker_file_contents = f.read() return self._directory, image_tag_from_dockerfile_and_source( self._launch_project, docker_file_contents ) # This is the case where we use our own Dockerfile template. We move # the user code into a src directory in the build context. dst_path = os.path.join(self._directory, "src") assert self._launch_project.project_dir is not None shutil.copytree( src=self._launch_project.project_dir, dst=dst_path, symlinks=True, ignore=shutil.ignore_patterns("fsmonitor--daemon.ipc"), ) shutil.copy( os.path.join(os.path.dirname(__file__), "templates", "_wandb_bootstrap.py"), os.path.join(self._directory), ) if self._launch_project.python_version: runtime_path = os.path.join(dst_path, "runtime.txt") with open(runtime_path, "w") as fp: fp.write(f"python-{self._launch_project.python_version}") # TODO: we likely don't need to pass the whole git repo into the container # with open(os.path.join(directory, ".dockerignore"), "w") as f: # f.write("**/.git") with open(os.path.join(self._directory, _WANDB_DOCKERFILE_NAME), "w") as handle: docker_file_contents = self._generate_dockerfile(builder_type=builder_type) handle.write(docker_file_contents) image_tag = image_tag_from_dockerfile_and_source( self._launch_project, docker_file_contents ) return self._directory, image_tag
BuildContextManager
python
airbytehq__airbyte
airbyte-integrations/connectors/source-linnworks/source_linnworks/streams.py
{ "start": 500, "end": 1654 }
class ____(HttpStream, ABC):
    http_method = "POST"

    def __init__(self, authenticator: Union[AuthBase, HttpAuthenticator] = None, start_date: str = None):
        super().__init__(authenticator=authenticator)
        self._authenticator = authenticator
        self.start_date = start_date

    @property
    def url_base(self) -> str:
        return self._session.auth.get_server()

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        return None

    def request_params(
        self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
    ) -> MutableMapping[str, Any]:
        return {}

    def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
        json = response.json()

        if not isinstance(json, list):
            json = [json]

        for record in json:
            yield record

    def backoff_time(self, response: requests.Response) -> Optional[float]:
        delay_time = response.headers.get("Retry-After")
        if delay_time:
            return int(delay_time)
LinnworksStream
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/via_type_of.py
{ "start": 1836, "end": 2497 }
class ____:
    x: Dict[str, List[int]] = {}
    y: Test3_Foo = Test3_Foo()
    z: Annotated[List[List[str]], "test3"] = []


def test3_alarm1(c: Test3_C):
    # always-via-type:Dict[str, List[int]]
    _test_sink(c.x)


def test3_alarm2(c: Test3_C):
    # always-via-type:Test3_Foo
    _test_sink(c.y)


def test3_alarm3(c: Test3_C):
    # always-via-type:typing.Annotated[List[List[str]]
    _test_sink(c.z)


def test3_alarm4(c: Test3_C):
    # via-type:Dict[str, List[int]],
    # via-type:Test3_Foo,
    # via-type:typing.Annotated[List[List[str]]
    foo = c.x
    if 1:
        foo = c.y
    elif 2:
        foo = c.z
    _test_sink(foo)


@dataclass
Test3_C
python
keon__algorithms
algorithms/compression/huffman_coding.py
{ "start": 973, "end": 3329 }
class ____: def __init__(self, file): self.file = file self.buffer = [] self.is_last_byte = False def get_number_of_additional_bits_in_the_last_byte(self) -> int: bin_num = self.get_bit() + self.get_bit() + self.get_bit() return int(bin_num, 2) def load_tree(self) -> Node: """ Load tree from file :return: """ node_stack = deque() queue_leaves = deque() root = Node() current_node = root is_end_of_tree = False while not is_end_of_tree: current_bit = self.get_bit() if current_bit == "0": current_node.left = Node() current_node.right = Node() node_stack.append(current_node.right) # going to left node, right push on stack current_node = current_node.left else: queue_leaves.append(current_node) if node_stack: current_node = node_stack.pop() else: is_end_of_tree = True self._fill_tree(queue_leaves) return root def _fill_tree(self, leaves_queue): """ Load values to tree after reading tree :param leaves_queue: :return: """ leaves_queue.reverse() while leaves_queue: node = leaves_queue.pop() s = int(self.get_byte(), 2) node.sign = s def _load_byte(self, buff_limit=8) -> bool: """ Load next byte is buffer is less than buff_limit :param buff_limit: :return: True if there is enough bits in buffer to read """ if len(self.buffer) <= buff_limit: byte = self.file.read(1) if not byte: return False i = int.from_bytes(byte, "big") self.buffer.extend(list("{0:08b}".format(i))) return True def get_bit(self, buff_limit=8): if self._load_byte(buff_limit): bit = self.buffer.pop(0) return bit else: return -1 def get_byte(self): if self._load_byte(): byte_list = self.buffer[:8] self.buffer = self.buffer[8:] return "".join(byte_list) else: return -1
HuffmanReader
python
gevent__gevent
src/greentest/3.13/test_queue.py
{ "start": 23047, "end": 27240 }
class ____(BlockingTestMixin): def setUp(self): Queue = self.queue.Queue class FailingQueue(Queue): def __init__(self, *args): self.fail_next_put = False self.fail_next_get = False Queue.__init__(self, *args) def _put(self, item): if self.fail_next_put: self.fail_next_put = False raise FailingQueueException("You Lose") return Queue._put(self, item) def _get(self): if self.fail_next_get: self.fail_next_get = False raise FailingQueueException("You Lose") return Queue._get(self) self.FailingQueue = FailingQueue super().setUp() def failing_queue_test(self, q): if q.qsize(): raise RuntimeError("Call this function with an empty queue") for i in range(QUEUE_SIZE-1): q.put(i) # Test a failing non-blocking put. q.fail_next_put = True try: q.put("oops", block=0) self.fail("The queue didn't fail when it should have") except FailingQueueException: pass q.fail_next_put = True try: q.put("oops", timeout=0.1) self.fail("The queue didn't fail when it should have") except FailingQueueException: pass q.put("last") self.assertTrue(qfull(q), "Queue should be full") # Test a failing blocking put q.fail_next_put = True try: self.do_blocking_test(q.put, ("full",), q.get, ()) self.fail("The queue didn't fail when it should have") except FailingQueueException: pass # Check the Queue isn't damaged. # put failed, but get succeeded - re-add q.put("last") # Test a failing timeout put q.fail_next_put = True try: self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (), FailingQueueException) self.fail("The queue didn't fail when it should have") except FailingQueueException: pass # Check the Queue isn't damaged. # put failed, but get succeeded - re-add q.put("last") self.assertTrue(qfull(q), "Queue should be full") q.get() self.assertTrue(not qfull(q), "Queue should not be full") q.put("last") self.assertTrue(qfull(q), "Queue should be full") # Test a blocking put self.do_blocking_test(q.put, ("full",), q.get, ()) # Empty it for i in range(QUEUE_SIZE): q.get() self.assertTrue(not q.qsize(), "Queue should be empty") q.put("first") q.fail_next_get = True try: q.get() self.fail("The queue didn't fail when it should have") except FailingQueueException: pass self.assertTrue(q.qsize(), "Queue should not be empty") q.fail_next_get = True try: q.get(timeout=0.1) self.fail("The queue didn't fail when it should have") except FailingQueueException: pass self.assertTrue(q.qsize(), "Queue should not be empty") q.get() self.assertTrue(not q.qsize(), "Queue should be empty") q.fail_next_get = True try: self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',), FailingQueueException) self.fail("The queue didn't fail when it should have") except FailingQueueException: pass # put succeeded, but get failed. self.assertTrue(q.qsize(), "Queue should not be empty") q.get() self.assertTrue(not q.qsize(), "Queue should be empty") def test_failing_queue(self): # Test to make sure a queue is functioning correctly. # Done twice to the same instance. q = self.FailingQueue(QUEUE_SIZE) self.failing_queue_test(q) self.failing_queue_test(q)
FailingQueueTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 866612, "end": 867535 }
class ____(sgqlc.types.Type):
    """A file changed in a pull request."""

    __schema__ = github_schema
    __field_names__ = ("additions", "change_type", "deletions", "path", "viewer_viewed_state")
    additions = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="additions")
    """The number of additions to the file."""

    change_type = sgqlc.types.Field(sgqlc.types.non_null(PatchStatus), graphql_name="changeType")
    """How the file was changed in this PullRequest"""

    deletions = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="deletions")
    """The number of deletions to the file."""

    path = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="path")
    """The path of the file."""

    viewer_viewed_state = sgqlc.types.Field(sgqlc.types.non_null(FileViewedState), graphql_name="viewerViewedState")
    """The state of the file for the viewer."""
PullRequestChangedFile
python
walkccc__LeetCode
solutions/44. Wildcard Matching/44.py
{ "start": 0, "end": 672 }
class ____:
    def isMatch(self, s: str, p: str) -> bool:
        m = len(s)
        n = len(p)
        # dp[i][j] := True if s[0..i) matches p[0..j)
        dp = [[False] * (n + 1) for _ in range(m + 1)]
        dp[0][0] = True

        def isMatch(i: int, j: int) -> bool:
            return i >= 0 and p[j] == '?' or s[i] == p[j]

        for j, c in enumerate(p):
            if c == '*':
                dp[0][j + 1] = dp[0][j]

        for i in range(m):
            for j in range(n):
                if p[j] == '*':
                    matchEmpty = dp[i + 1][j]
                    matchSome = dp[i][j + 1]
                    dp[i + 1][j + 1] = matchEmpty or matchSome
                elif isMatch(i, j):
                    dp[i + 1][j + 1] = dp[i][j]

        return dp[m][n]
Solution
python
coleifer__peewee
playhouse/sqlcipher_ext.py
{ "start": 3484, "end": 3556 }
class ____(_SqlCipherDatabase, SqliteDatabase):
    pass
SqlCipherDatabase
python
scipy__scipy
scipy/stats/_hypotests.py
{ "start": 70626, "end": 86882 }
class ____: """Result of `scipy.stats.tukey_hsd`. Attributes ---------- statistic : float ndarray The computed statistic of the test for each comparison. The element at index ``(i, j)`` is the statistic for the comparison between groups ``i`` and ``j``. pvalue : float ndarray The associated p-value from the studentized range distribution. The element at index ``(i, j)`` is the p-value for the comparison between groups ``i`` and ``j``. Notes ----- The string representation of this object displays the most recently calculated confidence interval, and if none have been previously calculated, it will evaluate ``confidence_interval()``. References ---------- .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's Method." https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm, 28 November 2020. .. [2] P. A. Games and J. F. Howell, "Pairwise Multiple Comparison Procedures with Unequal N's and/or Variances: A Monte Carlo Study," Journal of Educational Statistics, vol. 1, no. 2, pp. 113-125, Jun. 1976, doi: https://doi.org/10.3102/10769986001002113. """ def __init__(self, statistic, pvalue, _ntreatments, _df, _stand_err): self.statistic = statistic self.pvalue = pvalue self._ntreatments = _ntreatments self._df = _df self._stand_err = _stand_err self._ci = None self._ci_cl = None def __str__(self): # Note: `__str__` prints the confidence intervals from the most # recent call to `confidence_interval`. If it has not been called, # it will be called with the default CL of .95. if self._ci is None: self.confidence_interval(confidence_level=.95) s = ("Pairwise Group Comparisons" f" ({self._ci_cl*100:.1f}% Confidence Interval)\n") s += "Comparison Statistic p-value Lower CI Upper CI\n" for i, j in np.ndindex(self.pvalue.shape): if i != j: s += (f" ({i} - {j}) {self.statistic[i, j]:>10.3f}" f"{self.pvalue[i, j]:>10.3f}" f"{self._ci.low[i, j]:>10.3f}" f"{self._ci.high[i, j]:>10.3f}\n") return s def confidence_interval(self, confidence_level=.95): """Compute the confidence interval for the specified confidence level. Parameters ---------- confidence_level : float, optional Confidence level for the computed confidence interval of the estimated proportion. Default is .95. Returns ------- ci : ``ConfidenceInterval`` object The object has attributes ``low`` and ``high`` that hold the lower and upper bounds of the confidence intervals for each comparison. The high and low values are accessible for each comparison at index ``(i, j)`` between groups ``i`` and ``j``. References ---------- .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's Method." https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm, 28 November 2020. .. [2] P. A. Games and J. F. Howell, "Pairwise Multiple Comparison Procedures with Unequal N's and/or Variances: A Monte Carlo Study," Journal of Educational Statistics, vol. 1, no. 2, pp. 113-125, Jun. 1976, doi: https://doi.org/10.3102/10769986001002113. 
Examples -------- >>> from scipy.stats import tukey_hsd >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9] >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1] >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8] >>> result = tukey_hsd(group0, group1, group2) >>> ci = result.confidence_interval() >>> ci.low array([[-3.649159, -8.249159, -3.909159], [ 0.950841, -3.649159, 0.690841], [-3.389159, -7.989159, -3.649159]]) >>> ci.high array([[ 3.649159, -0.950841, 3.389159], [ 8.249159, 3.649159, 7.989159], [ 3.909159, -0.690841, 3.649159]]) """ # check to see if the supplied confidence level matches that of the # previously computed CI. if (self._ci is not None and self._ci_cl is not None and confidence_level == self._ci_cl): return self._ci if not 0 < confidence_level < 1: raise ValueError("Confidence level must be between 0 and 1.") # determine the critical value of the studentized range using the # appropriate confidence level, number of treatments, and degrees # of freedom. See [1] "Confidence limits for Tukey's method" / [2] p.117 # "H0 was rejected if...". Note that in the cases of unequal sample sizes, # there will be a criterion for each group comparison. params = (confidence_level, self._ntreatments, self._df) srd = distributions.studentized_range.ppf(*params) # also called maximum critical value, the confidence_radius is the # studentized range critical value * the square root of mean square # error over the sample size. confidence_radius = srd * self._stand_err # the confidence levels are determined by the # `mean_differences` +- `confidence_radius` upper_conf = self.statistic + confidence_radius lower_conf = self.statistic - confidence_radius self._ci = ConfidenceInterval(low=lower_conf, high=upper_conf) self._ci_cl = confidence_level return self._ci def _tukey_hsd_iv(args, equal_var): if (len(args)) < 2: raise ValueError("There must be more than 1 treatment.") if not isinstance(equal_var, bool): raise TypeError("Expected a boolean value for 'equal_var'") args = [np.asarray(arg) for arg in args] for arg in args: if arg.ndim != 1: raise ValueError("Input samples must be one-dimensional.") if arg.size <= 1: raise ValueError("Input sample size must be greater than one.") if np.isinf(arg).any(): raise ValueError("Input samples must be finite.") return args @xp_capabilities(np_only=True) def tukey_hsd(*args, equal_var=True): """Perform Tukey's HSD test for equality of means over multiple treatments. Tukey's honestly significant difference (HSD) test performs pairwise comparison of means for a set of samples. Whereas ANOVA (e.g. `f_oneway`) assesses whether the true means underlying each sample are identical, Tukey's HSD is a post hoc test used to compare the mean of each sample to the mean of each other sample. The null hypothesis is that the distributions underlying the samples all have the same mean. The test statistic, which is computed for every possible pairing of samples, is simply the difference between the sample means. For each pair, the p-value is the probability under the null hypothesis (and other assumptions; see notes) of observing such an extreme value of the statistic, considering that many pairwise comparisons are being performed. Confidence intervals for the difference between each pair of means are also available. Parameters ---------- sample1, sample2, ... : array_like The sample measurements for each group. There must be at least two arguments. equal_var: bool, optional If True (default) and equal sample size, perform Tukey-HSD test [6]. 
If True and unequal sample size, perform Tukey-Kramer test [4]_. If False, perform Games-Howell test [7]_, which does not assume equal variances. Returns ------- result : `~scipy.stats._result_classes.TukeyHSDResult` instance The return value is an object with the following attributes: statistic : float ndarray The computed statistic of the test for each comparison. The element at index ``(i, j)`` is the statistic for the comparison between groups ``i`` and ``j``. pvalue : float ndarray The computed p-value of the test for each comparison. The element at index ``(i, j)`` is the p-value for the comparison between groups ``i`` and ``j``. The object has the following methods: confidence_interval(confidence_level=0.95): Compute the confidence interval for the specified confidence level. See Also -------- dunnett : performs comparison of means against a control group. Notes ----- The use of this test relies on several assumptions. 1. The observations are independent within and among groups. 2. The observations within each group are normally distributed. 3. The distributions from which the samples are drawn have the same finite variance. The original formulation of the test was for samples of equal size drawn from populations assumed to have equal variances [6]_. In case of unequal sample sizes, the test uses the Tukey-Kramer method [4]_. When equal variances are not assumed (``equal_var=False``), the test uses the Games-Howell method [7]_. References ---------- .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's Method." https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm, 28 November 2020. .. [2] Abdi, Herve & Williams, Lynne. (2021). "Tukey's Honestly Significant Difference (HSD) Test." https://personal.utdallas.edu/~herve/abdi-HSD2010-pretty.pdf .. [3] "One-Way ANOVA Using SAS PROC ANOVA & PROC GLM." SAS Tutorials, 2007, www.stattutorials.com/SAS/TUTORIAL-PROC-GLM.htm. .. [4] Kramer, Clyde Young. "Extension of Multiple Range Tests to Group Means with Unequal Numbers of Replications." Biometrics, vol. 12, no. 3, 1956, pp. 307-310. JSTOR, www.jstor.org/stable/3001469. Accessed 25 May 2021. .. [5] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.3.3. The ANOVA table and tests of hypotheses about means" https://www.itl.nist.gov/div898/handbook/prc/section4/prc433.htm, 2 June 2021. .. [6] Tukey, John W. "Comparing Individual Means in the Analysis of Variance." Biometrics, vol. 5, no. 2, 1949, pp. 99-114. JSTOR, www.jstor.org/stable/3001913. Accessed 14 June 2021. .. [7] P. A. Games and J. F. Howell, "Pairwise Multiple Comparison Procedures with Unequal N's and/or Variances: A Monte Carlo Study," Journal of Educational Statistics, vol. 1, no. 2, pp. 113-125, Jun. 1976, doi: https://doi.org/10.3102/10769986001002113. Examples -------- Here are some data comparing the time to relief of three brands of headache medicine, reported in minutes. Data adapted from [3]_. >>> import numpy as np >>> from scipy.stats import tukey_hsd >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9] >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1] >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8] We would like to see if the means between any of the groups are significantly different. First, visually examine a box and whisker plot. 
>>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(1, 1) >>> ax.boxplot([group0, group1, group2]) >>> ax.set_xticklabels(["group0", "group1", "group2"]) # doctest: +SKIP >>> ax.set_ylabel("mean") # doctest: +SKIP >>> plt.show() From the box and whisker plot, we can see overlap in the interquartile ranges group 1 to group 2 and group 3, but we can apply the ``tukey_hsd`` test to determine if the difference between means is significant. We set a significance level of .05 to reject the null hypothesis. >>> res = tukey_hsd(group0, group1, group2) >>> print(res) Pairwise Group Comparisons (95.0% Confidence Interval) Comparison Statistic p-value Lower CI Upper CI (0 - 1) -4.600 0.014 -8.249 -0.951 (0 - 2) -0.260 0.980 -3.909 3.389 (1 - 0) 4.600 0.014 0.951 8.249 (1 - 2) 4.340 0.020 0.691 7.989 (2 - 0) 0.260 0.980 -3.389 3.909 (2 - 1) -4.340 0.020 -7.989 -0.691 The null hypothesis is that each group has the same mean. The p-value for comparisons between ``group0`` and ``group1`` as well as ``group1`` and ``group2`` do not exceed .05, so we reject the null hypothesis that they have the same means. The p-value of the comparison between ``group0`` and ``group2`` exceeds .05, so we accept the null hypothesis that there is not a significant difference between their means. We can also compute the confidence interval associated with our chosen confidence level. >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9] >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1] >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8] >>> result = tukey_hsd(group0, group1, group2) >>> conf = res.confidence_interval(confidence_level=.99) >>> for ((i, j), l) in np.ndenumerate(conf.low): ... # filter out self comparisons ... if i != j: ... h = conf.high[i,j] ... print(f"({i} - {j}) {l:>6.3f} {h:>6.3f}") (0 - 1) -9.480 0.280 (0 - 2) -5.140 4.620 (1 - 0) -0.280 9.480 (1 - 2) -0.540 9.220 (2 - 0) -4.620 5.140 (2 - 1) -9.220 0.540 """ args = _tukey_hsd_iv(args, equal_var) ntreatments = len(args) means = np.asarray([np.mean(arg) for arg in args]) nsamples_treatments = np.asarray([a.size for a in args]) nobs = np.sum(nsamples_treatments) vars_ = np.asarray([np.var(arg, ddof=1) for arg in args]) if equal_var: # determine mean square error [5]. Note that this is sometimes called # mean square error within. mse = (np.sum(vars_ * (nsamples_treatments - 1)) / (nobs - ntreatments)) # The calculation of the standard error differs when treatments differ in # size. See ("Unequal sample sizes")[1]. if np.unique(nsamples_treatments).size == 1: # all input groups are the same length, so only one value needs to be # calculated [1]. normalize = 2 / nsamples_treatments[0] else: # to compare groups of differing sizes, we must compute a variance # value for each individual comparison. Use broadcasting to get the # resulting matrix. [3], verified against [4] (page 308). normalize = 1 / nsamples_treatments + 1 / nsamples_treatments[None].T # the standard error is used in the computation of the tukey criterion and # finding the p-values. stand_err = np.sqrt(normalize * mse / 2) df = nobs - ntreatments else: # `stand_err` is the denominator of the Behrens-Fisher statistic ($v$) # with a factor of $\sqrt{2}$. Compare [7] p.116 "t-solution rejects H0 if...", # [7] p. 117 "H0 was rejected", and definition of `t_stat` below. sj2_nj = vars_ / nsamples_treatments si2_ni = sj2_nj[:, np.newaxis] stand_err = np.sqrt(si2_ni + sj2_nj) / 2**0.5 # `df` is the Welch degree of freedom $\nu$. # See [7] p. 116 "and the degrees of freedom, $\nu$, are given by...". 
njm1 = nsamples_treatments - 1 nim1 = njm1[:, np.newaxis] df = (si2_ni + sj2_nj)**2 / (si2_ni**2 / nim1 + sj2_nj**2 / njm1) # the mean difference is the test statistic. mean_differences = means[None].T - means # Calculate the t-statistic to use within the survival function of the # studentized range to get the p-value. t_stat = np.abs(mean_differences) / stand_err params = t_stat, ntreatments, df pvalues = distributions.studentized_range.sf(*params) return TukeyHSDResult(mean_differences, pvalues, ntreatments, df, stand_err)
TukeyHSDResult
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 17736, "end": 17905 }
class ____(sgqlc.types.Enum):
    """
    See source code for more info.
    """

    __schema__ = graphql_schema
    __choices__ = ("LOGIN",)
OrgEnterpriseOwnerOrderField
python
walkccc__LeetCode
solutions/2520. Count the Digits That Divide a Number/2520.py
{ "start": 0, "end": 110 }
class ____:
    def countDigits(self, num: int) -> int:
        return sum(num % int(d) == 0 for d in str(num))
Solution
python
getsentry__sentry
tests/sentry/backup/test_exports.py
{ "start": 1641, "end": 3750 }
class ____(ExportCheckpointer): cache_hits: int = 0 cache_misses: int = 0 cache_writes: int = 0 def __init__( self, crypto: EncryptorDecryptorPair | None, printer: Printer, tmp_dir: str, test_method_name: str, ): self.__crypto = crypto self.__printer = printer self.__tmp_dir = tmp_dir self.__test_method_name = test_method_name def _get_file_name(self, model_name: NormalizedModelName) -> Path: if self.__crypto is None: return Path(self.__tmp_dir).joinpath( f"_{self.__test_method_name}.checkpoint.{str(model_name)}.json" ) else: return Path(self.__tmp_dir).joinpath( f"_{self.__test_method_name}.checkpoint.{str(model_name)}.enc.tar" ) def get(self, model_name: NormalizedModelName) -> RpcExportOk | None: file_name = self._get_file_name(model_name) try: with open(file_name, "rb") as fp: json_data = ( decrypt_encrypted_tarball(fp, self.__crypto.decryptor) if self.__crypto is not None else fp.read() ) parsed_json = self._parse_cached_json(json_data) if parsed_json is None: self.cache_misses += 1 else: self.cache_hits += 1 return parsed_json except (FileNotFoundError, DecryptionError, JSONDecodeError, ExportCheckpointerError): self.cache_misses += 1 return None def add(self, model_name: NormalizedModelName, json_export: Any) -> None: file_name = self._get_file_name(model_name) with open(file_name, "wb") as fp: out_bytes = ( create_encrypted_export_tarball(json_export, self.__crypto.encryptor).getvalue() if self.__crypto is not None else dumps(json_export) ) fp.write(out_bytes) self.cache_writes += 1
FakeExportCheckpointer
python
huggingface__transformers
src/transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py
{ "start": 16537, "end": 19718 }
class ____(HunYuanDenseV1PreTrainedModel): def __init__(self, config: HunYuanDenseV1Config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [HunYuanDenseV1DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = HunYuanDenseV1RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = HunYuanDenseV1RotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @check_model_inputs() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, cache_position: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds: torch.Tensor = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position: torch.Tensor = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_embeddings=position_embeddings, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) @auto_docstring
HunYuanDenseV1Model
python
tox-dev__tox
src/tox/execute/local_sub_process/__init__.py
{ "start": 6129, "end": 6808 }
class ____(ExecuteStatus):
    def __init__(self, options: ExecuteOptions, out: SyncWrite, err: SyncWrite, exit_code: int | None) -> None:
        super().__init__(options, out, err)
        self._exit_code = exit_code

    @property
    def exit_code(self) -> int | None:
        return self._exit_code

    def wait(self, timeout: float | None = None) -> int | None:  # noqa: ARG002
        return self._exit_code  # pragma: no cover

    def write_stdin(self, content: str) -> None:
        """Cannot write."""

    def interrupt(self) -> None:  # noqa: PLR6301
        return None  # pragma: no cover # nothing running so nothing to interrupt
LocalSubprocessExecuteFailedStatus
python
tornadoweb__tornado
tornado/test/httputil_test.py
{ "start": 19269, "end": 19756 }
class ____(unittest.TestCase):
    METHOD = "GET"
    PATH = "/foo"
    VERSION = "HTTP/1.1"

    def test_parse_request_start_line(self):
        start_line = " ".join([self.METHOD, self.PATH, self.VERSION])
        parsed_start_line = parse_request_start_line(start_line)
        self.assertEqual(parsed_start_line.method, self.METHOD)
        self.assertEqual(parsed_start_line.path, self.PATH)
        self.assertEqual(parsed_start_line.version, self.VERSION)
ParseRequestStartLineTest
python
dagster-io__dagster
python_modules/dagster/dagster/_utils/log.py
{ "start": 620, "end": 2082 }
class ____(logging.Handler):
    def __init__(self, json_path: str):
        super().__init__()
        self.json_path = check.str_param(json_path, "json_path")

    def emit(self, record: logging.LogRecord) -> None:
        from dagster._core.log_manager import LOG_RECORD_METADATA_ATTR

        try:
            log_dict = copy.copy(record.__dict__)

            # This horrific monstrosity is to maintain backwards compatability
            # with the old behavior of the JsonFileHandler, which the clarify
            # project has a dependency on. It relied on the dagster-defined
            # properties smashing all the properties of the LogRecord object
            # and uploads all of those properties to a redshift table for
            # in order to do analytics on the log
            if LOG_RECORD_METADATA_ATTR in log_dict:
                dagster_meta_dict = log_dict[LOG_RECORD_METADATA_ATTR]
                del log_dict[LOG_RECORD_METADATA_ATTR]
            else:
                dagster_meta_dict = {}

            log_dict.update(dagster_meta_dict)

            with open(self.json_path, "a", encoding="utf8") as ff:
                text_line = seven.json.dumps(log_dict)
                ff.write(text_line + "\n")
        # Need to catch Exception here, so disabling lint
        except Exception as e:
            logging.critical("[%s] Error during logging!", self.__class__.__name__)
            logging.exception(str(e))
JsonFileHandler
python
eriklindernoren__ML-From-Scratch
mlfromscratch/utils/misc.py
{ "start": 533, "end": 3773 }
class ____(): def __init__(self): self.cmap = plt.get_cmap('viridis') def _transform(self, X, dim): covariance = calculate_covariance_matrix(X) eigenvalues, eigenvectors = np.linalg.eig(covariance) # Sort eigenvalues and eigenvector by largest eigenvalues idx = eigenvalues.argsort()[::-1] eigenvalues = eigenvalues[idx][:dim] eigenvectors = np.atleast_1d(eigenvectors[:, idx])[:, :dim] # Project the data onto principal components X_transformed = X.dot(eigenvectors) return X_transformed def plot_regression(self, lines, title, axis_labels=None, mse=None, scatter=None, legend={"type": "lines", "loc": "lower right"}): if scatter: scatter_plots = scatter_labels = [] for s in scatter: scatter_plots += [plt.scatter(s["x"], s["y"], color=s["color"], s=s["size"])] scatter_labels += [s["label"]] scatter_plots = tuple(scatter_plots) scatter_labels = tuple(scatter_labels) for l in lines: li = plt.plot(l["x"], l["y"], color=s["color"], linewidth=l["width"], label=l["label"]) if mse: plt.suptitle(title) plt.title("MSE: %.2f" % mse, fontsize=10) else: plt.title(title) if axis_labels: plt.xlabel(axis_labels["x"]) plt.ylabel(axis_labels["y"]) if legend["type"] == "lines": plt.legend(loc="lower_left") elif legend["type"] == "scatter" and scatter: plt.legend(scatter_plots, scatter_labels, loc=legend["loc"]) plt.show() # Plot the dataset X and the corresponding labels y in 2D using PCA. def plot_in_2d(self, X, y=None, title=None, accuracy=None, legend_labels=None): X_transformed = self._transform(X, dim=2) x1 = X_transformed[:, 0] x2 = X_transformed[:, 1] class_distr = [] y = np.array(y).astype(int) colors = [self.cmap(i) for i in np.linspace(0, 1, len(np.unique(y)))] # Plot the different class distributions for i, l in enumerate(np.unique(y)): _x1 = x1[y == l] _x2 = x2[y == l] _y = y[y == l] class_distr.append(plt.scatter(_x1, _x2, color=colors[i])) # Plot legend if not legend_labels is None: plt.legend(class_distr, legend_labels, loc=1) # Plot title if title: if accuracy: perc = 100 * accuracy plt.suptitle(title) plt.title("Accuracy: %.1f%%" % perc, fontsize=10) else: plt.title(title) # Axis labels plt.xlabel('Principal Component 1') plt.ylabel('Principal Component 2') plt.show() # Plot the dataset X and the corresponding labels y in 3D using PCA. def plot_in_3d(self, X, y=None): X_transformed = self._transform(X, dim=3) x1 = X_transformed[:, 0] x2 = X_transformed[:, 1] x3 = X_transformed[:, 2] fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(x1, x2, x3, c=y) plt.show()
Plot
python
pypa__pipenv
pipenv/vendor/click/parser.py
{ "start": 7822, "end": 8052 }
class ____:
    def __init__(self, rargs: t.List[str]) -> None:
        self.opts: t.Dict[str, t.Any] = {}
        self.largs: t.List[str] = []
        self.rargs = rargs
        self.order: t.List["CoreParameter"] = []
ParsingState
python
django__django
tests/prefetch_related/tests.py
{ "start": 2139, "end": 17750 }
class ____(TestDataMixin, TestCase): def assertWhereContains(self, sql, needle): where_idx = sql.index("WHERE") self.assertEqual( sql.count(str(needle), where_idx), 1, msg="WHERE clause doesn't contain %s, actual SQL: %s" % (needle, sql[where_idx:]), ) def test_m2m_forward(self): with self.assertNumQueries(2): lists = [ list(b.authors.all()) for b in Book.objects.prefetch_related("authors") ] normal_lists = [list(b.authors.all()) for b in Book.objects.all()] self.assertEqual(lists, normal_lists) def test_m2m_reverse(self): with self.assertNumQueries(2): lists = [ list(a.books.all()) for a in Author.objects.prefetch_related("books") ] normal_lists = [list(a.books.all()) for a in Author.objects.all()] self.assertEqual(lists, normal_lists) def test_foreignkey_forward(self): with self.assertNumQueries(2): books = [ a.first_book for a in Author.objects.prefetch_related("first_book") ] normal_books = [a.first_book for a in Author.objects.all()] self.assertEqual(books, normal_books) def test_fetch_mode_copied_fetching_one(self): author = ( Author.objects.fetch_mode(FETCH_PEERS) .prefetch_related("first_book") .get(pk=self.author1.pk) ) self.assertEqual(author._state.fetch_mode, FETCH_PEERS) self.assertEqual( author.first_book._state.fetch_mode, FETCH_PEERS, ) def test_fetch_mode_copied_fetching_many(self): authors = list( Author.objects.fetch_mode(FETCH_PEERS).prefetch_related("first_book") ) self.assertEqual(authors[0]._state.fetch_mode, FETCH_PEERS) self.assertEqual( authors[0].first_book._state.fetch_mode, FETCH_PEERS, ) def test_fetch_mode_raise(self): authors = list(Author.objects.fetch_mode(RAISE).prefetch_related("first_book")) authors[0].first_book # No exception, already loaded def test_foreignkey_reverse(self): with self.assertNumQueries(2): [ list(b.first_time_authors.all()) for b in Book.objects.prefetch_related("first_time_authors") ] self.assertSequenceEqual(self.book2.authors.all(), [self.author1]) def test_onetoone_reverse_no_match(self): # Regression for #17439 with self.assertNumQueries(2): book = Book.objects.prefetch_related("bookwithyear").all()[0] with self.assertNumQueries(0): with self.assertRaises(BookWithYear.DoesNotExist): book.bookwithyear def test_onetoone_reverse_with_to_field_pk(self): """ A model (Bio) with a OneToOneField primary key (author) that references a non-pk field (name) on the related model (Author) is prefetchable. 
""" Bio.objects.bulk_create( [ Bio(author=self.author1), Bio(author=self.author2), Bio(author=self.author3), ] ) authors = Author.objects.filter( name__in=[self.author1, self.author2, self.author3], ).prefetch_related("bio") with self.assertNumQueries(2): for author in authors: self.assertEqual(author.name, author.bio.author.name) def test_survives_clone(self): with self.assertNumQueries(2): [ list(b.first_time_authors.all()) for b in Book.objects.prefetch_related("first_time_authors").exclude( id=1000 ) ] def test_len(self): with self.assertNumQueries(2): qs = Book.objects.prefetch_related("first_time_authors") len(qs) [list(b.first_time_authors.all()) for b in qs] def test_bool(self): with self.assertNumQueries(2): qs = Book.objects.prefetch_related("first_time_authors") bool(qs) [list(b.first_time_authors.all()) for b in qs] def test_count(self): with self.assertNumQueries(2): qs = Book.objects.prefetch_related("first_time_authors") [b.first_time_authors.count() for b in qs] def test_exists(self): with self.assertNumQueries(2): qs = Book.objects.prefetch_related("first_time_authors") [b.first_time_authors.exists() for b in qs] def test_in_and_prefetch_related(self): """ Regression test for #20242 - QuerySet "in" didn't work the first time when using prefetch_related. This was fixed by the removal of chunked reads from QuerySet iteration in 70679243d1786e03557c28929f9762a119e3ac14. """ qs = Book.objects.prefetch_related("first_time_authors") self.assertIn(qs[0], qs) def test_clear(self): with self.assertNumQueries(5): with_prefetch = Author.objects.prefetch_related("books") without_prefetch = with_prefetch.prefetch_related(None) [list(a.books.all()) for a in without_prefetch] def test_m2m_then_m2m(self): """A m2m can be followed through another m2m.""" with self.assertNumQueries(3): qs = Author.objects.prefetch_related("books__read_by") lists = [ [[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs ] self.assertEqual( lists, [ [["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre [["Amy"]], # Anne - Poems [["Amy"], []], # Emily - Poems, Wuthering Heights [["Amy", "Belinda"]], # Jane - Sense and Sense ], ) def test_overriding_prefetch(self): with self.assertNumQueries(3): qs = Author.objects.prefetch_related("books", "books__read_by") lists = [ [[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs ] self.assertEqual( lists, [ [["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre [["Amy"]], # Anne - Poems [["Amy"], []], # Emily - Poems, Wuthering Heights [["Amy", "Belinda"]], # Jane - Sense and Sense ], ) with self.assertNumQueries(3): qs = Author.objects.prefetch_related("books__read_by", "books") lists = [ [[str(r) for r in b.read_by.all()] for b in a.books.all()] for a in qs ] self.assertEqual( lists, [ [["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre [["Amy"]], # Anne - Poems [["Amy"], []], # Emily - Poems, Wuthering Heights [["Amy", "Belinda"]], # Jane - Sense and Sense ], ) def test_get(self): """ Objects retrieved with .get() get the prefetch behavior. """ # Need a double with self.assertNumQueries(3): author = Author.objects.prefetch_related("books__read_by").get( name="Charlotte" ) lists = [[str(r) for r in b.read_by.all()] for b in author.books.all()] self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre def test_foreign_key_then_m2m(self): """ A m2m relation can be followed after a relation like ForeignKey that doesn't have many objects. 
""" with self.assertNumQueries(2): qs = Author.objects.select_related("first_book").prefetch_related( "first_book__read_by" ) lists = [[str(r) for r in a.first_book.read_by.all()] for a in qs] self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]]) def test_reverse_one_to_one_then_m2m(self): """ A m2m relation can be followed after going through the select_related reverse of an o2o. """ qs = Author.objects.prefetch_related("bio__books").select_related("bio") with self.assertNumQueries(1): list(qs.all()) Bio.objects.create(author=self.author1) with self.assertNumQueries(2): list(qs.all()) def test_attribute_error(self): qs = Reader.objects.prefetch_related("books_read__xyz") msg = ( "Cannot find 'xyz' on Book object, 'books_read__xyz' " "is an invalid parameter to prefetch_related()" ) with self.assertRaisesMessage(AttributeError, msg) as cm: list(qs) self.assertIn("prefetch_related", str(cm.exception)) def test_invalid_final_lookup(self): qs = Book.objects.prefetch_related("authors__name") msg = ( "'authors__name' does not resolve to an item that supports " "prefetching - this is an invalid parameter to prefetch_related()." ) with self.assertRaisesMessage(ValueError, msg) as cm: list(qs) self.assertIn("prefetch_related", str(cm.exception)) self.assertIn("name", str(cm.exception)) def test_prefetch_eq(self): prefetch_1 = Prefetch("authors", queryset=Author.objects.all()) prefetch_2 = Prefetch("books", queryset=Book.objects.all()) self.assertEqual(prefetch_1, prefetch_1) self.assertEqual(prefetch_1, mock.ANY) self.assertNotEqual(prefetch_1, prefetch_2) def test_forward_m2m_to_attr_conflict(self): msg = "to_attr=authors conflicts with a field on the Book model." authors = Author.objects.all() with self.assertRaisesMessage(ValueError, msg): list( Book.objects.prefetch_related( Prefetch("authors", queryset=authors, to_attr="authors"), ) ) # Without the ValueError, an author was deleted due to the implicit # save of the relation assignment. self.assertEqual(self.book1.authors.count(), 3) def test_reverse_m2m_to_attr_conflict(self): msg = "to_attr=books conflicts with a field on the Author model." poems = Book.objects.filter(title="Poems") with self.assertRaisesMessage(ValueError, msg): list( Author.objects.prefetch_related( Prefetch("books", queryset=poems, to_attr="books"), ) ) # Without the ValueError, a book was deleted due to the implicit # save of reverse relation assignment. self.assertEqual(self.author1.books.count(), 2) def test_m2m_then_reverse_fk_object_ids(self): with CaptureQueriesContext(connection) as queries: list(Book.objects.prefetch_related("authors__addresses")) sql = queries[-1]["sql"] self.assertWhereContains(sql, self.author1.name) def test_m2m_then_m2m_object_ids(self): with CaptureQueriesContext(connection) as queries: list(Book.objects.prefetch_related("authors__favorite_authors")) sql = queries[-1]["sql"] self.assertWhereContains(sql, self.author1.name) def test_m2m_then_reverse_one_to_one_object_ids(self): with CaptureQueriesContext(connection) as queries: list(Book.objects.prefetch_related("authors__authorwithage")) sql = queries[-1]["sql"] self.assertWhereContains(sql, self.author1.id) def test_filter_deferred(self): """ Related filtering of prefetched querysets is deferred on m2m and reverse m2o relations until necessary. 
""" add_q = Query.add_q for relation in ["authors", "first_time_authors"]: with self.subTest(relation=relation): with mock.patch.object( Query, "add_q", autospec=True, side_effect=lambda self, q, reuse_all: add_q(self, q), ) as add_q_mock: list(Book.objects.prefetch_related(relation)) self.assertEqual(add_q_mock.call_count, 1) def test_named_values_list(self): qs = Author.objects.prefetch_related("books") self.assertCountEqual( [value.name for value in qs.values_list("name", named=True)], ["Anne", "Charlotte", "Emily", "Jane"], ) def test_m2m_prefetching_iterator_with_chunks(self): with self.assertNumQueries(3): authors = [ b.authors.first() for b in Book.objects.prefetch_related("authors").iterator(chunk_size=2) ] self.assertEqual( authors, [self.author1, self.author1, self.author3, self.author4], ) def test_m2m_prefetching_iterator_without_chunks_error(self): msg = ( "chunk_size must be provided when using QuerySet.iterator() after " "prefetch_related()." ) with self.assertRaisesMessage(ValueError, msg): Book.objects.prefetch_related("authors").iterator() def test_m2m_join_reuse(self): FavoriteAuthors.objects.bulk_create( [ FavoriteAuthors( author=self.author1, likes_author=self.author3, is_active=True ), FavoriteAuthors( author=self.author1, likes_author=self.author4, is_active=False, ), FavoriteAuthors( author=self.author2, likes_author=self.author3, is_active=True ), FavoriteAuthors( author=self.author2, likes_author=self.author4, is_active=True ), ] ) with self.assertNumQueries(2): authors = list( Author.objects.filter( pk__in=[self.author1.pk, self.author2.pk] ).prefetch_related( Prefetch( "favorite_authors", queryset=( Author.objects.annotate( active_favorite=F("likes_me__is_active"), ).filter(active_favorite=True) ), to_attr="active_favorite_authors", ) ) ) self.assertEqual(authors[0].active_favorite_authors, [self.author3]) self.assertEqual( authors[1].active_favorite_authors, [self.author3, self.author4] ) def test_prefetch_queryset_child_class(self): employee = SelfDirectedEmployee.objects.create(name="Foo") employee.boss = employee employee.save() with self.assertNumQueries(2): retrieved_employee = SelfDirectedEmployee.objects.prefetch_related( Prefetch("boss", SelfDirectedEmployee.objects.all()) ).get() with self.assertNumQueries(0): self.assertEqual(retrieved_employee, employee) self.assertEqual(retrieved_employee.boss, retrieved_employee)
PrefetchRelatedTests
python
keon__algorithms
tests/test_strings.py
{ "start": 15568, "end": 16388 }
class ____(unittest.TestCase):
    def test_text_justification(self):
        self.assertEqual(["This    is    an",
                          "example  of text",
                          "justification.  "],
                         text_justification(["This", "is", "an", "example", "of", "text", "justification."], 16)
                         )

        self.assertEqual(["What   must   be",
                          "acknowledgment  ",
                          "shall be        "],
                         text_justification(["What", "must", "be", "acknowledgment", "shall", "be"], 16)
                         )
TestTextJustification
python
scipy__scipy
scipy/io/tests/test_idl.py
{ "start": 5821, "end": 12030 }
class ____:

    def test_scalars(self):
        s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
        assert_identical(s.scalars.a, np.array(np.int16(1)))
        assert_identical(s.scalars.b, np.array(np.int32(2)))
        assert_identical(s.scalars.c, np.array(np.float32(3.)))
        assert_identical(s.scalars.d, np.array(np.float64(4.)))
        assert_identical(s.scalars.e, np.array([b"spam"], dtype=object))
        assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))

    def test_scalars_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated.sav'),
                    verbose=False)
        assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5))
        assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5))
        assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5))
        assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5))
        assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(object))
        assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5))

    def test_scalars_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'),
                    verbose=False)
        assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.e,
                         np.repeat(b"spam", 24).reshape(4, 3, 2).astype(object))
        assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))

    def test_arrays(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays.sav'), verbose=False)
        assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
        assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
        assert_array_identical(s.arrays.c[0],
                               np.array([np.complex64(1+2j), np.complex64(7+8j)]))
        assert_array_identical(s.arrays.d[0],
                               np.array([b"cheese", b"bacon", b"spam"], dtype=object))

    def test_arrays_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated.sav'), verbose=False)

        # Check column types
        assert_(s.arrays_rep.a.dtype.type is np.object_)
        assert_(s.arrays_rep.b.dtype.type is np.object_)
        assert_(s.arrays_rep.c.dtype.type is np.object_)
        assert_(s.arrays_rep.d.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.a.shape, (5, ))
        assert_equal(s.arrays_rep.b.shape, (5, ))
        assert_equal(s.arrays_rep.c.shape, (5, ))
        assert_equal(s.arrays_rep.d.shape, (5, ))

        # Check values
        for i in range(5):
            assert_array_identical(s.arrays_rep.a[i],
                                   np.array([1, 2, 3], dtype=np.int16))
            assert_array_identical(s.arrays_rep.b[i],
                                   np.array([4., 5., 6., 7.], dtype=np.float32))
            assert_array_identical(s.arrays_rep.c[i],
                                   np.array([np.complex64(1+2j), np.complex64(7+8j)]))
            assert_array_identical(s.arrays_rep.d[i],
                                   np.array([b"cheese", b"bacon", b"spam"], dtype=object))

    def test_arrays_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False)

        # Check column types
        assert_(s.arrays_rep.a.dtype.type is np.object_)
        assert_(s.arrays_rep.b.dtype.type is np.object_)
        assert_(s.arrays_rep.c.dtype.type is np.object_)
        assert_(s.arrays_rep.d.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.a.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.b.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.c.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.d.shape, (4, 3, 2))

        # Check values
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.a[i, j, k],
                                           np.array([1, 2, 3], dtype=np.int16))
                    assert_array_identical(s.arrays_rep.b[i, j, k],
                                           np.array([4., 5., 6., 7.], dtype=np.float32))
                    assert_array_identical(s.arrays_rep.c[i, j, k],
                                           np.array([np.complex64(1+2j), np.complex64(7+8j)]))
                    assert_array_identical(s.arrays_rep.d[i, j, k],
                                           np.array([b"cheese", b"bacon", b"spam"], dtype=object))

    def test_inheritance(self):
        s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False)
        assert_identical(s.fc.x, np.array([0], dtype=np.int16))
        assert_identical(s.fc.y, np.array([0], dtype=np.int16))
        assert_identical(s.fc.r, np.array([0], dtype=np.int16))
        assert_identical(s.fc.c, np.array([4], dtype=np.int16))

    def test_arrays_corrupt_idl80(self):
        # test byte arrays with missing nbyte information from IDL 8.0 .sav file
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", "Not able to verify number of bytes from header", UserWarning)
            s = readsav(path.join(DATA_PATH,'struct_arrays_byte_idl80.sav'),
                        verbose=False)

        assert_identical(s.y.x[0], np.array([55,66], dtype=np.uint8))
TestStructures
python
huggingface__transformers
src/transformers/models/owlv2/modeling_owlv2.py
{ "start": 2167, "end": 6789 }
class ____(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size * num_max_text_queries, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`Owlv2TextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`Owlv2VisionModel`]. text_model_output (tuple[`BaseModelOutputWithPooling`]): The output of the [`Owlv2TextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`Owlv2VisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: Optional[torch.FloatTensor] = None logits_per_text: Optional[torch.FloatTensor] = None text_embeds: Optional[torch.FloatTensor] = None image_embeds: Optional[torch.FloatTensor] = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) # Copied from transformers.loss.loss_for_object_detection._upcast def _upcast(t: Tensor) -> Tensor: # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() # Copied from transformers.loss.loss_for_object_detection.box_area def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. Returns: `torch.FloatTensor`: a tensor containing the area for each box. """ boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # Copied from transformers.loss.loss_for_object_detection.box_iou def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union # Copied from transformers.loss.loss_for_object_detection.generalized_box_iou def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. 
Returns: `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area @dataclass @auto_docstring( custom_intro=""" Output type of [`Owlv2ForObjectDetection`]. """ )
Owlv2Output
python
facebookresearch__faiss
tests/test_standalone_codec.py
{ "start": 8214, "end": 10041 }
class ____(unittest.TestCase):

    def test_rw(self):
        """ Low-level bit string tests """
        rs = np.random.RandomState(1234)
        nbyte = 1000
        sz = 0

        bs = np.ones(nbyte, dtype='uint8')
        bw = faiss.BitstringWriter(swig_ptr(bs), nbyte)

        if False:
            ctrl = [(7, 0x35), (13, 0x1d74)]
            for nbit, x in ctrl:
                bw.write(x, nbit)
        else:
            ctrl = []
            while True:
                nbit = int(1 + 62 * rs.rand() ** 4)
                if sz + nbit > nbyte * 8:
                    break
                x = int(rs.randint(1 << nbit, dtype='int64'))
                bw.write(x, nbit)
                ctrl.append((nbit, x))
                sz += nbit

        bignum = 0
        sz = 0
        for nbit, x in ctrl:
            bignum |= x << sz
            sz += nbit

        for i in range(nbyte):
            self.assertTrue(((bignum >> (i * 8)) & 255) == bs[i])

        br = faiss.BitstringReader(swig_ptr(bs), nbyte)

        for nbit, xref in ctrl:
            xnew = br.read(nbit)
            self.assertTrue(xnew == xref)

    def test_arrays(self):
        nbit = 5
        M = 10
        n = 20
        rs = np.random.RandomState(123)
        a = rs.randint(1<<nbit, size=(n, M), dtype='int32')
        b = faiss.pack_bitstrings(a, nbit)
        c = faiss.unpack_bitstrings(b, M, nbit)
        np.testing.assert_array_equal(a, c)

    def test_arrays_variable_size(self):
        nbits = [10, 5, 3, 12, 6, 7, 4]
        n = 20
        rs = np.random.RandomState(123)
        a = rs.randint(1<<16, size=(n, len(nbits)), dtype='int32')
        a &= (1 << np.array(nbits)) - 1
        b = faiss.pack_bitstrings(a, nbits)
        c = faiss.unpack_bitstrings(b, nbits)
        np.testing.assert_array_equal(a, c)
TestBitstring
python
milvus-io__pymilvus
pymilvus/grpc_gen/milvus_pb2_grpc.py
{ "start": 194850, "end": 195375 }
class ____(object):
    """Missing associated documentation comment in .proto file."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.RegisterLink = channel.unary_unary(
                '/milvus.proto.milvus.ProxyService/RegisterLink',
                request_serializer=milvus__pb2.RegisterLinkRequest.SerializeToString,
                response_deserializer=milvus__pb2.RegisterLinkResponse.FromString,
                )
ProxyServiceStub
python
great-expectations__great_expectations
contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_geometry_to_be_within_place.py
{ "start": 478, "end": 4023 }
class ____(ColumnMapMetricProvider):
    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.geometry.within_place"
    condition_value_keys = (
        "column_shape_format",
        "place",
        "geocoder",
        "geocoder_config",
    )

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        column_shape_format = kwargs.get("column_shape_format")
        place = kwargs.get("place")
        geocoder = kwargs.get("geocoder")
        geocoder_config = kwargs.get("geocoder_config")

        if geocoder not in ["nominatim", "pickpoint", "openmapquest"]:
            raise NotImplementedError("The geocoder is not implemented for this method.")

        # find the reference shape with the geocoder.
        if geocoder is not None:
            try:
                # Specify the default parameters for Nominatim and run query. User is responsible for config and query params otherwise.
                query_params = dict(exactly_one=True, geometry="wkt")
                location = cls.geocode(geocoder, geocoder_config, place, query_params)
            except Exception:
                raise Exception(  # noqa: TRY002, TRY003
                    "Geocoding configuration and query failed to produce a valid result."
                )
        else:
            raise Exception(  # noqa: TRY002, TRY003
                "A valid geocoder must be provided for this method. See GeoPy for reference."
            )

        # This method only works with the default Nominatim params and wkt.
        # TODO: Other geocoders and methods need to be implemented.
        # TODO: Add a conversion from lat-long to a CRS. (geopandas)
        if location is not None:
            shape_ref = geos.from_wkt(location.raw.get("geotext"))
        else:
            raise Exception("Geocoding failed to return a result.")  # noqa: TRY002, TRY003

        # Load the column into a pygeos Geometry vector from numpy array (Series not supported).
        if column_shape_format == "wkt":
            shape_test = geos.from_wkt(column.to_numpy(), on_invalid="ignore")
        elif column_shape_format == "wkb":
            shape_test = geos.from_wkb(column.to_numpy(), on_invalid="ignore")
        else:
            raise NotImplementedError("Column values shape format not implemented.")

        # Prepare the geometries
        geos.prepare(shape_ref)
        geos.prepare(shape_test)

        # Return whether the distance is below the tolerance.
        return pd.Series(geos.contains(shape_ref, shape_test))

    @staticmethod
    def geocode(geocoder, config, query, query_config):
        cls = geopy.geocoders.get_geocoder_for_service(geocoder)
        geolocator = cls(**config)
        location = geolocator.geocode(query, **query_config)
        return location

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError


# This class defines the Expectation itself
ColumnValuesGeometryWithinPlace
python
Pylons__pyramid
tests/test_predicates.py
{ "start": 8107, "end": 9685 }
class ____(unittest.TestCase):
    def _makeOne(self, val):
        from pyramid.predicates import TraversePredicate

        return TraversePredicate(val, None)

    def test___call__traverse_has_remainder_already(self):
        inst = self._makeOne('/1/:a/:b')
        info = {'traverse': 'abc'}
        request = Dummy()
        result = inst(info, request)
        self.assertEqual(result, True)
        self.assertEqual(info, {'traverse': 'abc'})

    def test___call__traverse_matches(self):
        inst = self._makeOne('/1/:a/:b')
        info = {'match': {'a': 'a', 'b': 'b'}}
        request = Dummy()
        result = inst(info, request)
        self.assertEqual(result, True)
        self.assertEqual(
            info, {'match': {'a': 'a', 'b': 'b', 'traverse': ('1', 'a', 'b')}}
        )

    def test___call__traverse_matches_with_highorder_chars(self):
        inst = self._makeOne(text_(b'/La Pe\xc3\xb1a/{x}', 'utf-8'))
        info = {'match': {'x': text_(b'Qu\xc3\xa9bec', 'utf-8')}}
        request = Dummy()
        result = inst(info, request)
        self.assertEqual(result, True)
        self.assertEqual(
            info['match']['traverse'],
            (
                text_(b'La Pe\xc3\xb1a', 'utf-8'),
                text_(b'Qu\xc3\xa9bec', 'utf-8'),
            ),
        )

    def test_text(self):
        inst = self._makeOne('/abc')
        self.assertEqual(inst.text(), 'traverse matchdict pseudo-predicate')

    def test_phash(self):
        inst = self._makeOne('/abc')
        self.assertEqual(inst.phash(), '')
TestTraversePredicate
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/freshness.py
{ "start": 1018, "end": 1987 }
class ____(graphene.Union):
    class Meta:
        name = "InternalFreshnessPolicy"
        types = (GrapheneTimeWindowFreshnessPolicy, GrapheneCronFreshnessPolicy)

    @classmethod
    def from_policy(cls, policy: FreshnessPolicy):
        if isinstance(policy, TimeWindowFreshnessPolicy):
            return GrapheneTimeWindowFreshnessPolicy(
                failWindowSeconds=policy.fail_window.to_timedelta().total_seconds(),
                warnWindowSeconds=policy.warn_window.to_timedelta().total_seconds()
                if policy.warn_window
                else None,
            )
        elif isinstance(policy, CronFreshnessPolicy):
            return GrapheneCronFreshnessPolicy(
                deadlineCron=policy.deadline_cron,
                lowerBoundDeltaSeconds=policy.lower_bound_delta.total_seconds(),
                timezone=policy.timezone,
            )

        raise Exception("Unknown freshness policy type")
GrapheneInternalFreshnessPolicy
python
dagster-io__dagster
python_modules/dagster/dagster/_core/storage/runs/sql_run_storage.py
{ "start": 2540, "end": 47102 }
class ____(RunStorage): """Base class for SQL based run storages.""" @abstractmethod def connect(self) -> ContextManager[Connection]: """Context manager yielding a sqlalchemy.engine.Connection.""" @abstractmethod def upgrade(self) -> None: """This method should perform any schema or data migrations necessary to bring an out-of-date instance of the storage up to date. """ def fetchall(self, query: SqlAlchemyQuery) -> Sequence[Any]: with self.connect() as conn: return db_fetch_mappings(conn, query) def fetchone(self, query: SqlAlchemyQuery) -> Optional[Any]: with self.connect() as conn: if db.__version__.startswith("2."): return conn.execute(query).mappings().first() else: return conn.execute(query).fetchone() def _get_run_insertion_values( self, dagster_run: DagsterRun, run_creation_time: Optional[datetime] = None ) -> dict[str, Any]: check.inst_param(dagster_run, "dagster_run", DagsterRun) if dagster_run.job_snapshot_id and not self.has_job_snapshot(dagster_run.job_snapshot_id): raise DagsterSnapshotDoesNotExist( f"Snapshot {dagster_run.job_snapshot_id} does not exist in run storage" ) has_tags = dagster_run.tags and len(dagster_run.tags) > 0 partition = dagster_run.tags.get(PARTITION_NAME_TAG) if has_tags else None partition_set = dagster_run.tags.get(PARTITION_SET_TAG) if has_tags else None values = { "run_id": dagster_run.run_id, "pipeline_name": dagster_run.job_name, "status": dagster_run.status.value, "run_body": serialize_value(dagster_run), "snapshot_id": dagster_run.job_snapshot_id, "partition": partition, "partition_set": partition_set, } if self.has_backfill_id_column(): values["backfill_id"] = dagster_run.tags.get(BACKFILL_ID_TAG) if run_creation_time: values["create_timestamp"] = run_creation_time return values def _core_add_run(self, col_values: dict[str, Any], tags: Mapping[str, str]) -> None: run_id = col_values["run_id"] runs_insert = RunsTable.insert().values(**col_values) with self.connect() as conn: try: conn.execute(runs_insert) except db_exc.IntegrityError as exc: raise DagsterRunAlreadyExists from exc if tags: conn.execute( RunTagsTable.insert(), [dict(run_id=run_id, key=k, value=v) for k, v in tags.items()], ) def add_run(self, dagster_run: DagsterRun) -> DagsterRun: self._core_add_run( self._get_run_insertion_values(dagster_run, get_current_datetime()), dagster_run.tags_for_storage(), ) return dagster_run def add_historical_run( self, dagster_run: DagsterRun, run_creation_time: datetime ) -> DagsterRun: self._core_add_run( self._get_run_insertion_values(dagster_run, run_creation_time), dagster_run.tags_for_storage(), ) return dagster_run def handle_run_event( self, run_id: str, event: DagsterEvent, update_timestamp: Optional[datetime] = None ) -> None: from dagster._core.events import JobFailureData check.str_param(run_id, "run_id") check.inst_param(event, "event", DagsterEvent) if event.event_type not in EVENT_TYPE_TO_PIPELINE_RUN_STATUS: return run = self._get_run_by_id(run_id) if not run: # TODO log? return new_job_status = EVENT_TYPE_TO_PIPELINE_RUN_STATUS[event.event_type] run_stats_cols_in_index = self.has_run_stats_index_cols() kwargs = {} # Update timestamp represents the time that the event occurred, not the time at which # we're processing the event in the run storage. But we fall back to the current time. # This is specific to the open-source implementation. 
update_timestamp = update_timestamp or get_current_datetime() if run_stats_cols_in_index and event.event_type == DagsterEventType.PIPELINE_START: kwargs["start_time"] = update_timestamp.timestamp() if run_stats_cols_in_index and event.event_type in { DagsterEventType.PIPELINE_CANCELED, DagsterEventType.PIPELINE_FAILURE, DagsterEventType.PIPELINE_SUCCESS, }: kwargs["end_time"] = update_timestamp.timestamp() with self.connect() as conn: conn.execute( RunsTable.update() .where(RunsTable.c.run_id == run_id) .values( run_body=serialize_value(run.with_status(new_job_status)), status=new_job_status.value, update_timestamp=update_timestamp, **kwargs, ) ) if event.event_type == DagsterEventType.PIPELINE_FAILURE and isinstance( event.event_specific_data, JobFailureData ): failure_reason = event.event_specific_data.failure_reason if failure_reason and failure_reason != RunFailureReason.UNKNOWN: self.add_run_tags(run_id, {RUN_FAILURE_REASON_TAG: failure_reason.value}) def _row_to_run(self, row: dict) -> DagsterRun: run = deserialize_value(row["run_body"], DagsterRun) status = DagsterRunStatus(row["status"]) # NOTE: the status column is more trustworthy than the status in the run body, since concurrent # writes (e.g. handle_run_event and add_tags) can cause the status in the body to be out of # overriden with an old value. return run.with_status(status) def _rows_to_runs(self, rows: Iterable[dict]) -> Sequence[DagsterRun]: return list(map(self._row_to_run, rows)) def _add_cursor_limit_to_query( self, query: SqlAlchemyQuery, cursor: Optional[str], limit: Optional[int], order_by: Optional[str], ascending: Optional[bool], ) -> SqlAlchemyQuery: """Helper function to deal with cursor/limit pagination args.""" if cursor: cursor_query = db_select([RunsTable.c.id]).where(RunsTable.c.run_id == cursor) if ascending: query = query.where(RunsTable.c.id > db_scalar_subquery(cursor_query)) else: query = query.where(RunsTable.c.id < db_scalar_subquery(cursor_query)) if limit: query = query.limit(limit) sorting_column = getattr(RunsTable.c, order_by) if order_by else RunsTable.c.id direction = db.asc if ascending else db.desc query = query.order_by(direction(sorting_column)) return query def _add_filters_to_table(self, table: db.Table, filters: RunsFilter) -> db.Table: if filters.tags: table = self._apply_tags_table_filters(table, filters.tags) return table def _add_filters_to_query(self, query: SqlAlchemyQuery, filters: RunsFilter) -> SqlAlchemyQuery: check.inst_param(filters, "filters", RunsFilter) if filters.run_ids: query = query.where(RunsTable.c.run_id.in_(filters.run_ids)) if filters.job_name: query = query.where(RunsTable.c.pipeline_name == filters.job_name) if filters.statuses: query = query.where( RunsTable.c.status.in_([status.value for status in filters.statuses]) ) if filters.snapshot_id: query = query.where(RunsTable.c.snapshot_id == filters.snapshot_id) if filters.updated_after: query = query.where( RunsTable.c.update_timestamp > filters.updated_after.replace(tzinfo=None) ) if filters.updated_before: query = query.where( RunsTable.c.update_timestamp < filters.updated_before.replace(tzinfo=None) ) if filters.created_after: query = query.where( RunsTable.c.create_timestamp > filters.created_after.replace(tzinfo=None) ) if filters.created_before: query = query.where( RunsTable.c.create_timestamp < filters.created_before.replace(tzinfo=None) ) if filters.exclude_subruns: if self.has_built_index(RUN_BACKFILL_ID): query = query.where(RunsTable.c.backfill_id.is_(None)) else: runs_in_backfills = 
db_select([RunTagsTable.c.run_id]).where( RunTagsTable.c.key == BACKFILL_ID_TAG ) query = query.where(RunsTable.c.run_id.notin_(runs_in_backfills)) return query def _runs_query( self, filters: Optional[RunsFilter] = None, cursor: Optional[str] = None, limit: Optional[int] = None, columns: Optional[Sequence[str]] = None, order_by: Optional[str] = None, ascending: bool = False, bucket_by: Optional[Union[JobBucket, TagBucket]] = None, ) -> SqlAlchemyQuery: filters = check.opt_inst_param(filters, "filters", RunsFilter, default=RunsFilter()) check.opt_str_param(cursor, "cursor") check.opt_int_param(limit, "limit") check.opt_sequence_param(columns, "columns") check.opt_str_param(order_by, "order_by") check.opt_bool_param(ascending, "ascending") if columns is None: columns = ["run_body", "status"] table = self._add_filters_to_table(RunsTable, filters) base_query = db_select([getattr(RunsTable.c, column) for column in columns]).select_from( table ) base_query = self._add_filters_to_query(base_query, filters) return self._add_cursor_limit_to_query(base_query, cursor, limit, order_by, ascending) def _apply_tags_table_filters( self, table: db.Table, tags: Mapping[str, Union[str, Sequence[str]]] ) -> SqlAlchemyQuery: """Efficient query pattern for filtering by multiple tags.""" for i, (key, value) in enumerate(tags.items()): run_tags_alias = db.alias(RunTagsTable, f"run_tags_filter{i}") table = table.join( run_tags_alias, db.and_( RunsTable.c.run_id == run_tags_alias.c.run_id, run_tags_alias.c.key == key, (run_tags_alias.c.value == value) if isinstance(value, str) else run_tags_alias.c.value.in_(value), ), ) return table def get_runs( self, filters: Optional[RunsFilter] = None, cursor: Optional[str] = None, limit: Optional[int] = None, bucket_by: Optional[Union[JobBucket, TagBucket]] = None, ascending: bool = False, ) -> Sequence[DagsterRun]: query = self._runs_query(filters, cursor, limit, bucket_by=bucket_by, ascending=ascending) rows = self.fetchall(query) return self._rows_to_runs(rows) def get_run_ids( self, filters: Optional[RunsFilter] = None, cursor: Optional[str] = None, limit: Optional[int] = None, ) -> Sequence[str]: query = self._runs_query(filters=filters, cursor=cursor, limit=limit, columns=["run_id"]) rows = self.fetchall(query) return [row["run_id"] for row in rows] def get_runs_count(self, filters: Optional[RunsFilter] = None) -> int: subquery = db_subquery(self._runs_query(filters=filters)) query = db_select([db.func.count().label("count")]).select_from(subquery) row = self.fetchone(query) count = row["count"] if row else 0 return count def _get_run_by_id(self, run_id: str) -> Optional[DagsterRun]: check.str_param(run_id, "run_id") query = db_select([RunsTable.c.run_body, RunsTable.c.status]).where( RunsTable.c.run_id == run_id ) rows = self.fetchall(query) return self._row_to_run(rows[0]) if rows else None def get_run_records( self, filters: Optional[RunsFilter] = None, limit: Optional[int] = None, order_by: Optional[str] = None, ascending: bool = False, cursor: Optional[str] = None, bucket_by: Optional[Union[JobBucket, TagBucket]] = None, ) -> Sequence[RunRecord]: filters = check.opt_inst_param(filters, "filters", RunsFilter, default=RunsFilter()) check.opt_int_param(limit, "limit") columns = ["id", "run_body", "status", "create_timestamp", "update_timestamp"] if self.has_run_stats_index_cols(): columns += ["start_time", "end_time"] # only fetch columns we use to build RunRecord query = self._runs_query( filters=filters, limit=limit, columns=columns, order_by=order_by, 
ascending=ascending, cursor=cursor, bucket_by=bucket_by, ) rows = self.fetchall(query) return [ RunRecord( storage_id=check.int_param(row["id"], "id"), dagster_run=self._row_to_run(row), create_timestamp=utc_datetime_from_naive( check.inst(row["create_timestamp"], datetime) ), update_timestamp=utc_datetime_from_naive( check.inst(row["update_timestamp"], datetime) ), start_time=( check.opt_inst(row["start_time"], float) if "start_time" in row else None ), end_time=check.opt_inst(row["end_time"], float) if "end_time" in row else None, ) for row in rows ] def get_run_tags( self, tag_keys: Sequence[str], value_prefix: Optional[str] = None, limit: Optional[int] = None, ) -> Sequence[tuple[str, set[str]]]: result = defaultdict(set) query = ( db_select([RunTagsTable.c.key, RunTagsTable.c.value]) .distinct() .order_by(RunTagsTable.c.key, RunTagsTable.c.value) .where(RunTagsTable.c.key.in_(tag_keys)) ) if value_prefix: query = query.where(RunTagsTable.c.value.startswith(value_prefix)) if limit: query = query.limit(limit) rows = self.fetchall(query) for r in rows: result[r["key"]].add(r["value"]) return sorted(list([(k, v) for k, v in result.items()]), key=lambda x: x[0]) def get_run_tag_keys(self) -> Sequence[str]: query = db_select([RunTagsTable.c.key]).distinct().order_by(RunTagsTable.c.key) rows = self.fetchall(query) return sorted([r["key"] for r in rows]) def add_run_tags(self, run_id: str, new_tags: Mapping[str, str]) -> None: check.str_param(run_id, "run_id") check.mapping_param(new_tags, "new_tags", key_type=str, value_type=str) run = self._get_run_by_id(run_id) if not run: raise DagsterRunNotFoundError( f"Run {run_id} was not found in instance.", invalid_run_id=run_id ) current_tags = run.tags if run.tags else {} all_tags = merge_dicts(current_tags, new_tags) partition = all_tags.get(PARTITION_NAME_TAG) partition_set = all_tags.get(PARTITION_SET_TAG) with self.connect() as conn: conn.execute( RunsTable.update() .where(RunsTable.c.run_id == run_id) .values( run_body=serialize_value(run.with_tags(merge_dicts(current_tags, new_tags))), partition=partition, partition_set=partition_set, update_timestamp=get_current_datetime(), ) ) current_tags_set = set(current_tags.keys()) new_tags_set = set(new_tags.keys()) existing_tags = current_tags_set & new_tags_set added_tags = new_tags_set.difference(existing_tags) for tag in existing_tags: conn.execute( RunTagsTable.update() .where(db.and_(RunTagsTable.c.run_id == run_id, RunTagsTable.c.key == tag)) .values(value=new_tags[tag]) ) if added_tags: conn.execute( RunTagsTable.insert(), [dict(run_id=run_id, key=tag, value=new_tags[tag]) for tag in added_tags], ) def get_run_group(self, run_id: str) -> tuple[str, Sequence[DagsterRun]]: check.str_param(run_id, "run_id") dagster_run = self._get_run_by_id(run_id) if not dagster_run: raise DagsterRunNotFoundError( f"Run {run_id} was not found in instance.", invalid_run_id=run_id ) # find root_run root_run_id = dagster_run.root_run_id if dagster_run.root_run_id else dagster_run.run_id root_run = self._get_run_by_id(root_run_id) if not root_run: raise DagsterRunNotFoundError( f"Run id {root_run_id} set as root run id for run {run_id} was not found in" " instance.", invalid_run_id=root_run_id, ) # root_run_id to run_id 1:1 mapping # https://github.com/dagster-io/dagster/issues/2495 # Note: we currently use tags to persist the run group info root_to_run = db_subquery( db_select( [RunTagsTable.c.value.label("root_run_id"), RunTagsTable.c.run_id.label("run_id")] ).where( db.and_(RunTagsTable.c.key == ROOT_RUN_ID_TAG, 
RunTagsTable.c.value == root_run_id) ), "root_to_run", ) # get run group run_group_query = db_select([RunsTable.c.run_body, RunsTable.c.status]).select_from( root_to_run.join( RunsTable, root_to_run.c.run_id == RunsTable.c.run_id, isouter=True, ) ) res = self.fetchall(run_group_query) run_group = self._rows_to_runs(res) return (root_run_id, [root_run, *run_group]) def has_run(self, run_id: str) -> bool: check.str_param(run_id, "run_id") return bool(self._get_run_by_id(run_id)) def delete_run(self, run_id: str) -> None: check.str_param(run_id, "run_id") query = db.delete(RunsTable).where(RunsTable.c.run_id == run_id) with self.connect() as conn: conn.execute(query) def has_job_snapshot(self, job_snapshot_id: str) -> bool: check.str_param(job_snapshot_id, "job_snapshot_id") return self._has_snapshot_id(job_snapshot_id) def add_job_snapshot(self, job_snapshot: JobSnap) -> str: check.inst_param(job_snapshot, "job_snapshot", JobSnap) snapshot_id = job_snapshot.snapshot_id return self._add_snapshot( snapshot_id=snapshot_id, snapshot_obj=job_snapshot, snapshot_type=SnapshotType.PIPELINE, ) def get_job_snapshot(self, job_snapshot_id: str) -> JobSnap: check.str_param(job_snapshot_id, "job_snapshot_id") return self._get_snapshot(job_snapshot_id) # type: ignore # (allowed to return None?) def has_execution_plan_snapshot(self, execution_plan_snapshot_id: str) -> bool: check.str_param(execution_plan_snapshot_id, "execution_plan_snapshot_id") return bool(self.get_execution_plan_snapshot(execution_plan_snapshot_id)) def add_execution_plan_snapshot( self, execution_plan_snapshot: ExecutionPlanSnapshot, ) -> str: check.inst_param(execution_plan_snapshot, "execution_plan_snapshot", ExecutionPlanSnapshot) snapshot_id = create_execution_plan_snapshot_id(execution_plan_snapshot) return self._add_snapshot( snapshot_id=snapshot_id, snapshot_obj=execution_plan_snapshot, snapshot_type=SnapshotType.EXECUTION_PLAN, ) def get_execution_plan_snapshot(self, execution_plan_snapshot_id: str) -> ExecutionPlanSnapshot: check.str_param(execution_plan_snapshot_id, "execution_plan_snapshot_id") return self._get_snapshot(execution_plan_snapshot_id) # type: ignore # (allowed to return None?) 
def _add_snapshot(self, snapshot_id: str, snapshot_obj, snapshot_type: SnapshotType) -> str: check.str_param(snapshot_id, "snapshot_id") check.not_none_param(snapshot_obj, "snapshot_obj") check.inst_param(snapshot_type, "snapshot_type", SnapshotType) with self.connect() as conn: snapshot_insert = SnapshotsTable.insert().values( snapshot_id=snapshot_id, snapshot_body=zlib.compress(serialize_value(snapshot_obj).encode("utf-8")), snapshot_type=snapshot_type.value, ) try: conn.execute(snapshot_insert) except db_exc.IntegrityError: # on_conflict_do_nothing equivalent pass return snapshot_id def get_run_storage_id(self) -> str: query = db_select([InstanceInfo.c.run_storage_id]) row = self.fetchone(query) if not row: run_storage_id = str(uuid.uuid4()) with self.connect() as conn: conn.execute(InstanceInfo.insert().values(run_storage_id=run_storage_id)) return run_storage_id else: return row["run_storage_id"] def _has_snapshot_id(self, snapshot_id: str) -> bool: query = db_select([SnapshotsTable.c.snapshot_id]).where( SnapshotsTable.c.snapshot_id == snapshot_id ) row = self.fetchone(query) return bool(row) def _get_snapshot(self, snapshot_id: str) -> Optional[JobSnap]: query = db_select([SnapshotsTable.c.snapshot_body]).where( SnapshotsTable.c.snapshot_id == snapshot_id ) row = self.fetchone(query) return ( defensively_unpack_execution_plan_snapshot_query(logging, [row["snapshot_body"]]) # type: ignore if row else None ) def get_run_partition_data(self, runs_filter: RunsFilter) -> Sequence[RunPartitionData]: if self.has_built_index(RUN_PARTITIONS) and self.has_run_stats_index_cols(): query = self._runs_query( filters=runs_filter, columns=["run_id", "status", "start_time", "end_time", "partition"], ) rows = self.fetchall(query) # dedup by partition _partition_data_by_partition = {} for row in rows: if not row["partition"] or row["partition"] in _partition_data_by_partition: continue _partition_data_by_partition[row["partition"]] = RunPartitionData( run_id=row["run_id"], partition=row["partition"], status=DagsterRunStatus[row["status"]], start_time=row["start_time"], end_time=row["end_time"], ) return list(_partition_data_by_partition.values()) else: query = self._runs_query(filters=runs_filter) rows = self.fetchall(query) _partition_data_by_partition = {} for row in rows: run = self._row_to_run(row) partition = run.tags.get(PARTITION_NAME_TAG) if not partition or partition in _partition_data_by_partition: continue _partition_data_by_partition[partition] = RunPartitionData( run_id=run.run_id, partition=partition, status=run.status, start_time=None, end_time=None, ) return list(_partition_data_by_partition.values()) def _get_partition_runs( self, partition_set_name: str, partition_name: str ) -> Sequence[DagsterRun]: # utility method to help test reads off of the partition column if not self.has_built_index(RUN_PARTITIONS): # query by tags return self.get_runs( filters=RunsFilter( tags={ PARTITION_SET_TAG: partition_set_name, PARTITION_NAME_TAG: partition_name, } ) ) else: query = ( self._runs_query() .where(RunsTable.c.partition == partition_name) .where(RunsTable.c.partition_set == partition_set_name) ) rows = self.fetchall(query) return self._rows_to_runs(rows) # Tracking data migrations over secondary indexes def _execute_data_migrations( self, migrations: Mapping[str, Callable[[], MigrationFn]], print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False, ) -> None: for migration_name, migration_fn in migrations.items(): if self.has_built_index(migration_name): if not force_rebuild_all: 
if print_fn: print_fn(f"Skipping already applied data migration: {migration_name}") continue if print_fn: print_fn(f"Starting data migration: {migration_name}") migration_fn()(self, print_fn) self.mark_index_built(migration_name) if print_fn: print_fn(f"Finished data migration: {migration_name}") def migrate(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None: self._execute_data_migrations(REQUIRED_DATA_MIGRATIONS, print_fn, force_rebuild_all) def optimize(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None: self._execute_data_migrations(OPTIONAL_DATA_MIGRATIONS, print_fn, force_rebuild_all) def has_built_index(self, migration_name: str) -> bool: query = ( db_select([1]) .where(SecondaryIndexMigrationTable.c.name == migration_name) .where(SecondaryIndexMigrationTable.c.migration_completed != None) # noqa: E711 .limit(1) ) results = self.fetchall(query) return len(results) > 0 def mark_index_built(self, migration_name: str) -> None: query = SecondaryIndexMigrationTable.insert().values( name=migration_name, migration_completed=datetime.now(), ) with self.connect() as conn: try: conn.execute(query) except db_exc.IntegrityError: conn.execute( SecondaryIndexMigrationTable.update() .where(SecondaryIndexMigrationTable.c.name == migration_name) .values(migration_completed=datetime.now()) ) # Checking for migrations def has_run_stats_index_cols(self) -> bool: with self.connect() as conn: column_names = [x.get("name") for x in db.inspect(conn).get_columns(RunsTable.name)] return "start_time" in column_names and "end_time" in column_names def has_bulk_actions_selector_cols(self) -> bool: with self.connect() as conn: column_names = [ x.get("name") for x in db.inspect(conn).get_columns(BulkActionsTable.name) ] return "selector_id" in column_names def has_backfill_id_column(self) -> bool: with self.connect() as conn: column_names = [x.get("name") for x in db.inspect(conn).get_columns(RunsTable.name)] return "backfill_id" in column_names def has_bulk_action_job_name_column(self) -> bool: with self.connect() as conn: column_names = [ x.get("name") for x in db.inspect(conn).get_columns(BulkActionsTable.name) ] return "job_name" in column_names def has_backfill_tags_table(self) -> bool: with self.connect() as conn: return BackfillTagsTable.name in db.inspect(conn).get_table_names() # Daemon heartbeats def add_daemon_heartbeat(self, daemon_heartbeat: DaemonHeartbeat) -> None: with self.connect() as conn: # insert, or update if already present try: conn.execute( DaemonHeartbeatsTable.insert().values( timestamp=datetime_from_timestamp(daemon_heartbeat.timestamp), daemon_type=daemon_heartbeat.daemon_type, daemon_id=daemon_heartbeat.daemon_id, body=serialize_value(daemon_heartbeat), ) ) except db_exc.IntegrityError: conn.execute( DaemonHeartbeatsTable.update() .where(DaemonHeartbeatsTable.c.daemon_type == daemon_heartbeat.daemon_type) .values( timestamp=datetime_from_timestamp(daemon_heartbeat.timestamp), daemon_id=daemon_heartbeat.daemon_id, body=serialize_value(daemon_heartbeat), ) ) def get_daemon_heartbeats(self) -> Mapping[str, DaemonHeartbeat]: rows = self.fetchall(db_select([DaemonHeartbeatsTable.c.body])) heartbeats = [] for row in rows: heartbeats.append(deserialize_value(row["body"], DaemonHeartbeat)) return {heartbeat.daemon_type: heartbeat for heartbeat in heartbeats} def wipe(self) -> None: """Clears the run storage.""" with self.connect() as conn: # https://stackoverflow.com/a/54386260/324449 conn.execute(RunsTable.delete()) 
conn.execute(RunTagsTable.delete()) conn.execute(SnapshotsTable.delete()) conn.execute(DaemonHeartbeatsTable.delete()) conn.execute(BulkActionsTable.delete()) def wipe_daemon_heartbeats(self) -> None: with self.connect() as conn: # https://stackoverflow.com/a/54386260/324449 conn.execute(DaemonHeartbeatsTable.delete()) def _add_backfill_filters_to_table( self, table: db.Table, filters: Optional[BulkActionsFilter] ) -> db.Table: if filters and filters.tags and self.has_built_index(BACKFILL_JOB_NAME_AND_TAGS): for i, (key, value) in enumerate(filters.tags.items()): backfill_tags_alias = db.alias(BackfillTagsTable, f"backfill_tags_filter{i}") table = table.join( backfill_tags_alias, db.and_( BulkActionsTable.c.key == backfill_tags_alias.c.backfill_id, backfill_tags_alias.c.key == key, (backfill_tags_alias.c.value == value) if isinstance(value, str) else backfill_tags_alias.c.value.in_(value), ), ) return table return table def _backfills_query(self, filters: Optional[BulkActionsFilter] = None): query = db_select([BulkActionsTable.c.body, BulkActionsTable.c.timestamp]) if filters and filters.tags: if not self.has_built_index(BACKFILL_JOB_NAME_AND_TAGS): # if the migration was run, we added the query for tags filtering in _add_backfill_filters_to_table # BackfillTags table has not been built. However, all tags that are on a backfill are # applied to the runs the backfill launches. So we can query for runs that match the tags and # are also part of a backfill to find the backfills that match the tags. backfills_with_tags_query = db_select([RunTagsTable.c.value]).where( RunTagsTable.c.key == BACKFILL_ID_TAG ) for i, (key, value) in enumerate(filters.tags.items()): run_tags_alias = db.alias(RunTagsTable, f"run_tags_filter{i}") backfills_with_tags_query = backfills_with_tags_query.where( db.and_( RunTagsTable.c.run_id == run_tags_alias.c.run_id, run_tags_alias.c.key == key, (run_tags_alias.c.value == value) if isinstance(value, str) else run_tags_alias.c.value.in_(value), ), ) query = query.where( BulkActionsTable.c.key.in_(db_subquery(backfills_with_tags_query)) ) if filters and filters.job_name: if self.has_built_index(BACKFILL_JOB_NAME_AND_TAGS): query = query.where(BulkActionsTable.c.job_name == filters.job_name) else: run_tags_table = RunTagsTable runs_in_backfill_with_job_name = run_tags_table.join( RunsTable, db.and_( RunTagsTable.c.run_id == RunsTable.c.run_id, RunTagsTable.c.key == BACKFILL_ID_TAG, RunsTable.c.pipeline_name == filters.job_name, ), ) backfills_with_job_name_query = db_select([RunTagsTable.c.value]).select_from( runs_in_backfill_with_job_name ) query = query.where( BulkActionsTable.c.key.in_(db_subquery(backfills_with_job_name_query)) ) if filters and filters.statuses: query = query.where( BulkActionsTable.c.status.in_([status.value for status in filters.statuses]) ) if filters and filters.created_after: query = query.where(BulkActionsTable.c.timestamp > filters.created_after) if filters and filters.created_before: query = query.where(BulkActionsTable.c.timestamp < filters.created_before) if filters and filters.backfill_ids: query = query.where(BulkActionsTable.c.key.in_(filters.backfill_ids)) return query def _add_cursor_limit_to_backfills_query( self, query, cursor: Optional[str] = None, limit: Optional[int] = None ): if limit: query = query.limit(limit) if cursor: cursor_query = db_select([BulkActionsTable.c.id]).where( BulkActionsTable.c.key == cursor ) query = query.where(BulkActionsTable.c.id < cursor_query) return query def _apply_backfill_tags_filter_to_results( 
self, backfills: Sequence[PartitionBackfill], tags: Mapping[str, Union[str, Sequence[str]]] ) -> Sequence[PartitionBackfill]: if not tags: return backfills def _matches_backfill( backfill: PartitionBackfill, tags: Mapping[str, Union[str, Sequence[str]]] ) -> bool: for key, value in tags.items(): if isinstance(value, str): if backfill.tags.get(key) != value: return False elif backfill.tags.get(key) not in value: return False return True return [backfill for backfill in backfills if _matches_backfill(backfill, tags)] def get_backfills( self, filters: Optional[BulkActionsFilter] = None, cursor: Optional[str] = None, limit: Optional[int] = None, status: Optional[BulkActionStatus] = None, ) -> Sequence[PartitionBackfill]: check.opt_inst_param(status, "status", BulkActionStatus) query = db_select([BulkActionsTable.c.body, BulkActionsTable.c.timestamp]) if status and filters: raise DagsterInvariantViolationError( "Cannot provide status and filters to get_backfills. Please use filters rather than status." ) if status is not None: filters = BulkActionsFilter(statuses=[status]) table = self._add_backfill_filters_to_table(BulkActionsTable, filters) query = self._backfills_query(filters=filters).select_from(table) query = self._add_cursor_limit_to_backfills_query(query, cursor=cursor, limit=limit) query = query.order_by(BulkActionsTable.c.id.desc()) rows = self.fetchall(query) backfill_candidates = deserialize_values((row["body"] for row in rows), PartitionBackfill) if filters and filters.tags and not self.has_built_index(BACKFILL_JOB_NAME_AND_TAGS): # if we are still using the run tags table to get backfills by tag, we need to do an additional check. # runs can have more tags than the backfill that launched them. Since we filtered tags by # querying for runs with those tags, we need to do an additional check that the backfills # also have the requested tags backfill_candidates = self._apply_backfill_tags_filter_to_results( backfill_candidates, filters.tags ) return backfill_candidates def get_backfills_count(self, filters: Optional[BulkActionsFilter] = None) -> int: check.opt_inst_param(filters, "filters", BulkActionsFilter) if filters and filters.tags: # runs can have more tags than the backfill that launched them. Since we filtered tags by # querying for runs with those tags, we need to do an additional check that the backfills # also have the requested tags. 
This requires fetching the backfills from the db and filtering them query = self._backfills_query(filters=filters) rows = self.fetchall(query) backfill_candidates = deserialize_values( (row["body"] for row in rows), PartitionBackfill ) return len( self._apply_backfill_tags_filter_to_results(backfill_candidates, filters.tags) ) subquery = db_subquery(self._backfills_query(filters=filters)) query = db_select([db.func.count().label("count")]).select_from(subquery) row = self.fetchone(query) count = row["count"] if row else 0 return count def get_backfill(self, backfill_id: str) -> Optional[PartitionBackfill]: check.str_param(backfill_id, "backfill_id") query = db_select([BulkActionsTable.c.body]).where(BulkActionsTable.c.key == backfill_id) row = self.fetchone(query) return deserialize_value(row["body"], PartitionBackfill) if row else None def add_backfill(self, partition_backfill: PartitionBackfill) -> None: check.inst_param(partition_backfill, "partition_backfill", PartitionBackfill) values: dict[str, Any] = dict( key=partition_backfill.backfill_id, status=partition_backfill.status.value, timestamp=datetime_from_timestamp(partition_backfill.backfill_timestamp), body=serialize_value(cast("NamedTuple", partition_backfill)), ) if self.has_bulk_actions_selector_cols(): values["selector_id"] = partition_backfill.selector_id values["action_type"] = partition_backfill.bulk_action_type.value if self.has_bulk_action_job_name_column(): values["job_name"] = partition_backfill.job_name with self.connect() as conn: conn.execute(BulkActionsTable.insert().values(**values)) if self.has_backfill_tags_table(): tags_to_insert = partition_backfill.tags if len(tags_to_insert.items()) > 0: conn.execute( BackfillTagsTable.insert(), [ dict( backfill_id=partition_backfill.backfill_id, key=k, value=v, ) for k, v in tags_to_insert.items() ], ) def update_backfill(self, partition_backfill: PartitionBackfill) -> None: check.inst_param(partition_backfill, "partition_backfill", PartitionBackfill) backfill_id = partition_backfill.backfill_id if not self.get_backfill(backfill_id): raise DagsterInvariantViolationError( f"Backfill {backfill_id} is not present in storage" ) with self.connect() as conn: conn.execute( BulkActionsTable.update() .where(BulkActionsTable.c.key == backfill_id) .values( status=partition_backfill.status.value, body=serialize_value(partition_backfill), ) ) def get_cursor_values(self, keys: set[str]) -> Mapping[str, str]: check.set_param(keys, "keys", of_type=str) rows = self.fetchall( db_select([KeyValueStoreTable.c.key, KeyValueStoreTable.c.value]).where( KeyValueStoreTable.c.key.in_(keys) ), ) return {row["key"]: row["value"] for row in rows} def set_cursor_values(self, pairs: Mapping[str, str]) -> None: check.mapping_param(pairs, "pairs", key_type=str, value_type=str) db_values = [{"key": k, "value": v} for k, v in pairs.items()] with self.connect() as conn: try: conn.execute(KeyValueStoreTable.insert().values(db_values)) except db_exc.IntegrityError: conn.execute( KeyValueStoreTable.update() .where(KeyValueStoreTable.c.key.in_(pairs.keys())) .values(value=db.sql.case(pairs, value=KeyValueStoreTable.c.key)) ) # Migrating run history def replace_job_origin(self, run: DagsterRun, job_origin: RemoteJobOrigin) -> None: new_label = job_origin.repository_origin.get_label() with self.connect() as conn: conn.execute( RunsTable.update() .where(RunsTable.c.run_id == run.run_id) .values( run_body=serialize_value( run.with_job_origin(job_origin).with_tags( {**run.tags, REPOSITORY_LABEL_TAG: new_label} ) ), ) ) 
conn.execute( RunTagsTable.update() .where(RunTagsTable.c.run_id == run.run_id) .where(RunTagsTable.c.key == REPOSITORY_LABEL_TAG) .values(value=new_label) ) GET_PIPELINE_SNAPSHOT_QUERY_ID = "get-pipeline-snapshot" def defensively_unpack_execution_plan_snapshot_query( logger: logging.Logger, row: Sequence[Any] ) -> Optional[Union[ExecutionPlanSnapshot, JobSnap]]: # minimal checking here because sqlalchemy returns a different type based on what version of # SqlAlchemy you are using def _warn(msg: str) -> None: logger.warning(f"get-pipeline-snapshot: {msg}") if not isinstance(row[0], bytes): _warn("First entry in row is not a binary type.") return None try: uncompressed_bytes = zlib.decompress(row[0]) except zlib.error: _warn("Could not decompress bytes stored in snapshot table.") return None try: decoded_str = uncompressed_bytes.decode("utf-8") except UnicodeDecodeError: _warn("Could not unicode decode decompressed bytes stored in snapshot table.") return None try: return deserialize_value(decoded_str, (ExecutionPlanSnapshot, JobSnap)) except JSONDecodeError: _warn("Could not parse json in snapshot table.") return None
SqlRunStorage
python
cython__cython
Cython/Compiler/ExprNodes.py
{ "start": 54470, "end": 54800 }
class ____(ExprNode):
    # Abstract base class for expression nodes which have
    # no sub-expressions.

    subexprs = []

    # Override to optimize -- we know we have no children
    def generate_subexpr_evaluation_code(self, code):
        pass

    def generate_subexpr_disposal_code(self, code):
        pass
AtomicExprNode
python
Pylons__pyramid
tests/test_config/test_init.py
{ "start": 49634, "end": 50226 }
class ____:
    def __init__(self, adaptation=None, util=None):
        self.utilities = []
        self.adapters = []
        self.adaptation = adaptation
        self.util = util

    def subscribers(self, events, name):
        self.events = events
        return events

    def registerUtility(self, *arg, **kw):
        self.utilities.append((arg, kw))

    def registerAdapter(self, *arg, **kw):
        self.adapters.append((arg, kw))

    def queryAdapter(self, *arg, **kw):
        return self.adaptation

    def queryUtility(self, *arg, **kw):
        return self.util
DummyRegistry
python
tensorflow__tensorflow
tensorflow/python/distribute/distribute_lib.py
{ "start": 102319, "end": 133440 }
class ____(object): """Additional APIs for algorithms that need to be distribution-aware. Note: For most usage of `tf.distribute.Strategy`, there should be no need to call these methods, since TensorFlow libraries (such as optimizers) already call these methods when needed on your behalf. Some common use cases of functions on this page: * _Locality_ `tf.distribute.DistributedValues` can have the same _locality_ as a _distributed variable_, which leads to a mirrored value residing on the same devices as the variable (as opposed to the compute devices). Such values may be passed to a call to `tf.distribute.StrategyExtended.update` to update the value of a variable. You may use `tf.distribute.StrategyExtended.colocate_vars_with` to give a variable the same locality as another variable. You may convert a "PerReplica" value to a variable's locality by using `tf.distribute.StrategyExtended.reduce_to` or `tf.distribute.StrategyExtended.batch_reduce_to`. * _How to update a distributed variable_ A distributed variable is variables created on multiple devices. As discussed in the [glossary](https://www.tensorflow.org/api_docs/python/tf/distribute), mirrored variable and SyncOnRead variable are two examples. The standard pattern for updating distributed variables is to: 1. In your function passed to `tf.distribute.Strategy.run`, compute a list of (update, variable) pairs. For example, the update might be a gradient of the loss with respect to the variable. 2. Switch to cross-replica mode by calling `tf.distribute.get_replica_context().merge_call()` with the updates and variables as arguments. 3. Call `tf.distribute.StrategyExtended.reduce_to(VariableAggregation.SUM, t, v)` (for one variable) or `tf.distribute.StrategyExtended.batch_reduce_to` (for a list of variables) to sum the updates. 4. Call `tf.distribute.StrategyExtended.update(v)` for each variable to update its value. Steps 2 through 4 are done automatically by class `tf.keras.optimizers.Optimizer` if you call its `tf.keras.optimizers.Optimizer.apply_gradients` method in a replica context. In fact, a higher-level solution to update a distributed variable is by calling `assign` on the variable as you would do to a regular `tf.Variable`. You can call the method in both _replica context_ and _cross-replica context_. For a _mirrored variable_, calling `assign` in _replica context_ requires you to specify the `aggregation` type in the variable constructor. In that case, the context switching and sync described in steps 2 through 4 are handled for you. If you call `assign` on _mirrored variable_ in _cross-replica context_, you can only assign a single value or assign values from another mirrored variable or a mirrored `tf.distribute.DistributedValues`. For a _SyncOnRead variable_, in _replica context_, you can simply call `assign` on it and no aggregation happens under the hood. In _cross-replica context_, you can only assign a single value to a SyncOnRead variable. One example case is restoring from a checkpoint: if the `aggregation` type of the variable is `tf.VariableAggregation.SUM`, it is assumed that replica values were added before checkpointing, so at the time of restoring, the value is divided by the number of replicas and then assigned to each replica; if the `aggregation` type is `tf.VariableAggregation.MEAN`, the value is assigned to each replica directly. 
""" def __init__(self, container_strategy): self._container_strategy_weakref = weakref.ref(container_strategy) self._default_device = None # This property is used to determine if we should set drop_remainder=True # when creating Datasets from numpy array inputs. self._require_static_shapes = False def _resource_creator_scope(self): """Returns one or a list of ops.resource_creator_scope for some Strategy.""" return None def _default_device_scope(self): if self._default_device: return ops.device(self._default_device) return None def _container_strategy(self): """Get the containing `tf.distribute.Strategy`. This should not generally be needed except when creating a new `ReplicaContext` and to validate that the caller is in the correct `scope()`. Returns: The `tf.distribute.Strategy` such that `strategy.extended` is `self`. """ container_strategy = self._container_strategy_weakref() assert container_strategy is not None return container_strategy def _scope(self, strategy): """Implementation of tf.distribute.Strategy.scope().""" def creator_with_resource_vars(next_creator, **kwargs): """Variable creator to use in `_CurrentDistributionContext`.""" if ops.inside_function(): if_graph_building = "graph_building" else: if_graph_building = "not_graph_building" with monitoring.MonitoredTimer(distributed_variable_creation_time_counter.get_cell(strategy.__class__.__name__, if_graph_building)): _require_strategy_scope_extended(self) kwargs["use_resource"] = True kwargs["distribute_strategy"] = strategy # Unwrap `initial_value` if it is a `CheckpointInitialValue` to avoid # dereferencing a `Tensor` that is without a `name`. We still need to # propagate the metadata it's holding. if isinstance(kwargs["initial_value"], trackable.CheckpointInitialValue): checkpoint_restore_uid = kwargs[ "initial_value"].checkpoint_position.restore_uid kwargs["initial_value"] = kwargs["initial_value"].wrapped_value elif isinstance(kwargs["initial_value"], trackable.CheckpointInitialValueCallable): checkpoint_restore_uid = kwargs[ "initial_value"].checkpoint_position.restore_uid elif (isinstance(kwargs["initial_value"], functools.partial) and isinstance(kwargs["initial_value"].func, trackable.CheckpointInitialValueCallable)): # Some libraries (e.g., Keras) create partial function out of # initializer to bind shape/dtype, for example: # initial_val = functools.partial(initializer, shape, dtype=dtype) # Therefore to get the restore_uid we need to examine the "func" of # the partial function. checkpoint_restore_uid = kwargs[ "initial_value"].func.checkpoint_position.restore_uid else: checkpoint_restore_uid = None created = self._create_variable(next_creator, **kwargs) if checkpoint_restore_uid is not None: # pylint: disable=protected-access # Let the checkpointing infrastructure know that the variable was # already restored so it doesn't waste memory loading the value again. # In this case of CheckpointInitialValueCallable this may already be # done by the final variable creator, but it doesn't hurt to do it # again. 
created._maybe_initialize_trackable() created._update_uid = checkpoint_restore_uid # pylint: enable=protected-access return created def distributed_getter(getter, *args, **kwargs): if not self._allow_variable_partition(): if kwargs.pop("partitioner", None) is not None: tf_logging.log_first_n( tf_logging.WARN, "Partitioned variables are disabled when using " "current tf.distribute.Strategy.", 1) return getter(*args, **kwargs) return _CurrentDistributionContext( strategy, variable_scope.variable_creator_scope(creator_with_resource_vars), variable_scope.variable_scope( variable_scope.get_variable_scope(), custom_getter=distributed_getter, ), strategy.extended._resource_creator_scope(), # pylint: disable=protected-access self._default_device_scope(), ) def _allow_variable_partition(self): return False def _create_variable(self, next_creator, **kwargs): # Note: should support "colocate_with" argument. raise NotImplementedError("must be implemented in descendants") def variable_created_in_scope(self, v): """Tests whether `v` was created while this strategy scope was active. Variables created inside the strategy scope are "owned" by it: >>> strategy = tf.distribute.MirroredStrategy() >>> with strategy.scope(): ... v = tf.Variable(1.) >>> strategy.extended.variable_created_in_scope(v) True Variables created outside the strategy are not owned by it: >>> strategy = tf.distribute.MirroredStrategy() >>> v = tf.Variable(1.) >>> strategy.extended.variable_created_in_scope(v) False Args: v: A `tf.Variable` instance. Returns: True if `v` was created inside the scope, False if not. """ return v._distribute_strategy == self._container_strategy_weakref() # pylint: disable=protected-access def colocate_vars_with(self, colocate_with_variable): """Scope that controls which devices variables will be created on. No operations should be added to the graph inside this scope, it should only be used when creating variables (some implementations work by changing variable creation, others work by using a tf.compat.v1.colocate_with() scope). This may only be used inside `self.scope()`. Example usage: ``` with strategy.scope(): var1 = tf.Variable(...) with strategy.extended.colocate_vars_with(var1): # var2 and var3 will be created on the same device(s) as var1 var2 = tf.Variable(...) var3 = tf.Variable(...) def fn(v1, v2, v3): # operates on v1 from var1, v2 from var2, and v3 from var3 # `fn` runs on every device `var1` is on, `var2` and `var3` will be there # too. strategy.extended.update(var1, fn, args=(var2, var3)) ``` Args: colocate_with_variable: A variable created in this strategy's `scope()`. Variables created while in the returned context manager will be on the same set of devices as `colocate_with_variable`. Returns: A context manager. 
""" def create_colocated_variable(next_creator, **kwargs): _require_strategy_scope_extended(self) kwargs["use_resource"] = True kwargs["colocate_with"] = colocate_with_variable return next_creator(**kwargs) _require_strategy_scope_extended(self) self._validate_colocate_with_variable(colocate_with_variable) return variable_scope.variable_creator_scope(create_colocated_variable) def _validate_colocate_with_variable(self, colocate_with_variable): """Validate `colocate_with_variable` argument to `colocate_vars_with`.""" pass def _make_dataset_iterator(self, dataset): raise NotImplementedError("must be implemented in descendants") def _make_input_fn_iterator(self, input_fn, replication_mode): raise NotImplementedError("must be implemented in descendants") def _experimental_distribute_dataset(self, dataset, options): raise NotImplementedError("must be implemented in descendants") def _distribute_datasets_from_function(self, dataset_fn, options): raise NotImplementedError("must be implemented in descendants") def _experimental_distribute_values_from_function(self, value_fn): raise NotImplementedError("must be implemented in descendants") def _reduce(self, reduce_op, value): # Default implementation until we have an implementation for each strategy. dst = device_util.current() or self._default_device or "/device:CPU:0" return self._local_results(self.reduce_to(reduce_op, value, dst))[0] def reduce_to(self, reduce_op, value, destinations, options=None): """Combine (via e.g. sum or mean) values across replicas. `reduce_to` aggregates `tf.distribute.DistributedValues` and distributed variables. It supports both dense values and `tf.IndexedSlices`. This API currently can only be called in cross-replica context. Other variants to reduce values across replicas are: * `tf.distribute.StrategyExtended.batch_reduce_to`: the batch version of this API. * `tf.distribute.ReplicaContext.all_reduce`: the counterpart of this API in replica context. It supports both batched and non-batched all-reduce. * `tf.distribute.Strategy.reduce`: a more convenient method to reduce to the host in cross-replica context. `destinations` specifies where to reduce the value to, e.g. "GPU:0". You can also pass in a `Tensor`, and the destinations will be the device of that tensor. For all-reduce, pass the same to `value` and `destinations`. It can be used in `tf.distribute.ReplicaContext.merge_call` to write code that works for all `tf.distribute.Strategy`. @tf.function def step_fn(var): def merge_fn(strategy, value, var): # All-reduce the value. Note that `value` here is a # `tf.distribute.DistributedValues`. reduced = strategy.extended.reduce_to(tf.distribute.ReduceOp.SUM, value, destinations=var) strategy.extended.update(var, lambda var, value: var.assign(value), args=(reduced,)) value = tf.identity(1.) tf.distribute.get_replica_context().merge_call(merge_fn, args=(value, var)) def run(strategy): with strategy.scope(): v = tf.Variable(0.) 
strategy.run(step_fn, args=(v,)) return v run(tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])) MirroredVariable:{ 0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=2.0>, 1: <tf.Variable 'Variable/replica_1:0' shape=() dtype=float32, numpy=2.0> } run(tf.distribute.experimental.CentralStorageStrategy( compute_devices=["GPU:0", "GPU:1"], parameter_device="CPU:0")) <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=2.0> run(tf.distribute.OneDeviceStrategy("GPU:0")) <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0> Args: reduce_op: a `tf.distribute.ReduceOp` value specifying how values should be combined. Allows using string representation of the enum such as "SUM", "MEAN". value: a `tf.distribute.DistributedValues`, or a `tf.Tensor` like object. destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a `tf.Tensor` alike object, or a device string. It specifies the devices to reduce to. To perform an all-reduce, pass the same to `value` and `destinations`. Note that if it's a `tf.Variable`, the value is reduced to the devices of that variable, and this method doesn't update the variable. options: a `tf.distribute.experimental.CommunicationOptions`. Options to perform collective operations. This overrides the default options if the `tf.distribute.Strategy` takes one in the constructor. See `tf.distribute.experimental.CommunicationOptions` for details of the options. Returns: A tensor or value reduced to `destinations`. """ with monitoring.MonitoredTimer( distributed_api_time_counter.get_cell(self.__class__.__name__, "Reduce_to_eagerly") ) if not ops.inside_function() else contextlib.nullcontext(): if options is None: options = collective_util.Options() _require_cross_replica_or_default_context_extended(self) assert not isinstance(destinations, (list, tuple)) assert not isinstance(reduce_op, variable_scope.VariableAggregation) if isinstance(reduce_op, six.string_types): reduce_op = reduce_util.ReduceOp(reduce_op.upper()) assert (reduce_op == reduce_util.ReduceOp.SUM or reduce_op == reduce_util.ReduceOp.MEAN) return self._reduce_to(reduce_op, value, destinations, options) def _reduce_to(self, reduce_op, value, destinations, options): raise NotImplementedError("must be implemented in descendants") def batch_reduce_to(self, reduce_op, value_destination_pairs, options=None): """Combine multiple `reduce_to` calls into one for faster execution. Similar to `reduce_to`, but accepts a list of (value, destinations) pairs. It's more efficient than reduce each value separately. This API currently can only be called in cross-replica context. Other variants to reduce values across replicas are: * `tf.distribute.StrategyExtended.reduce_to`: the non-batch version of this API. * `tf.distribute.ReplicaContext.all_reduce`: the counterpart of this API in replica context. It supports both batched and non-batched all-reduce. * `tf.distribute.Strategy.reduce`: a more convenient method to reduce to the host in cross-replica context. See `reduce_to` for more information. @tf.function def step_fn(var): def merge_fn(strategy, value, var): # All-reduce the value. Note that `value` here is a # `tf.distribute.DistributedValues`. reduced = strategy.extended.batch_reduce_to( tf.distribute.ReduceOp.SUM, [(value, var)])[0] strategy.extended.update(var, lambda var, value: var.assign(value), args=(reduced,)) value = tf.identity(1.) tf.distribute.get_replica_context().merge_call(merge_fn, args=(value, var)) def run(strategy): with strategy.scope(): v = tf.Variable(0.) 
strategy.run(step_fn, args=(v,)) return v run(tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])) MirroredVariable:{ 0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=2.0>, 1: <tf.Variable 'Variable/replica_1:0' shape=() dtype=float32, numpy=2.0> } run(tf.distribute.experimental.CentralStorageStrategy( compute_devices=["GPU:0", "GPU:1"], parameter_device="CPU:0")) <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=2.0> run(tf.distribute.OneDeviceStrategy("GPU:0")) <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0> Args: reduce_op: a `tf.distribute.ReduceOp` value specifying how values should be combined. Allows using string representation of the enum such as "SUM", "MEAN". value_destination_pairs: a sequence of (value, destinations) pairs. See `tf.distribute.Strategy.reduce_to` for descriptions. options: a `tf.distribute.experimental.CommunicationOptions`. Options to perform collective operations. This overrides the default options if the `tf.distribute.Strategy` takes one in the constructor. See `tf.distribute.experimental.CommunicationOptions` for details of the options. Returns: A list of reduced values, one per pair in `value_destination_pairs`. """ with monitoring.MonitoredTimer( distributed_api_time_counter.get_cell(self.__class__.__name__, "Batch_reduce_to_eagerly") ) if not ops.inside_function() else contextlib.nullcontext(): if options is None: options = collective_util.Options() _require_cross_replica_or_default_context_extended(self) assert not isinstance(reduce_op, variable_scope.VariableAggregation) if isinstance(reduce_op, six.string_types): reduce_op = reduce_util.ReduceOp(reduce_op.upper()) return self._batch_reduce_to(reduce_op, value_destination_pairs, options) def _batch_reduce_to(self, reduce_op, value_destination_pairs, options): return [ self.reduce_to(reduce_op, t, destinations=v, options=options) for t, v in value_destination_pairs ] def _replica_ctx_all_reduce(self, reduce_op, value, options=None): """All-reduce `value` across all replicas so that all get the final result. If `value` is a nested structure of tensors, all-reduces of these tensors will be batched when possible. `options` can be set to hint the batching behavior. This API must be called in a replica context. Args: reduce_op: A `tf.distribute.ReduceOp` value specifying how values should be combined. value: Value to be reduced. A tensor or a nested structure of tensors. options: A `tf.distribute.experimental.CommunicationOptions`. Options to perform collective operations. This overrides the default options if the `tf.distribute.Strategy` takes one in the constructor. Returns: A tensor or a nested structure of tensors with the reduced values. The structure is the same as `value`. """ if options is None: options = collective_util.Options() replica_context = get_replica_context() assert replica_context, ( "`StrategyExtended._replica_ctx_all_reduce` must be called in" " a replica context") def merge_fn(_, flat_value): return self.batch_reduce_to(reduce_op, [(v, v) for v in flat_value], options) reduced = replica_context.merge_call(merge_fn, args=(nest.flatten(value),)) return nest.pack_sequence_as(value, reduced) def _replica_ctx_update(self, var, fn, args=(), kwargs=None, group=True): """Run `fn` with `args` and `kwargs` to update `var`.""" # This method is called by ReplicaContext.update. Strategies who'd like to # remove merge_call in this path should override this method. 
replica_context = get_replica_context() if not replica_context: raise ValueError("`StrategyExtended._replica_ctx_update` must be called " "in a replica context.") def merge_fn(_, *merged_args, **merged_kwargs): return self.update(var, fn, merged_args, merged_kwargs, group=group) return replica_context.merge_call(merge_fn, args=args, kwargs=kwargs) def _gather_to(self, value, destinations, axis, options=None): """Gather `value` across replicas along axis-th dimension to `destinations`. `gather_to` gathers `tf.distribute.DistributedValues` or `tf.Tensor`-like object, along `axis`-th dimension. It supports only dense tensors but NOT sparse tensor. This API can only be called in cross-replica context. Args: value: a `tf.distribute.DistributedValues`, or a `tf.Tensor` like object. destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a `tf.Tensor` alike object, or a device string. It specifies the devices to reduce to. To perform an all-gather, pass the same to `value` and `destinations`. Note that if it's a `tf.Variable`, the value is reduced to the devices of that variable, and this method doesn't update the variable. axis: 0-D int32 Tensor. Dimension along which to gather. Must be in the range [0, rank(value)). options: a `tf.distribute.experimental.CommunicationOptions`. Options to perform collective operations. This overrides the default options if the `tf.distribute.Strategy` takes one in the constructor. See `tf.distribute.experimental.CommunicationOptions` for details of the options. Returns: A tensor or value gathered to `destinations`. """ _require_cross_replica_or_default_context_extended(self) assert not isinstance(destinations, (list, tuple)) if options is None: options = collective_util.Options() return self._gather_to_implementation(value, destinations, axis, options) def _gather_to_implementation(self, value, destinations, axis, options): raise NotImplementedError("_gather_to must be implemented in descendants") def _batch_gather_to(self, value_destination_pairs, axis, options=None): _require_cross_replica_or_default_context_extended(self) if options is None: options = collective_util.Options() return [ self._gather_to(t, destinations=v, axis=axis, options=options) for t, v in value_destination_pairs ] def update(self, var, fn, args=(), kwargs=None, group=True): """Run `fn` to update `var` using inputs mirrored to the same devices. `tf.distribute.StrategyExtended.update` takes a distributed variable `var` to be updated, an update function `fn`, and `args` and `kwargs` for `fn`. It applies `fn` to each component variable of `var` and passes corresponding values from `args` and `kwargs`. Neither `args` nor `kwargs` may contain per-replica values. If they contain mirrored values, they will be unwrapped before calling `fn`. For example, `fn` can be `assign_add` and `args` can be a mirrored DistributedValues where each component contains the value to be added to this mirrored variable `var`. Calling `update` will call `assign_add` on each component variable of `var` with the corresponding tensor value on that device. 
Example usage: ```python strategy = tf.distribute.MirroredStrategy(['GPU:0', 'GPU:1']) # With 2 devices with strategy.scope(): v = tf.Variable(5.0, aggregation=tf.VariableAggregation.SUM) def update_fn(v): return v.assign(1.0) result = strategy.extended.update(v, update_fn) # result is # Mirrored:{ # 0: tf.Tensor(1.0, shape=(), dtype=float32), # 1: tf.Tensor(1.0, shape=(), dtype=float32) # } ``` If `var` is mirrored across multiple devices, then this method implements logic as following: ```python results = {} for device, v in var: with tf.device(device): # args and kwargs will be unwrapped if they are mirrored. results[device] = fn(v, *args, **kwargs) return merged(results) ``` Otherwise, this method returns `fn(var, *args, **kwargs)` colocated with `var`. Args: var: Variable, possibly mirrored to multiple devices, to operate on. fn: Function to call. Should take the variable as the first argument. args: Tuple or list. Additional positional arguments to pass to `fn()`. kwargs: Dict with keyword arguments to pass to `fn()`. group: Boolean. Defaults to True. If False, the return value will be unwrapped. Returns: By default, the merged return value of `fn` across all replicas. The merged result has dependencies to make sure that if it is evaluated at all, the side effects (updates) will happen on every replica. If instead "group=False" is specified, this function will return a nest of lists where each list has an element per replica, and the caller is responsible for ensuring all elements are executed. """ # TODO(b/178944108): Update the documentation to reflect the fact that # `update` can be called in a replica context. if kwargs is None: kwargs = {} replica_context = get_replica_context() # pylint: disable=protected-access if (replica_context is None or replica_context is _get_default_replica_context()): fn = autograph.tf_convert( fn, autograph_ctx.control_status_ctx(), convert_by_default=False) with self._container_strategy().scope(): return self._update(var, fn, args, kwargs, group) else: return self._replica_ctx_update( var, fn, args=args, kwargs=kwargs, group=group) def _update(self, var, fn, args, kwargs, group): raise NotImplementedError("must be implemented in descendants") def _local_results(self, val): """Returns local results per replica as a tuple.""" if isinstance(val, ds_types.DistributedValues): return val._values # pylint: disable=protected-access if nest.is_nested(val): replica_values = [] def get_values(x, index): if isinstance(x, ds_types.DistributedValues): return x._values[index] # pylint: disable=protected-access return x for i in range(len(self.worker_devices)): replica_values.append( nest.map_structure( lambda x: get_values(x, i), # pylint: disable=cell-var-from-loop val)) return tuple(replica_values) return (val,) def value_container(self, value): """Returns the container that this per-replica `value` belongs to. Args: value: A value returned by `run()` or a variable created in `scope()`. Returns: A container that `value` belongs to. If value does not belong to any container (including the case of container having been destroyed), returns the value itself. `value in experimental_local_results(value_container(value))` will always be true. """ raise NotImplementedError("must be implemented in descendants") def _group(self, value, name=None): """Implementation of `group`.""" value = nest.flatten(self._local_results(value)) if len(value) != 1 or name is not None: return control_flow_ops.group(value, name=name) # Special handling for the common case of one op. 
v, = value if hasattr(v, "op"): v = v.op return v @property def experimental_require_static_shapes(self): """Returns `True` if static shape is required; `False` otherwise.""" return self._require_static_shapes @property def _num_replicas_in_sync(self): """Returns number of replicas over which gradients are aggregated.""" raise NotImplementedError("must be implemented in descendants") @property def worker_devices(self): """Returns the tuple of all devices used to for compute replica execution. """ # TODO(josh11b): More docstring raise NotImplementedError("must be implemented in descendants") @property def parameter_devices(self): """Returns the tuple of all devices used to place variables.""" # TODO(josh11b): More docstring raise NotImplementedError("must be implemented in descendants") def _configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None): """Configures the strategy class.""" del session_config, cluster_spec, task_type, task_id def _update_config_proto(self, config_proto): return copy.deepcopy(config_proto) def _in_multi_worker_mode(self): """Whether this strategy indicates working in multi-worker settings. Multi-worker training refers to the setup where the training is distributed across multiple workers, as opposed to the case where only a local process performs the training. This function is used by higher-level APIs such as Keras' `model.fit()` to infer for example whether or not a distribute coordinator should be run, and thus TensorFlow servers should be started for communication with other servers in the cluster, or whether or not saving/restoring checkpoints is relevant for preemption fault tolerance. Subclasses should override this to provide whether the strategy is currently in multi-worker setup. Experimental. Signature and implementation are subject to change. """ raise NotImplementedError("must be implemented in descendants") @tf_export(v1=["distribute.StrategyExtended"]) # pylint: disable=missing-docstring
StrategyExtendedV2
python
bokeh__bokeh
src/bokeh/core/serialization.py
{ "start": 15197, "end": 24506 }
class ____: """ Convert from serializable representations to built-in and custom types. """ _decoders: ClassVar[dict[str, Decoder]] = {} @classmethod def register(cls, type: str, decoder: Decoder) -> None: assert type not in cls._decoders, f"'{type} is already registered" cls._decoders[type] = decoder _references: dict[ID, Model] _setter: Setter | None _decoding: bool _buffers: dict[ID, Buffer] def __init__(self, references: Sequence[Model] | None = None, *, setter: Setter | None = None): self._references = {obj.id: obj for obj in references or []} self._setter = setter self._decoding = False self._buffers = {} def has_ref(self, obj: Model) -> bool: return obj.id in self._references def deserialize(self, obj: Any | Serialized[Any]) -> Any: if isinstance(obj, Serialized): return self.decode(obj.content, obj.buffers) else: return self.decode(obj) def decode(self, obj: AnyRep, buffers: list[Buffer] | None = None) -> Any: if buffers is not None: for buffer in buffers: self._buffers[buffer.id] = buffer if self._decoding: return self._decode(obj) self._decoding = True try: return self._decode(obj) finally: self._buffers.clear() self._decoding = False def _decode(self, obj: AnyRep) -> Any: if isinstance(obj, dict): if "type" in obj: match obj["type"]: case type if type in self._decoders: return self._decoders[type](obj, self) case "ref": return self._decode_ref(cast(Ref, obj)) case "symbol": return self._decode_symbol(cast(SymbolRep, obj)) case "number": return self._decode_number(cast(NumberRep, obj)) case "array": return self._decode_array(cast(ArrayRep, obj)) case "set": return self._decode_set(cast(SetRep, obj)) case "map": return self._decode_map(cast(MapRep, obj)) case "bytes": return self._decode_bytes(cast(BytesRep, obj)) case "slice": return self._decode_slice(cast(SliceRep, obj)) case "typed_array": return self._decode_typed_array(cast(TypedArrayRep, obj)) case "ndarray": return self._decode_ndarray(cast(NDArrayRep, obj)) case "object": if "id" in obj: return self._decode_object_ref(cast(ObjectRefRep, obj)) else: return self._decode_object(cast(ObjectRep, obj)) case type: self.error(f"unable to decode an object of type '{type}'") elif "id" in obj: return self._decode_ref(cast(Ref, obj)) else: return {key: self._decode(val) for key, val in obj.items()} elif isinstance(obj, list): return [self._decode(entry) for entry in obj] else: return obj def _decode_ref(self, obj: Ref) -> Model: id = obj["id"] instance = self._references.get(id) if instance is not None: return instance else: self.error(UnknownReferenceError(id)) def _decode_symbol(self, obj: SymbolRep) -> float: name = obj["name"] self.error(f"can't resolve named symbol '{name}'") # TODO: implement symbol resolution def _decode_number(self, obj: NumberRep) -> float: value = obj["value"] return float(value) if isinstance(value, str) else value def _decode_array(self, obj: ArrayRep) -> list[Any]: entries = obj.get("entries", []) return [ self._decode(entry) for entry in entries ] def _decode_set(self, obj: SetRep) -> set[Any]: entries = obj.get("entries", []) return { self._decode(entry) for entry in entries } def _decode_map(self, obj: MapRep) -> dict[Any, Any]: entries = obj.get("entries", []) return { self._decode(key): self._decode(val) for key, val in entries } def _decode_bytes(self, obj: BytesRep) -> bytes | memoryview[int]: data = obj["data"] if isinstance(data, str): return gzip.decompress(base64.b64decode(data)) elif isinstance(data, Buffer): buffer = data # in case of decode(encode(obj)) else: id = data["id"] if id in 
self._buffers: buffer = self._buffers[id] else: self.error(f"can't resolve buffer '{id}'") return buffer.data def _decode_slice(self, obj: SliceRep) -> slice: start = self._decode(obj["start"]) stop = self._decode(obj["stop"]) step = self._decode(obj["step"]) return slice(start, stop, step) def _decode_typed_array(self, obj: TypedArrayRep) -> TypedArray[Any]: array = obj["array"] order = obj["order"] dtype = obj["dtype"] data = self._decode(array) dtype_to_typecode = dict( uint8="B", int8="b", uint16="H", int16="h", uint32="I", int32="i", #uint64="Q", #int64="q", float32="f", float64="d", ) typecode = dtype_to_typecode.get(dtype) if typecode is None: self.error(f"unsupported dtype '{dtype}'") typed_array: TypedArray[Any] = TypedArray(typecode, data) if order != sys.byteorder: typed_array.byteswap() return typed_array def _decode_ndarray(self, obj: NDArrayRep) -> npt.NDArray[Any]: array = obj["array"] order = obj["order"] dtype = obj["dtype"] shape = obj["shape"] decoded = self._decode(array) ndarray: npt.NDArray[Any] if isinstance(decoded, bytes): ndarray = np.copy(np.frombuffer(decoded, dtype=dtype)) if order != sys.byteorder: ndarray.byteswap(inplace=True) else: ndarray = np.array(decoded, dtype=dtype) if len(shape) > 1: ndarray = ndarray.reshape(shape) return ndarray def _decode_object(self, obj: ObjectRep) -> object: raise NotImplementedError() def _decode_object_ref(self, obj: ObjectRefRep) -> Model: id = obj["id"] instance = self._references.get(id) if instance is not None: from ..util.warnings import BokehUserWarning, warn warn(f"reference already known '{id}'", BokehUserWarning) return instance name = obj["name"] attributes = obj.get("attributes") cls = self._resolve_type(name) instance = cls.__new__(cls, id=id) if instance is None: self.error(f"can't instantiate {name}(id={id})") self._references[instance.id] = instance # We want to avoid any Model specific initialization that happens with # Slider(...) when reconstituting from JSON, but we do need to perform # general HasProps machinery that sets properties, so call it explicitly if not instance._initialized: from .has_props import HasProps HasProps.__init__(instance) if attributes is not None: decoded_attributes = {key: self._decode(val) for key, val in attributes.items()} for key, val in decoded_attributes.items(): instance.set_from_json(key, val, setter=self._setter) return instance def _resolve_type(self, type: str) -> type[Model]: from ..model import Model cls = Model.model_class_reverse_map.get(type) if cls is not None: if issubclass(cls, Model): return cls else: self.error(f"object of type '{type}' is not a subclass of 'Model'") else: if type == "Figure": from ..plotting import figure return figure # XXX: helps with push_session(); this needs a better resolution scheme else: self.error(f"can't resolve type '{type}'") def error(self, error: str | DeserializationError) -> NoReturn: if isinstance(error, str): raise DeserializationError(error) else: raise error #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
Deserializer
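The Deserializer above dispatches purely on the "type" tag of each representation, so its basic behaviour can be checked with plain dict inputs. A minimal sketch, assuming the import path implied by the record's path field (src/bokeh/core/serialization.py) and using only the decode branches shown above:

import math

from bokeh.core.serialization import Deserializer

dec = Deserializer()
# string-encoded specials become floats, containers are rebuilt element by element
assert math.isnan(dec.decode({"type": "number", "value": "nan"}))
assert dec.decode({"type": "array", "entries": [1, 2, 3]}) == [1, 2, 3]
assert dec.decode({"type": "map", "entries": [["a", 1], ["b", 2]]}) == {"a": 1, "b": 2}
assert dec.decode({"type": "slice", "start": 1, "stop": 10, "step": 2}) == slice(1, 10, 2)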
python
pydata__xarray
xarray/tests/test_backends.py
{ "start": 12406, "end": 13537 }
class ____:
    netcdf3_formats: tuple[T_NetcdfTypes, ...] = ("NETCDF3_CLASSIC", "NETCDF3_64BIT")

    @pytest.mark.asyncio
    @pytest.mark.skip(reason="NetCDF backends don't support async loading")
    async def test_load_async(self) -> None:
        pass

    @requires_scipy
    def test_dtype_coercion_error(self) -> None:
        """Failing dtype coercion should lead to an error"""
        for dtype, format in itertools.product(
            _nc3_dtype_coercions, self.netcdf3_formats
        ):
            if dtype == "bool":
                # coerced upcast (bool to int8) ==> can never fail
                continue

            # Using the largest representable value, create some data that will
            # no longer compare equal after the coerced downcast
            maxval = np.iinfo(dtype).max
            x = np.array([0, 1, 2, maxval], dtype=dtype)
            ds = Dataset({"x": ("t", x, {})})

            with create_tmp_file(allow_cleanup_failure=False) as path:
                with pytest.raises(ValueError, match="could not safely cast"):
                    ds.to_netcdf(path, format=format)
NetCDF3Only
python
neetcode-gh__leetcode
python/0705-design-hashset.py
{ "start": 0, "end": 534 }
class ____:
    def __init__(self):
        self.hashset = []

    def add(self, key: int) -> None:
        if not self.contains(key):
            self.hashset.append(key)

    def remove(self, key: int) -> None:
        if self.contains(key):
            self.hashset.remove(key)

    def contains(self, key: int) -> bool:
        return True if key in self.hashset else False


# Your MyHashSet object will be instantiated and called as such:
# obj = MyHashSet()
# obj.add(key)
# obj.remove(key)
# param_3 = obj.contains(key)
MyHashSet
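A quick usage sketch of the list-backed set above (hypothetical driver code, not part of the original submission). Every operation is a linear scan of the underlying list, so each call is O(n) rather than the O(1) expected of a real hash set:

obj = MyHashSet()
obj.add(1)
obj.add(2)
assert obj.contains(1) is True
obj.remove(2)
assert obj.contains(2) is False
obj.add(2)  # add() checks contains() first, so duplicates are never stored
assert obj.contains(2) is True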
python
tensorflow__tensorflow
tensorflow/cc/saved_model/testdata/generate_saved_models.py
{ "start": 1458, "end": 2098 }
class ____(module.Module):
  """Three vars (one in a sub-module) and compute method."""

  def __init__(self):
    self.x = variables.Variable(1.0, name="variable_x")
    self.y = variables.Variable(2.0, name="variable_y")
    self.child = module.Module()
    self.child.z = variables.Variable(3.0, name="child_variable")
    self.child.c = ops.convert_to_tensor(5.0)

  @def_function.function(input_signature=[
      tensor_spec.TensorSpec((), dtypes.float32),
      tensor_spec.TensorSpec((), dtypes.float32)
  ])
  def compute(self, a, b):
    return (a + self.x) * (b + self.y) / (self.child.z) + self.child.c
VarsAndArithmeticObjectGraph
python
airbytehq__airbyte
airbyte-integrations/connectors/source-trello/components.py
{ "start": 406, "end": 1759 }
class ____(SubstreamPartitionRouter):
    def stream_slices(self) -> Iterable[StreamSlice]:
        stream_map = {stream_config.stream.name: stream_config.stream for stream_config in self.parent_stream_configs}

        board_ids = set(self.config.get("board_ids", []))
        if not board_ids:
            board_ids = self.read_all_boards(stream_boards=stream_map["boards"], stream_organizations=stream_map["organizations"])

        for board_id in board_ids:
            yield StreamSlice(partition={"id": board_id}, cursor_slice={})

    def read_all_boards(self, stream_boards: Stream, stream_organizations: Stream):
        """
        Method to get id of each board in the boards stream, get ids of boards associated with
        each organization in the organizations stream and yield each unique board id
        """
        board_ids = set()

        for record in stream_boards.read_records(sync_mode=SyncMode.full_refresh):
            if record["id"] not in board_ids:
                board_ids.add(record["id"])
                yield record["id"]

        for record in stream_organizations.read_records(sync_mode=SyncMode.full_refresh):
            for board_id in record["idBoards"]:
                if board_id not in board_ids:
                    board_ids.add(board_id)
                    yield board_id
OrderIdsPartitionRouter
python
scipy__scipy
scipy/special/tests/test_exponential_integrals.py
{ "start": 802, "end": 2621 }
class ____:

    @pytest.mark.parametrize('x, expected', [(0, 0), (np.inf, 1)])
    def test_limits(self, x, expected):
        y = sc._ufuncs._scaled_exp1(x)
        assert y == expected

    # The expected values were computed with mpmath, e.g.:
    #
    #    from mpmath import mp
    #    mp.dps = 80
    #    x = 1e-25
    #    print(float(x*mp.exp(x)*np.expint(1, x)))
    #
    # prints 5.698741165994961e-24
    #
    # The method used to compute _scaled_exp1 changes at x=1
    # and x=1250, so values at those inputs, and values just
    # above and below them, are included in the test data.
    @pytest.mark.parametrize('x, expected',
                             [(1e-25, 5.698741165994961e-24),
                              (0.1, 0.20146425447084518),
                              (0.9995, 0.5962509885831002),
                              (1.0, 0.5963473623231941),
                              (1.0005, 0.5964436833238044),
                              (2.5, 0.7588145912149602),
                              (10.0, 0.9156333393978808),
                              (100.0, 0.9901942286733019),
                              (500.0, 0.9980079523802055),
                              (1000.0, 0.9990019940238807),
                              (1249.5, 0.9992009578306811),
                              (1250.0, 0.9992012769377913),
                              (1250.25, 0.9992014363957858),
                              (2000.0, 0.9995004992514963),
                              (1e4, 0.9999000199940024),
                              (1e10, 0.9999999999),
                              (1e15, 0.999999999999999),
                              ])
    def test_scaled_exp1(self, x, expected):
        y = sc._ufuncs._scaled_exp1(x)
        assert_allclose(y, expected, rtol=2e-15)
TestScaledExp1
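The comment block in the test explains that the reference values come from an 80-digit mpmath computation of x * exp(x) * E1(x). A small sketch of how such a table entry could be regenerated, assuming mpmath is available (mpmath.expint(1, x) is its exponential integral E1):

import mpmath

def scaled_exp1_reference(x, dps=80):
    # high-precision reference value of x * exp(x) * E1(x)
    mpmath.mp.dps = dps
    x = mpmath.mpf(x)
    return float(x * mpmath.exp(x) * mpmath.expint(1, x))

print(scaled_exp1_reference(1e-25))  # 5.698741165994961e-24, matching the first table entry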
python
pytorch__pytorch
torch/fx/_symbolic_trace.py
{ "start": 42602, "end": 51993 }
class ____: def __init__(self) -> None: super().__init__() self.patches_made: list[_PatchedFn] = [] self.visited: set[int] = set() def patch( self, frame_dict: dict[str, Any], name: str, new_fn: Callable, deduplicate: bool = True, ): """ Replace frame_dict[name] with new_fn until we exit the context manager. """ new_fn.__fx_already_patched = deduplicate # type: ignore[attr-defined] if name not in frame_dict and hasattr(builtins, name): self.patches_made.append(_PatchedFnDel(frame_dict, name, None, new_fn)) self.patches_made[-1].patch() elif getattr(frame_dict[name], "__fx_already_patched", False): return # already patched, no need to do it again else: self.patches_made.append( _PatchedFnSetItem(frame_dict, name, frame_dict[name], new_fn) ) self.patches_made[-1].patch() def patch_method( self, cls: type, name: str, new_fn: Callable, deduplicate: bool = True ): """ Replace object_or_dict.name with new_fn until we exit the context manager. """ new_fn.__fx_already_patched = deduplicate # type: ignore[attr-defined] orig_fn = getattr(cls, name) if getattr(orig_fn, "__fx_already_patched", False): return # already patched, no need to do it again self.patches_made.append(_PatchedFnSetAttr(cls, name, orig_fn, new_fn)) self.patches_made[-1].patch() def visit_once(self, thing: Any): """Return True on the first call to with thing, otherwise false""" idx = id(thing) if idx in self.visited: return False self.visited.add(idx) return True def revert_all_patches(self): """ Remove all the stored patcheds. It doesn't modify patches_made. """ for patch in self.patches_made: patch.revert() return self.patches_made def reapply_all_patches(self): """ Patch all the stored patcheds. It doesn't modify patches_made. """ for patch in self.patches_made: patch.patch() return self.patches_made def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): """ Undo all the changes made via self.patch() and self.patch_method() """ while self.patches_made: # unpatch in reverse order to handle duplicates correctly self.patches_made.pop().revert() self.visited.clear() CURRENT_PATCHER: Optional[_Patcher] = None @contextlib.contextmanager def _new_patcher(): global CURRENT_PATCHER prior_patcher = CURRENT_PATCHER try: CURRENT_PATCHER = _Patcher() yield CURRENT_PATCHER finally: # Clear all the patches made by when using current patcher. assert CURRENT_PATCHER is not None CURRENT_PATCHER.revert_all_patches() CURRENT_PATCHER = prior_patcher @contextlib.contextmanager def _maybe_revert_all_patches(): current_patcher = CURRENT_PATCHER patches_made = None patches_removed = None try: if current_patcher is not None: patches_removed = current_patcher.revert_all_patches() yield finally: if current_patcher is not None: patches_made = current_patcher.reapply_all_patches() assert patches_made == patches_removed, ( "CURRENT_PATCHER was changed during a revert_all_patches" ) def _patch_wrapped_functions(patcher: _Patcher): """ Go through ``_wrapped_fn_patch_table`` and, for each frame object, wrap the listed global functions in the `_create_wrapped_func` wrapper. 
""" for (_, name), frame_dict in _wrapped_fns_to_patch.copy().items(): if name not in frame_dict and hasattr(builtins, name): orig_fn = getattr(builtins, name) else: orig_fn = frame_dict[name] patcher.patch(frame_dict, name, _create_wrapped_func(orig_fn)) for cls, name in _wrapped_methods_to_patch: patcher.patch_method(cls, name, _create_wrapped_method(cls, name)) def _autowrap_check( patcher: _Patcher, frame_dict: dict[str, Any], function_ids: set[int] ): """ Some methods, like `math.sqrt` are common enough we want to automatically wrap them as we see them. This method searches a scope for them and patches them if found. """ if patcher.visit_once(frame_dict): for name, value in frame_dict.items(): if ( not name.startswith("_") and callable(value) and id(value) in function_ids ): patcher.patch(frame_dict, name, _create_wrapped_func(value)) @compatibility(is_backward_compatible=True) def wrap(fn_or_name: Union[str, Callable]): """ This function can be called at module-level scope to register fn_or_name as a "leaf function". A "leaf function" will be preserved as a CallFunction node in the FX trace instead of being traced through:: # foo/bar/baz.py def my_custom_function(x, y): return x * x + y * y torch.fx.wrap("my_custom_function") def fn_to_be_traced(x, y): # When symbolic tracing, the below call to my_custom_function will be inserted into # the graph rather than tracing it. return my_custom_function(x, y) This function can also equivalently be used as a decorator:: # foo/bar/baz.py @torch.fx.wrap def my_custom_function(x, y): return x * x + y * y A wrapped function can be thought of a "leaf function", analogous to the concept of "leaf modules", that is, they are functions that are left as calls in the FX trace rather than traced through. Args: fn_or_name (Union[str, Callable]): The function or name of the global function to insert into the graph when it's called """ if not callable(fn_or_name) and not isinstance(fn_or_name, str): raise RuntimeError( "Unsupported type for global function! Must be either a callable or " "string name" ) if callable(fn_or_name): assert not isinstance(fn_or_name, str) # to make mypy happy fn_name = fn_or_name.__name__ else: assert isinstance(fn_or_name, str), ( "fn_or_name must be a global function or string name" ) fn_name = fn_or_name currentframe = inspect.currentframe() assert currentframe is not None f = currentframe.f_back assert f is not None if f.f_code.co_name != "<module>": raise NotImplementedError("wrap must be called at the top level of a module") # consider implementing Callable version of this via _autowrap_function_ids / _autowrap_search # semantics would be slightly different, but would add support `from x import wrapped_function` _wrapped_fns_to_patch[(id(f.f_globals), fn_name)] = f.f_globals return fn_or_name @compatibility(is_backward_compatible=True) def symbolic_trace( root: Union[torch.nn.Module, Callable[..., Any]], concrete_args: Optional[dict[str, Any]] = None, ) -> GraphModule: """ Symbolic tracing API Given an ``nn.Module`` or function instance ``root``, this function will return a ``GraphModule`` constructed by recording operations seen while tracing through ``root``. ``concrete_args`` allows you to partially specialize your function, whether it's to remove control flow or data structures. For example:: def f(a, b): if b == True: return a else: return a * 2 FX can typically not trace through this due to the presence of control flow. 
However, we can use `concrete_args` to specialize on the value of `b` to trace through this:: f = fx.symbolic_trace(f, concrete_args={"b": False}) assert f(3, False) == 6 Note that although you can still pass in different values of `b`, they will be ignored. We can also use `concrete_args` to eliminate data-structure handling from our function. This will use pytrees to flatten your input. To avoid overspecializing, pass in `fx.PH` for values that shouldn't be specialized. For example:: def f(x): out = 0 for v in x.values(): out += v return out f = fx.symbolic_trace( f, concrete_args={"x": {"a": fx.PH, "b": fx.PH, "c": fx.PH}} ) assert f({"a": 1, "b": 2, "c": 4}) == 7 Args: root (Union[torch.nn.Module, Callable]): Module or function to be traced and converted into a Graph representation. concrete_args (Optional[Dict[str, any]]): Inputs to be partially specialized Returns: GraphModule: a Module created from the recorded operations from ``root``. """ tracer = Tracer() graph = tracer.trace(root, concrete_args) name = ( root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__ ) return _make_graph_module(tracer.root, graph, name) @wrap def _assert_is_none(value, msg): assert value is None, msg
_Patcher
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 158972, "end": 160349 }
class ____(sgqlc.types.Input):
    """Autogenerated input type of CloneTemplateRepository"""

    __schema__ = github_schema
    __field_names__ = ("repository_id", "name", "owner_id", "description", "visibility", "include_all_branches", "client_mutation_id")
    repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId")
    """The Node ID of the template repository."""

    name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
    """The name of the new repository."""

    owner_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="ownerId")
    """The ID of the owner for the new repository."""

    description = sgqlc.types.Field(String, graphql_name="description")
    """A short description of the new repository."""

    visibility = sgqlc.types.Field(sgqlc.types.non_null(RepositoryVisibility), graphql_name="visibility")
    """Indicates the repository's visibility level."""

    include_all_branches = sgqlc.types.Field(Boolean, graphql_name="includeAllBranches")
    """Whether to copy all branches from the template to the new
    repository. Defaults to copying only the default branch of the
    template.
    """

    client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
    """A unique identifier for the client performing the mutation."""
CloneTemplateRepositoryInput
python
rapidsai__cudf
python/dask_cudf/dask_cudf/_expr/collection.py
{ "start": 2452, "end": 4770 }
class ____(DXDataFrame, CudfFrameBase): @classmethod def from_dict(cls, *args, **kwargs): with config.set({"dataframe.backend": "cudf"}): return DXDataFrame.from_dict(*args, **kwargs) def set_index( self, *args, divisions=None, **kwargs, ): if divisions == "quantile": divisions = None warnings.warn( "Ignoring divisions='quantile'. This option is now " "deprecated. Please raise an issue on github if this " "feature is necessary.", FutureWarning, ) return super().set_index(*args, divisions=divisions, **kwargs) def groupby( self, by, group_keys=True, sort=None, observed=None, dropna=None, **kwargs, ): from dask_cudf._expr.groupby import GroupBy if isinstance(by, FrameBase) and not isinstance(by, DXSeries): raise ValueError( f"`by` must be a column name or list of columns, got {by}." ) if "as_index" in kwargs: msg = ( "The `as_index` argument is now deprecated. All groupby " "results will be consistent with `as_index=True`." ) if kwargs.pop("as_index") is not True: raise NotImplementedError( f"{msg} Please reset the index after aggregating." ) else: warnings.warn(msg, FutureWarning) return GroupBy( self, by, group_keys=group_keys, sort=sort, observed=observed, dropna=dropna, **kwargs, ) def to_orc(self, *args, **kwargs): from dask_cudf.io.orc import to_orc as to_orc_impl return to_orc_impl(self, *args, **kwargs) @staticmethod def read_text(*args, **kwargs): from dask_cudf.io.text import read_text as read_text_impl return read_text_impl(*args, **kwargs) def clip(self, lower=None, upper=None, axis=1): if axis not in (None, 1): raise NotImplementedError("axis not yet supported in clip.") return new_collection(self.expr.clip(lower, upper, 1))
DataFrame
python
django__django
tests/unmanaged_models/models.py
{ "start": 3166, "end": 3240 }
class ____(models.Model):
    mm = models.ManyToManyField(Unmanaged1)
Managed1
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/metadata/metadata_value.py
{ "start": 30721, "end": 30968 }
class ____(MetadataValue[None]):
    """Representation of null."""

    @public
    @property
    def value(self) -> None:
        """None: The wrapped null value."""
        return None


@whitelist_for_serdes
@record(kw_only=False)
NullMetadataValue
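In Dagster's public API this value type is normally obtained through a convenience constructor rather than instantiated directly; a minimal sketch, assuming the MetadataValue.null() helper:

from dagster import MetadataValue

meta = {"not_applicable": MetadataValue.null()}
assert meta["not_applicable"].value is None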
python
wandb__wandb
wandb/vendor/pygments/lexers/shell.py
{ "start": 21424, "end": 21736 }
class ____(ShellSessionBaseLexer):
    """
    Lexer for simplistic MSDOS sessions.

    .. versionadded:: 2.1
    """

    name = 'MSDOS Session'
    aliases = ['doscon']
    filenames = []
    mimetypes = []

    _innerLexerCls = BatchLexer
    _ps1rgx = r'^([^>]+>)(.*\n?)'
    _ps2 = 'More? '
MSDOSSessionLexer
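This vendored lexer mirrors the upstream Pygments one, so it can be exercised with the standard highlight API. A sketch assuming a regular Pygments installation (the vendored copy under wandb would be imported from its own package path instead):

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.shell import MSDOSSessionLexer

session = "C:\\>dir /w\n Volume in drive C has no label.\nC:\\>echo done\ndone\n"
# lines matching _ps1rgx (ending in ">") are split into prompt + command; other lines are output
print(highlight(session, MSDOSSessionLexer(), TerminalFormatter()))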
python
getsentry__sentry
tests/sentry/uptime/endpoints/test_utils.py
{ "start": 179, "end": 3548 }
class ____(TestCase): def test_successful_authorization_and_mapping(self) -> None: """Test successful authorization and mapping of detector subscription IDs.""" subscription_id = uuid.uuid4().hex subscription = self.create_uptime_subscription( url="https://example.com", subscription_id=subscription_id ) detector = self.create_uptime_detector( uptime_subscription=subscription, project=self.project ) mapping, subscription_ids = authorize_and_map_uptime_detector_subscription_ids( detector_ids=[str(detector.id)], projects=[self.project], ) expected_hex_id = uuid.UUID(subscription_id).hex assert expected_hex_id in mapping assert mapping[expected_hex_id] == detector.id assert subscription_ids == [expected_hex_id] def test_invalid_detector_id_raises_error(self) -> None: """Test that invalid detector IDs raise ValueError.""" invalid_id = "999999" with raises(ValueError): authorize_and_map_uptime_detector_subscription_ids( detector_ids=[invalid_id], projects=[self.project], ) def test_cross_project_access_denied(self) -> None: """Test that cross-project detector access is denied.""" other_project = self.create_project(organization=self.organization) subscription_id = uuid.uuid4().hex subscription = self.create_uptime_subscription( url="https://example.com", subscription_id=subscription_id ) other_detector = self.create_uptime_detector( uptime_subscription=subscription, project=other_project ) # Try to authorize with original project, should fail with raises(ValueError): authorize_and_map_uptime_detector_subscription_ids( detector_ids=[str(other_detector.id)], projects=[self.project], ) def test_multiple_detectors(self) -> None: """Test authorization with multiple detector IDs.""" subscription_id1 = uuid.uuid4().hex subscription_id2 = uuid.uuid4().hex subscription1 = self.create_uptime_subscription( url="https://example1.com", subscription_id=subscription_id1 ) subscription2 = self.create_uptime_subscription( url="https://example2.com", subscription_id=subscription_id2 ) detector1 = self.create_uptime_detector( uptime_subscription=subscription1, project=self.project ) detector2 = self.create_uptime_detector( uptime_subscription=subscription2, project=self.project ) mapping, subscription_ids = authorize_and_map_uptime_detector_subscription_ids( detector_ids=[str(detector1.id), str(detector2.id)], projects=[self.project], ) assert len(mapping) == 2 assert len(subscription_ids) == 2 expected_str_id1 = uuid.UUID(subscription_id1).hex expected_str_id2 = uuid.UUID(subscription_id2).hex assert expected_str_id1 in mapping assert expected_str_id2 in mapping assert mapping[expected_str_id1] == detector1.id assert mapping[expected_str_id2] == detector2.id
AuthorizeAndMapUptimeDetectorSubscriptionIdsTest
python
kubernetes-client__python
kubernetes/client/models/v1_lease.py
{ "start": 383, "end": 6419 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'kind': 'str', 'metadata': 'V1ObjectMeta', 'spec': 'V1LeaseSpec' } attribute_map = { 'api_version': 'apiVersion', 'kind': 'kind', 'metadata': 'metadata', 'spec': 'spec' } def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501 """V1Lease - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._kind = None self._metadata = None self._spec = None self.discriminator = None if api_version is not None: self.api_version = api_version if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata if spec is not None: self.spec = spec @property def api_version(self): """Gets the api_version of this V1Lease. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1Lease. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1Lease. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1Lease. # noqa: E501 :type: str """ self._api_version = api_version @property def kind(self): """Gets the kind of this V1Lease. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1Lease. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1Lease. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1Lease. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1Lease. # noqa: E501 :return: The metadata of this V1Lease. # noqa: E501 :rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1Lease. :param metadata: The metadata of this V1Lease. # noqa: E501 :type: V1ObjectMeta """ self._metadata = metadata @property def spec(self): """Gets the spec of this V1Lease. 
# noqa: E501 :return: The spec of this V1Lease. # noqa: E501 :rtype: V1LeaseSpec """ return self._spec @spec.setter def spec(self, spec): """Sets the spec of this V1Lease. :param spec: The spec of this V1Lease. # noqa: E501 :type: V1LeaseSpec """ self._spec = spec def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1Lease): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1Lease): return True return self.to_dict() != other.to_dict()
V1Lease
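A minimal usage sketch for a generated model like the one above (it assumes the kubernetes Python client is installed; everything stays in memory and no API server is contacted):

from kubernetes.client import V1Lease, V1ObjectMeta

# Build a Lease object purely in memory.
lease = V1Lease(
    api_version="coordination.k8s.io/v1",
    kind="Lease",
    metadata=V1ObjectMeta(name="example-lease"),
)

# to_dict() recursively serializes nested models, exactly as the loop in the class above shows.
as_dict = lease.to_dict()
print(as_dict["metadata"]["name"])  # -> example-lease
print(as_dict["api_version"])       # -> coordination.k8s.io/v1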
python
pypa__setuptools
conftest.py
{ "start": 1244, "end": 2250 }
class ____: """Speed-up integration tests by only running what does not run in other tests.""" RUNS_ON_NORMAL_TESTS = ("checkdocks", "cov", "mypy", "perf", "ruff") @classmethod def disable_plugins_already_run(cls, config): if config.getoption("--integration"): for plugin in cls.RUNS_ON_NORMAL_TESTS: # no need to run again config.pluginmanager.set_blocked(plugin) @staticmethod def conditional_skip(request): running_integration_tests = request.config.getoption("--integration") is_integration_test = request.node.get_closest_marker("integration") if running_integration_tests and not is_integration_test: pytest.skip("running integration tests only") if not running_integration_tests and is_integration_test: pytest.skip("skipping integration tests") @pytest.fixture def windows_only(): if platform.system() != 'Windows': pytest.skip("Windows only")
_IntegrationTestSpeedups
python
tensorflow__tensorflow
tensorflow/python/platform/stacktrace_handler_test.py
{ "start": 1127, "end": 2598 }
class ____(test.TestCase): def testChildProcessKillsItself(self): if FLAGS.child: os.kill(os.getpid(), signal.SIGABRT) def testGeneratesStacktrace(self): if FLAGS.child: return # Subprocess sys.argv[0] with --child=True if sys.executable: child_process = subprocess.Popen( [sys.executable, sys.argv[0], '--child=True'], cwd=os.getcwd(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) else: child_process = subprocess.Popen( [sys.argv[0], '--child=True'], cwd=os.getcwd(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Capture its output. capture both stdout and stderr and append them. # We are not worried about timing or order of messages in this test. child_stdout, child_stderr = child_process.communicate() child_output = child_stdout + child_stderr # Make sure the child process is dead before we proceed. child_process.wait() logging.info('Output from the child process:') logging.info(child_output) # Verify a stack trace is printed. self.assertIn(b'PyEval_EvalFrame', child_output) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--child', type=bool, default=False, help=_CHILD_FLAG_HELP) FLAGS, unparsed = parser.parse_known_args() # Now update argv, so that unittest library does not get confused. sys.argv = [sys.argv[0]] + unparsed test.main()
StacktraceHandlerTest
python
run-llama__llama_index
llama-index-integrations/embeddings/llama-index-embeddings-dashscope/llama_index/embeddings/dashscope/base.py
{ "start": 372, "end": 531 }
class ____(str, Enum): """DashScope TextEmbedding text_type.""" TEXT_TYPE_QUERY = "query" TEXT_TYPE_DOCUMENT = "document"
DashScopeTextEmbeddingType
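Because the enum above mixes in str, its members compare equal to their raw string values. A tiny self-contained sketch of that behaviour (re-declaring the same enum locally):

from enum import Enum

class DashScopeTextEmbeddingType(str, Enum):
    """DashScope TextEmbedding text_type."""
    TEXT_TYPE_QUERY = "query"
    TEXT_TYPE_DOCUMENT = "document"

# str/Enum mixin members compare equal to their underlying strings.
assert DashScopeTextEmbeddingType.TEXT_TYPE_QUERY == "query"
assert DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT.value == "document"
print(list(DashScopeTextEmbeddingType))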
python
huggingface__transformers
src/transformers/models/xlnet/modeling_xlnet.py
{ "start": 34869, "end": 51373 }
class ____(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.mem_len = config.mem_len self.reuse_len = config.reuse_len self.d_model = config.d_model self.same_length = config.same_length self.attn_type = config.attn_type self.bi_data = config.bi_data self.clamp_len = config.clamp_len self.n_layer = config.n_layer self.word_embedding = nn.Embedding(config.vocab_size, config.d_model) self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model)) self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)]) self.dropout = nn.Dropout(config.dropout) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.word_embedding def set_input_embeddings(self, new_embeddings): self.word_embedding = new_embeddings def create_mask(self, qlen, mlen): """ Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked. Args: qlen: Sequence length mlen: Mask length :: same_length=False: same_length=True: <mlen > < qlen > <mlen > < qlen > ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1] qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1] [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1] v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0] """ mask = torch.ones((qlen, qlen + mlen), device=self.device) if self.same_length: mask_lo = mask[:, :qlen].tril(-1) mask.triu_(mlen + 1) mask[:, :qlen] += mask_lo else: mask.triu_(mlen + 1) return mask def cache_mem(self, curr_out, prev_mem): # cache hidden states into memory. if self.reuse_len is not None and self.reuse_len > 0: curr_out = curr_out[: self.reuse_len] if self.mem_len is None or self.mem_len == 0: # If `use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time # and returns all of the past and current hidden states. cutoff = 0 else: # If `use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden # states. This is the preferred setting for training and long-form generation. cutoff = -self.mem_len if prev_mem is None: # if `use_mems` is active and `mem_len` is defined, the model new_mem = curr_out[cutoff:] else: new_mem = torch.cat([prev_mem, curr_out], dim=0)[cutoff:] return new_mem.detach() @staticmethod def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq) pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1) pos_emb = pos_emb[:, None, :] if bsz is not None: pos_emb = pos_emb.expand(-1, bsz, -1) return pos_emb def relative_positional_encoding(self, qlen, klen, bsz=None): # create relative positional encoding. 
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.int64).float() inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model)) if self.attn_type == "bi": # beg, end = klen - 1, -qlen beg, end = klen, -qlen elif self.attn_type == "uni": # beg, end = klen - 1, -1 beg, end = klen, -1 else: raise ValueError(f"Unknown `attn_type` {self.attn_type}.") if self.bi_data: fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float() bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.int64).float() if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) if bsz is not None: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2) else: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq) pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1) else: fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float() if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz) return pos_emb @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, mems: Optional[torch.Tensor] = None, perm_mask: Optional[torch.Tensor] = None, target_mapping: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, input_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, use_mems: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # delete after depreciation warning is removed ) -> Union[tuple, XLNetModelOutput]: r""" mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as `input_ids` as they have already been computed. `use_mems` has to be set to `True` to make use of `mems`. perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*): Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`: - if `perm_mask[k, i, j] = 0`, i attend to j in batch k; - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k. If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*): Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation). input_mask (`torch.FloatTensor` of shape `batch_size, sequence_length`, *optional*): Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in `[0, 1]`: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. You can only uses one of `input_mask` and `attention_mask`. 
use_mems (`bool`, *optional*): Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the hidden states from previous forward passes to compute attention, which can significantly improve performance for sequential decoding tasks. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if "use_cache" in kwargs: warnings.warn( "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems`" " instead.", FutureWarning, ) use_mems = kwargs["use_cache"] if self.training: use_mems = use_mems if use_mems is not None else self.config.use_mems_train else: use_mems = use_mems if use_mems is not None else self.config.use_mems_eval # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_ids = input_ids.transpose(0, 1).contiguous() qlen, bsz = input_ids.shape[0], input_ids.shape[1] elif inputs_embeds is not None: inputs_embeds = inputs_embeds.transpose(0, 1).contiguous() qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0 klen = mlen + qlen dtype_float = self.dtype device = self.device # Attention mask # causal attention mask if self.attn_type == "uni": attn_mask = self.create_mask(qlen, mlen) attn_mask = attn_mask[:, :, None, None] elif self.attn_type == "bi": attn_mask = None else: raise ValueError(f"Unsupported attention type: {self.attn_type}") # data mask: input mask & perm mask assert input_mask is None or attention_mask is None, "You can only use one of input_mask (uses 1 for padding) " "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one." 
if input_mask is None and attention_mask is not None: input_mask = 1.0 - attention_mask if input_mask is not None and perm_mask is not None: data_mask = input_mask[None] + perm_mask elif input_mask is not None and perm_mask is None: data_mask = input_mask[None] elif input_mask is None and perm_mask is not None: data_mask = perm_mask else: data_mask = None if data_mask is not None: # all mems can be attended to if mlen > 0: mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask) data_mask = torch.cat([mems_mask, data_mask], dim=1) if attn_mask is None: attn_mask = data_mask[:, :, :, None] else: attn_mask += data_mask[:, :, :, None] if attn_mask is not None: attn_mask = (attn_mask > 0).to(dtype_float) if attn_mask is not None: non_tgt_mask = -torch.eye(qlen).to(attn_mask) if mlen > 0: non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1) non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask) else: non_tgt_mask = None # Word embeddings and prepare h & g hidden states if inputs_embeds is not None: word_emb_k = inputs_embeds else: word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k) if target_mapping is not None: word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1) # else: # We removed the inp_q input which was same as target mapping # inp_q_ext = inp_q[:, :, None] # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k output_g = self.dropout(word_emb_q) else: output_g = None # Segment embedding if token_type_ids is not None: # Convert `token_type_ids` to one-hot `seg_mat` if mlen > 0: mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device) cat_ids = torch.cat([mem_pad, token_type_ids], dim=0) else: cat_ids = token_type_ids # `1` indicates not in the same segment [qlen x klen x bsz] seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long() seg_mat = nn.functional.one_hot(seg_mat, num_classes=2).to(dtype_float) else: seg_mat = None # Positional encoding pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz) pos_emb = pos_emb.to(output_h.device) pos_emb = self.dropout(pos_emb) new_mems = () if mems is None: mems = [None] * len(self.layer) attentions = [] if output_attentions else None hidden_states = [] if output_hidden_states else None for i, layer_module in enumerate(self.layer): if use_mems: # cache new mems new_mems = new_mems + (self.cache_mem(output_h, mems[i]),) if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) outputs = layer_module( output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask, r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping, output_attentions=output_attentions, ) output_h, output_g = outputs[:2] if output_attentions: attentions.append(outputs[2]) # Add last hidden state if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) output = self.dropout(output_g if output_g is not None else output_h) # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. 
beginning of forward() method) output = output.permute(1, 0, 2).contiguous() if not use_mems: new_mems = None if output_hidden_states: if output_g is not None: hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs) else: hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states) if output_attentions: if target_mapping is not None: # when target_mapping is provided, there are 2-tuple of attentions attentions = tuple( tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions ) else: attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions) if not return_dict: return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None) return XLNetModelOutput( last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions ) @auto_docstring( custom_intro=""" XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings). """ )
XLNetModel
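A short usage sketch for the model above (it assumes the transformers and torch packages plus the public xlnet-base-cased checkpoint; the weights are downloaded on first use):

import torch
from transformers import AutoTokenizer, XLNetModel

tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
model = XLNetModel.from_pretrained("xlnet-base-cased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    # use_mems=True makes forward() return per-layer memory states that can be
    # fed back in on the next call to speed up sequential decoding.
    outputs = model(**inputs, use_mems=True)

print(outputs.last_hidden_state.shape)  # (batch, seq_len, hidden_dim)
print(len(outputs.mems))                # one memory tensor per layer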
python
wandb__wandb
wandb/sdk/interface/interface_sock.py
{ "start": 489, "end": 1779 }
class ____(InterfaceShared): def __init__( self, asyncer: asyncio_manager.AsyncioManager, client: ServiceClient, stream_id: str, ) -> None: super().__init__() self._asyncer = asyncer self._client = client self._stream_id = stream_id def _assign(self, record: pb.Record) -> None: record._info.stream_id = self._stream_id @override def _publish(self, record: pb.Record, *, nowait: bool = False) -> None: self._assign(record) request = spb.ServerRequest() request.record_publish.CopyFrom(record) if nowait: self._asyncer.run_soon(lambda: self._client.publish(request)) else: self._asyncer.run(lambda: self._client.publish(request)) @override def _deliver(self, record: pb.Record) -> MailboxHandle[pb.Result]: return self._asyncer.run(lambda: self.deliver_async(record)) @override async def deliver_async(self, record: pb.Record) -> MailboxHandle[pb.Result]: self._assign(record) request = spb.ServerRequest() request.record_publish.CopyFrom(record) handle = await self._client.deliver(request) return handle.map(lambda response: response.result_communicate)
InterfaceSock
python
giampaolo__psutil
psutil/_pslinux.py
{ "start": 3122, "end": 24099 }
class ____(enum.IntEnum): IOPRIO_CLASS_NONE = 0 IOPRIO_CLASS_RT = 1 IOPRIO_CLASS_BE = 2 IOPRIO_CLASS_IDLE = 3 globals().update(IOPriority.__members__) # See: # https://github.com/torvalds/linux/blame/master/fs/proc/array.c # ...and (TASK_* constants): # https://github.com/torvalds/linux/blob/master/include/linux/sched.h PROC_STATUSES = { "R": _common.STATUS_RUNNING, "S": _common.STATUS_SLEEPING, "D": _common.STATUS_DISK_SLEEP, "T": _common.STATUS_STOPPED, "t": _common.STATUS_TRACING_STOP, "Z": _common.STATUS_ZOMBIE, "X": _common.STATUS_DEAD, "x": _common.STATUS_DEAD, "K": _common.STATUS_WAKE_KILL, "W": _common.STATUS_WAKING, "I": _common.STATUS_IDLE, "P": _common.STATUS_PARKED, } # https://github.com/torvalds/linux/blob/master/include/net/tcp_states.h TCP_STATUSES = { "01": _common.CONN_ESTABLISHED, "02": _common.CONN_SYN_SENT, "03": _common.CONN_SYN_RECV, "04": _common.CONN_FIN_WAIT1, "05": _common.CONN_FIN_WAIT2, "06": _common.CONN_TIME_WAIT, "07": _common.CONN_CLOSE, "08": _common.CONN_CLOSE_WAIT, "09": _common.CONN_LAST_ACK, "0A": _common.CONN_LISTEN, "0B": _common.CONN_CLOSING, } # ===================================================================== # --- utils # ===================================================================== def readlink(path): """Wrapper around os.readlink().""" assert isinstance(path, str), path path = os.readlink(path) # readlink() might return paths containing null bytes ('\x00') # resulting in "TypeError: must be encoded string without NULL # bytes, not str" errors when the string is passed to other # fs-related functions (os.*, open(), ...). # Apparently everything after '\x00' is garbage (we can have # ' (deleted)', 'new' and possibly others), see: # https://github.com/giampaolo/psutil/issues/717 path = path.split('\x00')[0] # Certain paths have ' (deleted)' appended. Usually this is # bogus as the file actually exists. Even if it doesn't we # don't care. if path.endswith(' (deleted)') and not path_exists_strict(path): path = path[:-10] return path def file_flags_to_mode(flags): """Convert file's open() flags into a readable string. Used by Process.open_files(). """ modes_map = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'} mode = modes_map[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)] if flags & os.O_APPEND: mode = mode.replace('w', 'a', 1) mode = mode.replace('w+', 'r+') # possible values: r, w, a, r+, a+ return mode def is_storage_device(name): """Return True if the given name refers to a root device (e.g. "sda", "nvme0n1") as opposed to a logical partition (e.g. "sda1", "nvme0n1p1"). If name is a virtual device (e.g. "loop1", "ram") return True. """ # Re-adapted from iostat source code, see: # https://github.com/sysstat/sysstat/blob/ # 97912938cd476645b267280069e83b1c8dc0e1c7/common.c#L208 # Some devices may have a slash in their name (e.g. cciss/c0d0...). name = name.replace('/', '!') including_virtual = True if including_virtual: path = f"/sys/block/{name}" else: path = f"/sys/block/{name}/device" return os.access(path, os.F_OK) @memoize def _scputimes_ntuple(procfs_path): """Return a namedtuple of variable fields depending on the CPU times available on this Linux kernel version which may be: (user, nice, system, idle, iowait, irq, softirq, [steal, [guest, [guest_nice]]]) Used by cpu_times() function. 
""" with open_binary(f"{procfs_path}/stat") as f: values = f.readline().split()[1:] fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq'] vlen = len(values) if vlen >= 8: # Linux >= 2.6.11 fields.append('steal') if vlen >= 9: # Linux >= 2.6.24 fields.append('guest') if vlen >= 10: # Linux >= 3.2.0 fields.append('guest_nice') return namedtuple('scputimes', fields) # Set it into _ntuples.py namespace. try: ntp.scputimes = _scputimes_ntuple("/proc") except Exception as err: # noqa: BLE001 # Don't want to crash at import time. debug(f"ignoring exception on import: {err!r}") ntp.scputimes = namedtuple('scputimes', 'user system idle')(0.0, 0.0, 0.0) # XXX: must be available also at this module level in order to be # serialized (tests/test_misc.py::TestMisc::test_serialization). scputimes = ntp.scputimes # ===================================================================== # --- system memory # ===================================================================== def calculate_avail_vmem(mems): """Fallback for kernels < 3.14 where /proc/meminfo does not provide "MemAvailable", see: https://blog.famzah.net/2014/09/24/. This code reimplements the algorithm outlined here: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/ commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 We use this function also when "MemAvailable" returns 0 (possibly a kernel bug, see: https://github.com/giampaolo/psutil/issues/1915). In that case this routine matches "free" CLI tool result ("available" column). XXX: on recent kernels this calculation may differ by ~1.5% compared to "MemAvailable:", as it's calculated slightly differently. It is still way more realistic than doing (free + cached) though. See: * https://gitlab.com/procps-ng/procps/issues/42 * https://github.com/famzah/linux-memavailable-procfs/issues/2 """ # Note about "fallback" value. According to: # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/ # commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 # ...long ago "available" memory was calculated as (free + cached), # We use fallback when one of these is missing from /proc/meminfo: # "Active(file)": introduced in 2.6.28 / Dec 2008 # "Inactive(file)": introduced in 2.6.28 / Dec 2008 # "SReclaimable": introduced in 2.6.19 / Nov 2006 # /proc/zoneinfo: introduced in 2.6.13 / Aug 2005 free = mems[b'MemFree:'] fallback = free + mems.get(b"Cached:", 0) try: lru_active_file = mems[b'Active(file):'] lru_inactive_file = mems[b'Inactive(file):'] slab_reclaimable = mems[b'SReclaimable:'] except KeyError as err: debug( f"{err.args[0]} is missing from /proc/meminfo; using an" " approximation for calculating available memory" ) return fallback try: f = open_binary(f"{get_procfs_path()}/zoneinfo") except OSError: return fallback # kernel 2.6.13 watermark_low = 0 with f: for line in f: line = line.strip() if line.startswith(b'low'): watermark_low += int(line.split()[1]) watermark_low *= PAGESIZE avail = free - watermark_low pagecache = lru_active_file + lru_inactive_file pagecache -= min(pagecache / 2, watermark_low) avail += pagecache avail += slab_reclaimable - min(slab_reclaimable / 2.0, watermark_low) return int(avail) def virtual_memory(): """Report virtual memory stats. This implementation mimics procps-ng-3.3.12, aka "free" CLI tool: https://gitlab.com/procps-ng/procps/blob/ 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L778-791 The returned values are supposed to match both "free" and "vmstat -s" CLI tools. 
""" missing_fields = [] mems = {} with open_binary(f"{get_procfs_path()}/meminfo") as f: for line in f: fields = line.split() mems[fields[0]] = int(fields[1]) * 1024 # /proc doc states that the available fields in /proc/meminfo vary # by architecture and compile options, but these 3 values are also # returned by sysinfo(2); as such we assume they are always there. total = mems[b'MemTotal:'] free = mems[b'MemFree:'] try: buffers = mems[b'Buffers:'] except KeyError: # https://github.com/giampaolo/psutil/issues/1010 buffers = 0 missing_fields.append('buffers') try: cached = mems[b"Cached:"] except KeyError: cached = 0 missing_fields.append('cached') else: # "free" cmdline utility sums reclaimable to cached. # Older versions of procps used to add slab memory instead. # This got changed in: # https://gitlab.com/procps-ng/procps/commit/ # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e cached += mems.get(b"SReclaimable:", 0) # since kernel 2.6.19 try: shared = mems[b'Shmem:'] # since kernel 2.6.32 except KeyError: try: shared = mems[b'MemShared:'] # kernels 2.4 except KeyError: shared = 0 missing_fields.append('shared') try: active = mems[b"Active:"] except KeyError: active = 0 missing_fields.append('active') try: inactive = mems[b"Inactive:"] except KeyError: try: inactive = ( mems[b"Inact_dirty:"] + mems[b"Inact_clean:"] + mems[b"Inact_laundry:"] ) except KeyError: inactive = 0 missing_fields.append('inactive') try: slab = mems[b"Slab:"] except KeyError: slab = 0 # - starting from 4.4.0 we match free's "available" column. # Before 4.4.0 we calculated it as (free + buffers + cached) # which matched htop. # - free and htop available memory differs as per: # http://askubuntu.com/a/369589 # http://unix.stackexchange.com/a/65852/168884 # - MemAvailable has been introduced in kernel 3.14 try: avail = mems[b'MemAvailable:'] except KeyError: avail = calculate_avail_vmem(mems) else: if avail == 0: # Yes, it can happen (probably a kernel bug): # https://github.com/giampaolo/psutil/issues/1915 # In this case "free" CLI tool makes an estimate. We do the same, # and it matches "free" CLI tool. avail = calculate_avail_vmem(mems) if avail < 0: avail = 0 missing_fields.append('available') elif avail > total: # If avail is greater than total or our calculation overflows, # that's symptomatic of running within a LCX container where such # values will be dramatically distorted over those of the host. # https://gitlab.com/procps-ng/procps/blob/ # 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L764 avail = free used = total - avail percent = usage_percent((total - avail), total, round_=1) # Warn about missing metrics which are set to 0. 
if missing_fields: msg = "{} memory stats couldn't be determined and {} set to 0".format( ", ".join(missing_fields), "was" if len(missing_fields) == 1 else "were", ) warnings.warn(msg, RuntimeWarning, stacklevel=2) return ntp.svmem( total, avail, percent, used, free, active, inactive, buffers, cached, shared, slab, ) def swap_memory(): """Return swap memory metrics.""" mems = {} with open_binary(f"{get_procfs_path()}/meminfo") as f: for line in f: fields = line.split() mems[fields[0]] = int(fields[1]) * 1024 # We prefer /proc/meminfo over sysinfo() syscall so that # psutil.PROCFS_PATH can be used in order to allow retrieval # for linux containers, see: # https://github.com/giampaolo/psutil/issues/1015 try: total = mems[b'SwapTotal:'] free = mems[b'SwapFree:'] except KeyError: _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo() total *= unit_multiplier free *= unit_multiplier used = total - free percent = usage_percent(used, total, round_=1) # get pgin/pgouts try: f = open_binary(f"{get_procfs_path()}/vmstat") except OSError as err: # see https://github.com/giampaolo/psutil/issues/722 msg = ( "'sin' and 'sout' swap memory stats couldn't " f"be determined and were set to 0 ({err})" ) warnings.warn(msg, RuntimeWarning, stacklevel=2) sin = sout = 0 else: with f: sin = sout = None for line in f: # values are expressed in 4 kilo bytes, we want # bytes instead if line.startswith(b'pswpin'): sin = int(line.split(b' ')[1]) * 4 * 1024 elif line.startswith(b'pswpout'): sout = int(line.split(b' ')[1]) * 4 * 1024 if sin is not None and sout is not None: break else: # we might get here when dealing with exotic Linux # flavors, see: # https://github.com/giampaolo/psutil/issues/313 msg = "'sin' and 'sout' swap memory stats couldn't " msg += "be determined and were set to 0" warnings.warn(msg, RuntimeWarning, stacklevel=2) sin = sout = 0 return ntp.sswap(total, used, free, percent, sin, sout) # malloc / heap functions; require glibc if hasattr(cext, "heap_info"): heap_info = cext.heap_info heap_trim = cext.heap_trim # ===================================================================== # --- CPU # ===================================================================== def cpu_times(): """Return a named tuple representing the following system-wide CPU times: (user, nice, system, idle, iowait, irq, softirq [steal, [guest, [guest_nice]]]) Last 3 fields may not be available on all Linux kernel versions. """ procfs_path = get_procfs_path() with open_binary(f"{procfs_path}/stat") as f: values = f.readline().split() fields = values[1 : len(ntp.scputimes._fields) + 1] fields = [float(x) / CLOCK_TICKS for x in fields] return ntp.scputimes(*fields) def per_cpu_times(): """Return a list of namedtuple representing the CPU times for every CPU available on the system. """ procfs_path = get_procfs_path() cpus = [] with open_binary(f"{procfs_path}/stat") as f: # get rid of the first line which refers to system wide CPU stats f.readline() for line in f: if line.startswith(b'cpu'): values = line.split() fields = values[1 : len(ntp.scputimes._fields) + 1] fields = [float(x) / CLOCK_TICKS for x in fields] entry = ntp.scputimes(*fields) cpus.append(entry) return cpus def cpu_count_logical(): """Return the number of logical CPUs in the system.""" try: return os.sysconf("SC_NPROCESSORS_ONLN") except ValueError: # as a second fallback we try to parse /proc/cpuinfo num = 0 with open_binary(f"{get_procfs_path()}/cpuinfo") as f: for line in f: if line.lower().startswith(b'processor'): num += 1 # unknown format (e.g. 
amrel/sparc architectures), see: # https://github.com/giampaolo/psutil/issues/200 # try to parse /proc/stat as a last resort if num == 0: search = re.compile(r'cpu\d') with open_text(f"{get_procfs_path()}/stat") as f: for line in f: line = line.split(' ')[0] if search.match(line): num += 1 if num == 0: # mimic os.cpu_count() return None return num def cpu_count_cores(): """Return the number of CPU cores in the system.""" # Method #1 ls = set() # These 2 files are the same but */core_cpus_list is newer while # */thread_siblings_list is deprecated and may disappear in the future. # https://www.kernel.org/doc/Documentation/admin-guide/cputopology.rst # https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964 # https://lkml.org/lkml/2019/2/26/41 p1 = "/sys/devices/system/cpu/cpu[0-9]*/topology/core_cpus_list" p2 = "/sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list" for path in glob.glob(p1) or glob.glob(p2): with open_binary(path) as f: ls.add(f.read().strip()) result = len(ls) if result != 0: return result # Method #2 mapping = {} current_info = {} with open_binary(f"{get_procfs_path()}/cpuinfo") as f: for line in f: line = line.strip().lower() if not line: # new section try: mapping[current_info[b'physical id']] = current_info[ b'cpu cores' ] except KeyError: pass current_info = {} elif line.startswith((b'physical id', b'cpu cores')): # ongoing section key, value = line.split(b'\t:', 1) current_info[key] = int(value) result = sum(mapping.values()) return result or None # mimic os.cpu_count() def cpu_stats(): """Return various CPU stats as a named tuple.""" with open_binary(f"{get_procfs_path()}/stat") as f: ctx_switches = None interrupts = None soft_interrupts = None for line in f: if line.startswith(b'ctxt'): ctx_switches = int(line.split()[1]) elif line.startswith(b'intr'): interrupts = int(line.split()[1]) elif line.startswith(b'softirq'): soft_interrupts = int(line.split()[1]) if ( ctx_switches is not None and soft_interrupts is not None and interrupts is not None ): break syscalls = 0 return ntp.scpustats(ctx_switches, interrupts, soft_interrupts, syscalls) def _cpu_get_cpuinfo_freq(): """Return current CPU frequency from cpuinfo if available.""" with open_binary(f"{get_procfs_path()}/cpuinfo") as f: return [ float(line.split(b':', 1)[1]) for line in f if line.lower().startswith(b'cpu mhz') ] if os.path.exists("/sys/devices/system/cpu/cpufreq/policy0") or os.path.exists( "/sys/devices/system/cpu/cpu0/cpufreq" ): def cpu_freq(): """Return frequency metrics for all CPUs. Contrarily to other OSes, Linux updates these values in real-time. 
""" cpuinfo_freqs = _cpu_get_cpuinfo_freq() paths = glob.glob( "/sys/devices/system/cpu/cpufreq/policy[0-9]*" ) or glob.glob("/sys/devices/system/cpu/cpu[0-9]*/cpufreq") paths.sort(key=lambda x: int(re.search(r"[0-9]+", x).group())) ret = [] pjoin = os.path.join for i, path in enumerate(paths): if len(paths) == len(cpuinfo_freqs): # take cached value from cpuinfo if available, see: # https://github.com/giampaolo/psutil/issues/1851 curr = cpuinfo_freqs[i] * 1000 else: curr = bcat(pjoin(path, "scaling_cur_freq"), fallback=None) if curr is None: # Likely an old RedHat, see: # https://github.com/giampaolo/psutil/issues/1071 curr = bcat(pjoin(path, "cpuinfo_cur_freq"), fallback=None) if curr is None: online_path = f"/sys/devices/system/cpu/cpu{i}/online" # if cpu core is offline, set to all zeroes if cat(online_path, fallback=None) == "0\n": ret.append(ntp.scpufreq(0.0, 0.0, 0.0)) continue msg = "can't find current frequency file" raise NotImplementedError(msg) curr = int(curr) / 1000 max_ = int(bcat(pjoin(path, "scaling_max_freq"))) / 1000 min_ = int(bcat(pjoin(path, "scaling_min_freq"))) / 1000 ret.append(ntp.scpufreq(curr, min_, max_)) return ret else: def cpu_freq(): """Alternate implementation using /proc/cpuinfo. min and max frequencies are not available and are set to None. """ return [ntp.scpufreq(x, 0.0, 0.0) for x in _cpu_get_cpuinfo_freq()] # ===================================================================== # --- network # ===================================================================== net_if_addrs = cext.net_if_addrs
IOPriority
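A small self-contained sketch of how the IntEnum above behaves, including the globals().update(IOPriority.__members__) trick that exposes the members as module-level constants:

import enum

class IOPriority(enum.IntEnum):
    IOPRIO_CLASS_NONE = 0
    IOPRIO_CLASS_RT = 1
    IOPRIO_CLASS_BE = 2
    IOPRIO_CLASS_IDLE = 3

# IntEnum members behave like plain ints in comparisons.
assert IOPriority.IOPRIO_CLASS_BE == 2

# Same trick as the module above: members become bare module-level names.
globals().update(IOPriority.__members__)
assert IOPRIO_CLASS_IDLE is IOPriority.IOPRIO_CLASS_IDLE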
python
tensorflow__tensorflow
tensorflow/python/distribute/multi_process_runner.py
{ "start": 36450, "end": 45167 }
class ____(object): """A utility class to start a process pool to simulate a cluster. It's similar to MultiProcessRunner, but uses a pool of processes to avoid the expensive initialization cost of Tensorflow. """ def __init__(self, cluster_spec, initializer=None, share_gpu=True): """Creates a multi-process pool runner. Args: cluster_spec: Dict for cluster spec. The following is an example of cluster with three workers. {"worker": ["worker0.example.com:2222", "worker1.example.com:2222", "worker2.example.com:2222"]} initializer: a callable to called at the startup of worker processes. share_gpu: Whether to share GPUs among workers. If False, each worker is assigned different GPUs in a roundrobin fashion. Raises: RuntimeError: if `multi_process_runner.test_main()` is not called. ValueError: if there are more than one chief in the `cluster_spec`. """ _active_pool_runners.add(self) self._cluster_spec = cluster_spec self._initializer = initializer self._share_gpu = share_gpu self._conn = {} self._runner = None def __del__(self): self.shutdown() def shutdown(self): """Shuts down the worker pool.""" for conn in self._conn.values(): conn.close() self._conn = {} if self._runner is not None: try: self._runner.join() except unittest.SkipTest: raise except Exception as e: # pylint: disable=broad-except logging.exception( 'Ignoring exception when shutting down MultiProcessPoolRunner: %s', e, ) self._runner = None def _start(self): """Starts the worker pool.""" # We need different arguments for different processes so we're passing a # no-op fn here and use start_single_process instead. if dill is None: raise unittest.SkipTest( 'TODO(b/150264776): Resolve dependency issue in CI') self._runner = MultiProcessRunner( fn=lambda: None, cluster_spec=self._cluster_spec, use_dill_for_args=False, share_gpu=self._share_gpu) if self._initializer: initializer = dill.dumps(self._initializer, dill.HIGHEST_PROTOCOL) else: initializer = None for task_type, addresses in self._cluster_spec.items(): for task_id, _ in enumerate(addresses): conn1, conn2 = multiprocessing.Pipe(duplex=True) self._conn[(task_type, task_id)] = conn1 self._runner.start_single_process( task_type, task_id, fn=_pool_runner_worker, args=(task_type, task_id, initializer, conn2)) def run(self, fn, args=None, kwargs=None): """Runs `fn` with `args` and `kwargs` on all jobs. Args: fn: The function to be run. args: Optional positional arguments to be supplied in `fn`. kwargs: Optional keyword arguments to be supplied in `fn`. Returns: A list of return values. """ _check_initialization() # TODO(b/150264776): skip in OSS until it's implemented. 
multi_process_lib.Process() if self._runner is None: self._start() fn = dill.dumps(fn, dill.HIGHEST_PROTOCOL) for conn in self._conn.values(): conn.send((fn, args or [], kwargs or {})) process_statuses = [] total_timeout = 300 # 5 minutes total timeout for all processes start_time = time.time() for (task_type, task_id), conn in self._conn.items(): remaining_time = total_timeout - (time.time() - start_time) if remaining_time <= 0: raise RuntimeError( f'Total timeout of {total_timeout} s exceeded ' 'while waiting for processes' ) logging.info( 'Waiting for the result from %s-%d (timeout: %.1fs)', task_type, task_id, remaining_time, ) # Use threading to implement timeout for conn.recv() result_container = [] error_container = [] def recv_with_timeout(): try: result = conn.recv() result_container.append(result) except EOFError as e: error_container.append(('eof', e)) except Exception as e: error_container.append(('error', e)) recv_thread = threading.Thread(target=recv_with_timeout) recv_thread.daemon = True recv_thread.start() recv_thread.join( timeout=min(remaining_time, 120) ) # Max 2 minutes run per process if recv_thread.is_alive(): # Timeout occurred logging.info( '[%s-%d] Timeout waiting for process result after %d s', task_type, task_id, min(remaining_time, 120), ) # Try to get process status for debugging with self._runner._process_lock if self._runner else threading.Lock(): if self._runner and (task_type, task_id) in self._runner._processes: process = self._runner._processes[(task_type, task_id)] logging.info( '[%s-%d] Process state: exitcode=%d, pid=%d', task_type, task_id, process.exitcode, process.pid, ) # Terminate the process if it is still running raise RuntimeError( f'Timeout waiting for {task_type}-{task_id} ' f'after {min(remaining_time, 120):.1f}s. ' 'This often indicates a subprocess is stuck ' 'in initialization or pipe reading. ' 'Try increasing Docker container resources ' 'or check for deadlocks.' ) if error_container: error_type, error = error_container[0] if error_type == 'eof': # This shouldn't happen due to exceptions in fn. This usually # means bugs in the runner. self.shutdown() raise RuntimeError( 'Unexpected EOF. Worker process may have died. ' 'Please report a bug' ) else: raise error if result_container: process_statuses.append(result_container[0]) else: raise RuntimeError(f'No result received from {task_type}-{task_id}') return_values = [] for process_status in process_statuses: assert isinstance(process_status, _ProcessStatusInfo) if not process_status.is_successful: six.reraise(*process_status.exc_info) if process_status.return_value is not None: return_values.append(process_status.return_value) return return_values def _pool_runner_worker(task_type, task_id, initializer, conn): """Function that runs on the workers in a pool. It listens for callables to run and returns the result until `conn` is closed. It captures the exceptions during executing the callable and return it through `conn`. Args: task_type: the task type. task_id: the task index. initializer: a callable to execute during startup. conn: a multiprocessing.Connection object to listen for tasks and send results. """ if initializer: initializer = dill.loads(initializer) initializer() while True: try: fn, args, kwargs = conn.recv() except EOFError: break fn = dill.loads(fn) info = _run_contained(task_type, task_id, fn, args, kwargs) sys.stdout.flush() sys.stderr.flush() conn.send(info) def _run_contained(task_type, task_id, fn, args, kwargs): """Runs `fn` with `args` and `kwargs`. 
The function returns _ProcessStatusInfo which captures the return value and the exception. Args: task_type: the task type. task_id: the task index. fn: the function to be run. args: optional positional arguments to be supplied in `fn`. kwargs: optional keyword arguments to be supplied in `fn`. Returns: a _ProcessStatusInfo. """ is_successful = False return_value = None exc_info = None try: return_value = fn(*args, **kwargs) is_successful = True return _ProcessStatusInfo( task_type=task_type, task_id=task_id, is_successful=is_successful, exc_info=exc_info, return_value=return_value) # If `fn` ends up exiting with `sys.exit()`, the `SystemExit` is not # handled here. except Exception: # pylint: disable=broad-except exc_info = sys.exc_info() return _ProcessStatusInfo( task_type=task_type, task_id=task_id, is_successful=is_successful, exc_info=exc_info, return_value=return_value) @tf_export('__internal__.distribute.multi_process_runner' '.SubprocessTimeoutError', v1=[])
MultiProcessPoolRunner
python
pypa__setuptools
setuptools/config/pyprojecttoml.py
{ "start": 17965, "end": 18161 }
class ____(SetuptoolsWarning): _SUMMARY = ( "`{subject}` in `pyproject.toml` is still *experimental* " "and likely to change in future releases." )
_ExperimentalConfiguration
python
realpython__materials
python-callable-instances/serializing.py
{ "start": 27, "end": 123 }
class ____: def __call__(self, data): return json.dumps(data, indent=4)
JsonSerializer
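A quick usage sketch for the callable above; because the class defines __call__, an instance can be passed anywhere a plain function is expected:

import json

class JsonSerializer:
    def __call__(self, data):
        return json.dumps(data, indent=4)

serialize = JsonSerializer()
# The instance is invoked exactly like a function.
print(serialize({"name": "ada", "scores": [1, 2, 3]}))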
python
getsentry__sentry
src/sentry/auth/authenticators/recovery_code.py
{ "start": 375, "end": 2533 }
class ____(AuthenticatorInterface): """A backup interface that is based on static recovery codes.""" type = 0 interface_id = "recovery" name = _("Recovery Codes") description = _( "Recovery codes are the only way to access your account " "if you lose your device and cannot receive two factor " "authentication codes." ) enroll_button = _("Activate") configure_button = _("View Codes") remove_button = None is_backup_interface = True def __init__(self, authenticator: Authenticator | None = None) -> None: super().__init__(authenticator) def get_codes(self) -> list[str]: rv = [] if self.is_enrolled(): h = hmac.new(key=self.config["salt"].encode(), msg=None, digestmod=sha1) for x in range(10): h.update(f"{x}|".encode()) rv.append(b32encode(h.digest())[:8].decode()) return rv def generate_new_config(self) -> dict[str, Any]: salt = hexlify(urandom(16)).decode() return {"salt": salt, "used": 0} def regenerate_codes(self, save: bool = True) -> None: if not self.is_enrolled(): raise RuntimeError("Interface is not enrolled") self.config.update(self.generate_new_config()) assert self.authenticator, "Cannot regenerate codes without self.authenticator" self.authenticator.reset_fields(save=False) if save: self.authenticator.save() def validate_otp(self, otp: str) -> bool: mask = self.config["used"] code = otp.strip().replace("-", "").upper() for idx, ref_code in enumerate(self.get_codes()): if code == ref_code: if mask & (1 << idx): break self.config["used"] = mask | (1 << idx) return True return False def get_unused_codes(self) -> list[str]: mask = self.config["used"] rv = [] for idx, code in enumerate(self.get_codes()): if not mask & (1 << idx): rv.append(f"{code[:4]}-{code[4:]}") return rv
RecoveryCodeInterface
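A standalone sketch of the recovery-code derivation used by get_codes() above (same HMAC construction; the salt here is a made-up value, the real one comes from config["salt"], and note that the single hmac object is updated cumulatively across the loop):

import hmac
from base64 import b32encode
from hashlib import sha1

salt = "0123456789abcdef0123456789abcdef"  # hypothetical salt for illustration
h = hmac.new(key=salt.encode(), msg=None, digestmod=sha1)
codes = []
for x in range(10):
    h.update(f"{x}|".encode())
    codes.append(b32encode(h.digest())[:8].decode())

# Ten 8-character codes, formatted XXXX-XXXX just like get_unused_codes().
print([f"{c[:4]}-{c[4:]}" for c in codes])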
python
celery__celery
t/unit/utils/test_saferepr.py
{ "start": 2245, "end": 5551 }
class ____: @pytest.mark.parametrize('value', list(D_NUMBERS.values())) def test_safe_types(self, value): assert saferepr(value) == old_repr(value) def test_numbers_dict(self): assert saferepr(D_NUMBERS) == old_repr(D_NUMBERS) def test_numbers_list(self): assert saferepr(L_NUMBERS) == old_repr(L_NUMBERS) def test_numbers_keys(self): assert saferepr(D_INT_KEYS) == old_repr(D_INT_KEYS) def test_text(self): assert saferepr(D_TEXT) == old_repr(D_TEXT).replace("u'", "'") def test_text_maxlen(self): assert saferepr(D_D_TEXT, 100).endswith("...', ...}}") def test_maxlevels(self): saferepr(D_ALL, maxlevels=1) def test_recursion(self): d = {1: 2, 3: {4: 5}} d[3][6] = d res = saferepr(d) assert 'Recursion on' in res @pytest.mark.parametrize('value', [ 0, 0, 0 + 0j, 0.0, '', b'', (), tuple2(), tuple3(), [], list2(), list3(), set(), set2(), set3(), frozenset(), frozenset2(), frozenset3(), {}, dict2(), dict3(), test_recursion, pprint, -6, -6, -6 - 6j, -1.5, 'x', b'x', (3,), [3], {3: 6}, (1, 2), [3, 4], {5: 6}, tuple2((1, 2)), tuple3((1, 2)), tuple3(range(100)), [3, 4], list2([3, 4]), list3([3, 4]), list3(range(100)), {7}, set2({7}), set3({7}), frozenset({8}), frozenset2({8}), frozenset3({8}), dict2({5: 6}), dict3({5: 6}), range(10, -11, -1) ]) def test_same_as_repr(self, value): # Simple objects, small containers, and classes that overwrite __repr__ # For those the result should be the same as repr(). # Ahem. The docs don't say anything about that -- this appears to # be testing an implementation quirk. Starting in Python 2.5, it's # not true for dicts: pprint always sorts dicts by key now; before, # it sorted a dict display if and only if the display required # multiple lines. For that reason, dicts with more than one element # aren't tested here. native = old_repr(value) assert saferepr(value) == native def test_single_quote(self): val = {"foo's": "bar's"} assert ast.literal_eval(saferepr(val)) == val def test_unicode_bytes(self): val = 'øystein'.encode() assert saferepr(val) == "b'øystein'" def test_unicode_bytes__long(self): val = 'øystein'.encode() * 1024 assert saferepr(val, maxlen=128).endswith("...'") def test_binary_bytes(self): val = struct.pack('>QQQ', 12223, 1234, 3123) if hasattr(bytes, 'hex'): # Python 3.5+ assert '2fbf' in saferepr(val, maxlen=128) else: # Python 3.4 assert saferepr(val, maxlen=128) def test_binary_bytes__long(self): val = struct.pack('>QQQ', 12223, 1234, 3123) * 1024 result = saferepr(val, maxlen=128) assert '2fbf' in result assert result.endswith("...'") def test_repr_raises(self): class O: def __repr__(self): raise KeyError('foo') assert 'Unrepresentable' in saferepr(O()) def test_bytes_with_unicode_py2_and_3(self): assert saferepr([b'foo', 'a®rgs'.encode()])
test_saferepr
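A brief sketch of the function exercised by the tests above (assuming the celery.utils.saferepr import path used by this test module):

from celery.utils.saferepr import saferepr

# For simple containers the output matches repr()...
assert saferepr([1, 2, 3]) == repr([1, 2, 3])

# ...but oversized values are truncated rather than flooding a log line.
print(saferepr("x" * 10_000, maxlen=40))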
python
ansible__ansible
lib/ansible/executor/module_common.py
{ "start": 6257, "end": 16288 }
class ____(ast.NodeVisitor): # DTFIX-FUTURE: add support for ignoring imports with a "controller only" comment, this will allow replacing import_controller_module with standard imports def __init__(self, module_fqn, tree, is_pkg_init=False, *args, **kwargs): """ Walk the ast tree for the python module. :arg module_fqn: The fully qualified name to reach this module in dotted notation. example: ansible.module_utils.basic :arg is_pkg_init: Inform the finder it's looking at a package init (eg __init__.py) to allow relative import expansion to use the proper package level without having imported it locally first. Save submodule[.submoduleN][.identifier] into self.submodules when they are from ansible.module_utils or ansible_collections packages self.submodules will end up with tuples like: - ('ansible', 'module_utils', 'basic',) - ('ansible', 'module_utils', 'urls', 'fetch_url') - ('ansible', 'module_utils', 'database', 'postgres') - ('ansible', 'module_utils', 'database', 'postgres', 'quote') - ('ansible', 'module_utils', 'database', 'postgres', 'quote') - ('ansible_collections', 'my_ns', 'my_col', 'plugins', 'module_utils', 'foo') It's up to calling code to determine whether the final element of the tuple are module names or something else (function, class, or variable names) .. seealso:: :python3:class:`ast.NodeVisitor` """ super(ModuleDepFinder, self).__init__(*args, **kwargs) self._tree = tree # squirrel this away so we can compare node parents to it self.submodules = set() self.optional_imports = set() self.module_fqn = module_fqn self.is_pkg_init = is_pkg_init self._visit_map = { Import: self.visit_Import, ImportFrom: self.visit_ImportFrom, } self.visit(tree) def generic_visit(self, node): """Overridden ``generic_visit`` that makes some assumptions about our use case, and improves performance by calling visitors directly instead of calling ``visit`` to offload calling visitors. """ generic_visit = self.generic_visit visit_map = self._visit_map for field, value in ast.iter_fields(node): if isinstance(value, list): for item in value: if isinstance(item, (Import, ImportFrom)): item.parent = node visit_map[item.__class__](item) elif isinstance(item, AST): generic_visit(item) visit = generic_visit def visit_Import(self, node): """ Handle import ansible.module_utils.MODLIB[.MODLIBn] [as asname] We save these as interesting submodules when the imported library is in ansible.module_utils or ansible.collections """ for alias in node.names: if (alias.name.startswith('ansible.module_utils.') or alias.name.startswith('ansible_collections.')): py_mod = tuple(alias.name.split('.')) self.submodules.add(py_mod) # if the import's parent is the root document, it's a required import, otherwise it's optional if node.parent != self._tree: self.optional_imports.add(py_mod) self.generic_visit(node) def visit_ImportFrom(self, node): """ Handle from ansible.module_utils.MODLIB import [.MODLIBn] [as asname] Also has to handle relative imports We save these as interesting submodules when the imported library is in ansible.module_utils or ansible.collections """ # FIXME: These should all get skipped: # from ansible.executor import module_common # from ...executor import module_common # from ... 
import executor (Currently it gives a non-helpful error) if node.level > 0: # if we're in a package init, we have to add one to the node level (and make it none if 0 to preserve the right slicing behavior) level_slice_offset = -node.level + 1 or None if self.is_pkg_init else -node.level if self.module_fqn: parts = tuple(self.module_fqn.split('.')) if node.module: # relative import: from .module import x node_module = '.'.join(parts[:level_slice_offset] + (node.module,)) else: # relative import: from . import x node_module = '.'.join(parts[:level_slice_offset]) else: # fall back to an absolute import node_module = node.module else: # absolute import: from module import x node_module = node.module # Specialcase: six is a special case because of its # import logic py_mod = None if node.names[0].name == '_six': self.submodules.add(('_six',)) elif node_module.startswith('ansible.module_utils'): # from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname] # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname] # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname] # from ansible.module_utils import MODULE1 [,MODULEn] [as asname] py_mod = tuple(node_module.split('.')) elif node_module.startswith('ansible_collections.'): if node_module.endswith('plugins.module_utils') or '.plugins.module_utils.' in node_module: # from ansible_collections.ns.coll.plugins.module_utils import MODULE [as aname] [,MODULE2] [as aname] # from ansible_collections.ns.coll.plugins.module_utils.MODULE import IDENTIFIER [as aname] # FIXME: Unhandled cornercase (needs to be ignored): # from ansible_collections.ns.coll.plugins.[!module_utils].[FOO].plugins.module_utils import IDENTIFIER py_mod = tuple(node_module.split('.')) else: # Not from module_utils so ignore. for instance: # from ansible_collections.ns.coll.plugins.lookup import IDENTIFIER pass if py_mod: for alias in node.names: self.submodules.add(py_mod + (alias.name,)) # if the import's parent is the root document, it's a required import, otherwise it's optional if node.parent != self._tree: self.optional_imports.add(py_mod + (alias.name,)) self.generic_visit(node) def _slurp(path): if not os.path.exists(path): raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path)) with open(path, 'rb') as fd: data = fd.read() return data def _get_shebang(interpreter, task_vars, templar: _template.Templar, args=tuple(), remote_is_local=False): """ Handles the different ways ansible allows overriding the shebang target for a module. 
""" # FUTURE: add logical equivalence for python3 in the case of py3-only modules interpreter_name = os.path.basename(interpreter).strip() # name for interpreter var interpreter_config = u'ansible_%s_interpreter' % interpreter_name # key for config interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper() interpreter_out = None # looking for python, rest rely on matching vars if interpreter_name == 'python': # skip detection for network os execution, use playbook supplied one if possible if remote_is_local: interpreter_out = task_vars['ansible_playbook_python'] # a config def exists for this interpreter type; consult config for the value elif C.config.get_configuration_definition(interpreter_config_key): interpreter_from_config = C.config.get_config_value(interpreter_config_key, variables=task_vars) interpreter_out = templar._engine.template(_utils.str_problematic_strip(interpreter_from_config), options=TemplateOptions(value_for_omit=C.config.get_config_default(interpreter_config_key))) # handle interpreter discovery if requested or empty interpreter was provided if not interpreter_out or interpreter_out in ['auto', 'auto_silent']: discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name facts_from_task_vars = task_vars.get('ansible_facts', {}) if discovered_interpreter_config not in facts_from_task_vars: # interpreter discovery is desired, but has not been run for this host raise InterpreterDiscoveryRequiredError("interpreter discovery needed", interpreter_name=interpreter_name, discovery_mode=interpreter_out) else: interpreter_out = facts_from_task_vars[discovered_interpreter_config] else: raise InterpreterDiscoveryRequiredError("interpreter discovery required", interpreter_name=interpreter_name, discovery_mode='auto') elif interpreter_config in task_vars: # for non python we consult vars for a possible direct override interpreter_out = templar._engine.template(_utils.str_problematic_strip(task_vars.get(interpreter_config)), options=TemplateOptions(value_for_omit=None)) if not interpreter_out: # nothing matched(None) or in case someone configures empty string or empty interpreter interpreter_out = interpreter # set shebang shebang = u'#!{0}'.format(interpreter_out) if args: shebang = shebang + u' ' + u' '.join(args) return shebang, interpreter_out
ModuleDepFinder
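A simplified, self-contained sketch of the same idea as ModuleDepFinder above — walking a module's AST to collect ansible.module_utils imports. This is a reduced illustration, not the Ansible implementation:

import ast

SOURCE = """
from ansible.module_utils.basic import AnsibleModule
import ansible.module_utils.urls
import os
"""

class ModuleUtilsFinder(ast.NodeVisitor):
    def __init__(self):
        self.submodules = set()

    def visit_Import(self, node):
        # import ansible.module_utils.MODLIB[.MODLIBn]
        for alias in node.names:
            if alias.name.startswith("ansible.module_utils."):
                self.submodules.add(tuple(alias.name.split(".")))
        self.generic_visit(node)

    def visit_ImportFrom(self, node):
        # from ansible.module_utils.MODLIB import IDENTIFIER
        if node.module and node.module.startswith("ansible.module_utils"):
            for alias in node.names:
                self.submodules.add(tuple(node.module.split(".")) + (alias.name,))
        self.generic_visit(node)

finder = ModuleUtilsFinder()
finder.visit(ast.parse(SOURCE))
print(sorted(finder.submodules))
# [('ansible', 'module_utils', 'basic', 'AnsibleModule'),
#  ('ansible', 'module_utils', 'urls')]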
python
pypa__packaging
tests/test_requirements.py
{ "start": 4276, "end": 17598 }
class ____: @pytest.mark.parametrize( "marker", [ "python_implementation == ''", "platform_python_implementation == ''", "os.name == 'linux'", "os_name == 'linux'", "'8' in platform.version", "'8' not in platform.version", ], ) def test_valid_marker(self, marker: str) -> None: # GIVEN to_parse = f"name; {marker}" # WHEN Requirement(to_parse) @pytest.mark.parametrize( "url", [ "file:///absolute/path", "file://.", "file:.", "file:/.", ], ) def test_file_url(self, url: str) -> None: # GIVEN to_parse = f"name @ {url}" # WHEN req = Requirement(to_parse) # THEN assert req.url == url def test_empty_extras(self) -> None: # GIVEN to_parse = "name[]" # WHEN req = Requirement(to_parse) # THEN assert req.name == "name" assert req.extras == set() def test_empty_specifier(self) -> None: # GIVEN to_parse = "name()" # WHEN req = Requirement(to_parse) # THEN assert req.name == "name" assert req.specifier == "" # ---------------------------------------------------------------------------------- # Everything below this (in this class) should be parsing failure modes # ---------------------------------------------------------------------------------- # Start all method names with with `test_error_` # to make it easier to run these tests with `-k error` def test_error_when_empty_string(self) -> None: # GIVEN to_parse = "" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected package name at the start of dependency specifier\n" " \n" " ^" ) def test_error_no_name(self) -> None: # GIVEN to_parse = "==0.0" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected package name at the start of dependency specifier\n" " ==0.0\n" " ^" ) def test_error_when_missing_comma_in_extras(self) -> None: # GIVEN to_parse = "name[bar baz]" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected comma between extra names\n" " name[bar baz]\n" " ^" ) def test_error_when_trailing_comma_in_extras(self) -> None: # GIVEN to_parse = "name[bar, baz,]" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected extra name after comma\n" " name[bar, baz,]\n" " ^" ) def test_error_when_parens_not_closed_correctly(self) -> None: # GIVEN to_parse = "name (>= 1.0" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected matching RIGHT_PARENTHESIS for LEFT_PARENTHESIS, " "after version specifier\n" " name (>= 1.0\n" " ~~~~~~~^" ) def test_error_when_prefix_match_is_used_incorrectly(self) -> None: # GIVEN to_parse = "black (>=20.*) ; extra == 'format'" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " ".* suffix can only be used with `==` or `!=` operators\n" " black (>=20.*) ; extra == 'format'\n" " ~~~~~^" ) @pytest.mark.parametrize("operator", [">=", "<=", ">", "<", "~="]) def test_error_when_local_version_label_is_used_incorrectly( self, operator: str ) -> None: # GIVEN to_parse = f"name {operator} 1.0+local.version.label" op_tilde = len(operator) * "~" # WHEN with pytest.raises(InvalidRequirement) as ctx: 
Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Local version label can only be used with `==` or `!=` operators\n" f" name {operator} 1.0+local.version.label\n" f" {op_tilde}~~~~^" ) def test_error_when_bracket_not_closed_correctly(self) -> None: # GIVEN to_parse = "name[bar, baz >= 1.0" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected matching RIGHT_BRACKET for LEFT_BRACKET, " "after extras\n" " name[bar, baz >= 1.0\n" " ~~~~~~~~~~^" ) def test_error_when_extras_bracket_left_unclosed(self) -> None: # GIVEN to_parse = "name[bar, baz" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected matching RIGHT_BRACKET for LEFT_BRACKET, " "after extras\n" " name[bar, baz\n" " ~~~~~~~~~^" ) def test_error_no_space_after_url(self) -> None: # GIVEN to_parse = "name @ https://example.com/; extra == 'example'" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected semicolon (after URL and whitespace) or end\n" " name @ https://example.com/; extra == 'example'\n" " ~~~~~~~~~~~~~~~~~~~~~~^" ) def test_error_marker_bracket_unclosed(self) -> None: # GIVEN to_parse = "name; (extra == 'example'" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected matching RIGHT_PARENTHESIS for LEFT_PARENTHESIS, " "after marker expression\n" " name; (extra == 'example'\n" " ~~~~~~~~~~~~~~~~~~~^" ) def test_error_no_url_after_at(self) -> None: # GIVEN to_parse = "name @ " # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected URL after @\n" " name @ \n" " ^" ) def test_error_invalid_marker_lvalue(self) -> None: # GIVEN to_parse = "name; invalid_name" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected a marker variable or quoted string\n" " name; invalid_name\n" " ^" ) def test_error_invalid_marker_rvalue(self) -> None: # GIVEN to_parse = "name; '3.7' <= invalid_name" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected a marker variable or quoted string\n" " name; '3.7' <= invalid_name\n" " ^" ) def test_error_invalid_marker_notin_without_whitespace(self) -> None: # GIVEN to_parse = "name; '3.7' notin python_version" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, " "in, not in\n" " name; '3.7' notin python_version\n" " ^" ) def test_error_when_no_word_boundary(self) -> None: # GIVEN to_parse = "name; '3.6'inpython_version" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, " "in, not in\n" " name; '3.6'inpython_version\n" " ^" ) def 
test_error_invalid_marker_not_without_in(self) -> None: # GIVEN to_parse = "name; '3.7' not python_version" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected 'in' after 'not'\n" " name; '3.7' not python_version\n" " ^" ) def test_error_invalid_marker_with_invalid_op(self) -> None: # GIVEN to_parse = "name; '3.7' ~ python_version" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, " "in, not in\n" " name; '3.7' ~ python_version\n" " ^" ) def test_error_on_legacy_version_outside_triple_equals(self) -> None: # GIVEN to_parse = "name==1.0.org1" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected comma (within version specifier), " "semicolon (after version specifier) or end\n" " name==1.0.org1\n" " ~~~~~^" ) def test_error_on_missing_version_after_op(self) -> None: # GIVEN to_parse = "name==" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected semicolon (after name with no version specifier) or end\n" " name==\n" " ^" ) def test_error_on_missing_op_after_name(self) -> None: # GIVEN to_parse = "name 1.0" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected semicolon (after name with no version specifier) or end\n" " name 1.0\n" " ^" ) def test_error_on_random_char_after_specifier(self) -> None: # GIVEN to_parse = "name >= 1.0 #" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected comma (within version specifier), " "semicolon (after version specifier) or end\n" " name >= 1.0 #\n" " ~~~~~~~^" ) def test_error_on_missing_comma_in_specifier(self) -> None: # GIVEN to_parse = "name >= 1.0 <= 2.0" # WHEN with pytest.raises(InvalidRequirement) as ctx: Requirement(to_parse) # THEN assert ctx.exconly() == ( "packaging.requirements.InvalidRequirement: " "Expected comma (within version specifier), " "semicolon (after version specifier) or end\n" " name >= 1.0 <= 2.0\n" " ~~~~~~~^" )
TestRequirementParsing
python
weaviate__weaviate-python-client
weaviate/collections/classes/config.py
{ "start": 59902, "end": 60180 }
class ____(_PropertyBase):
    target_collections: List[str]

    def to_dict(self) -> Dict[str, Any]:
        out = super().to_dict()
        out["dataType"] = self.target_collections
        return out


ReferencePropertyConfig = _ReferenceProperty


@dataclass
_ReferenceProperty
python
scikit-learn__scikit-learn
sklearn/utils/tests/test_deprecation.py
{ "start": 259, "end": 528 }
class ____:
    @deprecated("mockclass2_method")
    def method(self):
        pass

    @deprecated("n_features_ is deprecated")  # type: ignore[prop-decorator]
    @property
    def n_features_(self):
        """Number of input features."""
        return 10
MockClass2
python
getlogbook__logbook
tests/test_deadlock.py
{ "start": 29, "end": 250 }
class ____:
    def __init__(self, logger_func):
        self._logger_func = logger_func

    def __str__(self):
        self._logger_func("this debug message produced in __str__")
        return "<complex object>"
MyObject
python
huggingface__transformers
src/transformers/models/chameleon/modeling_chameleon.py
{ "start": 10794, "end": 15456 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: ChameleonConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.is_causal = True self.model_parallel_size = config.model_parallel_size self.scaling = self.head_dim**-0.5 if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." ) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias) self.q_norm = ChameleonLayerNorm((self.num_heads, self.head_dim)) self.k_norm = ChameleonLayerNorm((self.num_key_value_heads, self.head_dim)) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[torch.Tensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.reshape(-1, self.num_heads, self.head_dim) query_states = self.q_norm(query_states) key_states = key_states.reshape(-1, self.num_key_value_heads, self.head_dim) key_states = self.k_norm(key_states) query_states = query_states.reshape(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.reshape(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; position_ids needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, 
value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights # copied from transformers.models.llama.modeling_llama.LlamaDecoderLayer with Llama->Chameleon, LLAMA->CHAMELEON
ChameleonAttention
python
getsentry__sentry
src/sentry/api/endpoints/timeseries.py
{ "start": 58, "end": 137 }
class ____(TypedDict):
    dataset: str
    start: float
    end: float
StatsMeta
python
numba__numba
numba/tests/test_linalg.py
{ "start": 44619, "end": 48711 }
class ____(TestLinalgBase): """ Tests for np.linalg.qr. """ @needs_lapack def test_linalg_qr(self): """ Test np.linalg.qr """ cfunc = jit(nopython=True)(qr_matrix) def check(a, **kwargs): expected = qr_matrix(a, **kwargs) got = cfunc(a, **kwargs) # check that the returned tuple is same length self.assertEqual(len(expected), len(got)) # and that length is 2 self.assertEqual(len(got), 2) # and that the computed results are contig and in the same way self.assert_contig_sanity(got, "F") use_reconstruction = False # try plain match of each array to np first for k in range(len(expected)): try: np.testing.assert_array_almost_equal_nulp( got[k], expected[k], nulp=10) except AssertionError: # plain match failed, test by reconstruction use_reconstruction = True # if plain match fails then reconstruction is used. # this checks that A ~= Q*R and that (Q^H)*Q = I # i.e. QR decomposition ties out # this is required as numpy uses only double precision lapack # routines and computation of qr is numerically # sensitive, numba using the type specific routines therefore # sometimes comes out with a different answer (orthonormal bases # are not unique etc.). if use_reconstruction: q, r = got # check they are dimensionally correct for k in range(len(expected)): self.assertEqual(got[k].shape, expected[k].shape) # check A=q*r rec = np.dot(q, r) resolution = np.finfo(a.dtype).resolution np.testing.assert_allclose( a, rec, rtol=10 * resolution, atol=100 * resolution # zeros tend to be fuzzy ) # check q is orthonormal self.assert_is_identity_matrix(np.dot(np.conjugate(q.T), q)) # Ensure proper resource management with self.assertNoNRTLeak(): cfunc(a, **kwargs) # test: column vector, tall, wide, square, row vector # prime sizes sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)] # test loop for size, dtype, order in \ product(sizes, self.dtypes, 'FC'): a = self.specific_sample_matrix(size, dtype, order) check(a) rn = "qr" # Wrong dtype self.assert_wrong_dtype(rn, cfunc, (np.ones((2, 2), dtype=np.int32),)) # Dimension issue self.assert_wrong_dimensions(rn, cfunc, (np.ones(10, dtype=np.float64),)) # no nans or infs self.assert_no_nan_or_inf(cfunc, (np.array([[1., 2., ], [np.inf, np.nan]], dtype=np.float64),)) # empty for sz in [(0, 1), (1, 0), (0, 0)]: self.assert_raise_on_empty(cfunc, (np.empty(sz),)) @needs_lapack def test_no_input_mutation(self): X = np.array([[1., 3, 2, 7,], [-5, 4, 2, 3,], [9, -3, 1, 1,], [2, -2, 2, 8,]], order='F') X_orig = np.copy(X) @jit(nopython=True) def func(X, test): if test: # not executed, but necessary to trigger A ordering in X X = X[1:2, :] return np.linalg.qr(X) expected = func.py_func(X, False) np.testing.assert_allclose(X, X_orig) got = func(X, False) np.testing.assert_allclose(X, X_orig) for e_a, g_a in zip(expected, got): np.testing.assert_allclose(e_a, g_a)
TestLinalgQr
python
encode__django-rest-framework
tests/test_fields.py
{ "start": 76559, "end": 77781 }
class ____(FieldValues, TestCase):
    """
    Values for `ListField` with UUIDField as child (since UUIDField can throw
    ValidationErrors from Django).

    The idea is to test that Django's ValidationErrors raised from Django internals
    are caught and serializers in a way that is structurally consistent with DRF's
    ValidationErrors.
    """
    valid_inputs = []
    invalid_inputs = [
        (
            ['not-a-valid-uuid', 'd7364368-d1b3-4455-aaa3-56439b460ca2', 'some-other-invalid-uuid'],
            {
                0: [exceptions.ErrorDetail(string='“not-a-valid-uuid” is not a valid UUID.', code='invalid')],
                1: [
                    exceptions.ErrorDetail(
                        string='Invalid pk "d7364368-d1b3-4455-aaa3-56439b460ca2" - object does not exist.',
                        code='does_not_exist',
                    )
                ],
                2: [exceptions.ErrorDetail(string='“some-other-invalid-uuid” is not a valid UUID.', code='invalid')],
            },
        ),
    ]
    outputs = {}
    field = serializers.ListField(child=serializers.PrimaryKeyRelatedField(queryset=UUIDForeignKeyTarget.objects.all()))
TestListFieldWithDjangoValidationErrors
python
falconry__falcon
falcon/typing.py
{ "start": 1013, "end": 1202 }
class ____(Protocol):
    """File-like protocol that defines only a read method.

    .. versionadded:: 4.0
    """

    def read(self, n: int | None = ..., /) -> bytes: ...


# ASGI
ReadableIO
python
jina-ai__jina
jina/_docarray_legacy.py
{ "start": 261, "end": 1823 }
class ____(BaseDoc):
    """
    This Document is the LegacyDocumentJina. It follows the same schema as in DocArray <=0.21.
    It can be useful to start migrating a codebase from v1 to v2.

    Nevertheless, the API is not totally compatible with DocArray <=0.21 `Document`.
    Indeed, none of the method associated with `Document` are present. Only the schema
    of the data is similar.

    ```python
    from docarray import DocList
    from docarray.documents.legacy import LegacyDocument
    import numpy as np

    doc = LegacyDocument(text='hello')
    doc.url = 'http://myimg.png'
    doc.tensor = np.zeros((3, 224, 224))
    doc.embedding = np.zeros((100, 1))

    doc.tags['price'] = 10

    doc.chunks = DocList[Document]([Document() for _ in range(10)])

    doc.chunks = DocList[Document]([Document() for _ in range(10)])
    ```

    """

    id: Optional[ID] = Field(
        description='The ID of the BaseDoc. This is useful for indexing in vector stores. If not set by user, it will automatically be assigned a random value',
        default_factory=lambda: ID(os.urandom(16).hex()),
    )
    tensor: Optional[AnyTensor] = None
    chunks: Optional[Union[DocList[LegacyDocumentJina], List[LegacyDocumentJina]]] = None
    matches: Optional[Union[DocList[LegacyDocumentJina], List[LegacyDocumentJina]]] = None
    blob: Optional[bytes] = None
    text: Optional[str] = None
    url: Optional[str] = None
    embedding: Optional[AnyEmbedding] = None
    tags: Dict[str, Any] = dict()
    scores: Optional[Dict[str, Any]] = None
LegacyDocumentJina
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_permissions.py
{ "start": 10892, "end": 12047 }
class ____(NonLaunchableGraphQLContextTestMatrix):
    def test_workspace_permissions_query(self, graphql_context):
        result = execute_dagster_graphql(graphql_context, WORKSPACE_PERMISSIONS_QUERY)

        assert result.data
        assert result.data["workspaceOrError"]["locationEntries"]
        assert result.data["locationStatusesOrError"]["entries"]

        # ensure both old and new ways to fetch work and return same data
        assert (
            result.data["locationStatusesOrError"]["entries"]
            == result.data["workspaceOrError"]["locationEntries"]
        )

        for location in result.data["locationStatusesOrError"]["entries"]:
            permissions_map = {
                permission["permission"]: permission["value"]
                for permission in location["permissions"]
            }

            expected_permissions_map = {
                key: perm.enabled
                for key, perm in get_location_scoped_user_permissions(
                    graphql_context.read_only
                ).items()
            }
            assert permissions_map == expected_permissions_map
TestWorkspacePermissionsQuery
python
gevent__gevent
src/gevent/_semaphore.py
{ "start": 18494, "end": 20887 }
class ____(Semaphore):
    """
    BoundedSemaphore(value=1) -> BoundedSemaphore

    A bounded semaphore checks to make sure its current value doesn't exceed its
    initial value. If it does, :class:`ValueError` is raised. In most situations
    semaphores are used to guard resources with limited capacity.

    If the semaphore is released too many times it's a sign of a bug.

    If not given, *value* defaults to 1.
    """

    __slots__ = (
        '_initial_value',
    )

    #: For monkey-patching, allow changing the class of error we raise
    _OVER_RELEASE_ERROR = ValueError

    def __init__(self, *args, **kwargs):
        Semaphore.__init__(self, *args, **kwargs)
        self._initial_value = self.counter

    def release(self):
        """
        Like :meth:`Semaphore.release`, but raises :class:`ValueError`
        if the semaphore is being over-released.
        """
        if self.counter >= self._initial_value:
            raise self._OVER_RELEASE_ERROR("Semaphore released too many times")

        counter = Semaphore.release(self)
        # When we are absolutely certain that no one holds this semaphore,
        # release our hub and go back to floating. This assists in cross-thread
        # uses.
        if counter == self._initial_value:
            self.hub = None # pylint:disable=attribute-defined-outside-init

        return counter

    def _at_fork_reinit(self):
        super(BoundedSemaphore, self)._at_fork_reinit()
        self.counter = self._initial_value


# By building the semaphore with Cython under PyPy, we get
# atomic operations (specifically, exiting/releasing), at the
# cost of some speed (one trivial semaphore micro-benchmark put the pure-python version
# at around 1s and the compiled version at around 4s). Some clever subclassing
# and having only the bare minimum be in cython might help reduce that penalty.
# NOTE: You must use version 0.23.4 or later to avoid a memory leak.
# https://mail.python.org/pipermail/cython-devel/2015-October/004571.html
# However, that's all for naught on up to and including PyPy 4.0.1 which
# have some serious crashing bugs with GC interacting with cython.
# It hasn't been tested since then, and PURE_PYTHON is assumed to be true
# for PyPy in all cases anyway, so this does nothing.

from gevent._util import import_c_accel
import_c_accel(globals(), 'gevent.__semaphore')
BoundedSemaphore
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/overloadOverlap1.py
{ "start": 4691, "end": 4877 }
class ____:
    @overload
    def method1(self, x: type[Any]) -> bool: ...

    @overload
    def method1(self, x: Any) -> str | bool: ...

    def method1(self, x: Any) -> Any: ...
ClassB
python
sphinx-doc__sphinx
sphinx/domains/c/_symbol.py
{ "start": 1313, "end": 1718 }
class ____:
    __slots__ = ('data',)

    data: Sequence[tuple[ASTIdentifier, str]]

    def __init__(self, data: Sequence[tuple[ASTIdentifier, str]], /) -> None:
        self.data = data

    def __repr__(self) -> str:
        return f'LookupKey({self.data!r})'

    def __str__(self) -> str:
        inner = ', '.join(f'({ident}, {id_})' for ident, id_ in self.data)
        return f'[{inner}]'
LookupKey
python
pytorch__pytorch
torch/_subclasses/meta_utils.py
{ "start": 20424, "end": 21401 }
class ____(ViewFunc[_TensorT], Generic[_TensorT]):
    func: Callable[
        [
            torch.Tensor,
            Optional[Callable[[int], int]],
            Optional[Callable[[torch.Tensor], _TensorT]],
        ],
        _TensorT,
    ]

    @override
    def apply(
        self,
        t: torch.Tensor,
        new_base: torch.Tensor,
        symint_visitor_fn: Optional[Callable[[int], int]] = None,
        tensor_visitor_fn: Optional[Callable[[torch.Tensor], _TensorT]] = None,
    ) -> _TensorT:
        # ignore `t`
        return self.func(new_base, symint_visitor_fn, tensor_visitor_fn)


# A callback where the device is either optional or required.
# All of these satisfy this protocol:
# def mk(arg: Callable[[], torch.Tensor], device: Union[torch.device, str])
# def mk(arg: Callable[[], torch.Tensor], device: Union[torch.device, str] = "meta")
# def mk(arg: Callable[[], torch.Tensor], device: Optional[Union[torch.device, str]] = None)
_CustomViewFunc
python
walkccc__LeetCode
solutions/3276. Select Cells in Grid With Maximum Score/3276.py
{ "start": 0, "end": 824 }
class ____:
  def maxScore(self, grid: list[list[int]]) -> int:
    numToIndices = collections.defaultdict(set)

    for index, row in enumerate(grid):
      for num in row:
        numToIndices[num].add(index)

    numToIndices = list(numToIndices.items())

    @functools.lru_cache(None)
    def dp(i: int, mask: int) -> int:
      """
      Returns the maximum score by selecting numbers from numToIndices[i..],
      where `mask` is the bitmask of the used row indices.
      """
      if i == len(numToIndices):
        return 0
      # Skip numToIndices[i][0].
      res = dp(i + 1, mask)
      for index in numToIndices[i][1]:
        if (mask >> index & 1) == 0:
          # Take numToIndices[i][0].
          res = max(res, numToIndices[i][0] + dp(i + 1, mask | 1 << index))
      return res

    return dp(0, 0)
Solution
python
pandas-dev__pandas
pandas/tests/tseries/offsets/test_business_halfyear.py
{ "start": 7413, "end": 13968 }
class ____: def test_repr(self): expected = "<BusinessHalfYearEnd: startingMonth=6>" assert repr(BHalfYearEnd()) == expected expected = "<BusinessHalfYearEnd: startingMonth=3>" assert repr(BHalfYearEnd(startingMonth=3)) == expected expected = "<BusinessHalfYearEnd: startingMonth=1>" assert repr(BHalfYearEnd(startingMonth=1)) == expected def test_offset_corner_case(self): # corner offset = BHalfYearEnd(n=-1, startingMonth=1) assert datetime(2010, 1, 30) + offset == datetime(2010, 1, 29) offset_cases = [] offset_cases.append( ( BHalfYearEnd(startingMonth=1), { datetime(2008, 1, 1): datetime(2008, 1, 31), datetime(2008, 1, 31): datetime(2008, 7, 31), datetime(2008, 2, 15): datetime(2008, 7, 31), datetime(2008, 2, 29): datetime(2008, 7, 31), datetime(2008, 3, 15): datetime(2008, 7, 31), datetime(2008, 3, 31): datetime(2008, 7, 31), datetime(2008, 4, 15): datetime(2008, 7, 31), datetime(2008, 7, 31): datetime(2009, 1, 30), }, ) ) offset_cases.append( ( BHalfYearEnd(startingMonth=2), { datetime(2008, 1, 1): datetime(2008, 2, 29), datetime(2008, 1, 31): datetime(2008, 2, 29), datetime(2008, 2, 15): datetime(2008, 2, 29), datetime(2008, 2, 29): datetime(2008, 8, 29), datetime(2008, 3, 15): datetime(2008, 8, 29), datetime(2008, 3, 31): datetime(2008, 8, 29), datetime(2008, 4, 15): datetime(2008, 8, 29), datetime(2008, 8, 28): datetime(2008, 8, 29), datetime(2008, 8, 29): datetime(2009, 2, 27), }, ) ) offset_cases.append( ( BHalfYearEnd(startingMonth=1, n=0), { datetime(2008, 1, 1): datetime(2008, 1, 31), datetime(2008, 1, 31): datetime(2008, 1, 31), datetime(2008, 2, 15): datetime(2008, 7, 31), datetime(2008, 2, 29): datetime(2008, 7, 31), datetime(2008, 3, 15): datetime(2008, 7, 31), datetime(2008, 3, 31): datetime(2008, 7, 31), datetime(2008, 4, 15): datetime(2008, 7, 31), datetime(2008, 7, 31): datetime(2008, 7, 31), }, ) ) offset_cases.append( ( BHalfYearEnd(startingMonth=1, n=-1), { datetime(2008, 1, 1): datetime(2007, 7, 31), datetime(2008, 1, 31): datetime(2007, 7, 31), datetime(2008, 2, 15): datetime(2008, 1, 31), datetime(2008, 2, 29): datetime(2008, 1, 31), datetime(2008, 3, 15): datetime(2008, 1, 31), datetime(2008, 3, 31): datetime(2008, 1, 31), datetime(2008, 7, 15): datetime(2008, 1, 31), datetime(2008, 7, 30): datetime(2008, 1, 31), datetime(2008, 7, 31): datetime(2008, 1, 31), datetime(2008, 8, 1): datetime(2008, 7, 31), }, ) ) offset_cases.append( ( BHalfYearEnd(startingMonth=6, n=2), { datetime(2008, 1, 31): datetime(2008, 12, 31), datetime(2008, 2, 15): datetime(2008, 12, 31), datetime(2008, 2, 29): datetime(2008, 12, 31), datetime(2008, 3, 15): datetime(2008, 12, 31), datetime(2008, 3, 31): datetime(2008, 12, 31), datetime(2008, 4, 15): datetime(2008, 12, 31), datetime(2008, 4, 30): datetime(2008, 12, 31), datetime(2008, 6, 30): datetime(2009, 6, 30), }, ) ) @pytest.mark.parametrize("case", offset_cases) def test_offset(self, case): offset, cases = case for base, expected in cases.items(): assert_offset_equal(offset, base, expected) on_offset_cases = [ (BHalfYearEnd(1, startingMonth=1), datetime(2008, 1, 31), True), (BHalfYearEnd(1, startingMonth=1), datetime(2007, 12, 31), False), (BHalfYearEnd(1, startingMonth=1), datetime(2008, 2, 29), False), (BHalfYearEnd(1, startingMonth=1), datetime(2007, 3, 30), False), (BHalfYearEnd(1, startingMonth=1), datetime(2007, 3, 31), False), (BHalfYearEnd(1, startingMonth=1), datetime(2008, 4, 30), False), (BHalfYearEnd(1, startingMonth=1), datetime(2008, 5, 30), False), (BHalfYearEnd(1, startingMonth=1), datetime(2008, 5, 31), False), 
(BHalfYearEnd(1, startingMonth=1), datetime(2007, 6, 29), False), (BHalfYearEnd(1, startingMonth=1), datetime(2007, 6, 30), False), (BHalfYearEnd(1, startingMonth=3), datetime(2008, 1, 31), False), (BHalfYearEnd(1, startingMonth=3), datetime(2007, 12, 31), False), (BHalfYearEnd(1, startingMonth=3), datetime(2008, 2, 29), False), (BHalfYearEnd(1, startingMonth=3), datetime(2007, 3, 30), True), (BHalfYearEnd(1, startingMonth=3), datetime(2007, 3, 31), False), (BHalfYearEnd(1, startingMonth=3), datetime(2008, 4, 30), False), (BHalfYearEnd(1, startingMonth=3), datetime(2008, 5, 30), False), (BHalfYearEnd(1, startingMonth=3), datetime(2008, 5, 31), False), (BHalfYearEnd(1, startingMonth=3), datetime(2007, 6, 29), False), (BHalfYearEnd(1, startingMonth=3), datetime(2007, 6, 30), False), (BHalfYearEnd(1, startingMonth=6), datetime(2008, 1, 31), False), (BHalfYearEnd(1, startingMonth=6), datetime(2007, 12, 31), True), (BHalfYearEnd(1, startingMonth=6), datetime(2008, 2, 29), False), (BHalfYearEnd(1, startingMonth=6), datetime(2007, 3, 30), False), (BHalfYearEnd(1, startingMonth=6), datetime(2007, 3, 31), False), (BHalfYearEnd(1, startingMonth=6), datetime(2008, 4, 30), False), (BHalfYearEnd(1, startingMonth=6), datetime(2008, 5, 30), False), (BHalfYearEnd(1, startingMonth=6), datetime(2008, 5, 31), False), (BHalfYearEnd(1, startingMonth=6), datetime(2007, 6, 29), True), (BHalfYearEnd(1, startingMonth=6), datetime(2007, 6, 30), False), ] @pytest.mark.parametrize("case", on_offset_cases) def test_is_on_offset(self, case): offset, dt, expected = case assert_is_on_offset(offset, dt, expected)
TestBHalfYearEnd
python
huggingface__transformers
tests/models/glm4_moe/test_modeling_glm4_moe.py
{ "start": 1142, "end": 1706 }
class ____(CausalLMModelTester):
    if is_torch_available():
        base_model_class = Glm4MoeModel

    def __init__(
        self,
        parent,
        n_routed_experts=8,
        n_shared_experts=1,
        n_group=1,
        topk_group=1,
        num_experts_per_tok=8,
    ):
        super().__init__(parent=parent, num_experts_per_tok=num_experts_per_tok)
        self.n_routed_experts = n_routed_experts
        self.n_shared_experts = n_shared_experts
        self.n_group = n_group
        self.topk_group = topk_group


@require_torch
Glm4MoeModelTester
python
getsentry__sentry
tests/sentry/runner/commands/test_backup.py
{ "start": 29870, "end": 30648 }
class ____(TransactionTestCase):
    def test_import_integrity_error_exit_code(self, get_importer_for_model: MagicMock) -> None:
        get_importer_for_model.return_value.return_value = RpcImportError(
            kind=RpcImportErrorKind.IntegrityError,
            on=InstanceID(model=str(get_model_name(Email)), ordinal=1),
            reason="Test integrity error",
        )

        # Global imports assume an empty DB, so this should fail with an `IntegrityError`.
        with pytest.raises(ImportingError) as excinfo:
            CliRunner().invoke(
                import_, ["global", GOOD_FILE_PATH, "--no-prompt"], catch_exceptions=False
            )
        assert excinfo.value.context.get_kind() == RpcImportErrorKind.IntegrityError
BadImportExportDomainErrorTests
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/auto_materialize_asset_evaluations.py
{ "start": 8115, "end": 9701 }
class ____(graphene.ObjectType):
    id = graphene.NonNull(graphene.ID)
    evaluationId = graphene.NonNull(graphene.ID)
    numRequested = graphene.NonNull(graphene.Int)
    numSkipped = graphene.NonNull(graphene.Int)
    numDiscarded = graphene.NonNull(graphene.Int)
    rulesWithRuleEvaluations = non_null_list(GrapheneAutoMaterializeRuleWithRuleEvaluations)
    timestamp = graphene.NonNull(graphene.Float)
    runIds = non_null_list(graphene.String)
    rules = graphene.Field(graphene.List(graphene.NonNull(GrapheneAutoMaterializeRule)))
    assetKey = graphene.NonNull(GrapheneAssetKey)

    class Meta:
        name = "AutoMaterializeAssetEvaluationRecord"

    def __init__(self, record: AutoMaterializeAssetEvaluationRecord):
        evaluation_with_run_ids = record.get_evaluation_with_run_ids()
        evaluation = evaluation_with_run_ids.evaluation
        (
            rules,
            rules_with_rule_evaluations,
        ) = create_graphene_auto_materialize_rules_with_rule_evaluations(evaluation)
        super().__init__(
            id=record.id,
            evaluationId=record.evaluation_id,
            numRequested=evaluation_with_run_ids.evaluation.true_subset.size,
            numSkipped=0,
            numDiscarded=0,
            rulesWithRuleEvaluations=rules_with_rule_evaluations,
            timestamp=record.timestamp,
            runIds=evaluation_with_run_ids.run_ids,
            rules=sorted(rules, key=lambda rule: rule.className),
            assetKey=GrapheneAssetKey(path=record.key.path),
        )
GrapheneAutoMaterializeAssetEvaluationRecord
python
python-markdown__markdown
markdown/extensions/toc.py
{ "start": 8033, "end": 15510 }
class ____(Treeprocessor): """ Step through document and build TOC. """ def __init__(self, md: Markdown, config: dict[str, Any]): super().__init__(md) self.marker: str = config["marker"] self.title: str = config["title"] self.base_level = int(config["baselevel"]) - 1 self.slugify = config["slugify"] self.sep = config["separator"] self.toc_class = config["toc_class"] self.title_class: str = config["title_class"] self.use_anchors: bool = parseBoolValue(config["anchorlink"]) self.anchorlink_class: str = config["anchorlink_class"] self.use_permalinks = parseBoolValue(config["permalink"], False) if self.use_permalinks is None: self.use_permalinks = config["permalink"] self.permalink_class: str = config["permalink_class"] self.permalink_title: str = config["permalink_title"] self.permalink_leading: bool | None = parseBoolValue(config["permalink_leading"], False) self.header_rgx = re.compile("[Hh][123456]") if isinstance(config["toc_depth"], str) and '-' in config["toc_depth"]: self.toc_top, self.toc_bottom = [int(x) for x in config["toc_depth"].split('-')] else: self.toc_top = 1 self.toc_bottom = int(config["toc_depth"]) def iterparent(self, node: etree.Element) -> Iterator[tuple[etree.Element, etree.Element]]: """ Iterator wrapper to get allowed parent and child all at once. """ # We do not allow the marker inside a header as that # would causes an endless loop of placing a new TOC # inside previously generated TOC. for child in node: if not self.header_rgx.match(child.tag) and child.tag not in ['pre', 'code']: yield node, child yield from self.iterparent(child) def replace_marker(self, root: etree.Element, elem: etree.Element) -> None: """ Replace marker with elem. """ for (p, c) in self.iterparent(root): text = ''.join(c.itertext()).strip() if not text: continue # To keep the output from screwing up the # validation by putting a `<div>` inside of a `<p>` # we actually replace the `<p>` in its entirety. # The `<p>` element may contain more than a single text content # (`nl2br` can introduce a `<br>`). In this situation, `c.text` returns # the very first content, ignore children contents or tail content. # `len(c) == 0` is here to ensure there is only text in the `<p>`. if c.text and c.text.strip() == self.marker and len(c) == 0: for i in range(len(p)): if p[i] == c: p[i] = elem break def set_level(self, elem: etree.Element) -> None: """ Adjust header level according to base level. """ level = int(elem.tag[-1]) + self.base_level if level > 6: level = 6 elem.tag = 'h%d' % level def add_anchor(self, c: etree.Element, elem_id: str) -> None: anchor = etree.Element("a") anchor.text = c.text anchor.attrib["href"] = "#" + elem_id anchor.attrib["class"] = self.anchorlink_class c.text = "" for elem in c: anchor.append(elem) while len(c): c.remove(c[0]) c.append(anchor) def add_permalink(self, c: etree.Element, elem_id: str) -> None: permalink = etree.Element("a") permalink.text = ("%spara;" % AMP_SUBSTITUTE if self.use_permalinks is True else self.use_permalinks) permalink.attrib["href"] = "#" + elem_id permalink.attrib["class"] = self.permalink_class if self.permalink_title: permalink.attrib["title"] = self.permalink_title if self.permalink_leading: permalink.tail = c.text c.text = "" c.insert(0, permalink) else: c.append(permalink) def build_toc_div(self, toc_list: list) -> etree.Element: """ Return a string div given a toc list. 
""" div = etree.Element("div") div.attrib["class"] = self.toc_class # Add title to the div if self.title: header = etree.SubElement(div, "span") if self.title_class: header.attrib["class"] = self.title_class header.text = self.title def build_etree_ul(toc_list: list, parent: etree.Element) -> etree.Element: ul = etree.SubElement(parent, "ul") for item in toc_list: # List item link, to be inserted into the toc div li = etree.SubElement(ul, "li") link = etree.SubElement(li, "a") link.text = item.get('name', '') link.attrib["href"] = '#' + item.get('id', '') if item['children']: build_etree_ul(item['children'], li) return ul build_etree_ul(toc_list, div) if 'prettify' in self.md.treeprocessors: self.md.treeprocessors['prettify'].run(div) return div def run(self, doc: etree.Element) -> None: # Get a list of id attributes used_ids = set() for el in doc.iter(): if "id" in el.attrib: used_ids.add(el.attrib["id"]) toc_tokens = [] for el in doc.iter(): if isinstance(el.tag, str) and self.header_rgx.match(el.tag): self.set_level(el) innerhtml = render_inner_html(remove_fnrefs(el), self.md) name = strip_tags(innerhtml) # Do not override pre-existing ids if "id" not in el.attrib: el.attrib["id"] = unique(self.slugify(html.unescape(name), self.sep), used_ids) data_toc_label = '' if 'data-toc-label' in el.attrib: data_toc_label = run_postprocessors(unescape(el.attrib['data-toc-label']), self.md) # Overwrite name with sanitized value of `data-toc-label`. name = escape_cdata(strip_tags(data_toc_label)) # Remove the data-toc-label attribute as it is no longer needed del el.attrib['data-toc-label'] if int(el.tag[-1]) >= self.toc_top and int(el.tag[-1]) <= self.toc_bottom: toc_tokens.append({ 'level': int(el.tag[-1]), 'id': unescape(el.attrib["id"]), 'name': name, 'html': innerhtml, 'data-toc-label': data_toc_label }) if self.use_anchors: self.add_anchor(el, el.attrib["id"]) if self.use_permalinks not in [False, None]: self.add_permalink(el, el.attrib["id"]) toc_tokens = nest_toc_tokens(toc_tokens) div = self.build_toc_div(toc_tokens) if self.marker: self.replace_marker(doc, div) # serialize and attach to markdown instance. toc = self.md.serializer(div) for pp in self.md.postprocessors: toc = pp.run(toc) self.md.toc_tokens = toc_tokens self.md.toc = toc
TocTreeprocessor
python
ethereum__web3.py
web3/types.py
{ "start": 4628, "end": 5481 }
class ____(TypedDict, total=False):
    baseFeePerGas: Wei
    difficulty: int
    extraData: HexBytes
    gasLimit: int
    gasUsed: int
    hash: HexBytes
    logsBloom: HexBytes
    miner: ChecksumAddress
    mixHash: HexBytes
    nonce: HexBytes
    number: BlockNumber
    parentHash: HexBytes
    receiptsRoot: HexBytes
    sha3Uncles: HexBytes
    size: int
    stateRoot: HexBytes
    timestamp: Timestamp
    totalDifficulty: int
    transactions: Sequence[HexBytes] | Sequence[TxData]
    transactionsRoot: HexBytes
    uncles: Sequence[HexBytes]
    withdrawals: Sequence[WithdrawalData]
    withdrawalsRoot: HexBytes
    parentBeaconBlockRoot: HexBytes
    blobGasUsed: int
    excessBlobGas: int
    requestsHash: HexBytes
    # ExtraDataToPOAMiddleware replaces extraData w/ proofOfAuthorityData
    proofOfAuthorityData: HexBytes
BlockData
python
kamyu104__LeetCode-Solutions
Python/reverse-linked-list.py
{ "start": 29, "end": 255 }
class ____(object):
    def __init__(self, x):
        self.val = x
        self.next = None

    def __repr__(self):
        if self:
            return "{} -> {}".format(self.val, repr(self.next))


# Iterative solution.
ListNode
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/schema.py
{ "start": 8438, "end": 10004 }
class ____:
    """define a class that includes the :meth:`.HasConditionalDDL.ddl_if`
    method, allowing for conditional rendering of DDL.

    Currently applies to constraints and indexes.

    .. versionadded:: 2.0

    """

    _ddl_if: Optional[ddl.DDLIf] = None

    def ddl_if(
        self,
        dialect: Optional[str] = None,
        callable_: Optional[ddl.DDLIfCallable] = None,
        state: Optional[Any] = None,
    ) -> Self:
        r"""apply a conditional DDL rule to this schema item.

        These rules work in a similar manner to the
        :meth:`.ExecutableDDLElement.execute_if` callable, with the added
        feature that the criteria may be checked within the DDL compilation
        phase for a construct such as :class:`.CreateTable`.
        :meth:`.HasConditionalDDL.ddl_if` currently applies towards the
        :class:`.Index` construct as well as all :class:`.Constraint`
        constructs.

        :param dialect: string name of a dialect, or a tuple of string names
         to indicate multiple dialect types.

        :param callable\_: a callable that is constructed using the same form
         as that described in
         :paramref:`.ExecutableDDLElement.execute_if.callable_`.

        :param state: any arbitrary object that will be passed to the
         callable, if present.

        .. versionadded:: 2.0

        .. seealso::

            :ref:`schema_ddl_ddl_if` - background and usage examples

        """
        self._ddl_if = ddl.DDLIf(dialect, callable_, state)
        return self
HasConditionalDDL
python
huggingface__transformers
src/transformers/models/gemma3n/modeling_gemma3n.py
{ "start": 24223, "end": 29356 }
class ____(nn.Module): """Applies Group Normalization cumulatively over the time dimension. This layer normalizes the input by calculating the mean and variance cumulatively over the time dimension (dim 1). The statistics are computed over all feature dimensions (specified by `feature_dims` and `num_channels`) for elements marked as valid by the optional `mask`. If a `mask` is provided (True for valid, False for invalid/padded), invalid time steps do not contribute to the statistics calculation, and their corresponding output values are zeroed out. Scale and bias, if enabled, are applied per-channel (last dimension). This behavior is similar to JAX's `GroupNormalization` with `num_groups=1` and `cumulative=True`. """ def __init__( self, num_channels: int, # Number of channels (size of the last dimension) feature_dims: Sequence[int], # Sizes of non-channel feature dimensions, e.g., (H, W) for input [B,T,H,W,C] eps: float = 1e-3, ): super().__init__() self.num_channels = num_channels self.feature_dims = tuple(feature_dims) self.eps = eps # Scale parameter depends only on the channel dimension self.weight = nn.Parameter(torch.ones(num_channels)) # Axes for normalization: all dimensions except Batch (0) and Time (1). # For input [B, T, *feature_dims, C], these are dims from 2 onwards. self.reduction_axes = tuple(range(2, 2 + len(self.feature_dims) + 1)) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: """Applies cumulative group norm, optionally using a mask. Args: hidden_states: Input tensor, shape [B, T, *feature_dims, C]. Returns: Normalized tensor with the same shape as x. """ expected_input_suffix = self.feature_dims + (self.num_channels,) if hidden_states.shape[2:] != expected_input_suffix: raise ValueError( f"Input tensor shape suffix {hidden_states.shape[2:]} does not match expected" f" suffix (feature_dims + num_channels) {expected_input_suffix}" ) input_dtype = hidden_states.dtype # Calculations are performed in float32 for numerical stability. calc_dtype = torch.float32 x_calc = hidden_states.to(calc_dtype) # Prepare a broadcastable mask (`mask_calc`). # If no mask is provided, treat all elements as valid # (mask_calc is all ones). # Otherwise, expand the [B, T] mask to [B, T, 1, ..., 1] for broadcasting. mask_calc = torch.ones_like(x_calc, dtype=calc_dtype) # Cumulative Statistics Calculation # 1. Sum of values over reduction axes at each time step. sum_values_at_t = torch.sum(x_calc, dim=self.reduction_axes, keepdim=True) # 2. Cumulative sum of values over time. cum_sum_values = torch.cumsum(sum_values_at_t, dim=1) # 3. Count of valid elements in the normalization group at each time step. # (A "group" here consists of all features at a given Batch, Time). elements_in_group_at_t = torch.sum(mask_calc, dim=self.reduction_axes, keepdim=True) # 4. Cumulative count of valid elements over time. cum_count_elements = torch.cumsum(elements_in_group_at_t, dim=1) # Avoid division by zero if all preceding elements were masked. safe_cum_count_elements = torch.clamp(cum_count_elements, min=1.0) # 5. Cumulative mean. cum_mean = cum_sum_values / safe_cum_count_elements # 6. Sum of squared differences from the cumulative mean. # Only sum for valid elements: (x_calc - cum_mean)^2 * mask_calc. # Using x_calc here for the difference, as cum_mean already accounts for masking. squared_diff_from_mean = (x_calc - cum_mean).pow(2) sum_sq_diff_at_t = torch.sum(squared_diff_from_mean, dim=self.reduction_axes, keepdim=True) # 7. Cumulative sum of squared differences over time. 
cum_sum_sq_diff = torch.cumsum(sum_sq_diff_at_t, dim=1) # 8. Cumulative variance. cum_variance = cum_sum_sq_diff / safe_cum_count_elements # Normalize the input using the calculated cumulative statistics: # (x - E[x]) / sqrt(Var[x] + eps) normalized_x = (x_calc - cum_mean) * torch.rsqrt(cum_variance + self.eps) # Apply affine transformation (scale and bias) if enabled. # Scale and bias are applied per-channel (last dimension). scale = self.weight.to(calc_dtype) # Reshape for broadcasting: [C] -> [1, ..., 1, C] scale_view_shape = [1] * (hidden_states.dim() - 1) + [self.num_channels] normalized_x = normalized_x * scale.view(scale_view_shape) # Zero out outputs for time steps that were originally masked (where mask_calc is 0). # This ensures padded/invalid positions in the input result in zero output. final_output = normalized_x * mask_calc return final_output.to(input_dtype)
Gemma3nAudioCumulativeGroupNorm
python
getsentry__sentry
src/sentry/integrations/github/blame.py
{ "start": 527, "end": 603 }
class ____(TypedDict):
    name: str | None
    email: str | None
GitHubAuthor
python
sqlalchemy__sqlalchemy
test/dialect/mssql/test_engine.py
{ "start": 24570, "end": 27936 }
class ____(fixtures.TestBase): def _fixture( self, view_result, simulate_perm_failure=False, simulate_no_system_views=False, ): class Error(Exception): pass dialect = pyodbc.MSDialect_pyodbc() dialect.dbapi = Mock(Error=Error) dialect.server_version_info = base.MS_2012_VERSION result = [] def fail_on_exec( stmt, ): result.clear() if "SELECT name FROM sys.system_views" in stmt: if simulate_no_system_views: raise dialect.dbapi.Error( "SQL Server simulated no system_views error" ) else: if view_result: result.append((view_result,)) elif re.match( ".*SELECT CASE transaction_isolation_level.*FROM sys.%s" % (view_result,), stmt, re.S, ): if simulate_perm_failure: raise dialect.dbapi.Error( "SQL Server simulated permission error" ) result.append(("SERIALIZABLE",)) else: assert False connection = Mock( cursor=Mock( return_value=Mock( execute=fail_on_exec, fetchone=lambda: result[0] if result else None, ) ) ) return dialect, connection def test_dm_pdw_nodes(self): dialect, connection = self._fixture("dm_pdw_nodes_exec_sessions") eq_(dialect.get_isolation_level(connection), "SERIALIZABLE") def test_exec_sessions(self): dialect, connection = self._fixture("exec_sessions") eq_(dialect.get_isolation_level(connection), "SERIALIZABLE") def test_not_supported(self): dialect, connection = self._fixture(None) assert_raises_message( NotImplementedError, "Can't fetch isolation level on this particular ", dialect.get_isolation_level, connection, ) @testing.combinations(True, False) def test_no_system_views(self, simulate_perm_failure_also): dialect, connection = self._fixture( "dm_pdw_nodes_exec_sessions", simulate_perm_failure=simulate_perm_failure_also, simulate_no_system_views=True, ) assert_raises_message( NotImplementedError, r"Can\'t fetch isolation level; encountered error SQL Server " r"simulated no system_views error when attempting to query the " r'"sys.system_views" view.', dialect.get_isolation_level, connection, ) def test_dont_have_table_perms(self): dialect, connection = self._fixture( "dm_pdw_nodes_exec_sessions", simulate_perm_failure=True ) assert_raises_message( NotImplementedError, r"Can\'t fetch isolation level; encountered error SQL Server " r"simulated permission error when attempting to query the " r'"sys.dm_pdw_nodes_exec_sessions" view.', dialect.get_isolation_level, connection, )
IsolationLevelDetectTest
python
pyca__cryptography
tests/hazmat/primitives/test_dsa.py
{ "start": 23390, "end": 26357 }
class ____: def test_dsa_parameter_numbers(self): parameter_numbers = dsa.DSAParameterNumbers(p=1, q=2, g=3) assert parameter_numbers.p == 1 assert parameter_numbers.q == 2 assert parameter_numbers.g == 3 def test_dsa_parameter_numbers_invalid_types(self): with pytest.raises(TypeError): dsa.DSAParameterNumbers(p=None, q=2, g=3) # type: ignore[arg-type] with pytest.raises(TypeError): dsa.DSAParameterNumbers(p=1, q=None, g=3) # type: ignore[arg-type] with pytest.raises(TypeError): dsa.DSAParameterNumbers(p=1, q=2, g=None) # type: ignore[arg-type] def test_dsa_public_numbers(self): parameter_numbers = dsa.DSAParameterNumbers(p=1, q=2, g=3) public_numbers = dsa.DSAPublicNumbers( y=4, parameter_numbers=parameter_numbers ) assert public_numbers.y == 4 assert public_numbers.parameter_numbers == parameter_numbers def test_dsa_public_numbers_invalid_types(self): with pytest.raises(TypeError): dsa.DSAPublicNumbers( y=4, parameter_numbers=None, # type: ignore[arg-type] ) with pytest.raises(TypeError): parameter_numbers = dsa.DSAParameterNumbers(p=1, q=2, g=3) dsa.DSAPublicNumbers( y=None, # type: ignore[arg-type] parameter_numbers=parameter_numbers, ) def test_dsa_private_numbers(self): parameter_numbers = dsa.DSAParameterNumbers(p=1, q=2, g=3) public_numbers = dsa.DSAPublicNumbers( y=4, parameter_numbers=parameter_numbers ) private_numbers = dsa.DSAPrivateNumbers( x=5, public_numbers=public_numbers ) assert private_numbers.x == 5 assert private_numbers.public_numbers == public_numbers def test_dsa_private_numbers_invalid_types(self): parameter_numbers = dsa.DSAParameterNumbers(p=1, q=2, g=3) public_numbers = dsa.DSAPublicNumbers( y=4, parameter_numbers=parameter_numbers ) with pytest.raises(TypeError): dsa.DSAPrivateNumbers( x=4, public_numbers=None, # type: ignore[arg-type] ) with pytest.raises(TypeError): dsa.DSAPrivateNumbers( x=None, # type: ignore[arg-type] public_numbers=public_numbers, ) def test_repr(self): parameter_numbers = dsa.DSAParameterNumbers(p=1, q=2, g=3) assert ( repr(parameter_numbers) == "<DSAParameterNumbers(p=1, q=2, g=3)>" ) public_numbers = dsa.DSAPublicNumbers( y=4, parameter_numbers=parameter_numbers ) assert repr(public_numbers) == ( "<DSAPublicNumbers(y=4, parameter_numbers=<DSAParameterNumbers(p=1" ", q=2, g=3)>)>" )
TestDSANumbers
python
sqlalchemy__sqlalchemy
test/ext/test_deprecations.py
{ "start": 1239, "end": 2623 }
class ____(fixtures.TestBase):
    def test_query_chooser(self):
        m1 = mock.Mock()

        with testing.expect_deprecated(
            "The ``query_chooser`` parameter is deprecated; please use",
        ):
            s = ShardedSession(
                shard_chooser=m1.shard_chooser,
                identity_chooser=m1.identity_chooser,
                query_chooser=m1.query_chooser,
            )

        m2 = mock.Mock()
        s.execute_chooser(m2)

        eq_(m1.mock_calls, [mock.call.query_chooser(m2.statement)])

    def test_id_chooser(self, decl_base):
        class A(decl_base):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)

        m1 = mock.Mock()

        with testing.expect_deprecated(
            "The ``id_chooser`` parameter is deprecated; please use"
        ):
            s = ShardedSession(
                shard_chooser=m1.shard_chooser,
                id_chooser=m1.id_chooser,
                execute_chooser=m1.execute_chooser,
            )

        m2 = mock.Mock()
        s.identity_chooser(
            A.__mapper__,
            m2.primary_key,
            lazy_loaded_from=m2.lazy_loaded_from,
            execution_options=m2.execution_options,
            bind_arguments=m2.bind_arguments,
        )

        eq_(m1.mock_calls, [mock.call.id_chooser(mock.ANY, m2.primary_key)])
HorizontalShardTest
python
huggingface__transformers
tests/quantization/bnb/test_mixed_int8.py
{ "start": 34252, "end": 36352 }
class ____(BaseMixedInt8Test):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True)
        )
        model.train()

        if torch_device in ["cuda", "xpu"]:
            self.assertEqual(
                set(model.hf_device_map.values()), {backend_torch_accelerator_module(torch_device).current_device()}
            )
        else:
            self.assertTrue(all(param.device.type == "cpu" for param in model.parameters()))

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            # cast all non INT8 parameters to fp32
            if param.dtype in (torch.float16, torch.bfloat16) and param.__class__.__name__ != "Params4bit":
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if isinstance(module, OPTAttention):
                module.q_proj = LoRALayer(module.q_proj, rank=16, dtype=model.dtype)
                module.k_proj = LoRALayer(module.k_proj, rank=16, dtype=model.dtype)
                module.v_proj = LoRALayer(module.v_proj, rank=16, dtype=model.dtype)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(torch_device)

        # Step 4: Check if the gradient is not None
        with torch.autocast(torch_device):
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


@apply_skip_if_not_implemented
MixedInt8TestTraining
python
Lightning-AI__lightning
tests/tests_pytorch/models/test_hparams.py
{ "start": 2019, "end": 2351 }
class ____(BoringModel):
    """Tests that a model can take an object."""

    def __init__(self, hparams):
        super().__init__()
        self.save_hyperparameters(hparams)


def decorate(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper
SaveHparamsModel
python
kamyu104__LeetCode-Solutions
Python/next-greater-numerically-balanced-number.py
{ "start": 62, "end": 1817 }
class ____(object): def nextBeautifulNumber(self, n): """ :type n: int :rtype: int """ # precomputed by generating all balanced's permutations in solution2 # candidates = sorted(set(int("".join(perm)) for x in [1, 22, 122, 333, 1333, 4444, 14444, 22333, 55555, 122333, 155555, 224444, 666666] for perm in itertools.permutations(str(x)))) + [1224444] candidates = [ 1, 22, 122, 212, 221, 333 , 1333, 3133, 3313, 3331, 4444, 14444, 22333, 23233, 23323, 23332, 32233, 32323, 32332, 33223, 33232, 33322, 41444, 44144, 44414, 44441, 55555, 122333, 123233, 123323, 123332, 132233, 132323, 132332, 133223, 133232, 133322, 155555, 212333, 213233, 213323, 213332, 221333, 223133, 223313, 223331, 224444, 231233, 231323, 231332, 232133, 232313, 232331, 233123, 233132, 233213, 233231, 233312, 233321, 242444, 244244, 244424, 244442, 312233, 312323, 312332, 313223, 313232, 313322, 321233, 321323, 321332, 322133, 322313, 322331, 323123, 323132, 323213, 323231, 323312, 323321, 331223, 331232, 331322, 332123, 332132, 332213, 332231, 332312, 332321, 333122, 333212, 333221, 422444, 424244, 424424, 424442, 442244, 442424, 442442, 444224, 444242, 444422, 515555, 551555, 555155, 555515, 555551, 666666, 1224444] return candidates[bisect.bisect_right(candidates, n)] # Time: O(l * c) = O(1), c is the count of all balanced's permutations, l is the max length of permutations # Space: O(l * b) = O(1), b is the count of balanced
Solution