language: stringclasses (1 value)
repo: stringclasses (346 values)
path: stringlengths (6 to 201)
class_span: dict
source: stringlengths (21 to 2.38M)
target: stringlengths (1 to 96)
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/instance.py
{ "start": 9518, "end": 14030 }
class ____(graphene.ObjectType): id = graphene.NonNull(graphene.String) info = graphene.Field(graphene.String) runLauncher = graphene.Field(GrapheneRunLauncher) runQueuingSupported = graphene.NonNull(graphene.Boolean) runQueueConfig = graphene.Field(GrapheneRunQueueConfig) executablePath = graphene.NonNull(graphene.String) daemonHealth = graphene.NonNull(GrapheneDaemonHealth) hasInfo = graphene.NonNull(graphene.Boolean) autoMaterializePaused = graphene.NonNull(graphene.Boolean) supportsConcurrencyLimits = graphene.NonNull(graphene.Boolean) minConcurrencyLimitValue = graphene.NonNull(graphene.Int) maxConcurrencyLimitValue = graphene.NonNull(graphene.Int) concurrencyLimits = non_null_list(GrapheneConcurrencyKeyInfo) concurrencyLimit = graphene.Field( graphene.NonNull(GrapheneConcurrencyKeyInfo), concurrencyKey=graphene.Argument(graphene.String), ) useAutoMaterializeSensors = graphene.Field( graphene.NonNull(graphene.Boolean), description="Whether or not the deployment is using automation policy sensors to materialize assets", ) poolConfig = graphene.Field(GraphenePoolConfig) freshnessEvaluationEnabled = graphene.NonNull(graphene.Boolean) class Meta: name = "Instance" def __init__(self, instance): super().__init__() self._instance = check.inst_param(instance, "instance", DagsterInstance) def resolve_id(self, _graphene_info: ResolveInfo): return "Instance" def resolve_hasInfo(self, graphene_info: ResolveInfo) -> bool: return graphene_info.context.show_instance_config def resolve_useAutoMaterializeSensors(self, _graphene_info: ResolveInfo): return self._instance.auto_materialize_use_sensors def resolve_info(self, graphene_info: ResolveInfo): return self._instance.info_str() if graphene_info.context.show_instance_config else None def resolve_runLauncher(self, _graphene_info: ResolveInfo): return ( GrapheneRunLauncher(self._instance.run_launcher) if self._instance.run_launcher else None ) def resolve_runQueuingSupported(self, _graphene_info: ResolveInfo): from dagster._core.run_coordinator import QueuedRunCoordinator return isinstance(self._instance.run_coordinator, QueuedRunCoordinator) def resolve_runQueueConfig(self, _graphene_info: ResolveInfo): run_queue_config = self._instance.get_concurrency_config().run_queue_config if run_queue_config: return GrapheneRunQueueConfig(run_queue_config) else: return None def resolve_executablePath(self, _graphene_info: ResolveInfo): return sys.executable def resolve_daemonHealth(self, _graphene_info: ResolveInfo): return GrapheneDaemonHealth(instance=self._instance) def resolve_autoMaterializePaused(self, _graphene_info: ResolveInfo): return get_auto_materialize_paused(self._instance) def resolve_supportsConcurrencyLimits(self, _graphene_info: ResolveInfo): return self._instance.event_log_storage.supports_global_concurrency_limits def resolve_concurrencyLimits(self, _graphene_info: ResolveInfo): res = [] for key in self._instance.event_log_storage.get_concurrency_keys(): res.append(GrapheneConcurrencyKeyInfo(key)) return res def resolve_concurrencyLimit(self, _graphene_info: ResolveInfo, concurrencyKey): return GrapheneConcurrencyKeyInfo(concurrencyKey) def resolve_minConcurrencyLimitValue(self, _graphene_info: ResolveInfo): if isinstance( self._instance.event_log_storage, SqlEventLogStorage ) and not self._instance.event_log_storage.has_table("concurrency_limits"): return 1 return 0 def resolve_maxConcurrencyLimitValue(self, _graphene_info: ResolveInfo): return get_max_concurrency_limit_value() def resolve_poolConfig(self, _graphene_info: ResolveInfo): 
concurrency_config = self._instance.get_concurrency_config() return GraphenePoolConfig(concurrency_config.pool_config) def resolve_freshnessEvaluationEnabled(self, graphene_info: ResolveInfo): return ( # freshness daemon turned on graphene_info.context.instance.freshness_enabled # override or graphene_info.context.instance.internal_asset_freshness_enabled() )
GrapheneInstance
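A minimal sketch of exercising a resolver on the class above through a graphene schema; the Query wiring and the context object are hypothetical, only the field names come from the snippet.

import graphene

class Query(graphene.ObjectType):
    instance = graphene.Field(GrapheneInstance)

    def resolve_instance(self, info):
        # hypothetical: assumes the request context carries a DagsterInstance
        return GrapheneInstance(info.context.instance)

schema = graphene.Schema(query=Query)
result = schema.execute("{ instance { id executablePath runQueuingSupported } }")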
python
apache__airflow
task-sdk/src/airflow/sdk/definitions/xcom_arg.py
{ "start": 2022, "end": 7449 }
class ____(ResolveMixin, DependencyMixin): """ Reference to an XCom value pushed from another operator. The implementation supports:: xcomarg >> op xcomarg << op op >> xcomarg # By BaseOperator code op << xcomarg # By BaseOperator code **Example**: The moment you get a result from any operator (decorated or regular) you can :: any_op = AnyOperator() xcomarg = XComArg(any_op) # or equivalently xcomarg = any_op.output my_op = MyOperator() my_op >> xcomarg This object can be used in legacy Operators via Jinja. **Example**: You can make this result to be part of any generated string:: any_op = AnyOperator() xcomarg = any_op.output op1 = MyOperator(my_text_message=f"the value is {xcomarg}") op2 = MyOperator(my_text_message=f"the value is {xcomarg['topic']}") :param operator: Operator instance to which the XComArg references. :param key: Key used to pull the XCom value. Defaults to *XCOM_RETURN_KEY*, i.e. the referenced operator's return value. """ @overload def __new__(cls: type[XComArg], operator: Operator, key: str = BaseXCom.XCOM_RETURN_KEY) -> XComArg: """Execute when the user writes ``XComArg(...)`` directly.""" @overload def __new__(cls: type[XComArg]) -> XComArg: """Execute by Python internals from subclasses.""" def __new__(cls, *args, **kwargs) -> XComArg: if cls is XComArg: return PlainXComArg(*args, **kwargs) return super().__new__(cls) def iter_references(self) -> Iterator[tuple[Operator, str]]: raise NotImplementedError() @staticmethod def iter_xcom_references(arg: Any) -> Iterator[tuple[Operator, str]]: """ Return XCom references in an arbitrary value. Recursively traverse ``arg`` and look for XComArg instances in any collection objects, and instances with ``template_fields`` set. """ if isinstance(arg, ResolveMixin): yield from arg.iter_references() elif isinstance(arg, (tuple, set, list)): for elem in arg: yield from XComArg.iter_xcom_references(elem) elif isinstance(arg, dict): for elem in arg.values(): yield from XComArg.iter_xcom_references(elem) elif isinstance(arg, AbstractOperator): for attr in arg.template_fields: yield from XComArg.iter_xcom_references(getattr(arg, attr)) @staticmethod def apply_upstream_relationship(op: DependencyMixin, arg: Any): """ Set dependency for XComArgs. This looks for XComArg objects in ``arg`` "deeply" (looking inside collections objects and classes decorated with ``template_fields``), and sets the relationship to ``op`` on any found. """ for operator, _ in XComArg.iter_xcom_references(arg): op.set_upstream(operator) @property def roots(self) -> list[Operator]: """Required by DependencyMixin.""" return [op for op, _ in self.iter_references()] @property def leaves(self) -> list[Operator]: """Required by DependencyMixin.""" return [op for op, _ in self.iter_references()] def set_upstream( self, task_or_task_list: DependencyMixin | Sequence[DependencyMixin], edge_modifier: EdgeModifier | None = None, ): """Proxy to underlying operator set_upstream method. Required by DependencyMixin.""" for operator, _ in self.iter_references(): operator.set_upstream(task_or_task_list, edge_modifier) def set_downstream( self, task_or_task_list: DependencyMixin | Sequence[DependencyMixin], edge_modifier: EdgeModifier | None = None, ): """Proxy to underlying operator set_downstream method. Required by DependencyMixin.""" for operator, _ in self.iter_references(): operator.set_downstream(task_or_task_list, edge_modifier) def _serialize(self) -> dict[str, Any]: """ Serialize an XComArg. 
The implementation should be the inverse function to ``deserialize``, returning a data dict converted from this XComArg derivative. DAG serialization does not call this directly, but ``serialize_xcom_arg`` instead, which adds additional information to dispatch deserialization to the correct class. """ raise NotImplementedError() def map(self, f: Callable[[Any], Any]) -> MapXComArg: return MapXComArg(self, [f]) def zip(self, *others: XComArg, fillvalue: Any = NOTSET) -> ZipXComArg: return ZipXComArg([self, *others], fillvalue=fillvalue) def concat(self, *others: XComArg) -> ConcatXComArg: return ConcatXComArg([self, *others]) def resolve(self, context: Mapping[str, Any]) -> Any: raise NotImplementedError() def __enter__(self): if not self.operator.is_setup and not self.operator.is_teardown: raise AirflowException("Only setup/teardown tasks can be used as context managers.") SetupTeardownContext.push_setup_teardown_task(self.operator) return SetupTeardownContext def __exit__(self, exc_type, exc_val, exc_tb): SetupTeardownContext.set_work_task_roots_and_leaves() @attrs.define
XComArg
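A hedged sketch of the map/zip/concat helpers defined above, following the docstring's own pattern; AnyOperator and the task id are made up.

any_op = AnyOperator(task_id="extract")
rows = any_op.output                         # PlainXComArg over the return value
doubled = rows.map(lambda x: x * 2)          # MapXComArg: f applied lazily at resolve time
paired = doubled.zip(rows, fillvalue=None)   # ZipXComArg: element-wise tuples
combined = rows.concat(doubled)              # ConcatXComArg: sequences back to back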
python
django__django
tests/admin_views/admin.py
{ "start": 38756, "end": 39261 }
class ____(admin.ModelAdmin): def get_urls(self): # Opt-out of append slash for single model. urls = super().get_urls() for pattern in urls: pattern.callback = no_append_slash(pattern.callback) return urls site9 = admin.AdminSite(name="admin9") site9.register(Article, ArticleAdmin9) site9.register(Actor, ActorAdmin9) site10 = admin.AdminSite(name="admin10") site10.final_catch_all_view = False site10.register(Article, ArticleAdminWithExtraUrl)
ActorAdmin9
python
walkccc__LeetCode
solutions/79. Word Search/79.py
{ "start": 0, "end": 713 }
class ____: def exist(self, board: list[list[str]], word: str) -> bool: m = len(board) n = len(board[0]) def dfs(i: int, j: int, s: int) -> bool: if i < 0 or i == m or j < 0 or j == n: return False if board[i][j] != word[s] or board[i][j] == '*': return False if s == len(word) - 1: return True cache = board[i][j] board[i][j] = '*' isExist = (dfs(i + 1, j, s + 1) or dfs(i - 1, j, s + 1) or dfs(i, j + 1, s + 1) or dfs(i, j - 1, s + 1)) board[i][j] = cache return isExist return any(dfs(i, j, 0) for i in range(m) for j in range(n))
Solution
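A quick driver for the backtracking solution above; the board is the classic LeetCode 79 example, not part of the source.

board = [["A", "B", "C", "E"],
         ["S", "F", "C", "S"],
         ["A", "D", "E", "E"]]
print(Solution().exist(board, "ABCCED"))  # True: A->B->C->C->E->D
print(Solution().exist(board, "ABCB"))    # False: a cell cannot be revisited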
python
huggingface__transformers
src/transformers/models/siglip/modeling_siglip.py
{ "start": 2823, "end": 3616 }
class ____(ModelOutput): r""" image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional*, returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. """ image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Base class for text model's outputs that also contains a pooling of the last hidden states. """ ) # Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Siglip
SiglipVisionModelOutput
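A hedged sketch of how a ModelOutput subclass like the one above behaves; the field names come from the snippet, the tensor shapes are invented.

import torch
from transformers.models.siglip.modeling_siglip import SiglipVisionModelOutput

out = SiglipVisionModelOutput(
    image_embeds=torch.randn(2, 768),
    last_hidden_state=torch.randn(2, 196, 768),
)
assert out.image_embeds is out["image_embeds"]  # attribute and dict-style access agree
print(list(out.keys()))                         # fields left as None are dropped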
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/pools.py
{ "start": 1226, "end": 1434 }
class ____(BaseModel): """Base serializer for Pool.""" pool: str = Field(serialization_alias="name") slots: int description: str | None = Field(default=None) include_deferred: bool
BasePool
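A hedged sketch of the serialization_alias above, assuming Pydantic v2 semantics: the alias only takes effect when dumping with by_alias=True.

p = BasePool(pool="default_pool", slots=128, include_deferred=False)
print(p.model_dump(by_alias=True))
# {'name': 'default_pool', 'slots': 128, 'description': None, 'include_deferred': False}
print(p.model_dump()["pool"])  # without the alias the field keeps its Python name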
python
oauthlib__oauthlib
oauthlib/oauth2/rfc6749/errors.py
{ "start": 3933, "end": 4001 }
class ____(OAuth2Error): error = 'token_expired'
TokenExpiredError
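A minimal sketch of how an OAuth2Error subclass like the one above surfaces to callers; the description text is made up.

from oauthlib.oauth2.rfc6749.errors import TokenExpiredError

try:
    raise TokenExpiredError(description="access token lifetime exceeded")
except TokenExpiredError as e:
    print(e.error, "-", e.description)  # token_expired - access token lifetime exceeded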
python
getsentry__sentry
tests/sentry/integrations/vsts/test_issues.py
{ "start": 27416, "end": 27895 }
class ____(VstsIssueBase): @responses.activate def test_raise_error_api_unauthorized(self) -> None: error_message = "According to Microsoft Entra, your Identity xxx is currently Deleted within the following Microsoft Entra tenant: xxx Please contact your Microsoft Entra administrator to resolve this." api_error = ApiError(error_message) with pytest.raises(ApiUnauthorized): self.integration.raise_error(api_error)
VstsIssueRaiseErrorTest
python
altair-viz__altair
altair/utils/server.py
{ "start": 709, "end": 4100 }
class ____: def __init__(self, ip_port, Handler): Handler(MockRequest(), ip_port[0], self) def serve_forever(self): pass def server_close(self): pass def generate_handler(html, files=None): if files is None: files = {} class MyHandler(server.BaseHTTPRequestHandler): def do_GET(self): """Respond to a GET request.""" if self.path == "/": self.send_response(200) self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write(html.encode()) elif self.path in files: content_type, content = files[self.path] self.send_response(200) self.send_header("Content-type", content_type) self.end_headers() self.wfile.write(content.encode()) else: self.send_error(404) return MyHandler def find_open_port(ip, port, n=50): """Find an open port near the specified port.""" ports = itertools.chain( (port + i for i in range(n)), (port + random.randint(-2 * n, 2 * n),) ) for port in ports: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) result = s.connect_ex((ip, port)) s.close() if result != 0: return port msg = "no open ports found" raise ValueError(msg) def serve( html, ip="127.0.0.1", port=8888, n_retries=50, files=None, jupyter_warning=True, open_browser=True, http_server=None, ) -> None: """ Start a server serving the given HTML, and (optionally) open a browser. Parameters ---------- html : string HTML to serve ip : string (default = '127.0.0.1') ip address at which the HTML will be served. port : int (default = 8888) the port at which to serve the HTML n_retries : int (default = 50) the number of nearby ports to search if the specified port is in use. files : dictionary (optional) dictionary of extra content to serve jupyter_warning : bool (optional) if True (default), then print a warning if this is used within Jupyter open_browser : bool (optional) if True (default), then open a web browser to the given HTML http_server : class (optional) optionally specify an HTTPServer class to use for showing the figure. The default is Python's basic HTTPServer. """ port = find_open_port(ip, port, n_retries) Handler = generate_handler(html, files) if http_server is None: srvr = server.HTTPServer((ip, port), Handler) else: srvr = http_server((ip, port), Handler) if jupyter_warning: try: __IPYTHON__ # type: ignore # noqa except NameError: pass else: print(JUPYTER_WARNING) # Start the server print(f"Serving to http://{ip}:{port}/ [Ctrl-C to exit]") sys.stdout.flush() if open_browser: # Use a thread to open a web browser pointing to the server def b(): return webbrowser.open(f"http://{ip}:{port}") threading.Thread(target=b).start() try: srvr.serve_forever() except (KeyboardInterrupt, SystemExit): print("\nstopping Server...") srvr.server_close()
MockServer
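A hedged usage sketch for serve() above, assuming it is importable from altair.utils.server; the call blocks until Ctrl-C.

from altair.utils.server import serve

html = "<h1>hello</h1><img src='/logo.png'>"
files = {"/logo.png": ("image/png", "")}  # path -> (content_type, content), per generate_handler
serve(html, port=8000, files=files, open_browser=False)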
python
doocs__leetcode
lcof2/剑指 Offer II 117. 相似的字符串/Solution.py
{ "start": 0, "end": 474 }
class ____: def numSimilarGroups(self, strs: List[str]) -> int: def find(x): if p[x] != x: p[x] = find(p[x]) return p[x] n, l = len(strs), len(strs[0]) p = list(range(n)) for i in range(n): for j in range(i + 1, n): if sum(strs[i][k] != strs[j][k] for k in range(l)) <= 2: p[find(i)] = find(j) return sum(i == find(i) for i in range(n))
Solution
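A quick driver for the union-find solution above (examples from the original problem; the snippet relies on `from typing import List` being in scope).

from typing import List

print(Solution().numSimilarGroups(["tars", "rats", "arts", "star"]))  # 2
print(Solution().numSimilarGroups(["omv", "ovm"]))                    # 1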
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_searchstrategy.py
{ "start": 4774, "end": 5239 }
class ____: inner: Inner def test_jsonable_to_json_nested(): obj = Outer(Inner(42)) assert to_jsonable(obj, avoid_realization=False) == {"inner": "custom"} assert to_jsonable(obj, avoid_realization=True) == "<symbolic>" recursive_list = [] recursive_list.append(recursive_list) recursive_dict = {} recursive_dict["a"] = recursive_dict mutual1 = [] mutual2 = [mutual1] mutual1.append(mutual2) shared = UniqueIdentifier("shared") @dataclass
Outer
python
jazzband__django-model-utils
tests/test_fields/test_monitor_field.py
{ "start": 4089, "end": 4969 }
class ____(TestCase): def setUp(self) -> None: DoubleMonitored.objects.create(name='Charlie', name2='Charlie2') def test_recursion_error_with_only(self) -> None: # Any field passed to only() used to generate a recursion error list(DoubleMonitored.objects.only('id')) def test_recursion_error_with_defer(self) -> None: # Only monitored fields passed to defer() used to fail list(DoubleMonitored.objects.defer('name')) def test_monitor_still_works_with_deferred_fields_filtered_out_of_save_initial(self) -> None: obj = DoubleMonitored.objects.defer('name').get(name='Charlie') with time_machine.travel(datetime(2016, 12, 1, tzinfo=timezone.utc)): obj.name = 'Charlie2' obj.save() self.assertEqual(obj.name_changed, datetime(2016, 12, 1, tzinfo=timezone.utc))
MonitorDoubleFieldTests
python
walkccc__LeetCode
solutions/1481. Least Number of Unique Integers after K Removals/1481.py
{ "start": 0, "end": 361 }
class ____: def findLeastNumOfUniqueInts(self, arr: list[int], k: int) -> int: minHeap = list(collections.Counter(arr).values()) heapq.heapify(minHeap) # Greedily remove the k least frequent numbers to have the least number of unique integers. while k > 0: k -= heapq.heappop(minHeap) return len(minHeap) + (1 if k < 0 else 0)
Solution
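A quick driver for the heap solution above (examples from the original problem; the snippet assumes `collections` and `heapq` are imported).

import collections
import heapq

print(Solution().findLeastNumOfUniqueInts([5, 5, 4], 1))              # 1
print(Solution().findLeastNumOfUniqueInts([4, 3, 1, 1, 3, 3, 2], 3))  # 2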
python
dagster-io__dagster
python_modules/libraries/dagster-sling/dagster_sling/resources.py
{ "start": 4034, "end": 26262 }
class ____(ConfigurableResource): """Resource for interacting with the Sling package. This resource can be used to run Sling replications. Args: connections (List[SlingConnectionResource]): A list of connections to use for the replication. Examples: .. code-block:: python from dagster_etl.sling import SlingResource, SlingConnectionResource sling_resource = SlingResource( connections=[ SlingConnectionResource( name="MY_POSTGRES", type="postgres", connection_string=EnvVar("POSTGRES_CONNECTION_STRING"), ), SlingConnectionResource( name="MY_SNOWFLAKE", type="snowflake", host=EnvVar("SNOWFLAKE_HOST"), user=EnvVar("SNOWFLAKE_USER"), database=EnvVar("SNOWFLAKE_DATABASE"), password=EnvVar("SNOWFLAKE_PASSWORD"), role=EnvVar("SNOWFLAKE_ROLE"), ), ] ) """ connections: list[SlingConnectionResource] = [] _stdout: list[str] = [] @staticmethod def _get_replication_streams_for_context( context: Union[OpExecutionContext, AssetExecutionContext], ) -> dict[str, Any]: """Computes the sling replication streams config for a given execution context with an assets def, possibly involving a subset selection of sling assets. """ if not context.has_assets_def: no_assets_def_message = """ The current execution context has no backing AssetsDefinition object. Therefore, no sling assets subsetting will be performed... """ logger.warn(no_assets_def_message) return {} context_streams = {} assets_def = context.assets_def run_config = context.run_config if run_config: # triggered via sensor run_config_ops = run_config.get("ops", {}) if isinstance(run_config_ops, dict): assets_op_config = run_config_ops.get(assets_def.op.name, {}).get("config", {}) else: assets_op_config = {} context_streams = assets_op_config.get("context_streams", {}) if not context_streams: no_context_streams_config_message = f""" It was expected that your `run_config` would provide a `context_streams` config for the op {assets_def.op.name}. Instead, the received value for this op config was {assets_op_config}. NO ASSET SUBSETTING WILL BE PERFORMED! If that was your intention, you can safely ignore this message. Otherwise, provide the mentioned `context_streams` config for executing only your desired asset subset. """ logger.warn(no_context_streams_config_message) else: metadata_by_key = assets_def.metadata_by_key first_asset_metadata = next(iter(metadata_by_key.values())) replication_config: dict[str, Any] = first_asset_metadata.get( METADATA_KEY_REPLICATION_CONFIG, {} ) dagster_sling_translator: DagsterSlingTranslator = first_asset_metadata.get( METADATA_KEY_TRANSLATOR, DagsterSlingTranslator() ) raw_streams = get_streams_from_replication(replication_config) streams = streams_with_default_dagster_meta(raw_streams, replication_config) selected_asset_keys = context.selected_asset_keys for stream in streams: asset_key = dagster_sling_translator.get_asset_spec(stream).key if asset_key in selected_asset_keys: context_streams.update({stream["name"]: stream["config"]}) return context_streams @classmethod def _is_dagster_maintained(cls) -> bool: return True def _clean_connection_dict(self, d: dict[str, Any]) -> dict[str, Any]: d = _process_env_vars(d) if d["connection_string"]: d["url"] = d["connection_string"] if "connection_string" in d: del d["connection_string"] return d def _query_metadata( self, metadata_string: str, start_time: float, base_metadata: Union[list, None] = None ): """Metadata querying using regular expressions from the standard Sling log. 
Args: metadata_string (str): raw log string containing log/metadata from a Sling CLI run start_time (float): start time used to calculate the elapsed time base_metadata (list, None): list of metadata keys to be queried from the string Returns: final_dict (dict): final metadata dict containing the metadata queried from the string """ if base_metadata is None: base_metadata = [ "stream_name", "row_count", "destination_table", "destination_file", "elapsed_time", ] tmp = None tmp_metadata = {} end_time = time.time() target_type = re.findall(r"writing to target ([\w\s]*) ", metadata_string) if target_type and target_type[0] == "database": tmp = re.findall(r"inserted ([0-9]*) rows .*into ([\w.:/;-_\"\'{}]*)", metadata_string) elif target_type and target_type[0] == "file system": tmp = re.findall(r"wrote ([0-9]*) rows .*to ([\w.:/;-_\"\'{}]*)", metadata_string) else: tmp = re.findall(r"inserted ([0-9]*) rows .*into ([\w.:/;-_\"\'{}]*)", metadata_string) if tmp: if target_type and target_type[0] == "database": tmp_metadata["destination_table"] = re.sub(r"[^\w\s.]", "", tmp[0][1]) if target_type and target_type[0] == "file system": tmp_metadata["destination_file"] = re.sub(r"[^\w\s.]", "", tmp[0][1]) tmp_metadata["elapsed_time"] = end_time - start_time tmp_metadata["row_count"] = tmp[0][0] final_dict = {} for k in base_metadata: if tmp_metadata.get(k): final_dict[k] = tmp_metadata.get(k) return final_dict def prepare_environment(self) -> dict[str, Any]: env = {} for conn in self.connections: d = self._clean_connection_dict(dict(conn)) env[conn.name] = json.dumps(d) return env @contextlib.contextmanager def _setup_config(self) -> Generator[None, None, None]: """Uses environment variables to set the Sling source and target connections.""" prepared_environment = self.prepare_environment() with environ(prepared_environment): yield def _clean_line(self, line: str) -> str: """Removes ANSI escape sequences and Sling log prefixes from a line of output.""" line = ANSI_ESCAPE.sub("", line) # Remove Sling log format prefix: "{timestamp} {LEVEL} " (e.g., "1:04PM INF ") # Match pattern: optional timestamp followed by log level (INF, WRN, ERR, DBG) and space line = re.sub(r"^\d{1,2}:\d{2}[AP]M\s+(INF|WRN|ERR|DBG)\s+", "", line) return line def _clean_timestamp_log(self, line: str): """Remove the timestamp from a log line gathered from the Sling CLI to reduce redundancy in the Dagster log. 
Args: line (str): line of log gathered from the CLI to be cleaned Returns: text: cleaned log line consisting only of log data """ tmp = self._clean_line(line) try: text = tmp.split(" ")[1] except: text = tmp return text def _process_stdout(self, stdout: IO[AnyStr], encoding="utf8") -> Iterator[str]: """Process stdout from the Sling CLI.""" for line in stdout: assert isinstance(line, bytes) fmt_line = bytes.decode(line, encoding=encoding, errors="replace") yield self._clean_line(fmt_line) def _exec_sling_cmd( self, cmd, stdin=None, stdout=PIPE, stderr=STDOUT, encoding="utf8" ) -> Generator[str, None, None]: with Popen(cmd, shell=True, stdin=stdin, stdout=stdout, stderr=stderr) as proc: if proc.stdout: yield from self._process_stdout(proc.stdout, encoding=encoding) proc.wait() if proc.returncode != 0: raise Exception(f"Sling command failed with error code {proc.returncode}") def _parse_json_table_output(self, table_output: dict[str, Any]) -> list[dict[str, str]]: column_keys: list[str] = table_output["fields"] column_values: list[list[str]] = table_output["rows"] return [dict(zip(column_keys, column_values)) for column_values in column_values] def get_column_info_for_table(self, target_name: str, table_name: str) -> list[dict[str, str]]: """Fetches column metadata for a given table in a Sling target and parses it into a list of dictionaries, keyed by column name. Args: target_name (str): The name of the target connection to use. table_name (str): The name of the table to fetch column metadata for. Returns: List[Dict[str, str]]: A list of dictionaries, keyed by column name, containing column metadata. """ output = self.run_sling_cli( ["conns", "discover", target_name, "--pattern", table_name, "--columns"], force_json=True, ) return self._parse_json_table_output(json.loads(output.strip())) def get_row_count_for_table(self, target_name: str, table_name: str) -> int: """Queries the target connection to get the row count for a given table. Args: target_name (str): The name of the target connection to use. table_name (str): The name of the table to fetch the row count for. Returns: int: The number of rows in the table. """ select_stmt: str = f"select count(*) as ct from {table_name}" output = self.run_sling_cli( ["conns", "exec", target_name, select_stmt], force_json=True, ) return int( next(iter(self._parse_json_table_output(json.loads(output.strip()))[0].values())) ) def run_sling_cli(self, args: Sequence[str], force_json: bool = False) -> str: """Runs the Sling CLI with the given arguments and returns the output. Args: args (Sequence[str]): The arguments to pass to the Sling CLI. Returns: str: The output from the Sling CLI. """ with environ({"SLING_OUTPUT": "json"}) if force_json else contextlib.nullcontext(): return subprocess.check_output(args=[sling.SLING_BIN, *args], text=True) def replicate( self, *, context: Union[OpExecutionContext, AssetExecutionContext], replication_config: Optional[SlingReplicationParam] = None, dagster_sling_translator: Optional[DagsterSlingTranslator] = None, debug: bool = False, stream: bool = False, ) -> SlingEventIterator[SlingEventType]: """Runs a Sling replication from the given replication config. Args: context: Asset or Op execution context. replication_config: The Sling replication config to use for the replication. dagster_sling_translator: The translator to use for the replication. debug: Whether to run the replication in debug mode. 
Returns: SlingEventIterator[MaterializeResult]: A generator of MaterializeResult """ if not (replication_config or dagster_sling_translator): metadata_by_key = context.assets_def.metadata_by_key first_asset_metadata = next(iter(metadata_by_key.values())) dagster_sling_translator = first_asset_metadata.get(METADATA_KEY_TRANSLATOR) replication_config = first_asset_metadata.get(METADATA_KEY_REPLICATION_CONFIG) dagster_sling_translator = dagster_sling_translator or DagsterSlingTranslator() replication_config_dict = dict(validate_replication(replication_config)) return SlingEventIterator( self._replicate( context=context, replication_config=replication_config_dict, dagster_sling_translator=dagster_sling_translator, debug=debug, stream=stream, ), sling_cli=self, replication_config=replication_config_dict, context=context, ) def _replicate( self, *, context: Union[OpExecutionContext, AssetExecutionContext], replication_config: dict[str, Any], dagster_sling_translator: DagsterSlingTranslator, debug: bool, stream: bool = False, ) -> Iterator[SlingEventType]: # if translator has not been defined on metadata _or_ through param, then use the default constructor with self._setup_config(): env = os.environ.copy() if not stream: ##### Old method uses _run, which is not streamable ##### generator = self._batch_sling_replicate( context=context, replication_config=replication_config, dagster_sling_translator=dagster_sling_translator, env=env, debug=debug, ) else: #### New method uses sling _exec_cmd to stream logs from Sling to the Dagster log generator = self._stream_sling_replicate( context=context, replication_config=replication_config, dagster_sling_translator=dagster_sling_translator, env=env, debug=debug, ) yield from generator def _batch_sling_replicate( self, context: Union[OpExecutionContext, AssetExecutionContext], replication_config: dict[str, Any], dagster_sling_translator: DagsterSlingTranslator, env: dict, debug: bool, ) -> Generator[Union[MaterializeResult, AssetMaterialization], None, None]: """Underlying function to run replication and fetch metadata in batch mode.""" # convert to dict to enable updating the index context_streams = self._get_replication_streams_for_context(context) if context_streams: replication_config.update({"streams": context_streams}) stream_definitions = get_streams_from_replication(replication_config) # extract the destination name from the replication config destination_name = replication_config.get("target") uid = uuid.uuid4() temp_dir = tempfile.gettempdir() temp_file = os.path.join(temp_dir, f"sling-replication-{uid}.json") with open(temp_file, "w") as file: json.dump(replication_config, file, cls=sling.JsonEncoder) logger.debug(f"Replication config: {replication_config}") debug_str = "-d" if debug else "" cmd = f"{sling.SLING_BIN} run {debug_str} -r {temp_file}" logger.debug(f"Running Sling replication with command: {cmd}") # Get start time from wall clock start_time = time.time() results = sling._run( # noqa cmd=cmd, temp_file=temp_file, return_output=True, env=env, ) end_time = time.time() for row in results.split("\n"): clean_line = self._clean_line(row) logger.debug(clean_line + "\n") self._stdout.append(clean_line) for stream_definition in stream_definitions: asset_key = dagster_sling_translator.get_asset_spec(stream_definition).key object_key = (stream_definition.get("config") or {}).get("object") destination_stream_name = object_key or stream_definition["name"] table_name = None if destination_name and destination_stream_name: table_name = ".".join([destination_name, destination_stream_name])
metadata = { "elapsed_time": end_time - start_time, "stream_name": stream_definition["name"], **TableMetadataSet( table_name=table_name, ), } if context.has_assets_def: yield MaterializeResult(asset_key=asset_key, metadata=metadata) else: yield AssetMaterialization(asset_key=asset_key, metadata=metadata) def _stream_sling_replicate( self, context: Union[OpExecutionContext, AssetExecutionContext], replication_config: dict[str, Any], dagster_sling_translator: DagsterSlingTranslator, env: dict, debug: bool, ) -> Generator[Union[MaterializeResult, AssetMaterialization], None, None]: """Underlying function to run replication and fetch metadata in stream mode.""" # define variables used to compute metadata during the run current_stream = None metadata_text = [] metadata = {} # convert to dict to enable updating the index context_streams = self._get_replication_streams_for_context(context) if context_streams: replication_config.update({"streams": context_streams}) uid = uuid.uuid4() temp_dir = tempfile.gettempdir() temp_file = os.path.join(temp_dir, f"sling-replication-{uid}.json") with open(temp_file, "w") as file: json.dump(replication_config, file, cls=sling.JsonEncoder) logger.debug(f"Replication config: {replication_config}") debug_str = "-d" if debug else "" cmd = f"{sling.SLING_BIN} run {debug_str} -r {temp_file}" logger.debug(f"Running Sling replication with command: {cmd}") # Get start time from wall clock start_time = time.time() for line in sling._exec_cmd(cmd, env=env): # noqa if line == "": # if empty line -- skip it continue text = self._clean_timestamp_log(line) # else clean timestamp logger.info(text) # log info to dagster log # if no current stream is chosen if current_stream is None: # Try to match stream name with stream keyword matched = re.findall("stream (.*)$", text) # If found, extract stream name, stream config, asset key if matched: current_stream = matched[0] current_config = replication_config.get("streams", {}).get(current_stream, {}) asset_key = dagster_sling_translator.get_asset_spec( {"name": current_stream, "config": current_config} ).key if debug: logger.debug(current_stream) logger.debug(current_config) logger.debug(asset_key) # Else search for single replication format else: # If found, extract stream name, stream config, asset key matched = re.findall(r"Sling Replication [|] .* [|] (\S*)$", text) if matched: current_stream = matched[0] current_config = replication_config.get("streams", {}).get( current_stream, {} ) asset_key = dagster_sling_translator.get_asset_spec( {"name": current_stream, "config": current_config} ).key if debug: logger.debug(current_stream) logger.debug(current_config) logger.debug(asset_key) # Else log that no stream was found. This is normal for a few lines, 
but if many lines come up, further evaluation for other patterns might be needed else: if debug: logger.debug("no match stream name") # If a current stream is already chosen else: # Search whether the current stream ended matched = re.findall("execution succeeded", text) if matched: # If yes, query metadata and materialize asset metadata = self._query_metadata("\n".join(metadata_text), start_time=start_time) start_time = time.time() metadata["stream_name"] = current_stream logger.debug(metadata) if context.has_assets_def: yield MaterializeResult(asset_key=asset_key, metadata=metadata) # pyright: ignore[reportPossiblyUnboundVariable] else: yield AssetMaterialization(asset_key=asset_key, metadata=metadata) # pyright: ignore[reportPossiblyUnboundVariable] current_stream = None metadata_text = [] metadata_text.append(text) # clean up unused file os.remove(temp_file) def stream_raw_logs(self) -> Generator[str, None, None]: """Returns a generator of raw logs from the Sling CLI.""" yield from self._stdout def _process_env_vars(config: dict[str, Any]) -> dict[str, Any]: out = {} for key, value in config.items(): if isinstance(value, dict) and len(value) == 1 and next(iter(value.keys())) == "env": out[key] = EnvVar(next(iter(value.values()))).get_value() elif isinstance(value, EnvVar): out[key] = value.get_value() else: out[key] = value return out
SlingResource
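A hedged sketch of wiring the resource above into a Dagster asset via the documented @sling_assets pattern; the replication config and connection parameters are placeholders (the source connection is elided for brevity).

from dagster import Definitions
from dagster_sling import SlingConnectionResource, SlingResource, sling_assets

replication_config = {
    "source": "MY_POSTGRES",
    "target": "MY_DUCKDB",
    "streams": {"public.accounts": None},
}

@sling_assets(replication_config=replication_config)
def my_sling_assets(context, sling: SlingResource):
    yield from sling.replicate(context=context)

defs = Definitions(
    assets=[my_sling_assets],
    resources={
        "sling": SlingResource(
            connections=[
                # hypothetical duckdb connection; a MY_POSTGRES connection would also be needed
                SlingConnectionResource(name="MY_DUCKDB", type="duckdb", instance="/tmp/my.duckdb"),
            ]
        )
    },
)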
python
apache__airflow
helm-tests/tests/helm_tests/other/test_git_sync_scheduler.py
{ "start": 914, "end": 19123 }
class ____: """Test git sync scheduler. This is ignored when Airflow >=3 or a separate dag processor is used.""" def test_should_add_dags_volume(self): docs = render_chart( values={"airflowVersion": "2.10.5", "dags": {"gitSync": {"enabled": True}}}, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) # check that there is a volume and git-sync and scheduler container mount it assert len(jmespath.search("spec.template.spec.volumes[?name=='dags']", docs[0])) > 0 assert ( len( jmespath.search( "(spec.template.spec.containers[?name=='scheduler'].volumeMounts[])[?name=='dags']", docs[0], ) ) > 0 ) assert ( len( jmespath.search( "(spec.template.spec.containers[?name=='git-sync'].volumeMounts[])[?name=='dags']", docs[0], ) ) > 0 ) def test_validate_the_git_sync_container_spec(self): docs = render_chart( values={ "airflowVersion": "2.10.5", "images": { "gitSync": { "repository": "test-registry/test-repo", "tag": "test-tag", "pullPolicy": "Always", } }, "dags": { "gitSync": { "enabled": True, "containerName": "git-sync-test", "wait": None, "period": "66s", "maxFailures": 70, "subPath": "path1/path2", "rev": "HEAD", "ref": "test-branch", "depth": 1, "repo": "https://github.com/apache/airflow.git", "branch": "test-branch", "sshKeySecret": None, "credentialsSecret": None, "knownHosts": None, "envFrom": "- secretRef:\n name: 'proxy-config'\n", }, "persistence": {"enabled": True}, }, }, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) assert jmespath.search("spec.template.spec.containers[1]", docs[0]) == { "name": "git-sync-test", "securityContext": {"runAsUser": 65533}, "image": "test-registry/test-repo:test-tag", "imagePullPolicy": "Always", "envFrom": [{"secretRef": {"name": "proxy-config"}}], "env": [ {"name": "GIT_SYNC_REV", "value": "HEAD"}, {"name": "GITSYNC_REF", "value": "test-branch"}, {"name": "GIT_SYNC_BRANCH", "value": "test-branch"}, {"name": "GIT_SYNC_REPO", "value": "https://github.com/apache/airflow.git"}, {"name": "GITSYNC_REPO", "value": "https://github.com/apache/airflow.git"}, {"name": "GIT_SYNC_DEPTH", "value": "1"}, {"name": "GITSYNC_DEPTH", "value": "1"}, {"name": "GIT_SYNC_ROOT", "value": "/git"}, {"name": "GITSYNC_ROOT", "value": "/git"}, {"name": "GIT_SYNC_DEST", "value": "repo"}, {"name": "GITSYNC_LINK", "value": "repo"}, {"name": "GIT_SYNC_ADD_USER", "value": "true"}, {"name": "GITSYNC_ADD_USER", "value": "true"}, {"name": "GITSYNC_PERIOD", "value": "66s"}, {"name": "GIT_SYNC_MAX_SYNC_FAILURES", "value": "70"}, {"name": "GITSYNC_MAX_FAILURES", "value": "70"}, ], "volumeMounts": [{"mountPath": "/git", "name": "dags"}], "resources": {}, } def test_validate_the_git_sync_container_spec_if_wait_specified(self): docs = render_chart( values={ "airflowVersion": "2.10.5", "images": { "gitSync": { "repository": "test-registry/test-repo", "tag": "test-tag", "pullPolicy": "Always", } }, "dags": { "gitSync": { "enabled": True, "containerName": "git-sync-test", "wait": 66, "period": "66s", "maxFailures": 70, "subPath": "path1/path2", "rev": "HEAD", "ref": "test-branch", "depth": 1, "repo": "https://github.com/apache/airflow.git", "branch": "test-branch", "sshKeySecret": None, "credentialsSecret": None, "knownHosts": None, "envFrom": "- secretRef:\n name: 'proxy-config'\n", }, "persistence": {"enabled": True}, }, }, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) assert jmespath.search("spec.template.spec.containers[1]", docs[0]) == { "name": "git-sync-test", "securityContext": {"runAsUser": 65533}, "image": "test-registry/test-repo:test-tag", 
"imagePullPolicy": "Always", "envFrom": [{"secretRef": {"name": "proxy-config"}}], "env": [ {"name": "GIT_SYNC_REV", "value": "HEAD"}, {"name": "GITSYNC_REF", "value": "test-branch"}, {"name": "GIT_SYNC_BRANCH", "value": "test-branch"}, {"name": "GIT_SYNC_REPO", "value": "https://github.com/apache/airflow.git"}, {"name": "GITSYNC_REPO", "value": "https://github.com/apache/airflow.git"}, {"name": "GIT_SYNC_DEPTH", "value": "1"}, {"name": "GITSYNC_DEPTH", "value": "1"}, {"name": "GIT_SYNC_ROOT", "value": "/git"}, {"name": "GITSYNC_ROOT", "value": "/git"}, {"name": "GIT_SYNC_DEST", "value": "repo"}, {"name": "GITSYNC_LINK", "value": "repo"}, {"name": "GIT_SYNC_ADD_USER", "value": "true"}, {"name": "GITSYNC_ADD_USER", "value": "true"}, {"name": "GIT_SYNC_WAIT", "value": "66"}, {"name": "GITSYNC_PERIOD", "value": "66s"}, {"name": "GIT_SYNC_MAX_SYNC_FAILURES", "value": "70"}, {"name": "GITSYNC_MAX_FAILURES", "value": "70"}, ], "volumeMounts": [{"mountPath": "/git", "name": "dags"}], "resources": {}, } def test_validate_if_ssh_params_are_added(self): docs = render_chart( values={ "airflowVersion": "2.10.5", "dags": { "gitSync": { "enabled": True, "containerName": "git-sync-test", "sshKeySecret": "ssh-secret", "knownHosts": None, "branch": "test-branch", } }, }, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) assert {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GITSYNC_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GIT_SYNC_SSH", "value": "true"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GITSYNC_SSH", "value": "true"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GIT_KNOWN_HOSTS", "value": "false"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GITSYNC_SSH_KNOWN_HOSTS", "value": "false"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert { "name": "git-sync-ssh-key", "secret": {"secretName": "ssh-secret", "defaultMode": 288}, } in jmespath.search("spec.template.spec.volumes", docs[0]) def test_validate_if_ssh_params_are_added_with_git_ssh_key(self): docs = render_chart( values={ "airflowVersion": "2.10.5", "dags": { "gitSync": { "enabled": True, "sshKey": "dummy-ssh-key", } }, }, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) assert {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GITSYNC_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GIT_SYNC_SSH", "value": "true"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert {"name": "GITSYNC_SSH", "value": "true"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) assert { "name": "git-sync-ssh-key", "secret": {"secretName": "release-name-ssh-secret", "defaultMode": 288}, } in jmespath.search("spec.template.spec.volumes", docs[0]) def test_validate_sshkeysecret_not_added_when_persistence_is_enabled(self): docs = render_chart( values={ "dags": { "gitSync": { "enabled": True, "containerName": "git-sync-test", "sshKeySecret": "ssh-secret", "knownHosts": None, "branch": "test-branch", }, "persistence": {"enabled": True}, } }, 
show_only=["templates/scheduler/scheduler-deployment.yaml"], ) assert "git-sync-ssh-key" not in jmespath.search("spec.template.spec.volumes[].name", docs[0]) @pytest.mark.parametrize( ("tag", "expected_prefix"), [ ("v3.6.7", "GIT_SYNC_"), ("v4.4.2", "GITSYNC_"), ("latest", "GITSYNC_"), ], ) def test_should_set_username_and_pass_env_variables_in_scheduler(self, tag, expected_prefix): docs = render_chart( values={ "airflowVersion": "2.10.5", "dags": { "gitSync": { "enabled": True, "credentialsSecret": "user-pass-secret", "sshKeySecret": None, } }, "images": { "gitSync": { "tag": tag, } }, }, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) envs = jmespath.search("spec.template.spec.containers[1].env", docs[0]) assert { "name": f"{expected_prefix}USERNAME", "valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": f"{expected_prefix}USERNAME"}}, } in envs assert { "name": f"{expected_prefix}PASSWORD", "valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": f"{expected_prefix}PASSWORD"}}, } in envs def test_should_set_the_volume_claim_correctly_when_using_an_existing_claim(self): docs = render_chart( values={ "airflowVersion": "2.10.5", "dags": {"persistence": {"enabled": True, "existingClaim": "test-claim"}}, }, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) assert {"name": "dags", "persistentVolumeClaim": {"claimName": "test-claim"}} in jmespath.search( "spec.template.spec.volumes", docs[0] ) def test_should_add_extra_volume_and_extra_volume_mount(self): docs = render_chart( values={ "executor": "CeleryExecutor", "scheduler": { "extraVolumes": [{"name": "test-volume-{{ .Chart.Name }}", "emptyDir": {}}], "extraVolumeMounts": [ {"name": "test-volume-{{ .Chart.Name }}", "mountPath": "/opt/test"} ], }, "dags": { "gitSync": { "enabled": True, } }, }, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) assert {"name": "test-volume-airflow", "emptyDir": {}} in jmespath.search( "spec.template.spec.volumes", docs[0] ) assert {"name": "test-volume-airflow", "mountPath": "/opt/test"} in jmespath.search( "spec.template.spec.containers[0].volumeMounts", docs[0] ) def test_extra_volume_and_git_sync_extra_volume_mount(self): docs = render_chart( values={ "airflowVersion": "2.10.5", "executor": "CeleryExecutor", "scheduler": { "extraVolumes": [{"name": "test-volume-{{ .Values.executor }}", "emptyDir": {}}], }, "dags": { "gitSync": { "enabled": True, "extraVolumeMounts": [ {"mountPath": "/opt/test", "name": "test-volume-{{ .Values.executor }}"} ], } }, }, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) assert {"name": "test-volume-CeleryExecutor", "emptyDir": {}} in jmespath.search( "spec.template.spec.volumes", docs[0] ) assert {"mountPath": "/git", "name": "dags"} in jmespath.search( "spec.template.spec.containers[1].volumeMounts", docs[0] ) assert {"name": "test-volume-CeleryExecutor", "mountPath": "/opt/test"} in jmespath.search( "spec.template.spec.containers[1].volumeMounts", docs[0] ) def test_should_add_env(self): docs = render_chart( values={ "airflowVersion": "2.10.5", "dags": { "gitSync": { "enabled": True, "env": [{"name": "FOO", "value": "bar"}], } }, }, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) assert {"name": "FOO", "value": "bar"} in jmespath.search( "spec.template.spec.containers[1].env", docs[0] ) def test_resources_are_configurable(self): docs = render_chart( values={ "airflowVersion": "2.10.5", "dags": { "gitSync": { "enabled": True, "resources": { "limits": {"cpu": "200m", "memory": "128Mi"}, 
"requests": {"cpu": "300m", "memory": "169Mi"}, }, }, }, }, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) assert jmespath.search("spec.template.spec.containers[1].resources.limits.memory", docs[0]) == "128Mi" assert ( jmespath.search("spec.template.spec.containers[1].resources.requests.memory", docs[0]) == "169Mi" ) assert jmespath.search("spec.template.spec.containers[1].resources.requests.cpu", docs[0]) == "300m" def test_liveliness_and_readiness_probes_are_configurable(self): livenessProbe = { "failureThreshold": 10, "exec": {"command": ["/bin/true"]}, "initialDelaySeconds": 0, "periodSeconds": 1, "successThreshold": 1, "timeoutSeconds": 5, } readinessProbe = { "failureThreshold": 10, "exec": {"command": ["/bin/true"]}, "initialDelaySeconds": 0, "periodSeconds": 1, "successThreshold": 1, "timeoutSeconds": 5, } docs = render_chart( values={ "airflowVersion": "2.10.5", "dags": { "gitSync": { "enabled": True, "livenessProbe": livenessProbe, "readinessProbe": readinessProbe, }, }, }, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) container_search_result = jmespath.search( "spec.template.spec.containers[?name == 'git-sync']", docs[0] ) init_container_search_result = jmespath.search( "spec.template.spec.initContainers[?name == 'git-sync-init']", docs[0] ) assert "livenessProbe" in container_search_result[0] assert "readinessProbe" in container_search_result[0] assert "livenessProbe" not in init_container_search_result[0] assert "readinessProbe" not in init_container_search_result[0] assert livenessProbe == container_search_result[0]["livenessProbe"] assert readinessProbe == container_search_result[0]["readinessProbe"]
TestGitSyncSchedulerTest
python
tornadoweb__tornado
tornado/httpclient.py
{ "start": 5047, "end": 13299 }
class ____(Configurable): """A non-blocking HTTP client. Example usage:: async def f(): http_client = AsyncHTTPClient() try: response = await http_client.fetch("http://www.google.com") except Exception as e: print("Error: %s" % e) else: print(response.body) The constructor for this class is magic in several respects: It actually creates an instance of an implementation-specific subclass, and instances are reused as a kind of pseudo-singleton (one per `.IOLoop`). The keyword argument ``force_instance=True`` can be used to suppress this singleton behavior. Unless ``force_instance=True`` is used, no arguments should be passed to the `AsyncHTTPClient` constructor. The implementation subclass as well as arguments to its constructor can be set with the static method `configure()` All `AsyncHTTPClient` implementations support a ``defaults`` keyword argument, which can be used to set default values for `HTTPRequest` attributes. For example:: AsyncHTTPClient.configure( None, defaults=dict(user_agent="MyUserAgent")) # or with force_instance: client = AsyncHTTPClient(force_instance=True, defaults=dict(user_agent="MyUserAgent")) .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ _instance_cache = None # type: Dict[IOLoop, AsyncHTTPClient] @classmethod def configurable_base(cls) -> Type[Configurable]: return AsyncHTTPClient @classmethod def configurable_default(cls) -> Type[Configurable]: from tornado.simple_httpclient import SimpleAsyncHTTPClient return SimpleAsyncHTTPClient @classmethod def _async_clients(cls) -> Dict[IOLoop, "AsyncHTTPClient"]: attr_name = "_async_client_dict_" + cls.__name__ if not hasattr(cls, attr_name): setattr(cls, attr_name, weakref.WeakKeyDictionary()) return getattr(cls, attr_name) def __new__(cls, force_instance: bool = False, **kwargs: Any) -> "AsyncHTTPClient": io_loop = IOLoop.current() if force_instance: instance_cache = None else: instance_cache = cls._async_clients() if instance_cache is not None and io_loop in instance_cache: return instance_cache[io_loop] instance = super().__new__(cls, **kwargs) # type: ignore # Make sure the instance knows which cache to remove itself from. # It can't simply call _async_clients() because we may be in # __new__(AsyncHTTPClient) but instance.__class__ may be # SimpleAsyncHTTPClient. instance._instance_cache = instance_cache if instance_cache is not None: instance_cache[instance.io_loop] = instance return instance def initialize(self, defaults: Optional[Dict[str, Any]] = None) -> None: self.io_loop = IOLoop.current() self.defaults = dict(HTTPRequest._DEFAULTS) if defaults is not None: self.defaults.update(defaults) self._closed = False def close(self) -> None: """Destroys this HTTP client, freeing any file descriptors used. This method is **not needed in normal use** due to the way that `AsyncHTTPClient` objects are transparently reused. ``close()`` is generally only necessary when either the `.IOLoop` is also being closed, or the ``force_instance=True`` argument was used when creating the `AsyncHTTPClient`. No other methods may be called on the `AsyncHTTPClient` after ``close()``. """ if self._closed: return self._closed = True if self._instance_cache is not None: cached_val = self._instance_cache.pop(self.io_loop, None) # If there's an object other than self in the instance # cache for our IOLoop, something has gotten mixed up. 
A # value of None appears to be possible when this is called # from a destructor (HTTPClient.__del__) as the weakref # gets cleared before the destructor runs. if cached_val is not None and cached_val is not self: raise RuntimeError("inconsistent AsyncHTTPClient cache") def fetch( self, request: Union[str, "HTTPRequest"], raise_error: bool = True, **kwargs: Any, ) -> "Future[HTTPResponse]": """Executes a request, asynchronously returning an `HTTPResponse`. The request may be either a string URL or an `HTTPRequest` object. If it is a string, we construct an `HTTPRequest` using any additional kwargs: ``HTTPRequest(request, **kwargs)`` This method returns a `.Future` whose result is an `HTTPResponse`. By default, the ``Future`` will raise an `HTTPError` if the request returned a non-200 response code (other errors may also be raised if the server could not be contacted). Instead, if ``raise_error`` is set to False, the response will always be returned regardless of the response code. If a ``callback`` is given, it will be invoked with the `HTTPResponse`. In the callback interface, `HTTPError` is not automatically raised. Instead, you must check the response's ``error`` attribute or call its `~HTTPResponse.rethrow` method. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead. The ``raise_error=False`` argument only affects the `HTTPError` raised when a non-200 response code is used, instead of suppressing all errors. """ if self._closed: raise RuntimeError("fetch() called on closed AsyncHTTPClient") if not isinstance(request, HTTPRequest): request = HTTPRequest(url=request, **kwargs) else: if kwargs: raise ValueError( "kwargs can't be used if request is an HTTPRequest object" ) # We may modify this (to add Host, Accept-Encoding, etc), # so make sure we don't modify the caller's object. This is also # where normal dicts get converted to HTTPHeaders objects. request.headers = httputil.HTTPHeaders(request.headers) request_proxy = _RequestProxy(request, self.defaults) future = Future() # type: Future[HTTPResponse] def handle_response(response: "HTTPResponse") -> None: if response.error: if raise_error or not response._error_is_response_code: future_set_exception_unless_cancelled(future, response.error) return future_set_result_unless_cancelled(future, response) self.fetch_impl(cast(HTTPRequest, request_proxy), handle_response) return future def fetch_impl( self, request: "HTTPRequest", callback: Callable[["HTTPResponse"], None] ) -> None: raise NotImplementedError() @classmethod def configure( cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any ) -> None: """Configures the `AsyncHTTPClient` subclass to use. ``AsyncHTTPClient()`` actually creates an instance of a subclass. This method may be called with either a class object or the fully-qualified name of such a class (or ``None`` to use the default, ``SimpleAsyncHTTPClient``) If additional keyword arguments are given, they will be passed to the constructor of each subclass instance created. The keyword argument ``max_clients`` determines the maximum number of simultaneous `~AsyncHTTPClient.fetch()` operations that can execute in parallel on each `.IOLoop`. Additional arguments may be supported depending on the implementation class in use. Example:: AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") """ super().configure(impl, **kwargs)
AsyncHTTPClient
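A minimal end-to-end sketch of fetch() above on Tornado 6+, where the client rides the running asyncio event loop; the URL is a placeholder.

import asyncio
from tornado.httpclient import AsyncHTTPClient

async def main():
    client = AsyncHTTPClient()
    response = await client.fetch("http://example.com/", raise_error=False)
    print(response.code, len(response.body))

asyncio.run(main())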
python
django__django
tests/expressions_case/tests.py
{ "start": 50868, "end": 56323 }
class ____(TestCase): @classmethod def setUpTestData(cls): Client.objects.create( name="Jane Doe", account_type=Client.REGULAR, registered_on=date.today() - timedelta(days=36), ) Client.objects.create( name="James Smith", account_type=Client.GOLD, registered_on=date.today() - timedelta(days=5), ) Client.objects.create( name="Jack Black", account_type=Client.PLATINUM, registered_on=date.today() - timedelta(days=10 * 365), ) def test_simple_example(self): self.assertQuerySetEqual( Client.objects.annotate( discount=Case( When(account_type=Client.GOLD, then=Value("5%")), When(account_type=Client.PLATINUM, then=Value("10%")), default=Value("0%"), ), ).order_by("pk"), [("Jane Doe", "0%"), ("James Smith", "5%"), ("Jack Black", "10%")], transform=attrgetter("name", "discount"), ) def test_lookup_example(self): a_month_ago = date.today() - timedelta(days=30) a_year_ago = date.today() - timedelta(days=365) self.assertQuerySetEqual( Client.objects.annotate( discount=Case( When(registered_on__lte=a_year_ago, then=Value("10%")), When(registered_on__lte=a_month_ago, then=Value("5%")), default=Value("0%"), ), ).order_by("pk"), [("Jane Doe", "5%"), ("James Smith", "0%"), ("Jack Black", "10%")], transform=attrgetter("name", "discount"), ) def test_conditional_update_example(self): a_month_ago = date.today() - timedelta(days=30) a_year_ago = date.today() - timedelta(days=365) Client.objects.update( account_type=Case( When(registered_on__lte=a_year_ago, then=Value(Client.PLATINUM)), When(registered_on__lte=a_month_ago, then=Value(Client.GOLD)), default=Value(Client.REGULAR), ), ) self.assertQuerySetEqual( Client.objects.order_by("pk"), [("Jane Doe", "G"), ("James Smith", "R"), ("Jack Black", "P")], transform=attrgetter("name", "account_type"), ) def test_conditional_aggregation_example(self): Client.objects.create( name="Jean Grey", account_type=Client.REGULAR, registered_on=date.today(), ) Client.objects.create( name="James Bond", account_type=Client.PLATINUM, registered_on=date.today(), ) Client.objects.create( name="Jane Porter", account_type=Client.PLATINUM, registered_on=date.today(), ) self.assertEqual( Client.objects.aggregate( regular=Count("pk", filter=Q(account_type=Client.REGULAR)), gold=Count("pk", filter=Q(account_type=Client.GOLD)), platinum=Count("pk", filter=Q(account_type=Client.PLATINUM)), ), {"regular": 2, "gold": 1, "platinum": 3}, ) # This was the example before the filter argument was added. 
self.assertEqual( Client.objects.aggregate( regular=Sum( Case( When(account_type=Client.REGULAR, then=1), ) ), gold=Sum( Case( When(account_type=Client.GOLD, then=1), ) ), platinum=Sum( Case( When(account_type=Client.PLATINUM, then=1), ) ), ), {"regular": 2, "gold": 1, "platinum": 3}, ) def test_filter_example(self): a_month_ago = date.today() - timedelta(days=30) a_year_ago = date.today() - timedelta(days=365) self.assertQuerySetEqual( Client.objects.filter( registered_on__lte=Case( When(account_type=Client.GOLD, then=a_month_ago), When(account_type=Client.PLATINUM, then=a_year_ago), ), ), [("Jack Black", "P")], transform=attrgetter("name", "account_type"), ) def test_hash(self): expression_1 = Case( When(account_type__in=[Client.REGULAR, Client.GOLD], then=1), default=2, output_field=IntegerField(), ) expression_2 = Case( When(account_type__in=(Client.REGULAR, Client.GOLD), then=1), default=2, output_field=IntegerField(), ) expression_3 = Case( When(account_type__in=[Client.REGULAR, Client.GOLD], then=1), default=2 ) expression_4 = Case( When(account_type__in=[Client.PLATINUM, Client.GOLD], then=2), default=1 ) self.assertEqual(hash(expression_1), hash(expression_2)) self.assertNotEqual(hash(expression_2), hash(expression_3)) self.assertNotEqual(hash(expression_1), hash(expression_4)) self.assertNotEqual(hash(expression_3), hash(expression_4))
CaseDocumentationExamples
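A minimal usage sketch of the annotation pattern the tests above exercise, assuming a configured Django project with the same Client model (name, account_type, registered_on); Case evaluates the first matching When branch and falls back to default.

from django.db.models import Case, Value, When

clients = Client.objects.annotate(
    discount=Case(
        When(account_type=Client.GOLD, then=Value("5%")),
        When(account_type=Client.PLATINUM, then=Value("10%")),
        default=Value("0%"),
    ),
)
for client in clients:
    print(client.name, client.discount)  # e.g. "James Smith 5%"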
python
pandas-dev__pandas
asv_bench/benchmarks/categoricals.py
{ "start": 6506, "end": 7082 }
class ____: def setup(self): N = 1000 self.c = pd.CategoricalIndex(list("a" * N + "b" * N + "c" * N)) self.s = pd.Series(self.c) def time_categorical_index_is_monotonic_increasing(self): self.c.is_monotonic_increasing def time_categorical_index_is_monotonic_decreasing(self): self.c.is_monotonic_decreasing def time_categorical_series_is_monotonic_increasing(self): self.s.is_monotonic_increasing def time_categorical_series_is_monotonic_decreasing(self): self.s.is_monotonic_decreasing
IsMonotonic
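The benchmark above only times attribute access; a quick illustration of what those properties report, using plain pandas with no asv harness:

import pandas as pd

idx = pd.CategoricalIndex(list("aaabbbccc"))
print(idx.is_monotonic_increasing)  # True: the category codes never decrease
print(idx.is_monotonic_decreasing)  # False

s = pd.Series(idx)
print(s.is_monotonic_increasing)    # the same check through the Series accessor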
python
django__django
tests/utils_tests/test_lorem_ipsum.py
{ "start": 121, "end": 5452 }
class ____(unittest.TestCase): def test_negative_words(self): """words(n) returns n + 19 words, even if n is negative.""" self.assertEqual( words(-5), "lorem ipsum dolor sit amet consectetur adipisicing elit sed do " "eiusmod tempor incididunt ut", ) def test_same_or_less_common_words(self): """words(n) for n < 19.""" self.assertEqual(words(7), "lorem ipsum dolor sit amet consectetur adipisicing") def test_common_words_in_string(self): """ words(n) starts with the 19 standard lorem ipsum words for n > 19. """ self.assertTrue( words(25).startswith( "lorem ipsum dolor sit amet consectetur adipisicing elit sed " "do eiusmod tempor incididunt ut labore et dolore magna aliqua" ) ) def test_more_words_than_common(self): """words(n) returns n words for n > 19.""" self.assertEqual(len(words(25).split()), 25) def test_common_large_number_of_words(self): """words(n) has n words when n is greater than len(WORDS).""" self.assertEqual(len(words(500).split()), 500) @mock.patch("django.utils.lorem_ipsum.random.sample") def test_not_common_words(self, mock_sample): """words(n, common=False) returns random words.""" mock_sample.return_value = ["exercitationem", "perferendis"] self.assertEqual(words(2, common=False), "exercitationem perferendis") def test_sentence_starts_with_capital(self): """A sentence starts with a capital letter.""" self.assertTrue(sentence()[0].isupper()) @mock.patch("django.utils.lorem_ipsum.random.sample") @mock.patch("django.utils.lorem_ipsum.random.choice") @mock.patch("django.utils.lorem_ipsum.random.randint") def test_sentence(self, mock_randint, mock_choice, mock_sample): """ Sentences are built using some number of phrases and a set of words. """ mock_randint.return_value = 2 # Use two phrases. mock_sample.return_value = ["exercitationem", "perferendis"] mock_choice.return_value = "?" value = sentence() self.assertEqual(mock_randint.call_count, 3) self.assertEqual(mock_sample.call_count, 2) self.assertEqual(mock_choice.call_count, 1) self.assertEqual( value, "Exercitationem perferendis, exercitationem perferendis?" ) @mock.patch("django.utils.lorem_ipsum.random.choice") def test_sentence_ending(self, mock_choice): """Sentences end with a question mark or a period.""" mock_choice.return_value = "?" self.assertIn(sentence()[-1], "?") mock_choice.return_value = "." self.assertIn(sentence()[-1], ".") @mock.patch("django.utils.lorem_ipsum.random.sample") @mock.patch("django.utils.lorem_ipsum.random.choice") @mock.patch("django.utils.lorem_ipsum.random.randint") def test_paragraph(self, mock_paragraph_randint, mock_choice, mock_sample): """paragraph() generates a single paragraph.""" # Make creating 2 sentences use 2 phrases. mock_paragraph_randint.return_value = 2 mock_sample.return_value = ["exercitationem", "perferendis"] mock_choice.return_value = "." value = paragraph() self.assertEqual(mock_paragraph_randint.call_count, 7) self.assertEqual( value, ( "Exercitationem perferendis, exercitationem perferendis. " "Exercitationem perferendis, exercitationem perferendis." ), ) @mock.patch("django.utils.lorem_ipsum.random.sample") @mock.patch("django.utils.lorem_ipsum.random.choice") @mock.patch("django.utils.lorem_ipsum.random.randint") def test_paragraphs_not_common(self, mock_randint, mock_choice, mock_sample): """ paragraphs(1, common=False) generating one paragraph that's not the COMMON_P paragraph. """ # Make creating 2 sentences use 2 phrases. mock_randint.return_value = 2 mock_sample.return_value = ["exercitationem", "perferendis"] mock_choice.return_value = "." 
self.assertEqual( paragraphs(1, common=False), [ "Exercitationem perferendis, exercitationem perferendis. " "Exercitationem perferendis, exercitationem perferendis." ], ) self.assertEqual(mock_randint.call_count, 7) def test_paragraphs(self): """paragraphs(1) uses the COMMON_P paragraph.""" self.assertEqual( paragraphs(1), [ "Lorem ipsum dolor sit amet, consectetur adipisicing elit, " "sed do eiusmod tempor incididunt ut labore et dolore magna " "aliqua. Ut enim ad minim veniam, quis nostrud exercitation " "ullamco laboris nisi ut aliquip ex ea commodo consequat. " "Duis aute irure dolor in reprehenderit in voluptate velit " "esse cillum dolore eu fugiat nulla pariatur. Excepteur sint " "occaecat cupidatat non proident, sunt in culpa qui officia " "deserunt mollit anim id est laborum." ], )
LoremIpsumTests
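For context, a short usage sketch of the module under test; django.utils.lorem_ipsum depends only on random, so no project settings are needed:

from django.utils.lorem_ipsum import paragraphs, sentence, words

print(words(7))                # the first 7 of the 19 common lorem ipsum words
print(words(5, common=False))  # 5 random words instead
print(sentence())              # one random sentence ending in '.' or '?'
print(paragraphs(1))           # the fixed COMMON_P paragraph, as tested above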
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 221094, "end": 223066 }
class ____(GeneratedAirbyteSource): class APIToken: @public def __init__(self, email: str, api_token: str, auth_type: Optional[str] = None): self.auth_type = check.opt_str_param(auth_type, "auth_type") self.email = check.str_param(email, "email") self.api_token = check.str_param(api_token, "api_token") class OAuth20: @public def __init__(self, access_token: str, auth_type: Optional[str] = None): self.auth_type = check.opt_str_param(auth_type, "auth_type") self.access_token = check.str_param(access_token, "access_token") @public def __init__( self, name: str, subdomain: str, credentials: Union["ZendeskTalkSource.APIToken", "ZendeskTalkSource.OAuth20"], start_date: str, ): """Airbyte Source for Zendesk Talk. Documentation can be found at https://docs.airbyte.com/integrations/sources/zendesk-talk Args: name (str): The name of the destination. subdomain (str): This is your Zendesk subdomain that can be found in your account URL. For example, in {my_subdomain}.zendesk.com, where my_subdomain is the value of your subdomain. credentials (Union[ZendeskTalkSource.APIToken, ZendeskTalkSource.OAuth20]): Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. start_date (str): The date from which you'd like to replicate data for Zendesk Talk API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. """ self.subdomain = check.str_param(subdomain, "subdomain") self.credentials = check.inst_param( credentials, "credentials", (ZendeskTalkSource.APIToken, ZendeskTalkSource.OAuth20) ) self.start_date = check.str_param(start_date, "start_date") super().__init__("Zendesk Talk", name)
ZendeskTalkSource
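A hedged construction sketch for the generated source above; the subdomain, e-mail, and token values are placeholders, and dagster-airbyte must be installed for GeneratedAirbyteSource to resolve:

source = ZendeskTalkSource(
    name="my_zendesk_talk",
    subdomain="my_subdomain",
    credentials=ZendeskTalkSource.APIToken(
        email="user@example.com",
        api_token="<api-token>",
    ),
    start_date="2023-01-01T00:00:00Z",
)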
python
fluentpython__example-code
16-coroutine/coro_exc_demo.py
{ "start": 1189, "end": 1641 }
class ____(Exception): """An exception type for the demonstration.""" def demo_exc_handling(): print('-> coroutine started') while True: try: x = yield except DemoException: # <1> print('*** DemoException handled. Continuing...') else: # <2> print('-> coroutine received: {!r}'.format(x)) raise RuntimeError('This line should never run.') # <3> # END EX_CORO_EXC
DemoException
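A short driver for the demo, assuming demo_exc_handling is in scope; gen.throw() raises the exception at the paused yield, and because DemoException is caught inside the loop the coroutine keeps running:

coro = demo_exc_handling()
next(coro)                 # prime it: '-> coroutine started'
coro.send(11)              # '-> coroutine received: 11'
coro.throw(DemoException)  # '*** DemoException handled. Continuing...'
coro.send(22)              # still alive: '-> coroutine received: 22'
coro.close()               # the final RuntimeError line never runs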
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/dt_diamond_right/package.py
{ "start": 217, "end": 593 }
class ____(Package): """This package has an indirect diamond dependency on dt-diamond-bottom""" homepage = "http://www.example.com" url = "http://www.example.com/dt-diamond-right-1.0.tar.gz" version("1.0", md5="0123456789abcdef0123456789abcdef") depends_on("dt-diamond-bottom", type=("build", "link", "run")) depends_on("c", type="build")
DtDiamondRight
python
pdm-project__pdm
src/pdm/models/markers.py
{ "start": 1209, "end": 5440 }
class ____: inner: BaseMarker def __and__(self, other: Any) -> Marker: if not isinstance(other, Marker): return NotImplemented return type(self)(self.inner & other.inner) def __or__(self, other: Any) -> Marker: if not isinstance(other, Marker): return NotImplemented return type(self)(self.inner | other.inner) def is_any(self) -> bool: return self.inner.is_any() def is_empty(self) -> bool: return self.inner.is_empty() def __str__(self) -> str: return str(self.inner) def __repr__(self) -> str: return f"<Marker {self.inner}>" def evaluate(self, environment: dict[str, Any] | None = None) -> bool: return self.inner.evaluate(environment) def matches(self, spec: EnvSpec) -> bool: non_python_marker, python_spec = self.split_pyspec() if spec.platform is None: non_python_marker = exclude_multi(non_python_marker, *PLATFORM_MARKERS) if spec.implementation is None: non_python_marker = exclude_multi(non_python_marker, *IMPLEMENTATION_MARKERS) return not (python_spec & spec.requires_python).is_empty() and non_python_marker.evaluate(spec.markers()) @lru_cache(maxsize=1024) def split_pyspec(self) -> tuple[Marker, PySpecSet]: """Split `python_version` and `python_full_version` from marker string""" python_marker = self.inner.only(*PYTHON_MARKERS) if python_marker.is_any(): return self, PySpecSet() new_marker = exclude_multi(self, *PYTHON_MARKERS) return new_marker, _build_pyspec_from_marker(python_marker) def split_extras(self) -> tuple[Marker, Marker]: """An element can be stripped from the marker only if all parts are connected with `and` operator. The rest part are returned as a string or `None` if all are stripped. """ return type(self)(self.inner.without_extras()), type(self)(self.inner.only("extra")) @overload def get_marker(marker: None) -> None: ... @overload def get_marker(marker: PackageMarker | Marker | str) -> Marker: ... def get_marker(marker: PackageMarker | Marker | str | None) -> Marker | None: if marker is None: return None if isinstance(marker, Marker): return marker elif isinstance(marker, PackageMarker): return Marker(from_pkg_marker(marker)) try: return Marker(parse_marker(marker)) except InvalidMarker as e: raise RequirementError(f"Invalid marker {marker}: {e}") from e def _build_pyspec_from_marker(marker: BaseMarker) -> PySpecSet: def split_version(version: str) -> list[str]: if "," in version: return [v.strip() for v in version.split(",")] return version.split() if isinstance(marker, MarkerExpression): name = marker.name op = marker.op version = marker.value if name == "python_version": if op == ">": int_versions = [int(ver) for ver in version.split(".")] if len(int_versions) < 2: int_versions.append(0) int_versions[-1] += 1 version = ".".join(str(v) for v in int_versions) op = ">=" elif op in ("==", "!="): if len(version.split(".")) < 3: version += ".*" elif op in ("in", "not in"): version = " ".join(v + ".*" for v in split_version(version)) if op == "in": pyspec = reduce(operator.or_, (PySpecSet(f"=={v}") for v in split_version(version))) elif op == "not in": pyspec = reduce(operator.and_, (PySpecSet(f"!={v}") for v in split_version(version))) else: pyspec = PySpecSet(f"{op}{version}") return pyspec elif isinstance(marker, MultiMarker): return reduce(operator.and_, (_build_pyspec_from_marker(m) for m in marker.markers)) elif isinstance(marker, MarkerUnion): return reduce(operator.or_, (_build_pyspec_from_marker(m) for m in marker.markers)) else: # pragma: no cover raise TypeError(f"Unsupported marker type: {type(marker)}")
Marker
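A small sketch of the splitting helper above, assuming pdm is installed so that get_marker is importable from pdm.models.markers; split_pyspec separates the python_version constraint from the rest of the marker:

from pdm.models.markers import get_marker

m = get_marker('os_name == "nt" and python_version >= "3.8"')
rest, pyspec = m.split_pyspec()
print(rest)    # os_name == "nt"
print(pyspec)  # the PySpecSet equivalent of >=3.8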
python
airbytehq__airbyte
airbyte-integrations/connectors/source-surveycto/source_surveycto/source.py
{ "start": 1612, "end": 3713 }
class ____(SurveyStream, IncrementalMixin): primary_key = "KEY" cursor_field = "SubmissionDate" _cursor_value = None @property def state(self) -> Mapping[str, Any]: if self._cursor_value: return {self.cursor_field: self._cursor_value} else: return {self.cursor_field: self.start_date} @state.setter def state(self, value: Mapping[str, Any]): self._cursor_value = value[self.cursor_field] @property def name(self) -> str: return self.form_id def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]: return None def get_json_schema(self): return self.schema def request_params( self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None ) -> MutableMapping[str, Any]: ix = self.state[self.cursor_field] return {"date": ix} def request_headers( self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None ) -> Mapping[str, Any]: return {"Authorization": "Basic " + self.auth_token} def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return self.form_id def parse_response( self, response: requests.Response, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None, ) -> Iterable[Mapping]: self.response_json = response.json() for data in self.response_json: try: yield data except Exception as e: msg = "Encountered an exception parsing schema" self.logger.exception(msg) raise e def read_records(self, *args, **kwargs) -> Iterable[Mapping[str, Any]]: for record in super().read_records(*args, **kwargs): self._cursor_value = record[self.cursor_field] yield record # Source
SurveyctoStream
python
celery__celery
t/unit/apps/test_multi.py
{ "start": 1689, "end": 6148 }
class ____: @patch('celery.apps.multi.os.mkdir') @patch('celery.apps.multi.gethostname') def test_parse(self, gethostname, mkdirs_mock): gethostname.return_value = 'example.com' p = NamespacedOptionParser([ '-c:jerry,elaine', '5', '--loglevel:kramer=DEBUG', '--flag', '--logfile=/var/log/celery/foo', '-Q', 'bar', 'jerry', 'elaine', 'kramer', '--', '.disable_rate_limits=1', ]) p.parse() it = multi_args(p, cmd='celery multi', append='*AP*', prefix='*P*', suffix='*S*') nodes = list(it) def assert_line_in(name, args): assert name in {n.name for n in nodes} argv = None for node in nodes: if node.name == name: argv = node.argv assert argv for arg in args: assert arg in argv assert_line_in( '*P*jerry@*S*', ['celery multi', '-n *P*jerry@*S*', '-Q bar', '-c 5', '--flag', '--logfile=/var/log/celery/foo', '-- .disable_rate_limits=1', '*AP*'], ) assert_line_in( '*P*elaine@*S*', ['celery multi', '-n *P*elaine@*S*', '-Q bar', '-c 5', '--flag', '--logfile=/var/log/celery/foo', '-- .disable_rate_limits=1', '*AP*'], ) assert_line_in( '*P*kramer@*S*', ['celery multi', '--loglevel=DEBUG', '-n *P*kramer@*S*', '-Q bar', '--flag', '--logfile=/var/log/celery/foo', '-- .disable_rate_limits=1', '*AP*'], ) expand = nodes[0].expander assert expand('%h') == '*P*jerry@*S*' assert expand('%n') == '*P*jerry' nodes2 = list(multi_args(p, cmd='celery multi', append='', prefix='*P*', suffix='*S*')) assert nodes2[0].argv[-1] == '-- .disable_rate_limits=1' p2 = NamespacedOptionParser(['10', '-c:1', '5']) p2.parse() nodes3 = list(multi_args(p2, cmd='celery multi')) def _args(name, *args): return args + ( '--pidfile={}.pid'.format(os.path.join(os.path.normpath('/var/run/celery/'), name)), '--logfile={}%I.log'.format(os.path.join(os.path.normpath('/var/log/celery/'), name)), f'--executable={sys.executable}', '', ) assert len(nodes3) == 10 assert nodes3[0].name == 'celery1@example.com' assert nodes3[0].argv == ( 'celery multi', '-c 5', '-n celery1@example.com') + _args('celery1') for i, worker in enumerate(nodes3[1:]): assert worker.name == 'celery%s@example.com' % (i + 2) node_i = f'celery{i + 2}' assert worker.argv == ( 'celery multi', f'-n {node_i}@example.com') + _args(node_i) nodes4 = list(multi_args(p2, cmd='celery multi', suffix='""')) assert len(nodes4) == 10 assert nodes4[0].name == 'celery1@' assert nodes4[0].argv == ( 'celery multi', '-c 5', '-n celery1@') + _args('celery1') p3 = NamespacedOptionParser(['foo@', '-c:foo', '5']) p3.parse() nodes5 = list(multi_args(p3, cmd='celery multi', suffix='""')) assert nodes5[0].name == 'foo@' assert nodes5[0].argv == ( 'celery multi', '-c 5', '-n foo@') + _args('foo') p4 = NamespacedOptionParser(['foo', '-Q:1', 'test']) p4.parse() nodes6 = list(multi_args(p4, cmd='celery multi', suffix='""')) assert nodes6[0].name == 'foo@' assert nodes6[0].argv == ( 'celery multi', '-Q test', '-n foo@') + _args('foo') p5 = NamespacedOptionParser(['foo@bar', '-Q:1', 'test']) p5.parse() nodes7 = list(multi_args(p5, cmd='celery multi', suffix='""')) assert nodes7[0].name == 'foo@bar' assert nodes7[0].argv == ( 'celery multi', '-Q test', '-n foo@bar') + _args('foo') p6 = NamespacedOptionParser(['foo@bar', '-Q:0', 'test']) p6.parse() with pytest.raises(KeyError): list(multi_args(p6)) def test_optmerge(self): p = NamespacedOptionParser(['foo', 'test']) p.parse() p.options = {'x': 'y'} r = p.optmerge('foo') assert r['x'] == 'y'
test_multi_args
python
huggingface__transformers
src/transformers/models/mistral/modeling_mistral.py
{ "start": 21242, "end": 21351 }
class ____(GenericForSequenceClassification, MistralPreTrainedModel): pass
MistralForSequenceClassification
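A minimal forward-pass sketch; the tiny configuration below is invented for illustration and is far smaller than any released Mistral checkpoint:

import torch
from transformers import MistralConfig, MistralForSequenceClassification

config = MistralConfig(
    vocab_size=128, hidden_size=32, intermediate_size=64,
    num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2,
    num_labels=3,
)
model = MistralForSequenceClassification(config)
logits = model(input_ids=torch.randint(0, 128, (1, 8))).logits
print(logits.shape)  # torch.Size([1, 3]): one sequence, three labels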
python
hyperopt__hyperopt
hyperopt/pyll/base.py
{ "start": 588, "end": 688 }
class ____: """Object to represent a missing argument to a function application"""
MissingArgument
python
urllib3__urllib3
dummyserver/socketserver.py
{ "start": 2519, "end": 2583 }
class ____(HTTPWarning): "IPv6 is not available"
NoIPv6Warning
python
bokeh__bokeh
src/bokeh/models/annotations/html/toolbars.py
{ "start": 1318, "end": 2148 }
class ____(HTMLAnnotation): # TODO: this shouldn't be an annotation # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) toolbar = Instance(".models.tools.Toolbar", help=""" A toolbar to display. """) #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
ToolbarPanel
python
google__jax
jax/_src/interpreters/pxla.py
{ "start": 14320, "end": 18258 }
class ____(NamedTuple): backend: str | None devices: Sequence[Any] | None def _emap_impl(fun: lu.WrappedFun, *args, backend: str | None, axis_name: core.AxisName, axis_size: int, global_axis_size: int, devices: Sequence[Any] | None, name: str, in_axes: Sequence[int | None], out_axes_thunk: Callable[[], Sequence[int | None]], donated_invars: Sequence[bool], is_explicit_global_axis_size: bool, ): # TODO(sharadmv,mattjj): implement these cases if any(d for d in donated_invars): raise NotImplementedError("Buffer donation not supported in eager pmap.") if is_explicit_global_axis_size: raise NotImplementedError("Non-default global_axis_size not supported in " "eager pmap.") emap_info = EmapInfo(backend, devices) shard_axes = [{} if in_axis is None else {axis_name: in_axis} for in_axis in in_axes] trace = MapTrace(axis_name, emap_info) with core.extend_axis_env_nd([(axis_name, axis_size)]): tracers = [MapTracer(trace, arg, s) for arg, s in zip(args, shard_axes)] with core.set_current_trace(trace): ans = fun.call_wrapped(*tracers) out_tracers = map(trace.to_map_tracer, ans) outvals, out_axes_src = unzip2((t.val, t.shard_axes) for t in out_tracers) out_axes = out_axes_thunk() platform = xb.get_backend(backend).platform donate_argnums = (1,) if platform in {"cuda", "rocm", "tpu"} else () new_outvals = [] for out_axis_src, out_axis, outval in zip(out_axes_src, out_axes, outvals): with api.disable_jit(False): donate_argnums_ = donate_argnums if isinstance(outval, array.ArrayImpl): # We don't want to donate if it's already sharded. donate_argnums_ = () out = api.pmap( lambda _, x: x, in_axes=(0, out_axis_src.get(axis_name)), out_axes=out_axis, devices=(None if devices is None else list(devices)), backend=backend, donate_argnums=donate_argnums_)(np.arange(axis_size), outval) new_outvals.append(out) return new_outvals def _map_schedule(idx: tuple[int | None, ...]) -> tuple[int | None, ...]: # In order to do a multi-map (a simultaneous map over several axes), we will # nest several maps. Each time we do a map, we "remove" an input axis so we # need to update the remaining map axes. For example, if we are to map over # the axes 0, 3, and 4, we make three calls to pmap with in_axes as 0, 2, 2. return tuple(None if i is None else i - sum(j is not None and j < i for j in idx[:l]) for l, i in enumerate(idx)) # We're often creating `f`s on the fly and we try to carefully make them have # the right __hash__ and __eq__. However, despite our attempts pmap's caching # still ends up not working, because it has a separate cache per # _function object_. Adding this annotation here lets us reuse the same pmap # callable for all equivalent primitive pmaps. @util.cache(max_size=None, trace_context_in_key=False) def _multi_pmap(f: Callable, info: EmapInfo, names: list[core.AxisName], all_axes: list[tuple[int | None, ...]] ) -> tuple[Callable, dict[core.AxisName, int]]: used_names = [] for i, name in reversed(list(enumerate(names))): in_axes = tuple(arg_axis[i] for arg_axis in all_axes) if any(in_axis is not None for in_axis in in_axes): f = api.pmap( f, in_axes=in_axes, axis_name=name, out_axes=0, backend=info.backend, devices=(None if info.devices is None else list(info.devices))) used_names.append(name) out_shard_axes = {name: i for i, name in enumerate(reversed(used_names))} return f, out_shard_axes FakePrimitive = namedtuple("FakePrimitive", ["multiple_results", "bind"])
EmapInfo
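The renumbering comment in _map_schedule is easiest to see on concrete inputs; the function below restates its body as standalone Python (no JAX required), purely for illustration:

def map_schedule(idx):
    # Each mapped axis is shifted left by the number of earlier mapped axes
    # that sat before it, since each nested pmap consumes one input axis.
    return tuple(
        None if i is None else i - sum(j is not None and j < i for j in idx[:l])
        for l, i in enumerate(idx)
    )

print(map_schedule((0, 3, 4)))           # (0, 2, 2), the example from the comment
print(map_schedule((None, 1, None, 2)))  # (None, 1, None, 1)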
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/vertex_ai/test_feature_store.py
{ "start": 2183, "end": 3622 }
class ____: @mock.patch(VERTEX_AI_PATH.format("feature_store.FeatureStoreHook")) def test_execute(self, mock_hook_class): # Create the mock hook and set up its return value mock_hook = mock.MagicMock() mock_hook_class.return_value = mock_hook # Set up the return value for sync_feature_view to match the hook implementation mock_hook.sync_feature_view.return_value = FEATURE_VIEW_SYNC_NAME op = SyncFeatureViewOperator( task_id=TASK_ID, project_id=GCP_PROJECT, location=GCP_LOCATION, feature_online_store_id=FEATURE_ONLINE_STORE_ID, feature_view_id=FEATURE_VIEW_ID, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) response = op.execute(context={"ti": mock.MagicMock()}) # Verify hook initialization mock_hook_class.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) # Verify hook method call mock_hook.sync_feature_view.assert_called_once_with( project_id=GCP_PROJECT, location=GCP_LOCATION, feature_online_store_id=FEATURE_ONLINE_STORE_ID, feature_view_id=FEATURE_VIEW_ID, ) # Verify response matches expected value assert response == FEATURE_VIEW_SYNC_NAME
TestSyncFeatureViewOperator
python
pytorch__pytorch
torch/_export/db/examples/specialized_attribute.py
{ "start": 101, "end": 520 }
class ____(torch.nn.Module): """ Model attributes are specialized. """ def __init__(self) -> None: super().__init__() self.a = "moo" self.b = 4 def forward(self, x): if self.a == Animal.COW.value: return x * x + self.b else: raise ValueError("bad") example_args = (torch.randn(3, 2),) model = SpecializedAttribute()
SpecializedAttribute
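The excerpt references an Animal enum defined outside the extracted span; a stand-in matching the "moo" comparison (the real definition lives earlier in the same file) makes the snippet runnable:

import torch
from enum import Enum

class Animal(Enum):
    COW = "moo"

m = SpecializedAttribute()
print(m(torch.randn(3, 2)).shape)  # torch.Size([3, 2]): the x * x + self.b branch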
python
great-expectations__great_expectations
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_vermont_zip.py
{ "start": 742, "end": 1743 }
class ____(ColumnMapMetricProvider): # This is the id string that will be used to reference your metric. condition_metric_name = "column_values.valid_vermont_zip" # This method implements the core logic for the PandasExecutionEngine @column_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column, **kwargs): return column.apply(lambda x: is_valid_vermont_zip(x)) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @column_condition_partial(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy(cls, column, _dialect, **kwargs): # raise NotImplementedError # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine # @column_condition_partial(engine=SparkDFExecutionEngine) # def _spark(cls, column, **kwargs): # raise NotImplementedError # This class defines the Expectation itself
ColumnValuesToBeValidVermontZip
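is_valid_vermont_zip is referenced above but defined outside the span; a hypothetical stand-in, based only on the fact that Vermont ZIP codes fall roughly in the 05001-05907 range (the contrib package's real helper may differ):

import re

def is_valid_vermont_zip(zip_code) -> bool:
    # Hypothetical check: five digits within Vermont's assigned range.
    if not re.fullmatch(r"\d{5}", str(zip_code)):
        return False
    return 5001 <= int(zip_code) <= 5907

print(is_valid_vermont_zip("05401"))  # True (Burlington)
print(is_valid_vermont_zip("10001"))  # False (Manhattan)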
python
django-extensions__django-extensions
django_extensions/db/fields/__init__.py
{ "start": 3122, "end": 10498 }
class ____(UniqueFieldMixin, SlugField): """ AutoSlugField By default, sets editable=False, blank=True. Required arguments: populate_from Specifies which field, list of fields, or model method the slug will be populated from. populate_from can traverse a ForeignKey relationship by using Django ORM syntax: populate_from = 'related_model__field' Optional arguments: separator Defines the used separator (default: '-') overwrite If set to True, overwrites the slug on every save (default: False) slugify_function Defines the function which will be used to "slugify" a content (default: :py:func:`~django.template.defaultfilters.slugify` ) It is possible to provide custom "slugify" function with the ``slugify_function`` function in a model class. ``slugify_function`` function in a model class takes priority over ``slugify_function`` given as an argument to :py:class:`~AutoSlugField`. Example .. code-block:: python # models.py from django.db import models from django_extensions.db.fields import AutoSlugField class MyModel(models.Model): def slugify_function(self, content): return content.replace('_', '-').lower() title = models.CharField(max_length=42) slug = AutoSlugField(populate_from='title') Inspired by SmileyChris' Unique Slugify snippet: https://www.djangosnippets.org/snippets/690/ """ def __init__(self, *args, **kwargs): kwargs.setdefault("blank", True) kwargs.setdefault("editable", False) populate_from = kwargs.pop("populate_from", None) if populate_from is None: raise ValueError("missing 'populate_from' argument") else: self._populate_from = populate_from if not callable(populate_from): if not isinstance(populate_from, (list, tuple)): populate_from = (populate_from,) if not all(isinstance(e, str) for e in populate_from): raise TypeError( "'populate_from' must be str or list[str] or tuple[str], found `%s`" % populate_from ) self.slugify_function = kwargs.pop("slugify_function", slugify) self.separator = kwargs.pop("separator", "-") self.overwrite = kwargs.pop("overwrite", False) self.check_is_bool("overwrite") self.overwrite_on_add = kwargs.pop("overwrite_on_add", True) self.check_is_bool("overwrite_on_add") self.allow_duplicates = kwargs.pop("allow_duplicates", False) self.check_is_bool("allow_duplicates") self.max_unique_query_attempts = kwargs.pop( "max_unique_query_attempts", MAX_UNIQUE_QUERY_ATTEMPTS ) super().__init__(*args, **kwargs) def _slug_strip(self, value): """ Clean up a slug by removing slug separator characters that occur at the beginning or end of a slug. If an alternate separator is used, it will also replace any instances of the default '-' separator with the new separator. 
""" re_sep = "(?:-|%s)" % re.escape(self.separator) value = re.sub("%s+" % re_sep, self.separator, value) return re.sub(r"^%s+|%s+$" % (re_sep, re_sep), "", value) @staticmethod def slugify_func(content, slugify_function): if content: return slugify_function(content) return "" def slug_generator(self, original_slug, start): yield original_slug for i in range(start, self.max_unique_query_attempts): slug = original_slug end = "%s%s" % (self.separator, i) end_len = len(end) if self.slug_len and len(slug) + end_len > self.slug_len: slug = slug[: self.slug_len - end_len] slug = self._slug_strip(slug) slug = "%s%s" % (slug, end) yield slug raise RuntimeError( "max slug attempts for %s exceeded (%s)" % (original_slug, self.max_unique_query_attempts) ) def create_slug(self, model_instance, add): slug = getattr(model_instance, self.attname) use_existing_slug = False if slug and not self.overwrite: # Existing slug and not configured to overwrite - Short-circuit # here to prevent slug generation when not required. use_existing_slug = True if self.overwrite_on_add and add: use_existing_slug = False if use_existing_slug: return slug # get fields to populate from and slug field to set populate_from = self._populate_from if not isinstance(populate_from, (list, tuple)): populate_from = (populate_from,) slug_field = model_instance._meta.get_field(self.attname) slugify_function = getattr( model_instance, "slugify_function", self.slugify_function ) # slugify the original field content and set next step to 2 slug_for_field = lambda lookup_value: self.slugify_func( self.get_slug_fields(model_instance, lookup_value), slugify_function=slugify_function, ) slug = self.separator.join(map(slug_for_field, populate_from)) start = 2 # strip slug depending on max_length attribute of the slug field # and clean-up self.slug_len = slug_field.max_length if self.slug_len: slug = slug[: self.slug_len] slug = self._slug_strip(slug) original_slug = slug if self.allow_duplicates: setattr(model_instance, self.attname, slug) return slug return self.find_unique( model_instance, slug_field, self.slug_generator(original_slug, start) ) def get_slug_fields(self, model_instance, lookup_value): if callable(lookup_value): # A function has been provided return "%s" % lookup_value(model_instance) lookup_value_path = lookup_value.split(LOOKUP_SEP) attr = model_instance for elem in lookup_value_path: try: attr = getattr(attr, elem) except AttributeError: raise AttributeError( "value {} in AutoSlugField's 'populate_from' argument {} returned an error - {} has no attribute {}".format( # noqa: E501 elem, lookup_value, attr, elem ) ) if callable(attr): return "%s" % attr() return attr def pre_save(self, model_instance, add): value = force_str(self.create_slug(model_instance, add)) return value def get_internal_type(self): return "SlugField" def deconstruct(self): name, path, args, kwargs = super().deconstruct() kwargs["populate_from"] = self._populate_from if not self.separator == "-": kwargs["separator"] = self.separator if self.overwrite is not False: kwargs["overwrite"] = True if self.allow_duplicates is not False: kwargs["allow_duplicates"] = True return name, path, args, kwargs
AutoSlugField
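A small model sketch for the field above, assuming django-extensions is installed in a configured project; saving two articles titled "Hello" should yield slugs "hello" and then "hello-2", since slug_generator starts retrying at suffix 2:

from django.db import models
from django_extensions.db.fields import AutoSlugField

class Article(models.Model):
    title = models.CharField(max_length=100)
    slug = AutoSlugField(populate_from="title")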
python
plotly__plotly.py
plotly/graph_objs/layout/yaxis/_unifiedhovertitle.py
{ "start": 235, "end": 5090 }
class ____(_BaseLayoutHierarchyType): _parent_path_str = "layout.yaxis" _path_str = "layout.yaxis.unifiedhovertitle" _valid_props = {"text"} @property def text(self): """ Template string used for rendering the title that appear on x or y unified hover box. Variables are inserted using %{variable}, for example "y: %{y}". Numbers are formatted using d3-format's syntax %{variable:d3-format}, for example "Price: %{y:$.2f}". https://github.com/d3/d3-format/tree/v1.4.5#d3-format for details on the formatting syntax. Dates are formatted using d3-time-format's syntax %{variable|d3-time-format}, for example "Day: %{2019-01-01|%A}". https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format for details on the date formatting syntax. Variables that can't be found will be replaced with the specifier. For example, a template of "data: %{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1 and y is missing. Variables with an undefined value will be replaced with the fallback value. The 'text' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["text"] @text.setter def text(self, val): self["text"] = val @property def _prop_descriptions(self): return """\ text Template string used for rendering the title that appear on x or y unified hover box. Variables are inserted using %{variable}, for example "y: %{y}". Numbers are formatted using d3-format's syntax %{variable:d3-format}, for example "Price: %{y:$.2f}". https://github.com/d3/d3-format/tree/v1.4.5#d3-format for details on the formatting syntax. Dates are formatted using d3-time-format's syntax %{variable|d3-time-format}, for example "Day: %{2019-01-01|%A}". https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format for details on the date formatting syntax. Variables that can't be found will be replaced with the specifier. For example, a template of "data: %{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1 and y is missing. Variables with an undefined value will be replaced with the fallback value. """ def __init__(self, arg=None, text=None, **kwargs): """ Construct a new Unifiedhovertitle object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.layout.yaxis.U nifiedhovertitle` text Template string used for rendering the title that appear on x or y unified hover box. Variables are inserted using %{variable}, for example "y: %{y}". Numbers are formatted using d3-format's syntax %{variable:d3-format}, for example "Price: %{y:$.2f}". https://github.com/d3/d3-format/tree/v1.4.5#d3-format for details on the formatting syntax. Dates are formatted using d3-time-format's syntax %{variable|d3-time-format}, for example "Day: %{2019-01-01|%A}". https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format for details on the date formatting syntax. Variables that can't be found will be replaced with the specifier. For example, a template of "data: %{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1 and y is missing. Variables with an undefined value will be replaced with the fallback value. 
Returns ------- Unifiedhovertitle """ super().__init__("unifiedhovertitle") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.layout.yaxis.Unifiedhovertitle constructor must be a dict or an instance of :class:`plotly.graph_objs.layout.yaxis.Unifiedhovertitle`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("text", arg, text) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Unifiedhovertitle
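A hedged usage sketch, assuming a plotly build recent enough to expose this property; the template string follows the d3-format syntax described in the docstring:

import plotly.graph_objects as go

fig = go.Figure(go.Scatter(y=[1.0, 3.0, 2.0]))
fig.update_layout(
    hovermode="y unified",
    yaxis=dict(unifiedhovertitle=dict(text="y = %{y:.2f}")),
)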
python
ansible__ansible
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests.py
{ "start": 90, "end": 197 }
class ____(object): def tests(self): return { 'testtest': testtest }
TestModule
python
sphinx-doc__sphinx
sphinx/writers/latex.py
{ "start": 1725, "end": 1812 }
class ____(SphinxError): category = 'Markup is unsupported in LaTeX'
UnsupportedError
python
encode__django-rest-framework
tests/test_relations.py
{ "start": 9892, "end": 11154 }
class ____(APISimpleTestCase): def setUp(self): self.instance = MockObject(pk=1, name='foo') self.field = serializers.HyperlinkedIdentityField(view_name='example') self.field.reverse = mock_reverse self.field._context = {'request': True} def test_representation(self): representation = self.field.to_representation(self.instance) assert representation == 'http://example.org/example/1/' def test_representation_unsaved_object(self): representation = self.field.to_representation(MockObject(pk=None)) assert representation is None def test_representation_with_format(self): self.field._context['format'] = 'xml' representation = self.field.to_representation(self.instance) assert representation == 'http://example.org/example/1.xml/' def test_improperly_configured(self): """ If a matching view cannot be reversed with the given instance, the user has misconfigured something, as the URL conf and the hyperlinked field do not match. """ self.field.reverse = fail_reverse with pytest.raises(ImproperlyConfigured): self.field.to_representation(self.instance)
TestHyperlinkedIdentityField
python
kamyu104__LeetCode-Solutions
Python/strobogrammatic-number.py
{ "start": 29, "end": 408 }
class ____(object): lookup = {'0':'0', '1':'1', '6':'9', '8':'8', '9':'6'} # @param {string} num # @return {boolean} def isStrobogrammatic(self, num): n = len(num) for i in xrange((n+1) / 2): if num[n-1-i] not in self.lookup or \ num[i] != self.lookup[num[n-1-i]]: return False return True
Solution
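The solution above is Python 2 (note xrange); an equivalent Python 3 two-pointer version, for reference:

def is_strobogrammatic(num: str) -> bool:
    lookup = {"0": "0", "1": "1", "6": "9", "8": "8", "9": "6"}
    i, j = 0, len(num) - 1
    while i <= j:
        if num[j] not in lookup or num[i] != lookup[num[j]]:
            return False
        i, j = i + 1, j - 1
    return True

print(is_strobogrammatic("69"))   # True: reads the same rotated 180 degrees
print(is_strobogrammatic("962"))  # False: '2' has no rotated counterpart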
python
pypa__warehouse
tests/unit/manage/test_forms.py
{ "start": 2308, "end": 3934 }
class ____: @pytest.mark.parametrize( ("is_team", "team_name", "team_choices", "username", "user_choices", "errors"), [ # Team validators ("true", "", [], "", [], {"team_name": ["This field is required."]}), ("true", "team", [], "", [], {"team_name": ["Not a valid choice."]}), ("true", "team", ["team"], "", [], {}), # User validators ("false", "", [], "", [], {"username": ["This field is required."]}), ("false", "", [], "foo", [], {"username": ["Not a valid choice."]}), ("false", "", [], "foo", ["foo"], {}), ], ) def test_validate( self, pyramid_request, is_team, team_name, team_choices, username, user_choices, errors, ): pyramid_request.POST = MultiDict( { "is_team": is_team, "team_name": team_name, "username": username, # Required fields with no effect on validation "role_name": "Maintainer", "team_project_role_name": "Maintainer", } ) user_service = pretend.stub() form = forms.CreateInternalRoleForm( pyramid_request.POST, team_choices=team_choices, user_choices=user_choices, user_service=user_service, ) assert form.user_service is user_service assert not form.validate() if errors else form.validate(), str(form.errors) assert form.errors == errors
TestCreateInternalRoleForm
python
tensorflow__tensorflow
tensorflow/python/data/ops/interleave_op.py
{ "start": 2024, "end": 3715 }
class ____(dataset_ops.UnaryDataset): """A `Dataset` that interleaves the result of transformed inputs.""" def __init__(self, input_dataset, map_func, cycle_length, block_length, name=None): """See `Dataset.interleave()` for details.""" self._input_dataset = input_dataset self._map_func = structured_function.StructuredFunctionWrapper( map_func, self._transformation_name(), dataset=input_dataset) if not isinstance(self._map_func.output_structure, dataset_ops.DatasetSpec): raise TypeError( "The `map_func` argument must return a `Dataset` object. Got " f"{dataset_ops.get_type(self._map_func.output_structure)!r}.") self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access self._cycle_length = ops.convert_to_tensor( cycle_length, dtype=dtypes.int64, name="cycle_length") self._block_length = ops.convert_to_tensor( block_length, dtype=dtypes.int64, name="block_length") self._name = name variant_tensor = gen_dataset_ops.interleave_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access self._map_func.function.captured_inputs, # pylint: disable=protected-access self._cycle_length, self._block_length, f=self._map_func.function, **self._common_args) super().__init__(input_dataset, variant_tensor) def _functions(self): return [self._map_func] @property def element_spec(self): return self._structure def _transformation_name(self): return "Dataset.interleave()"
_InterleaveDataset
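A small end-user illustration of the public API this dataset class backs; with cycle_length=2 and block_length=2, interleave pulls two elements at a time from two inner datasets in rotation:

import tensorflow as tf

ds = tf.data.Dataset.range(2).interleave(
    lambda x: tf.data.Dataset.from_tensors(x).repeat(4),
    cycle_length=2,
    block_length=2,
)
print([int(v) for v in ds])  # [0, 0, 1, 1, 0, 0, 1, 1]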
python
davidhalter__jedi
test/completion/descriptors.py
{ "start": 2771, "end": 3130 }
class ____(): def __init__(self, func): self.func = func def __get__(self, obj, objtype): if obj is None: return self.func return partial(self, obj) def __call__(self, *args, **kwargs): # We don't do caching here, but that's what would normally happen. return self.func(*args, **kwargs)
Memoize
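A usage sketch for the descriptor above, assuming it is in scope along with the functools.partial import its __get__ relies on; note the class's own comment says it skips the actual caching:

class Fib:
    @Memoize
    def fib(self, n):
        return n if n < 2 else self.fib(n - 1) + self.fib(n - 2)

print(Fib().fib(10))  # 55: __get__ binds the instance via partial on each lookup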
python
getsentry__sentry
src/sentry/rules/processing/buffer_processing.py
{ "start": 593, "end": 671 }
class ____: model: type[models.Model] filters: FilterKeys
BufferHashKeys
python
getsentry__sentry
tests/sentry/core/endpoints/test_project_index.py
{ "start": 772, "end": 10424 }
class ____(APITestCase): endpoint = "sentry-api-0-projects" def test_member_constraints(self) -> None: user = self.create_user(is_superuser=True) org = self.create_organization() team = self.create_team(organization=org, members=[user]) project = self.create_project(teams=[team]) org2 = self.create_organization() team2 = self.create_team(organization=org2, members=[]) self.create_project(teams=[team2]) self.login_as(user=user, superuser=True) response = self.get_success_response() assert len(response.data) == 1 assert response.data[0]["id"] == str(project.id) assert response.data[0]["organization"]["id"] == str(org.id) def test_show_all_with_superuser(self) -> None: with unguarded_write(using=router.db_for_write(Project)): Project.objects.all().delete() user = self.create_user(is_superuser=True) org = self.create_organization(owner=user) self.create_project(organization=org) org2 = self.create_organization() self.create_project(organization=org2) self.login_as(user=user, superuser=True) response = self.get_success_response(qs_params={"show": "all"}) assert len(response.data) == 2 def test_show_all_without_superuser(self) -> None: with unguarded_write(using=router.db_for_write(Project)): Project.objects.all().delete() user = self.create_user(is_superuser=False) org = self.create_organization(owner=user) self.create_project(organization=org) org2 = self.create_organization() self.create_project(organization=org2) self.login_as(user=user) response = self.get_success_response() assert len(response.data) == 0 def test_filter_by_org_id(self) -> None: user = self.create_user(is_superuser=True) org = self.create_organization() team = self.create_team(organization=org, members=[user]) project = self.create_project(teams=[team]) org2 = self.create_organization() team2 = self.create_team(organization=org2, members=[user]) self.create_project(teams=[team2]) self.login_as(user=user, superuser=False) response = self.get_success_response(qs_params={"organizationId": str(org.id)}) assert len(response.data) == 1 assert response.data[0]["id"] == str(project.id) assert response.data[0]["organization"]["id"] == str(org.id) def test_status_filter(self) -> None: with unguarded_write(using=router.db_for_write(Project)): Project.objects.all().delete() user = self.create_user() org = self.create_organization() team = self.create_team(organization=org, members=[user]) project1 = self.create_project(teams=[team]) project2 = self.create_project(teams=[team], status=ObjectStatus.PENDING_DELETION) self.login_as(user=user) response = self.get_success_response(qs_params={"status": "active"}) assert len(response.data) == 1 assert response.data[0]["id"] == str(project1.id) response = self.get_success_response(qs_params={"status": "deleted"}) assert len(response.data) == 1 assert response.data[0]["id"] == str(project2.id) def test_query_filter(self) -> None: with unguarded_write(using=router.db_for_write(Project)): Project.objects.all().delete() user = self.create_user() org = self.create_organization() team = self.create_team(organization=org, members=[user]) project1 = self.create_project(name="foo", teams=[team]) self.create_project(name="bar", teams=[team]) self.login_as(user=user) response = self.get_success_response(qs_params={"query": "foo"}) assert len(response.data) == 1 assert response.data[0]["id"] == str(project1.id) response = self.get_success_response(qs_params={"query": "baz"}) assert len(response.data) == 0 def test_slug_query(self) -> None: with unguarded_write(using=router.db_for_write(Project)): 
Project.objects.all().delete() user = self.create_user() org = self.create_organization() team = self.create_team(organization=org, members=[user]) project1 = self.create_project(slug="foo", name="foo", teams=[team]) self.create_project(name="bar", slug="bar", teams=[team]) self.login_as(user=user) response = self.get_success_response(qs_params={"query": "slug:foo"}) assert len(response.data) == 1 assert response.data[0]["id"] == str(project1.id) response = self.get_success_response(qs_params={"query": "slug:baz"}) assert len(response.data) == 0 def test_dsn_filter(self) -> None: with unguarded_write(using=router.db_for_write(Project)): Project.objects.all().delete() user = self.create_user() org = self.create_organization() team = self.create_team(organization=org, members=[user]) project1 = self.create_project(teams=[team]) key = ProjectKey.objects.get_or_create(project=project1)[0] self.create_project(teams=[team]) self.login_as(user=user) response = self.get_success_response(qs_params={"query": f"dsn:{key.public_key}"}) assert len(response.data) == 1 assert response.data[0]["id"] == str(project1.id) response = self.get_success_response(qs_params={"query": "dsn:nope"}) assert len(response.data) == 0 def test_id_query(self) -> None: with unguarded_write(using=router.db_for_write(Project)): Project.objects.all().delete() user = self.create_user() org = self.create_organization() team = self.create_team(organization=org, members=[user]) project1 = self.create_project(teams=[team]) self.create_project(teams=[team]) self.login_as(user=user) response = self.get_success_response(qs_params={"query": f"id:{project1.id}"}) assert len(response.data) == 1 assert response.data[0]["id"] == str(project1.id) response = self.get_success_response(qs_params={"query": "id:-1"}) assert len(response.data) == 0 def test_valid_with_internal_integration(self) -> None: project = self.create_project(organization=self.organization, teams=[self.team]) internal_integration = self.create_internal_integration( name="my_app", organization=self.organization, scopes=("project:read",), webhook_url="http://example.com", ) token = self.create_internal_integration_token( user=self.user, internal_integration=internal_integration ) path = reverse(self.endpoint) response = self.client.get(path, HTTP_AUTHORIZATION=f"Bearer {token.token}") assert project.name.encode("utf-8") in response.content def test_deleted_token_with_internal_integration(self) -> None: internal_integration = self.create_internal_integration( name="my_app", organization=self.organization, scopes=("project:read",), webhook_url="http://example.com", ) token = self.create_internal_integration_token( user=self.user, internal_integration=internal_integration ) with self.tasks(), assume_test_silo_mode(SiloMode.CONTROL), outbox_runner(): # Fetch the record using the created token install_token = SentryAppInstallationToken.objects.get(api_token=token) # Delete the token install_token.delete() schedule_hybrid_cloud_foreign_key_jobs_control() with self.tasks(): schedule_hybrid_cloud_foreign_key_jobs() self.get_error_response( extra_headers={"HTTP_AUTHORIZATION": f"Bearer {token.token}"}, status_code=status.HTTP_401_UNAUTHORIZED, ) def get_installed_unpublished_sentry_app_access_token(self) -> ApiToken: self.project = self.create_project(organization=self.organization, teams=[self.team]) sentry_app = self.create_sentry_app( scopes=("project:read",), published=False, verify_install=False, name="Super Awesome App", ) installation = self.create_sentry_app_installation( 
slug=sentry_app.slug, organization=self.organization, user=self.user ) return installation.api_token.token def test_valid_with_public_integration(self) -> None: token = self.get_installed_unpublished_sentry_app_access_token() # there should only be one record created so just grab the first one response = self.get_success_response( extra_headers={"HTTP_AUTHORIZATION": f"Bearer {token}"} ) assert self.project.name.encode("utf-8") in response.content @responses.activate def test_deleted_token_with_public_integration(self) -> None: token = self.get_installed_unpublished_sentry_app_access_token() with assume_test_silo_mode(SiloMode.CONTROL), outbox_runner(): token = ApiToken.objects.get(token=token) token.delete() with self.tasks(): schedule_hybrid_cloud_foreign_key_jobs() self.get_error_response( extra_headers={"HTTP_AUTHORIZATION": f"Bearer {token}"}, status_code=status.HTTP_401_UNAUTHORIZED, )
ProjectsListTest
python
astropy__astropy
astropy/units/tests/test_quantity_array_methods.py
{ "start": 5545, "end": 14959 }
class ____: """ Test statistical functions """ def test_mean(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m assert_array_equal(np.mean(q1), 3.6 * u.m) assert_array_equal(np.mean(q1, keepdims=True), [3.6] * u.m) def test_mean_inplace(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m qi = 1.5 * u.s qi2 = np.mean(q1, out=qi) assert qi2 is qi assert qi == 3.6 * u.m def test_mean_where(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0, 7.0]) * u.m assert_array_equal(np.mean(q1, where=q1 < 7 * u.m), 3.6 * u.m) def test_std(self): q1 = np.array([1.0, 2.0]) * u.m assert_array_equal(np.std(q1), 0.5 * u.m) assert_array_equal(q1.std(axis=-1, keepdims=True), [0.5] * u.m) def test_std_inplace(self): q1 = np.array([1.0, 2.0]) * u.m qi = 1.5 * u.s np.std(q1, out=qi) assert qi == 0.5 * u.m def test_std_where(self): q1 = np.array([1.0, 2.0, 3.0]) * u.m assert_array_equal(np.std(q1, where=q1 < 3 * u.m), 0.5 * u.m) def test_var(self): q1 = np.array([1.0, 2.0]) * u.m assert_array_equal(np.var(q1), 0.25 * u.m**2) assert_array_equal(q1.var(axis=0, keepdims=True), [0.25] * u.m**2) def test_var_inplace(self): q1 = np.array([1.0, 2.0]) * u.m qi = 1.5 * u.s np.var(q1, out=qi) assert qi == 0.25 * u.m**2 def test_var_where(self): q1 = np.array([1.0, 2.0, 3.0]) * u.m assert_array_equal(np.var(q1, where=q1 < 3 * u.m), 0.25 * u.m**2) def test_median(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m assert np.median(q1) == 4.0 * u.m def test_median_inplace(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m qi = 1.5 * u.s np.median(q1, out=qi) assert qi == 4 * u.m def test_min(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m assert np.min(q1) == 1.0 * u.m def test_min_inplace(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m qi = 1.5 * u.s np.min(q1, out=qi) assert qi == 1.0 * u.m def test_min_where(self): q1 = np.array([0.0, 1.0, 2.0, 4.0, 5.0, 6.0]) * u.m assert np.min(q1, initial=10 * u.m, where=q1 > 0 * u.m) == 1.0 * u.m def test_argmin(self): q1 = np.array([6.0, 2.0, 4.0, 5.0, 6.0]) * u.m assert np.argmin(q1) == 1 def test_argmin_keepdims(self): q1 = np.array([[6.0, 2.0], [4.0, 5.0]]) * u.m assert_array_equal(q1.argmin(axis=0, keepdims=True), np.array([[1, 0]])) def test_max(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m assert np.max(q1) == 6.0 * u.m def test_max_inplace(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m qi = 1.5 * u.s np.max(q1, out=qi) assert qi == 6.0 * u.m def test_max_where(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0, 7.0]) * u.m assert np.max(q1, initial=0 * u.m, where=q1 < 7 * u.m) == 6.0 * u.m def test_argmax(self): q1 = np.array([5.0, 2.0, 4.0, 5.0, 6.0]) * u.m assert np.argmax(q1) == 4 def test_argmax_keepdims(self): q1 = np.array([[6.0, 2.0], [4.0, 5.0]]) * u.m assert_array_equal(q1.argmax(axis=0, keepdims=True), np.array([[0, 1]])) def test_clip(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m c1 = q1.clip(1500, 5.5 * u.Mm / u.km) assert np.all(c1 == np.array([1.5, 2.0, 4.0, 5.0, 5.5]) * u.km / u.m) def test_clip_inplace(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m c1 = q1.clip(1500, 5.5 * u.Mm / u.km, out=q1) assert np.all(q1 == np.array([1.5, 2.0, 4.0, 5.0, 5.5]) * u.km / u.m) c1[0] = 10 * u.Mm / u.mm assert np.all(c1.value == q1.value) def test_conj(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m assert np.all(q1.conj() == q1) def test_ptp(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m assert np.ptp(q1) == 5.0 * u.m def test_ptp_inplace(self): q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m qi = 1.5 * 
u.s np.ptp(q1, out=qi) assert qi == 5.0 * u.m def test_round(self): q1 = np.array([1.253, 2.253, 3.253]) * u.kg assert np.all(np.round(q1) == np.array([1, 2, 3]) * u.kg) assert np.all(np.round(q1, decimals=2) == np.round(q1.value, decimals=2) * u.kg) assert np.all(q1.round(decimals=2) == q1.value.round(decimals=2) * u.kg) def test_round_inplace(self): q1 = np.array([1.253, 2.253, 3.253]) * u.kg qi = np.zeros(3) * u.s a = q1.round(decimals=2, out=qi) assert a is qi assert np.all(q1.round(decimals=2) == qi) def test_sum(self): q1 = np.array([1.0, 2.0, 6.0]) * u.m assert np.all(q1.sum() == 9.0 * u.m) assert np.all(np.sum(q1) == 9.0 * u.m) q2 = np.array([[4.0, 5.0, 9.0], [1.0, 1.0, 1.0]]) * u.s assert np.all(q2.sum(0) == np.array([5.0, 6.0, 10.0]) * u.s) assert np.all(np.sum(q2, 0) == np.array([5.0, 6.0, 10.0]) * u.s) def test_sum_inplace(self): q1 = np.array([1.0, 2.0, 6.0]) * u.m qi = 1.5 * u.s np.sum(q1, out=qi) assert qi == 9.0 * u.m def test_sum_where(self): q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m where = q1 < 7 * u.m assert np.all(q1.sum(where=where) == 9.0 * u.m) assert np.all(np.sum(q1, where=where) == 9.0 * u.m) @pytest.mark.parametrize("initial", [0, 0 * u.m, 1 * u.km]) def test_sum_initial(self, initial): q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m expected = 16 * u.m + initial assert q1.sum(initial=initial) == expected assert np.sum(q1, initial=initial) == expected def test_sum_dimensionless_initial(self): q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.one assert q1.sum(initial=1000) == 1016 * u.one @pytest.mark.parametrize("initial", [10, 1 * u.s]) def test_sum_initial_exception(self, initial): q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m with pytest.raises(u.UnitsError): q1.sum(initial=initial) def test_cumsum(self): q1 = np.array([1, 2, 6]) * u.m assert np.all(q1.cumsum() == np.array([1, 3, 9]) * u.m) assert np.all(np.cumsum(q1) == np.array([1, 3, 9]) * u.m) q2 = np.array([4, 5, 9]) * u.s assert np.all(q2.cumsum() == np.array([4, 9, 18]) * u.s) assert np.all(np.cumsum(q2) == np.array([4, 9, 18]) * u.s) def test_cumsum_inplace(self): q1 = np.array([1, 2, 6]) * u.m qi = np.ones(3) * u.s np.cumsum(q1, out=qi) assert np.all(qi == np.array([1, 3, 9]) * u.m) q2 = q1 q1.cumsum(out=q1) assert np.all(q2 == qi) def test_prod(self): q1 = np.array([1, 2, 6]) * u.m with pytest.raises(u.UnitsError) as exc: q1.prod() with pytest.raises(u.UnitsError) as exc: np.prod(q1) q2 = np.array([3.0, 4.0, 5.0]) * u.Unit(1) assert q2.prod() == 60.0 * u.Unit(1) assert np.prod(q2) == 60.0 * u.Unit(1) def test_cumprod(self): q1 = np.array([1, 2, 6]) * u.m with pytest.raises(u.UnitsError) as exc: q1.cumprod() with pytest.raises(u.UnitsError) as exc: np.cumprod(q1) q2 = np.array([3, 4, 5]) * u.Unit(1) assert np.all(q2.cumprod() == np.array([3, 12, 60]) * u.Unit(1)) assert np.all(np.cumprod(q2) == np.array([3, 12, 60]) * u.Unit(1)) def test_diff(self): q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m assert np.all(q1.diff() == np.array([1.0, 2.0, 6.0]) * u.m) assert np.all(np.diff(q1) == np.array([1.0, 2.0, 6.0]) * u.m) def test_ediff1d(self): q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m assert np.all(q1.ediff1d() == np.array([1.0, 2.0, 6.0]) * u.m) assert np.all(np.ediff1d(q1) == np.array([1.0, 2.0, 6.0]) * u.m) def test_dot_meth(self): q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m q2 = np.array([3.0, 4.0, 5.0, 6.0]) * u.s q3 = q1.dot(q2) assert q3.value == np.dot(q1.value, q2.value) assert q3.unit == u.m * u.s def test_trace_func(self): q = np.array([[1.0, 2.0], [3.0, 4.0]]) * u.m assert np.trace(q) == 5.0 * u.m def 
test_trace_meth(self): q1 = np.array([[1.0, 2.0], [3.0, 4.0]]) * u.m assert q1.trace() == 5.0 * u.m cont = u.Quantity(4.0, u.s) q2 = np.array([[3.0, 4.0], [5.0, 6.0]]) * u.m q2.trace(out=cont) assert cont == 9.0 * u.m def test_clip_func(self): q = np.arange(10) * u.m assert np.all( np.clip(q, 3 * u.m, 6 * u.m) == np.array([3.0, 3.0, 3.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0, 6.0]) * u.m ) def test_clip_meth(self): expected = np.array([3.0, 3.0, 3.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0, 6.0]) * u.m q1 = np.arange(10) * u.m q3 = q1.clip(3 * u.m, 6 * u.m) assert np.all(q1.clip(3 * u.m, 6 * u.m) == expected) cont = np.zeros(10) * u.s q1.clip(3 * u.m, 6 * u.m, out=cont) assert np.all(cont == expected)
TestQuantityStatsFuncs
python
pandas-dev__pandas
pandas/tests/arrays/sparse/test_indexing.py
{ "start": 4281, "end": 9973 }
class ____: def test_take_scalar_raises(self, arr): msg = "'indices' must be an array, not a scalar '2'." with pytest.raises(ValueError, match=msg): arr.take(2) def test_take(self, arr_data, arr): exp = SparseArray(np.take(arr_data, [2, 3])) tm.assert_sp_array_equal(arr.take([2, 3]), exp) exp = SparseArray(np.take(arr_data, [0, 1, 2])) tm.assert_sp_array_equal(arr.take([0, 1, 2]), exp) def test_take_all_empty(self): sparse = pd.array([0, 0], dtype=SparseDtype("int64")) result = sparse.take([0, 1], allow_fill=True, fill_value=np.nan) tm.assert_sp_array_equal(sparse, result) def test_take_different_fill_value(self): # Take with a different fill value shouldn't overwrite the original sparse = pd.array([0.0], dtype=SparseDtype("float64", fill_value=0.0)) result = sparse.take([0, -1], allow_fill=True, fill_value=np.nan) expected = pd.array([0, np.nan], dtype=sparse.dtype) tm.assert_sp_array_equal(expected, result) def test_take_fill_value(self): data = np.array([1, np.nan, 0, 3, 0]) sparse = SparseArray(data, fill_value=0) exp = SparseArray(np.take(data, [0]), fill_value=0) tm.assert_sp_array_equal(sparse.take([0]), exp) exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0) tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp) def test_take_negative(self, arr_data, arr): exp = SparseArray(np.take(arr_data, [-1])) tm.assert_sp_array_equal(arr.take([-1]), exp) exp = SparseArray(np.take(arr_data, [-4, -3, -2])) tm.assert_sp_array_equal(arr.take([-4, -3, -2]), exp) def test_bad_take(self, arr): with pytest.raises(IndexError, match="bounds"): arr.take([11]) def test_take_filling(self): # similar tests as GH 12631 sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4]) result = sparse.take(np.array([1, 0, -1])) expected = SparseArray([np.nan, np.nan, 4]) tm.assert_sp_array_equal(result, expected) # TODO: actionable? # XXX: test change: fill_value=True -> allow_fill=True result = sparse.take(np.array([1, 0, -1]), allow_fill=True) expected = SparseArray([np.nan, np.nan, np.nan]) tm.assert_sp_array_equal(result, expected) # allow_fill=False result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) expected = SparseArray([np.nan, np.nan, 4]) tm.assert_sp_array_equal(result, expected) msg = "Invalid value in 'indices'" with pytest.raises(ValueError, match=msg): sparse.take(np.array([1, 0, -2]), allow_fill=True) with pytest.raises(ValueError, match=msg): sparse.take(np.array([1, 0, -5]), allow_fill=True) msg = "out of bounds value in 'indices'" with pytest.raises(IndexError, match=msg): sparse.take(np.array([1, -6])) with pytest.raises(IndexError, match=msg): sparse.take(np.array([1, 5])) with pytest.raises(IndexError, match=msg): sparse.take(np.array([1, 5]), allow_fill=True) def test_take_filling_fill_value(self): # same tests as GH#12631 sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0) result = sparse.take(np.array([1, 0, -1])) expected = SparseArray([0, np.nan, 4], fill_value=0) tm.assert_sp_array_equal(result, expected) # fill_value result = sparse.take(np.array([1, 0, -1]), allow_fill=True) # TODO: actionable? # XXX: behavior change. # the old way of filling self.fill_value doesn't follow EA rules. 
# It's supposed to be self.dtype.na_value (nan in this case) expected = SparseArray([0, np.nan, np.nan], fill_value=0) tm.assert_sp_array_equal(result, expected) # allow_fill=False result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) expected = SparseArray([0, np.nan, 4], fill_value=0) tm.assert_sp_array_equal(result, expected) msg = "Invalid value in 'indices'." with pytest.raises(ValueError, match=msg): sparse.take(np.array([1, 0, -2]), allow_fill=True) with pytest.raises(ValueError, match=msg): sparse.take(np.array([1, 0, -5]), allow_fill=True) msg = "out of bounds value in 'indices'" with pytest.raises(IndexError, match=msg): sparse.take(np.array([1, -6])) with pytest.raises(IndexError, match=msg): sparse.take(np.array([1, 5])) with pytest.raises(IndexError, match=msg): sparse.take(np.array([1, 5]), fill_value=True) @pytest.mark.parametrize("kind", ["block", "integer"]) def test_take_filling_all_nan(self, kind): sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan], kind=kind) result = sparse.take(np.array([1, 0, -1])) expected = SparseArray([np.nan, np.nan, np.nan], kind=kind) tm.assert_sp_array_equal(result, expected) result = sparse.take(np.array([1, 0, -1]), fill_value=True) expected = SparseArray([np.nan, np.nan, np.nan], kind=kind) tm.assert_sp_array_equal(result, expected) msg = "out of bounds value in 'indices'" with pytest.raises(IndexError, match=msg): sparse.take(np.array([1, -6])) with pytest.raises(IndexError, match=msg): sparse.take(np.array([1, 5])) with pytest.raises(IndexError, match=msg): sparse.take(np.array([1, 5]), fill_value=True)
TestTake
python
walkccc__LeetCode
solutions/1938. Maximum Genetic Difference Query/1938.py
{ "start": 0, "end": 113 }
class ____: def __init__(self): self.children: list[TrieNode | None] = [None] * 2 self.count = 0
TrieNode
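A node like this is the building block of a binary (bit-per-level) trie used for max-XOR queries; `count` makes lazy removal possible. A minimal sketch of how values are typically inserted, assuming 17-bit keys as in LeetCode 1938 (the `insert` helper is hypothetical, not part of the solution file):

```python
HEIGHT = 17  # assumption: gene values in the problem fit in 17 bits

def insert(root: "TrieNode", num: int) -> None:
    node = root
    for i in range(HEIGHT, -1, -1):
        bit = (num >> i) & 1
        if node.children[bit] is None:
            node.children[bit] = TrieNode()
        node = node.children[bit]
        node.count += 1  # decrementing count later "removes" num without rebuilding
```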
python
django-mptt__django-mptt
tests/myapp/tests.py
{ "start": 25535, "end": 33263 }
class ____(TreeTestCase): def setUp(self): self.a = ConcreteModel.objects.create(name="a") self.b = ConcreteModel.objects.create(name="b", parent=self.a) self.c = ConcreteModel.objects.create(name="c", parent=self.a) self.d = ConcreteModel.objects.create(name="d") self.z = ConcreteModel.objects.create(name="z") # state is now: self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 6 2 1 1 1 2 3 3 1 1 1 4 5 4 - 2 0 1 2 5 - 3 0 1 2 """, ) def test_proxy(self): self.assertFalse(ConcreteModel._mptt_is_tracking) self.assertFalse(SingleProxyModel._mptt_is_tracking) self.assertRaises( CantDisableUpdates, SingleProxyModel.objects.delay_mptt_updates().__enter__ ) self.assertFalse(ConcreteModel._mptt_is_tracking) self.assertFalse(SingleProxyModel._mptt_is_tracking) with ConcreteModel.objects.delay_mptt_updates(): self.assertTrue(ConcreteModel._mptt_is_tracking) self.assertTrue(SingleProxyModel._mptt_is_tracking) self.assertFalse(ConcreteModel._mptt_is_tracking) self.assertFalse(SingleProxyModel._mptt_is_tracking) def test_double_context_manager(self): with ConcreteModel.objects.delay_mptt_updates(): self.assertTrue(ConcreteModel._mptt_is_tracking) with ConcreteModel.objects.delay_mptt_updates(): self.assertTrue(ConcreteModel._mptt_is_tracking) self.assertTrue(ConcreteModel._mptt_is_tracking) self.assertFalse(ConcreteModel._mptt_is_tracking) def test_insert_child(self): with self.assertNumQueries(7), ConcreteModel.objects.delay_mptt_updates(): with self.assertNumQueries(2): # 1 query for target stale check, # 1 query to save node. ConcreteModel.objects.create(name="e", parent=self.d) # 3rd query here: self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 6 2 1 1 1 2 3 3 1 1 1 4 5 4 - 2 0 1 2 6 4 2 1 2 3 5 - 3 0 1 2 """, ) # remaining queries (4 through 7) are the partial rebuild process. self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 6 2 1 1 1 2 3 3 1 1 1 4 5 4 - 2 0 1 4 6 4 2 1 2 3 5 - 3 0 1 2 """, ) def test_insert_root(self): with self.assertNumQueries(3), ConcreteModel.objects.delay_mptt_updates(): with self.assertNumQueries(2): # 2 queries required here: # (one to get the correct tree_id, then one to insert) ConcreteModel.objects.create(name="e") # 3rd query here: self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 6 2 1 1 1 2 3 3 1 1 1 4 5 4 - 2 0 1 2 5 - 3 0 1 2 6 - 4 0 1 2 """, ) # no partial rebuild necessary, as no trees were modified # (newly created tree is already okay) self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 6 2 1 1 1 2 3 3 1 1 1 4 5 4 - 2 0 1 2 5 - 3 0 1 2 6 - 4 0 1 2 """, ) def test_move_node_same_tree(self): with self.assertNumQueries(7), ConcreteModel.objects.delay_mptt_updates(): with self.assertNumQueries(2): # 1 query to ensure target fields aren't stale # 1 update query self.c.parent = self.b self.c.save() # query 3 here: self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 6 2 1 1 1 2 3 3 2 1 2 3 4 4 - 2 0 1 2 5 - 3 0 1 2 """, ) # the remaining 4 queries are the partial rebuild. self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 6 2 1 1 1 2 5 3 2 1 2 3 4 4 - 2 0 1 2 5 - 3 0 1 2 """, ) def test_move_node_different_tree(self): with self.assertNumQueries(7), ConcreteModel.objects.delay_mptt_updates(): with self.assertNumQueries(2): # 2 queries here: # 1. update the node # 2. collapse old tree since it is now empty. 
self.d.parent = self.c self.d.save() # query 3 here: self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 6 2 1 1 1 2 3 3 1 1 1 4 5 4 3 1 2 5 6 5 - 2 0 1 2 """, ) # the other 4 queries are the partial rebuild self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 8 2 1 1 1 2 3 3 1 1 1 4 7 4 3 1 2 5 6 5 - 2 0 1 2 """, ) def test_move_node_to_root(self): with self.assertNumQueries(4), ConcreteModel.objects.delay_mptt_updates(): with self.assertNumQueries(3): # 3 queries here! # 1. find the next tree_id to move to # 2. update the tree_id on all nodes to the right of that # 3. update tree fields on self.c self.c.parent = None self.c.save() # 4th query here: self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 6 2 1 1 1 2 3 4 - 2 0 1 2 5 - 3 0 1 2 3 - 4 0 1 2 """, ) self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 6 2 1 1 1 2 3 4 - 2 0 1 2 5 - 3 0 1 2 3 - 4 0 1 2 """, ) def test_move_root_to_child(self): with self.assertNumQueries(7), ConcreteModel.objects.delay_mptt_updates(): with self.assertNumQueries(2): # 2 queries here: # 1. update the node # 2. collapse old tree since it is now empty. self.d.parent = self.c self.d.save() # query 3 here: self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 6 2 1 1 1 2 3 3 1 1 1 4 5 4 3 1 2 5 6 5 - 2 0 1 2 """, ) # the remaining 4 queries are the partial rebuild. self.assertTreeEqual( ConcreteModel.objects.all(), """ 1 - 1 0 1 8 2 1 1 1 2 3 3 1 1 1 4 7 4 3 1 2 5 6 5 - 2 0 1 2 """, )
DelayedUpdatesTestCase
python
sqlalchemy__sqlalchemy
test/engine/test_reflection.py
{ "start": 57978, "end": 66027 }
class ____(fixtures.TestBase): __backend__ = True @testing.requires.schemas def test_has_schema(self): with testing.db.connect() as conn: eq_( testing.db.dialect.has_schema( conn, testing.config.test_schema ), True, ) eq_( testing.db.dialect.has_schema(conn, "sa_fake_schema_123"), False, ) @testing.requires.schemas @testing.requires.cross_schema_fk_reflection @testing.requires.implicit_default_schema def test_blank_schema_arg(self, connection, metadata): Table( "some_table", metadata, Column("id", Integer, primary_key=True), Column("sid", Integer, sa.ForeignKey("some_other_table.id")), schema=testing.config.test_schema, test_needs_fk=True, ) Table( "some_other_table", metadata, Column("id", Integer, primary_key=True), schema=None, test_needs_fk=True, ) metadata.create_all(connection) meta2 = MetaData(schema=testing.config.test_schema) meta2.reflect(connection) eq_( set(meta2.tables), { "some_other_table", "%s.some_table" % testing.config.test_schema, }, ) @testing.requires.schemas def test_explicit_default_schema(self, connection, metadata): schema = connection.dialect.default_schema_name assert bool(schema) Table( "table1", metadata, Column("col1", sa.Integer, primary_key=True), test_needs_fk=True, schema=schema, ) Table( "table2", metadata, Column("col1", sa.Integer, primary_key=True), Column( "col2", sa.Integer, sa.ForeignKey("%s.table1.col1" % schema) ), test_needs_fk=True, schema=schema, ) metadata.create_all(connection) metadata.create_all(connection, checkfirst=True) eq_(len(metadata.tables), 2) m1 = MetaData() Table("table1", m1, autoload_with=connection, schema=schema) Table("table2", m1, autoload_with=connection, schema=schema) eq_(len(m1.tables), 2) @testing.requires.schemas def test_schema_translation(self, connection, metadata): Table( "foob", metadata, Column("q", Integer), schema=config.test_schema, ) metadata.create_all(connection) m = MetaData() map_ = {"foob": config.test_schema} c2 = connection.execution_options(schema_translate_map=map_) t = Table("foob", m, schema="foob", autoload_with=c2) eq_(t.schema, "foob") eq_(t.c.keys(), ["q"]) @testing.requires.schemas def test_explicit_default_schema_metadata(self, connection, metadata): schema = connection.dialect.default_schema_name is_true(schema) metadata.schema = schema Table( "table1", metadata, Column("col1", sa.Integer, primary_key=True), test_needs_fk=True, ) Table( "table2", metadata, Column("col1", sa.Integer, primary_key=True), Column("col2", sa.Integer, sa.ForeignKey("table1.col1")), test_needs_fk=True, ) metadata.create_all(connection) metadata.create_all(connection, checkfirst=True) m1 = MetaData(schema=schema) Table("table1", m1, autoload_with=connection) Table("table2", m1, autoload_with=connection) eq_(len(m1.tables), 2) @testing.requires.schemas def test_metadata_reflect_schema(self, connection, metadata): createTables(metadata, testing.config.test_schema) metadata.create_all(connection) m2 = MetaData(schema=testing.config.test_schema) m2.reflect(connection) eq_( set(m2.tables), { "%s.dingalings" % testing.config.test_schema, "%s.users" % testing.config.test_schema, "%s.email_addresses" % testing.config.test_schema, }, ) @testing.requires.schemas @testing.requires.cross_schema_fk_reflection @testing.requires.implicit_default_schema def test_reflect_all_schemas_default_overlap(self, connection, metadata): Table("t", metadata, Column("id", Integer, primary_key=True)) Table( "t", metadata, Column("id1", sa.ForeignKey("t.id")), schema=testing.config.test_schema, ) metadata.create_all(connection) m2
= MetaData() m2.reflect(connection, schema=testing.config.test_schema) m3 = MetaData() m3.reflect(connection) m3.reflect(connection, schema=testing.config.test_schema) eq_( {(t.name, t.schema) for t in m2.tables.values()}, {(t.name, t.schema) for t in m3.tables.values()}, ) # Tests related to engine.reflection def createTables(meta, schema=None): if schema: schema_prefix = schema + "." else: schema_prefix = "" users = Table( "users", meta, Column("user_id", sa.INT, primary_key=True), Column("user_name", sa.VARCHAR(20), nullable=False), Column("test1", sa.CHAR(5), nullable=False), Column("test2", sa.Float(), nullable=False), Column("test3", sa.Text), Column("test4", sa.Numeric(10, 2), nullable=False), Column("test5", sa.Date), Column( "parent_user_id", sa.Integer, sa.ForeignKey("%susers.user_id" % schema_prefix), ), Column("test6", sa.Date, nullable=False), Column("test7", sa.Text), Column("test8", sa.LargeBinary), Column("test_passivedefault2", sa.Integer, server_default="5"), Column("test9", sa.LargeBinary(100)), Column("test10", sa.Numeric(10, 2)), schema=schema, test_needs_fk=True, ) dingalings = Table( "dingalings", meta, Column("dingaling_id", sa.Integer, primary_key=True), Column( "address_id", sa.Integer, sa.ForeignKey("%semail_addresses.address_id" % schema_prefix), ), Column("data", sa.String(30)), schema=schema, test_needs_fk=True, ) addresses = Table( "email_addresses", meta, Column("address_id", sa.Integer), Column("remote_user_id", sa.Integer, sa.ForeignKey(users.c.user_id)), Column("email_address", sa.String(20)), sa.PrimaryKeyConstraint("address_id", name="email_ad_pk"), schema=schema, test_needs_fk=True, ) return (users, addresses, dingalings) def createIndexes(con, schema=None): fullname = "users" if schema: fullname = "%s.%s" % (schema, "users") query = "CREATE INDEX users_t_idx ON %s (test1, test2)" % fullname con.execute(sa.sql.text(query)) @testing.requires.views def _create_views(conn, schema=None): for table_name in ("users", "email_addresses"): fullname = table_name if schema: fullname = "%s.%s" % (schema, table_name) view_name = fullname + "_v" query = "CREATE VIEW %s AS SELECT * FROM %s" % ( view_name, fullname, ) conn.execute(sa.sql.text(query)) @testing.requires.views def _drop_views(conn, schema=None): for table_name in ("email_addresses", "users"): fullname = table_name if schema: fullname = "%s.%s" % (schema, table_name) view_name = fullname + "_v" query = "DROP VIEW %s" % view_name conn.execute(sa.sql.text(query))
SchemaTest
python
fastapi__sqlmodel
docs_src/tutorial/update/tutorial004_py310.py
{ "start": 71, "end": 2357 }
class ____(SQLModel, table=True): id: int | None = Field(default=None, primary_key=True) name: str = Field(index=True) secret_name: str age: int | None = Field(default=None, index=True) sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" engine = create_engine(sqlite_url, echo=True) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def create_heroes(): hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson") hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador") hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48) hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32) hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35) hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36) hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93) with Session(engine) as session: session.add(hero_1) session.add(hero_2) session.add(hero_3) session.add(hero_4) session.add(hero_5) session.add(hero_6) session.add(hero_7) session.commit() def update_heroes(): with Session(engine) as session: statement = select(Hero).where(Hero.name == "Spider-Boy") # (1)! results = session.exec(statement) # (2)! hero_1 = results.one() # (3)! print("Hero 1:", hero_1) # (4)! statement = select(Hero).where(Hero.name == "Captain North America") # (5)! results = session.exec(statement) # (6)! hero_2 = results.one() # (7)! print("Hero 2:", hero_2) # (8)! hero_1.age = 16 # (9)! hero_1.name = "Spider-Youngster" # (10)! session.add(hero_1) # (11)! hero_2.name = "Captain North America Except Canada" # (12)! hero_2.age = 110 # (13)! session.add(hero_2) # (14)! session.commit() # (15)! session.refresh(hero_1) # (16)! session.refresh(hero_2) # (17)! print("Updated hero 1:", hero_1) # (18)! print("Updated hero 2:", hero_2) # (19)! # (20)! def main(): create_db_and_tables() create_heroes() update_heroes() if __name__ == "__main__": main()
Hero
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/hooks/test_cloud_formation.py
{ "start": 957, "end": 3694 }
class ____: def setup_method(self, _): self.hook = CloudFormationHook(aws_conn_id="aws_default") def create_stack(self, stack_name): timeout = 15 template_body = json.dumps( { "Resources": { "myResource": { "Type": "AWS::EC2::VPC", "Properties": { "CidrBlock": {"Ref": "VPCCidr"}, "Tags": [{"Key": "Name", "Value": "Primary_CF_VPC"}], }, } }, "Parameters": { "VPCCidr": { "Type": "String", "Default": "10.0.0.0/16", "Description": "Enter the CIDR block for the VPC. Default is 10.0.0.0/16.", } }, } ) self.hook.create_stack( stack_name=stack_name, cloudformation_parameters={ "TimeoutInMinutes": timeout, "TemplateBody": template_body, "Parameters": [{"ParameterKey": "VPCCidr", "ParameterValue": "10.0.0.0/16"}], }, ) def test_get_conn_returns_a_boto3_connection(self): assert self.hook.get_conn().describe_stacks() is not None def test_get_stack_status(self): stack_name = "my_test_get_stack_status_stack" stack_status = self.hook.get_stack_status(stack_name=stack_name) assert stack_status is None self.create_stack(stack_name) stack_status = self.hook.get_stack_status(stack_name=stack_name) assert stack_status == "CREATE_COMPLETE", "Incorrect stack status returned." def test_create_stack(self): stack_name = "my_test_create_stack_stack" self.create_stack(stack_name) stacks = self.hook.get_conn().describe_stacks()["Stacks"] assert len(stacks) > 0, "CloudFormation should have stacks" matching_stacks = [x for x in stacks if x["StackName"] == stack_name] assert len(matching_stacks) == 1, f"stack with name {stack_name} should exist" stack = matching_stacks[0] assert stack["StackStatus"] == "CREATE_COMPLETE", "Stack should be in status CREATE_COMPLETE" def test_delete_stack(self): stack_name = "my_test_delete_stack_stack" self.create_stack(stack_name) self.hook.delete_stack(stack_name=stack_name) stacks = self.hook.get_conn().describe_stacks()["Stacks"] matching_stacks = [x for x in stacks if x["StackName"] == stack_name] assert not matching_stacks, f"stack with name {stack_name} should not exist"
TestCloudFormationHook
python
pytorch__pytorch
.github/scripts/generate_ci_workflows.py
{ "start": 1323, "end": 1397 }
class ____(TypedDict): num_shards: int runner: str @dataclass
Config
python
doocs__leetcode
solution/1000-1099/1034.Coloring A Border/Solution.py
{ "start": 0, "end": 749 }
class ____: def colorBorder( self, grid: List[List[int]], row: int, col: int, color: int ) -> List[List[int]]: def dfs(i: int, j: int, c: int) -> None: vis[i][j] = True for a, b in pairwise((-1, 0, 1, 0, -1)): x, y = i + a, j + b if 0 <= x < m and 0 <= y < n: if not vis[x][y]: if grid[x][y] == c: dfs(x, y, c) else: grid[i][j] = color else: grid[i][j] = color m, n = len(grid), len(grid[0]) vis = [[False] * n for _ in range(m)] dfs(row, col, grid[row][col]) return grid
Solution
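A quick check of the solution against the problem's first example; the imports are assumptions since the snippet relies on them but does not show them:

```python
from itertools import pairwise  # assumed import (Python 3.10+)
from typing import List         # assumed import

grid = [[1, 1], [1, 2]]
# Color the border of the connected component containing (0, 0) with 3.
print(Solution().colorBorder(grid, 0, 0, 3))  # expected: [[3, 3], [3, 2]]
```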
python
dask__dask
dask/delayed.py
{ "start": 28609, "end": 30465 }
class ____(Delayed): __slots__ = ("_obj", "_attr") def __init__(self, obj, attr): key = f"getattr-{tokenize(obj, attr, pure=True)}" super().__init__(key, None) self._obj = obj self._attr = attr def __getattr__(self, attr): # Calling np.dtype(dask.delayed(...)) used to result in a segfault, as # numpy recursively tries to get `dtype` from the object. This is # likely a bug in numpy. For now, we can do a dumb check for the case where # `x.dtype().dtype()` is called (which shouldn't ever show up in real # code). See https://github.com/dask/dask/pull/4374#issuecomment-454381465 if attr == "dtype" and self._attr == "dtype": raise AttributeError("Attribute dtype not found") return super().__getattr__(attr) @property def dask(self): layer = {self._key: (getattr, self._obj._key, self._attr)} return HighLevelGraph.from_collections( self._key, layer, dependencies=[self._obj] ) def __call__(self, *args, **kwargs): return call_function( methodcaller(self._attr), self._attr, (self._obj,) + args, kwargs ) for op in [ operator.abs, operator.neg, operator.pos, operator.invert, operator.add, operator.sub, operator.mul, operator.floordiv, operator.truediv, operator.mod, operator.pow, operator.and_, operator.or_, operator.xor, operator.lshift, operator.rshift, operator.eq, operator.ge, operator.gt, operator.ne, operator.le, operator.lt, operator.getitem, ]: Delayed._bind_operator(op) try: Delayed._bind_operator(operator.matmul) except AttributeError: pass def single_key(seq): """Pick out the only element of this list, a list of keys""" return seq[0]
DelayedAttr
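A small illustration (not from the source file) of what `DelayedAttr` enables: attribute access on a `Delayed` object stays lazy and only resolves on `compute()`, while calling the attribute routes through `methodcaller`:

```python
import dask
import numpy as np

arr = dask.delayed(np.arange(4))
shape = arr.shape            # lazy: backed by a (getattr, key, "shape") task
print(shape.compute())       # (4,)
print(arr.sum().compute())   # 6 -- __call__ turns the attribute into a method call
```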
python
getsentry__sentry
src/sentry/api/endpoints/organization_releases.py
{ "start": 10777, "end": 37459 }
class ____(OrganizationReleasesBaseEndpoint, ReleaseAnalyticsMixin): publish_status = { "GET": ApiPublishStatus.UNKNOWN, "POST": ApiPublishStatus.UNKNOWN, } rate_limits = RateLimitConfig( limit_overrides={ "GET": { RateLimitCategory.IP: RateLimit(limit=40, window=1), RateLimitCategory.USER: RateLimit(limit=40, window=1), RateLimitCategory.ORGANIZATION: RateLimit(limit=40, window=1), }, "POST": { RateLimitCategory.IP: RateLimit(limit=40, window=1), RateLimitCategory.USER: RateLimit(limit=40, window=1), RateLimitCategory.ORGANIZATION: RateLimit(limit=40, window=1), }, } ) SESSION_SORTS = frozenset( [ "crash_free_sessions", "crash_free_users", "sessions", "users", "sessions_24h", "users_24h", ] ) def get_projects(self, request: Request, organization, project_ids=None, project_slugs=None): return super().get_projects( request, organization, project_ids=project_ids, project_slugs=project_slugs, include_all_accessible=False, ) def get(self, request: Request, organization: Organization) -> Response: if ( features.has("organizations:releases-serializer-v2", organization, actor=request.user) or request.headers.get("X-Performance-Optimizations") == "enabled" ): return self.__get_new(request, organization) else: return self.__get_old(request, organization) def __get_new(self, request: Request, organization: Organization) -> Response: """ List an Organization's Releases ``````````````````````````````` Return a list of releases for a given organization. :pparam string organization_id_or_slug: the id or slug of the organization :qparam string query: this parameter can be used to create a "starts with" filter for the version. """ query = request.GET.get("query") status_filter = request.GET.get("status", "open") flatten = request.GET.get("flatten") == "1" sort = request.GET.get("sort") or "date" summary_stats_period = request.GET.get("summaryStatsPeriod") or "14d" if summary_stats_period not in STATS_PERIODS: raise ParseError(detail=get_stats_period_detail("summaryStatsPeriod", STATS_PERIODS)) paginator_cls = OffsetPaginator paginator_kwargs = {} try: filter_params = self.get_filter_params(request, organization, date_filter_optional=True) except NoProjects: return Response([]) # This should get us all the projects into postgres that have received # health data in the last 24 hours. 
debounce_update_release_health_data(organization, filter_params["project_id"]) queryset = Release.objects.filter(organization_id=organization.id) queryset = filter_releases_by_environments( queryset, filter_params["project_id"], [e.id for e in filter_params.get("environment_objects", [])], ) queryset = queryset.annotate(date=F("date_added")) if status_filter: try: status_int = ReleaseStatus.from_string(status_filter) except ValueError: raise ParseError(detail="invalid value for status") if status_int == ReleaseStatus.OPEN: queryset = queryset.filter(Q(status=status_int) | Q(status=None)) else: queryset = queryset.filter(status=status_int) if query: try: queryset = _filter_releases_by_query(queryset, organization, query, filter_params) except InvalidSearchQuery as e: return Response( {"detail": str(e)}, status=400, ) queryset = filter_releases_by_projects(queryset, filter_params["project_id"]) if sort == "date": queryset = queryset.order_by("-date") paginator_kwargs["order_by"] = "-date" elif sort == "build": queryset = queryset.filter(build_number__isnull=False).order_by("-build_number") paginator_kwargs["order_by"] = "-build_number" elif sort == "semver": queryset = queryset.annotate_prerelease_column() order_by = [F(col).desc(nulls_last=True) for col in Release.SEMVER_COLS] # TODO: Adding this extra sort order breaks index usage. Index usage is already broken # when we filter by status, so when we fix that we should also consider the best way to # make this work as expected. order_by.append(F("date_added").desc()) paginator_kwargs["order_by"] = order_by elif sort == "adoption": # sort by adoption date (most recently adopted first) order_by = F("releaseprojectenvironment__adopted").desc(nulls_last=True) queryset = queryset.order_by(order_by) paginator_kwargs["order_by"] = order_by elif sort in self.SESSION_SORTS: if not flatten: return Response( {"detail": "sorting by crash statistics requires flattening (flatten=1)"}, status=400, ) def qs_load_func(queryset, total_offset, qs_offset, limit): # We want to fetch at least total_offset + limit releases to check, to make sure # we're not fetching only releases that were on previous pages. 
release_versions = list( queryset.order_by_recent().values_list("version", flat=True)[ : total_offset + limit ] ) releases_with_session_data = release_health.backend.check_releases_have_health_data( organization.id, filter_params["project_id"], release_versions, ( filter_params["start"] if filter_params["start"] else datetime.utcnow() - timedelta(days=90) ), filter_params["end"] if filter_params["end"] else datetime.utcnow(), ) valid_versions = [ rv for rv in release_versions if rv not in releases_with_session_data ] results = list( Release.objects.filter( organization_id=organization.id, version__in=valid_versions, ).order_by_recent()[qs_offset : qs_offset + limit] ) return results paginator_cls = ReleasesMergingOffsetPaginator paginator_kwargs.update( data_load_func=lambda offset, limit: release_health.backend.get_project_releases_by_stability( project_ids=filter_params["project_id"], environments=filter_params.get("environment"), scope=sort, offset=offset, stats_period=summary_stats_period, limit=limit, ), data_count_func=lambda: release_health.backend.get_project_releases_count( organization_id=organization.id, project_ids=filter_params["project_id"], environments=filter_params.get("environment"), scope=sort, stats_period=summary_stats_period, ), apply_to_queryset=lambda queryset, rows: queryset.filter( version__in=list(x[1] for x in rows) ), queryset_load_func=qs_load_func, project_ids=filter_params["project_id"], ) else: return Response({"detail": "invalid sort"}, status=400) queryset = add_date_filter_to_queryset(queryset, filter_params) return self.paginate( request=request, queryset=queryset, paginator_cls=paginator_cls, on_results=lambda releases: release_serializer( releases, request.user, organization_id=organization.id, environment_ids=[e.id for e in filter_params.get("environment_objects", [])], projects=filter_params["project_objects"], ), **paginator_kwargs, ) def __get_old(self, request: Request, organization: Organization) -> Response: """ List an Organization's Releases ``````````````````````````````` Return a list of releases for a given organization. :pparam string organization_id_or_slug: the id or slug of the organization :qparam string query: this parameter can be used to create a "starts with" filter for the version. """ query = request.GET.get("query") with_health = request.GET.get("health") == "1" with_adoption_stages = request.GET.get("adoptionStages") == "1" status_filter = request.GET.get("status", "open") flatten = request.GET.get("flatten") == "1" sort = request.GET.get("sort") or "date" health_stat = request.GET.get("healthStat") or "sessions" summary_stats_period = request.GET.get("summaryStatsPeriod") or "14d" health_stats_period = request.GET.get("healthStatsPeriod") or ("24h" if with_health else "") if summary_stats_period not in STATS_PERIODS: raise ParseError(detail=get_stats_period_detail("summaryStatsPeriod", STATS_PERIODS)) if health_stats_period and health_stats_period not in STATS_PERIODS: raise ParseError(detail=get_stats_period_detail("healthStatsPeriod", STATS_PERIODS)) if health_stat not in ("sessions", "users"): raise ParseError(detail="invalid healthStat") paginator_cls = OffsetPaginator paginator_kwargs = {} try: filter_params = self.get_filter_params(request, organization, date_filter_optional=True) except NoProjects: return Response([]) # This should get us all the projects into postgres that have received # health data in the last 24 hours. 
debounce_update_release_health_data(organization, filter_params["project_id"]) queryset = Release.objects.filter(organization=organization) if status_filter: try: status_int = ReleaseStatus.from_string(status_filter) except ValueError: raise ParseError(detail="invalid value for status") if status_int == ReleaseStatus.OPEN: queryset = queryset.filter(Q(status=status_int) | Q(status=None)) else: queryset = queryset.filter(status=status_int) queryset = queryset.annotate(date=F("date_added")) queryset = add_environment_to_queryset(queryset, filter_params) if query: try: queryset = _filter_releases_by_query(queryset, organization, query, filter_params) except InvalidSearchQuery as e: return Response( {"detail": str(e)}, status=400, ) select_extra = {} queryset = queryset.distinct() if flatten: select_extra["_for_project_id"] = "sentry_release_project.project_id" queryset = queryset.filter(projects__id__in=filter_params["project_id"]) if sort == "date": queryset = queryset.order_by("-date") paginator_kwargs["order_by"] = "-date" elif sort == "build": queryset = queryset.filter(build_number__isnull=False).order_by("-build_number") paginator_kwargs["order_by"] = "-build_number" elif sort == "semver": queryset = queryset.annotate_prerelease_column() order_by = [F(col).desc(nulls_last=True) for col in Release.SEMVER_COLS] # TODO: Adding this extra sort order breaks index usage. Index usage is already broken # when we filter by status, so when we fix that we should also consider the best way to # make this work as expected. order_by.append(F("date_added").desc()) paginator_kwargs["order_by"] = order_by elif sort == "adoption": # sort by adoption date (most recently adopted first) order_by = F("releaseprojectenvironment__adopted").desc(nulls_last=True) queryset = queryset.order_by(order_by) paginator_kwargs["order_by"] = order_by elif sort in self.SESSION_SORTS: if not flatten: return Response( {"detail": "sorting by crash statistics requires flattening (flatten=1)"}, status=400, ) def qs_load_func(queryset, total_offset, qs_offset, limit): # We want to fetch at least total_offset + limit releases to check, to make sure # we're not fetching only releases that were on previous pages. 
release_versions = list( queryset.order_by_recent().values_list("version", flat=True)[ : total_offset + limit ] ) releases_with_session_data = release_health.backend.check_releases_have_health_data( organization.id, filter_params["project_id"], release_versions, ( filter_params["start"] if filter_params["start"] else datetime.utcnow() - timedelta(days=90) ), filter_params["end"] if filter_params["end"] else datetime.utcnow(), ) valid_versions = [ rv for rv in release_versions if rv not in releases_with_session_data ] results = list( Release.objects.filter( organization_id=organization.id, version__in=valid_versions, ).order_by_recent()[qs_offset : qs_offset + limit] ) return results paginator_cls = MergingOffsetPaginator paginator_kwargs.update( data_load_func=lambda offset, limit: release_health.backend.get_project_releases_by_stability( project_ids=filter_params["project_id"], environments=filter_params.get("environment"), scope=sort, offset=offset, stats_period=summary_stats_period, limit=limit, ), data_count_func=lambda: release_health.backend.get_project_releases_count( organization_id=organization.id, project_ids=filter_params["project_id"], environments=filter_params.get("environment"), scope=sort, stats_period=summary_stats_period, ), apply_to_queryset=lambda queryset, rows: queryset.filter( version__in=list(x[1] for x in rows) ), queryset_load_func=qs_load_func, key_from_model=lambda x: (x._for_project_id, x.version), ) else: return Response({"detail": "invalid sort"}, status=400) queryset = queryset.extra(select=select_extra) queryset = add_date_filter_to_queryset(queryset, filter_params) return self.paginate( request=request, queryset=queryset, paginator_cls=paginator_cls, on_results=lambda x: serialize( x, request.user, with_health_data=with_health, with_adoption_stages=with_adoption_stages, health_stat=health_stat, health_stats_period=health_stats_period, summary_stats_period=summary_stats_period, environments=filter_params.get("environment") or None, ), **paginator_kwargs, ) def post(self, request: Request, organization: Organization) -> Response: """ Create a New Release for an Organization ```````````````````````````````````````` Create a new release for the given Organization. Releases are used by Sentry to improve its error reporting abilities by correlating first seen events with the release that might have introduced the problem. Releases are also necessary for sourcemaps and other debug features that require manual upload for functioning well. :pparam string organization_id_or_slug: the id or slug of the organization the release belongs to. :param string version: a version identifier for this release. Can be a version number, a commit hash etc. It cannot contain certain whitespace characters (`\\r`, `\\n`, `\\f`, `\\x0c`, `\\t`) or any slashes (`\\`, `/`). The version names `.`, `..` and `latest` are also reserved, and cannot be used. :param string ref: an optional commit reference. This is useful if a tagged version has been provided. :param url url: a URL that points to the release. This can be the path to an online interface to the sourcecode for instance. :param array projects: a list of project ids or slugs that are involved in this release :param datetime dateReleased: an optional date that indicates when the release went live. If not provided the current time is assumed. :param array commits: an optional list of commit data to be associated with the release. 
Commits must include parameters ``id`` (the sha of the commit), and can optionally include ``repository``, ``message``, ``patch_set``, ``author_name``, ``author_email``, and ``timestamp``. See [release without integration example](/workflow/releases/). :param array refs: an optional way to indicate the start and end commits for each repository included in a release. Head commits must include parameters ``repository`` and ``commit`` (the HEAD sha). They can optionally include ``previousCommit`` (the sha of the HEAD of the previous release), which should be specified if this is the first time you've sent commit data. ``commit`` may contain a range in the form of ``previousCommit..commit`` :auth: required """ bind_organization_context(organization) serializer = ReleaseSerializerWithProjects( data=request.data, context={"organization": organization} ) scope = sentry_sdk.get_isolation_scope() if serializer.is_valid(): result = serializer.validated_data scope.set_tag("version", result["version"]) # Get all projects that are available to the user/token # Note: Does not use the "projects" data param from the request projects_from_request = self.get_projects(request, organization) allowed_projects: dict[object, Project] = {} for project in projects_from_request: allowed_projects[project.slug] = project allowed_projects[project.id] = project projects: list[Project] = [] for id_or_slug in result["projects"]: if id_or_slug not in allowed_projects: return Response({"projects": ["Invalid project ids or slugs"]}, status=400) projects.append(allowed_projects[id_or_slug]) new_status = result.get("status") owner_id: int | None = None if owner := result.get("owner"): owner_id = owner.id # release creation is idempotent to simplify user # experiences created = False try: release, created = Release.objects.get_or_create( organization_id=organization.id, version=result["version"], defaults={ "ref": result.get("ref"), "url": result.get("url"), "owner_id": owner_id, "date_released": result.get("dateReleased"), "status": new_status or ReleaseStatus.OPEN, "user_agent": request.META.get("HTTP_USER_AGENT", "")[:256], }, ) except IntegrityError: raise ConflictError( "Could not create the release it conflicts with existing data", ) # In case of disabled Open Membership, we have to check for project-level # permissions on the existing release. 
release_projects = ReleaseProject.objects.filter(release=release) existing_projects = [rp.project for rp in release_projects] if not request.access.has_projects_access(existing_projects): projects_str = ", ".join([p.slug for p in existing_projects]) return Response( { "projects": [ f"You do not have permission to one of the projects: {projects_str}" ] }, status=400, ) if created: release_created.send_robust(release=release, sender=self.__class__) if not created and new_status is not None and new_status != release.status: release.status = new_status release.save() new_releaseprojects = [] for project in projects: _, releaseproject_created = release.add_project(project) if releaseproject_created: new_releaseprojects.append(project) if release.date_released: for project in new_releaseprojects: Activity.objects.create( type=ActivityType.RELEASE.value, project=project, ident=Activity.get_version_ident(result["version"]), data={"version": result["version"]}, datetime=release.date_released, ) commit_list = result.get("commits") if commit_list: try: release.set_commits(commit_list) self.track_set_commits_local( request, organization_id=organization.id, project_ids=[project.id for project in projects], ) except ReleaseCommitError: raise ConflictError("Release commits are currently being processed") refs = result.get("refs") if not refs: refs = [ { "repository": r["repository"], "previousCommit": r.get("previousId"), "commit": r["currentId"], } for r in result.get("headCommits", []) ] scope.set_tag("has_refs", bool(refs)) if refs: if not request.user.is_authenticated and not request.auth: scope.set_tag("failure_reason", "user_not_authenticated") return Response( {"refs": ["You must use an authenticated API token to fetch refs"]}, status=400, ) fetch_commits = not commit_list try: release.set_refs(refs, request.user.id, fetch=fetch_commits) except InvalidRepository as e: scope.set_tag("failure_reason", "InvalidRepository") return Response({"refs": [str(e)]}, status=400) if not created and not new_releaseprojects: # This is the closest status code that makes sense, and we want # a unique 2xx response code so people can understand when # behavior differs. # 208 Already Reported (WebDAV; RFC 5842) status = 208 else: status = 201 analytics.record( ReleaseCreatedEvent( user_id=request.user.id if request.user and request.user.id else None, organization_id=organization.id, project_ids=[project.id for project in projects], user_agent=request.META.get("HTTP_USER_AGENT", ""), created_status=status, auth_type=get_auth_api_token_type(request.auth), ) ) if is_org_auth_token_auth(request.auth): update_org_auth_token_last_used(request.auth, [project.id for project in projects]) scope.set_tag("success_status", status) return Response( serialize(release, request.user, no_snuba_for_release_creation=True), status=status ) scope.set_tag("failure_reason", "serializer_error") return Response(serializer.errors, status=400) @region_silo_endpoint
OrganizationReleasesEndpoint
python
apache__airflow
task-sdk/src/airflow/sdk/execution_time/secrets/execution_api.py
{ "start": 1041, "end": 6653 }
class ____(BaseSecretsBackend): """ Secrets backend for client contexts (workers, DAG processors, triggerers). Routes connection and variable requests through SUPERVISOR_COMMS to the Execution API server. This backend should only be registered in client processes, not in API server/scheduler processes. """ def get_conn_value(self, conn_id: str) -> str | None: """ Get connection URI via SUPERVISOR_COMMS. Not used since we override get_connection directly. """ raise NotImplementedError("Use get_connection instead") def get_connection(self, conn_id: str) -> Connection | None: # type: ignore[override] """ Return connection object by routing through SUPERVISOR_COMMS. :param conn_id: connection id :return: Connection object or None if not found """ from airflow.sdk.execution_time.comms import ErrorResponse, GetConnection from airflow.sdk.execution_time.context import _process_connection_result_conn from airflow.sdk.execution_time.task_runner import SUPERVISOR_COMMS try: msg = SUPERVISOR_COMMS.send(GetConnection(conn_id=conn_id)) if isinstance(msg, ErrorResponse): # Connection not found or error occurred return None # Convert ExecutionAPI response to SDK Connection return _process_connection_result_conn(msg) except RuntimeError as e: # TriggerCommsDecoder.send() uses async_to_sync internally, which raises RuntimeError # when called within an async event loop. In greenback portal contexts (triggerer), # we catch this and use greenback to call the async version instead. if str(e).startswith("You cannot use AsyncToSync in the same thread as an async event loop"): import asyncio import greenback task = asyncio.current_task() if greenback.has_portal(task): import warnings warnings.warn( "You should not use sync calls here -- use `await aget_connection` instead", stacklevel=2, ) return greenback.await_(self.aget_connection(conn_id)) # Fall through to the general exception handler for other RuntimeErrors return None except Exception: # If SUPERVISOR_COMMS fails for any reason, return None # to allow fallback to other backends return None def get_variable(self, key: str) -> str | None: """ Return variable value by routing through SUPERVISOR_COMMS. :param key: Variable key :return: Variable value or None if not found """ from airflow.sdk.execution_time.comms import ErrorResponse, GetVariable, VariableResult from airflow.sdk.execution_time.task_runner import SUPERVISOR_COMMS try: msg = SUPERVISOR_COMMS.send(GetVariable(key=key)) if isinstance(msg, ErrorResponse): # Variable not found or error occurred return None # Extract value from VariableResult if isinstance(msg, VariableResult): return msg.value # Already a string | None return None except Exception: # If SUPERVISOR_COMMS fails for any reason, return None # to allow fallback to other backends return None async def aget_connection(self, conn_id: str) -> Connection | None: # type: ignore[override] """ Return connection object asynchronously via SUPERVISOR_COMMS. 
:param conn_id: connection id :return: Connection object or None if not found """ from airflow.sdk.execution_time.comms import ErrorResponse, GetConnection from airflow.sdk.execution_time.context import _process_connection_result_conn from airflow.sdk.execution_time.task_runner import SUPERVISOR_COMMS try: msg = await SUPERVISOR_COMMS.asend(GetConnection(conn_id=conn_id)) if isinstance(msg, ErrorResponse): # Connection not found or error occurred return None # Convert ExecutionAPI response to SDK Connection return _process_connection_result_conn(msg) except Exception: # If SUPERVISOR_COMMS fails for any reason, return None # to allow fallback to other backends return None async def aget_variable(self, key: str) -> str | None: """ Return variable value asynchronously via SUPERVISOR_COMMS. :param key: Variable key :return: Variable value or None if not found """ from airflow.sdk.execution_time.comms import ErrorResponse, GetVariable, VariableResult from airflow.sdk.execution_time.task_runner import SUPERVISOR_COMMS try: msg = await SUPERVISOR_COMMS.asend(GetVariable(key=key)) if isinstance(msg, ErrorResponse): # Variable not found or error occurred return None # Extract value from VariableResult if isinstance(msg, VariableResult): return msg.value # Already a string | None return None except Exception: # If SUPERVISOR_COMMS fails for any reason, return None # to allow fallback to other backends return None
ExecutionAPISecretsBackend
python
numba__numba
numba/core/types/misc.py
{ "start": 14411, "end": 14609 }
class ____(SimpleIteratorType): def __init__(self, dtype): name = "iter_unicode" self.data = dtype super(UnicodeIteratorType, self).__init__(name, dtype)
UnicodeIteratorType
python
pdm-project__pdm
src/pdm/formats/base.py
{ "start": 895, "end": 1321 }
class ____(type): def __init__(cls, name: str, bases: tuple[type, ...], ns: dict[str, Any]) -> None: super().__init__(name, bases, ns) cls._converters = {} _default = object() for key, value in ns.items(): if getattr(value, "_convert_from", _default) is not _default: name = value._convert_to or key cls._converters[name] = value
_MetaConverterMeta
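A sketch of how such a registering metaclass is typically driven. The `convert_from` decorator below is hypothetical, written only to set the `_convert_from`/`_convert_to` markers the metaclass scans for:

```python
def convert_from(field, name=None):
    def wrapper(func):
        func._convert_from = field  # marker the metaclass looks for
        func._convert_to = name     # optional registration name
        return func
    return wrapper

class DemoConverter(metaclass=_MetaConverterMeta):
    @convert_from("deps", name="dependencies")
    def deps(self, value):
        return list(value)

print(DemoConverter._converters)  # {'dependencies': <function DemoConverter.deps ...>}
```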
python
apache__airflow
airflow-ctl/src/airflowctl/api/operations.py
{ "start": 4029, "end": 6079 }
class ____: """ Base class for operations. This class is used to decorate all callable methods with a check for ServerResponseError. Set exit_in_error false to not exit. """ __slots__ = ("client", "response", "exit_in_error") def __init__(self, client: Client, response=None, exit_in_error: bool = True): self.client = client self.response = response self.exit_in_error = exit_in_error def __init_subclass__(cls, **kwargs): """Decorate all callable methods with a check for ServerResponseError and exit if the server is not running.""" super().__init_subclass__(**kwargs) for attr, value in cls.__dict__.items(): if callable(value): setattr(cls, attr, _check_flag_and_exit_if_server_response_error(value)) def execute_list( self, *, path: str, data_model: type[T], offset: int = 0, limit: int = 50, params: dict | None = None, ) -> T | ServerResponseError: shared_params = {**(params or {})} self.response = self.client.get(path, params=shared_params) first_pass = data_model.model_validate_json(self.response.content) total_entries = first_pass.total_entries # type: ignore[attr-defined] if total_entries < limit: return first_pass for key, value in first_pass.model_dump().items(): if key != "total_entries" and isinstance(value, list): break entry_list = getattr(first_pass, key) offset = offset + limit while offset < total_entries: self.response = self.client.get(path, params={**shared_params, "offset": offset}) entry = data_model.model_validate_json(self.response.content) offset = offset + limit entry_list.extend(getattr(entry, key)) obj = data_model(**{key: entry_list, "total_entries": total_entries}) return data_model.model_validate(obj.model_dump()) # Login operations
BaseOperations
python
davidhalter__jedi
test/refactor/extract_variable.py
{ "start": 2779, "end": 2902 }
class ____(foo.Bar): pass # ++++++++++++++++++++++++++++++++++++++++++++++++++ #? 12 text {'new_name': 'x'} x = foo.Bar
Foo
python
charliermarsh__ruff
crates/ty_python_semantic/resources/corpus/88_regression_generic_method_with_nested_function.py
{ "start": 109, "end": 211 }
class ____: def method[T](self, x: T) -> T: def inner(): self.attr = 1 C().attr
C
python
scipy__scipy
scipy/signal/tests/test_dltisys.py
{ "start": 20190, "end": 21567 }
class ____: """Test private conversions between 'z' and 'z**-1' polynomials.""" def test_full(self): # Numerator and denominator same order num = np.asarray([2.0, 3, 4]) den = np.asarray([5.0, 6, 7]) num2, den2 = TransferFunction._z_to_zinv(num, den) xp_assert_equal(num, num2) xp_assert_equal(den, den2) num2, den2 = TransferFunction._zinv_to_z(num, den) xp_assert_equal(num, num2) xp_assert_equal(den, den2) def test_numerator(self): # Numerator lower order than denominator num = np.asarray([2.0, 3]) den = np.asarray([5.0, 6, 7]) num2, den2 = TransferFunction._z_to_zinv(num, den) xp_assert_equal([0.0, 2, 3], num2) xp_assert_equal(den, den2) num2, den2 = TransferFunction._zinv_to_z(num, den) xp_assert_equal([2.0, 3, 0], num2) xp_assert_equal(den, den2) def test_denominator(self): # Numerator higher order than denominator num = np.asarray([2., 3, 4]) den = np.asarray([5.0, 6]) num2, den2 = TransferFunction._z_to_zinv(num, den) xp_assert_equal(num, num2) xp_assert_equal([0.0, 5, 6], den2) num2, den2 = TransferFunction._zinv_to_z(num, den) xp_assert_equal(num, num2) xp_assert_equal([5.0, 6, 0], den2)
TestTransferFunctionZConversion
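The padding behavior these tests pin down can be restated in plain NumPy. This is an illustration of what the private helper appears to do, judging by the expected values above, not the SciPy implementation itself:

```python
import numpy as np

def z_to_zinv_sketch(num, den):
    # Zero-pad the shorter polynomial on the left so both arrays share one
    # length; equal-length coefficient arrays read the same in z and z**-1.
    diff = len(den) - len(num)
    if diff > 0:
        num = np.concatenate((np.zeros(diff), num))
    elif diff < 0:
        den = np.concatenate((np.zeros(-diff), den))
    return num, den

print(z_to_zinv_sketch(np.array([2.0, 3]), np.array([5.0, 6, 7])))
# (array([0., 2., 3.]), array([5., 6., 7.])) -- matches test_numerator
```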
python
sanic-org__sanic
sanic/worker/reloader.py
{ "start": 387, "end": 3955 }
class ____: INTERVAL = 1.0 # seconds def __init__( self, publisher: Connection, interval: float, reload_dirs: set[Path], app_loader: AppLoader, ): self._publisher = publisher self.interval = interval or self.INTERVAL self.reload_dirs = reload_dirs self.run = True self.app_loader = app_loader def __call__(self) -> None: app = self.app_loader.load() signal_func(SIGINT, self.stop) signal_func(SIGTERM, self.stop) mtimes: dict[str, float] = {} reloader_start = app.listeners.get("reload_process_start") reloader_stop = app.listeners.get("reload_process_stop") before_trigger = app.listeners.get("before_reload_trigger") after_trigger = app.listeners.get("after_reload_trigger") loop = new_event_loop() if reloader_start: trigger_events(reloader_start, loop, app) while self.run: changed = set() for filename in self.files(): try: if self.check_file(filename, mtimes): path = ( filename if isinstance(filename, str) else filename.resolve() ) changed.add(str(path)) except OSError: continue if changed: if before_trigger: trigger_events(before_trigger, loop, app) self.reload(",".join(changed) if changed else "unknown") if after_trigger: trigger_events(after_trigger, loop, app, changed=changed) sleep(self.interval) else: if reloader_stop: trigger_events(reloader_stop, loop, app) def stop(self, *_): self.run = False def reload(self, reloaded_files): message = f"__ALL_PROCESSES__:{reloaded_files}" self._publisher.send(message) def files(self): return chain( self.python_files(), *(d.glob("**/*") for d in self.reload_dirs), ) def python_files(self): # no cov """This iterates over all relevant Python files. It goes through all loaded files from modules, all files in folders of already loaded modules as well as all files reachable through a package. """ # The list call is necessary on Python 3 in case the module # dictionary modifies during iteration. for module in list(sys.modules.values()): if module is None: continue filename = getattr(module, "__file__", None) if filename: old = None while not os.path.isfile(filename): old = filename filename = os.path.dirname(filename) if filename == old: break else: if filename[-4:] in (".pyc", ".pyo"): filename = filename[:-1] yield filename @staticmethod def check_file(filename, mtimes) -> bool: need_reload = False mtime = os.stat(filename).st_mtime old_time = mtimes.get(filename) if old_time is None: mtimes[filename] = mtime elif mtime > old_time: mtimes[filename] = mtime need_reload = True return need_reload
Reloader
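`check_file` keeps its state in the caller-supplied `mtimes` dict, so a change is only reported from the second sighting onward. A standalone demonstration; the temporary file and the explicit mtime bump are ours, not from the source:

```python
import os
import tempfile

mtimes = {}
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    path = f.name

print(Reloader.check_file(path, mtimes))  # False: first sighting only records the mtime
st = os.stat(path)
os.utime(path, (st.st_atime, st.st_mtime + 1))  # simulate an edit
print(Reloader.check_file(path, mtimes))  # True: mtime advanced since the last check
os.remove(path)
```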
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/overloads.py
{ "start": 455, "end": 715 }
class ____: def call_me(self, x): _test_sink(x) @overload def g(o: A) -> None: pass @overload def g(o: int) -> None: pass def g(o): x = _test_source() if isinstance(o, A): o.call_me(x) # Requires type refinement on `o`.
A
python
weaviate__weaviate-python-client
weaviate/collections/classes/config.py
{ "start": 64345, "end": 64491 }
class ____(_ConfigBase): model: Dict[str, Any] reranker: Union[Rerankers, str] RerankerConfig = _RerankerConfig @dataclass
_RerankerConfig
python
tensorflow__tensorflow
tensorflow/python/data/kernel_tests/shard_test.py
{ "start": 8170, "end": 9368 }
class ____( test_base.DatasetTestBase, parameterized.TestCase): @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine( dataset_range=[100], num_shards=[1, 3, 5], shard_index=[0, 1, 2, 4], seed=[None, 42], reshuffle_each_iteration=[True, False]))) def testShard( self, dataset_range: int, num_shards: int, shard_index: int, seed: Optional[int], reshuffle_each_iteration: bool): if shard_index >= num_shards: return dataset = dataset_ops.Dataset.range(dataset_range) dataset = dataset.shard(num_shards, shard_index) dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE) dataset = global_shuffle_op._global_shuffle( dataset, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration) expected = list(range(shard_index, dataset_range, num_shards)) dataset_output = self.getDatasetOutput( dataset, requires_initialization=True) self.assertCountEqual(dataset_output, expected) self.assertNotEqual(dataset_output, expected)
ShardGlobalShuffleTest
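The invariant the test leans on is that `shard(num_shards, index)` keeps every `num_shards`-th element starting at `index`, and global shuffling then only permutes that subset. A quick standalone check (assumes TensorFlow is installed):

```python
import tensorflow as tf

ds = tf.data.Dataset.range(10).shard(num_shards=3, index=1)
print(list(ds.as_numpy_iterator()))  # [1, 4, 7]
```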
python
has2k1__plotnine
plotnine/mapping/aes.py
{ "start": 1504, "end": 13766 }
class ____(Dict[str, Any]): """ Create aesthetic mappings Parameters ---------- x : str | array_like | scalar x aesthetic mapping y : str | array_like | scalar y aesthetic mapping **kwargs : Any Other aesthetic mappings Notes ----- Only the **x** and **y** aesthetic mappings can be specified as positional arguments. All the rest must be keyword arguments. The value of each mapping must be one of: - **str** ```python import pandas as pd import numpy as np arr = [11, 12, 13] df = pd.DataFrame({ "alpha": [1, 2, 3], "beta": [1, 2, 3], "gam ma": [1, 2, 3] }) # Refer to a column in a dataframe ggplot(df, aes(x="alpha", y="beta")) ``` - **array_like** ```python # A variable ggplot(df, aes(x="alpha", y=arr)) # or an inplace list ggplot(df, aes(x="alpha", y=[4, 5, 6])) ``` - **scalar** ```python # A scalar value/variable ggplot(df, aes(x="alpha", y=4)) # The above statement is equivalent to ggplot(df, aes(x="alpha", y=[4, 4, 4])) ``` - **String expression** ```python ggplot(df, aes(x="alpha", y="2*beta")) ggplot(df, aes(x="alpha", y="np.sin(beta)")) ggplot(df, aes(x="df.index", y="beta")) # If `count` is an aesthetic calculated by a stat ggplot(df, aes(x="alpha", y=after_stat("count"))) ggplot(df, aes(x="alpha", y=after_stat("count/np.max(count)"))) ``` The strings in the expression can refer to: 1. columns in the dataframe 2. variables in the namespace 3. aesthetic values (columns) calculated by the `stat` with the column names having precedence over the variables. For expressions, columns in the dataframe that are mapped to must have names that would be valid python variable names. This is okay: ```python # "gam ma" is a column in the dataframe ggplot(df, aes(x="df.index", y="gam ma")) ``` While this is not: ```python # "gam ma" is a column in the dataframe, but not # valid python variable name ggplot(df, aes(x="df.index", y="np.sin(gam ma)")) ``` `aes` has 2 internal functions that you can use in your expressions when transforming the variables. 1. [](:func:`~plotnine.mapping._eval_environment.factor`) 1. [](:func:`~plotnine.mapping._eval_environment.reorder`) **The group aesthetic** `group` is a special aesthetic that the user can *map* to. It is used to group the plotted items. If not specified, it is automatically computed and in most cases the computed groups are sufficient. However, there may be cases where it is handy to map to it. See Also -------- plotnine.after_stat : For how to map aesthetics to variables calculated by the stat plotnine.after_scale : For how to alter aesthetics after the data has been mapped by the scale. plotnine.stage : For how to evaluate the mapping to aesthetics at more than one stage of the plot building pipeline. """ def __init__(self, x=None, y=None, **kwargs): kwargs = rename_aesthetics(kwargs) if x is not None: kwargs["x"] = x if y is not None: kwargs["y"] = y kwargs = self._convert_deprecated_expr(kwargs) self.update(kwargs) def __iter__(self): return iter(self.keys()) def _convert_deprecated_expr(self, kwargs): """ Handle old-style calculated aesthetic expression mappings Just converts them to use `stage` e.g. "stat(count)" to after_stat(count) "..count.."
to after_stat(count) """ for name, value in kwargs.items(): if not isinstance(value, stage) and is_calculated_aes(value): _after_stat = strip_calculated_markers(value) kwargs[name] = after_stat(_after_stat) return kwargs @cached_property def _starting(self) -> dict[str, Any]: """ Return the subset of aesthetics mapped from the layer data The mapping is a dict of the form ``{name: expr}``, i.e the stage class has been peeled off. """ d = {} for name, value in self.items(): if not isinstance(value, stage): d[name] = value elif isinstance(value, stage) and value.start is not None: d[name] = value.start return d @cached_property def _calculated(self) -> dict[str, Any]: """ Return only the aesthetics mapped to calculated statistics The mapping is a dict of the form ``{name: expr}``, i.e the stage class has been peeled off. """ d = {} for name, value in self.items(): if isinstance(value, stage) and value.after_stat is not None: d[name] = value.after_stat return d @cached_property def _scaled(self) -> dict[str, Any]: """ Return only the aesthetics mapped to after scaling The mapping is a dict of the form ``{name: expr}``, i.e the stage class has been peeled off. """ d = {} for name, value in self.items(): if isinstance(value, stage) and value.after_scale is not None: d[name] = value.after_scale return d def __deepcopy__(self, memo): """ Deep copy without copying the environment """ cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result # Just copy the keys and point to the env for key, item in self.items(): result[key] = deepcopy(item, memo) return result def __radd__(self, other): """ Add aesthetic mappings to ggplot """ self = deepcopy(self) other.mapping.update(self) return other @property def labels(self) -> labels_view: """ The labels for this mapping """ return make_labels(self) def copy(self): return aes(**self) def inherit(self, other: dict[str, Any] | aes) -> aes: """ Create a mapping that inherits aesthetics in other Parameters ---------- other: aes | dict[str, Any] Default aesthetics Returns ------- new : aes Aesthetic mapping """ new = self.copy() for k in other: if k not in self: new[k] = other[k] return new def rename_aesthetics(obj: THasAesNames) -> THasAesNames: """ Rename aesthetics in obj Parameters ---------- obj : Object that contains aesthetics names Returns ------- : Object that contains aesthetics names """ if isinstance(obj, dict): for name in tuple(obj.keys()): new_name = name.replace("colour", "color") if name != new_name: obj[new_name] = obj.pop(name) elif isinstance(obj, Sequence): T = type(obj) return T(s.replace("colour", "color") for s in obj) # pyright: ignore elif obj.color is None and obj.colour is not None: obj.color, obj.colour = obj.colour, None return obj def is_calculated_aes(ae: Any) -> bool: """ Return True if Aesthetic expression maps to calculated statistic This function is now only used to identify the deprecated versions e.g. "..var.." or "stat(var)". Parameters ---------- ae : object Single aesthetic mapping >>> is_calculated_aes("density") False >>> is_calculated_aes(4) False >>> is_calculated_aes("..density..") True >>> is_calculated_aes("stat(density)") True >>> is_calculated_aes("stat(100*density)") True >>> is_calculated_aes("100*stat(density)") True """ if not isinstance(ae, str): return False return any(pattern.search(ae) for pattern in (STAT_RE, DOTS_RE)) def strip_stat(value): """ Remove stat function that mark calculated aesthetics Parameters ---------- value : object Aesthetic value. 
        Aesthetic value. In most cases this will be a string
        but other types will pass through unmodified.

    Returns
    -------
    out : object
        Aesthetic value with the stat() markers removed.

    >>> strip_stat("stat(density + stat(count))")
    density + count

    >>> strip_stat("stat(density) + 5")
    density + 5

    >>> strip_stat("5 + stat(func(density))")
    5 + func(density)

    >>> strip_stat("stat(func(density) + var1)")
    func(density) + var1

    >>> strip_stat("stat + var1")
    stat + var1

    >>> strip_stat(4)
    4
    """

    def strip_hanging_closing_parens(s):
        """
        Remove leftover parens
        """
        # Use an integer stack to track parens
        # and ignore leftover closing parens
        stack = 0
        idx = []
        for i, c in enumerate(s):
            if c == "(":
                stack += 1
            elif c == ")":
                stack -= 1
                if stack < 0:
                    idx.append(i)
                    stack = 0
                    continue

            yield c

    with suppress(TypeError):
        if STAT_RE.search(value):
            value = re.sub(r"\bstat\(", "", value)
            value = "".join(strip_hanging_closing_parens(value))

    return value


def strip_dots(value):
    """
    Remove dots (if any) that mark calculated aesthetics

    Parameters
    ----------
    value : object
        Aesthetic value. In most cases this will be a string
        but other types will pass through unmodified.

    Returns
    -------
    out : object
        Aesthetic value with the dots removed.
    """
    with suppress(TypeError):
        value = DOTS_RE.sub(r"\1", value)
    return value


def strip_calculated_markers(value):
    """
    Remove markers for calculated aesthetics

    Parameters
    ----------
    value : object
        Aesthetic value. In most cases this will be a string
        but other types will pass through unmodified.

    Returns
    -------
    out : object
        Aesthetic value with the markers removed.
    """
    return strip_stat(strip_dots(value))


def aes_to_scale(var: str):
    """
    Look up the scale that should be used for a given aesthetic
    """
    if var in {"x", "xmin", "xmax", "xend", "xintercept"}:
        var = "x"
    elif var in {"y", "ymin", "ymax", "yend", "yintercept"}:
        var = "y"
    return var


def is_position_aes(vars_: Sequence[str]):
    """
    Figure out if an aesthetic is a position aesthetic or not
    """
    return all(aes_to_scale(v) in {"x", "y"} for v in vars_)


def make_labels(mapping: dict[str, Any] | aes) -> labels_view:
    """
    Convert aesthetic mapping into text labels
    """

    def _nice_label(value: Any) -> str | None:
        if isinstance(value, str):
            return value
        elif isinstance(value, pd.Series):
            return value.name  # pyright: ignore
        elif not isinstance(value, Iterable):
            return str(value)
        elif isinstance(value, Sequence) and len(value) == 1:
            return str(value[0])
        else:
            return None

    def _make_label(ae: str, value: Any) -> str | None:
        if not isinstance(value, stage):
            return _nice_label(value)
        elif value.start is None:
            if value.after_stat is not None:
                return value.after_stat
            elif value.after_scale is not None:
                return value.after_scale
            else:
                raise ValueError("Unknown mapping")
        else:
            if value.after_stat is not None:
                return value.after_stat
            else:
                return _nice_label(value)

    valid_names = {f.name for f in fields(labels_view)}
    return labels_view(
        **{
            str(ae): _make_label(ae, label)
            for ae, label in mapping.items()
            if ae in valid_names
        }
    )
aes
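A short usage sketch of the mapping class above may help tie the helpers together. It assumes plotnine is installed and that `aes` is a top-level export (as in recent releases); the column names are illustrative only.

```python
# Hedged usage sketch of the aes mapping class above.
from plotnine import aes

# Old-style calculated expressions are rewritten to after_stat() on init
m = aes(x="alpha", y="..count..")
print(m["y"])  # a stage(...) wrapping the expression "count"

# British spellings are normalized by rename_aesthetics()
m2 = aes(x="alpha", colour="beta")
assert "color" in m2 and "colour" not in m2

# inherit() only fills in aesthetics that are not already mapped
defaults = aes(x="alpha", y="beta", color="gamma")
combined = aes(y="2*beta").inherit(defaults)
assert combined["x"] == "alpha" and combined["y"] == "2*beta"
```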
python
ray-project__ray
python/ray/dashboard/modules/metrics/dashboards/common.py
{ "start": 12934, "end": 13295 }
class ____:
    """Defines a Grafana row that can contain multiple panels.

    Attributes:
        title: The title of the row
        id: The unique id of the row within the dashboard
        panels: List of panels contained in this row
        collapsed: Whether the row should be collapsed by default
    """

    title: str
    id: int
    panels: List[Panel]
    collapsed: bool = False


@DeveloperAPI
@dataclass
Row
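Since the class is a plain dataclass, constructing one is direct. A minimal sketch, using only the fields shown above (the `Panel` entries would normally come from the same dashboards module; an empty list keeps it self-contained):

```python
# Hedged sketch: composing a collapsed Grafana row from the dataclass above.
node_row = Row(
    title="Node Resource Usage",  # hypothetical row title
    id=40,                        # hypothetical unique row id
    panels=[],                    # normally a list of Panel(...) instances
    collapsed=True,
)
```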
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_permissions.py
{ "start": 3685, "end": 3874 }
class ____:
    @require_permission_check(Permissions.LAUNCH_PARTITION_BACKFILL)
    async def mutate(self, graphene_info, **_kwargs):
        pass
EndpointMissingRequiredPermissionCheckAsync
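The class above deliberately declares a required permission check but never performs it, which is what the test exercises. For contrast, a hedged sketch of the compliant counterpart; `assert_permission` is assumed to come from dagster-graphql's implementation utils alongside `require_permission_check`:

```python
# Hedged sketch (class name hypothetical): the passing counterpart actually
# consults the context before mutating.
class EndpointWithRequiredPermissionCheckAsync:
    @require_permission_check(Permissions.LAUNCH_PARTITION_BACKFILL)
    async def mutate(self, graphene_info, **_kwargs):
        # assumed import: dagster_graphql.implementation.utils.assert_permission
        assert_permission(graphene_info, Permissions.LAUNCH_PARTITION_BACKFILL)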
python
TheAlgorithms__Python
sorts/external_sort.py
{ "start": 109, "end": 1103 }
class ____:
    BLOCK_FILENAME_FORMAT = "block_{0}.dat"

    def __init__(self, filename):
        self.filename = filename
        self.block_filenames = []

    def write_block(self, data, block_number):
        filename = self.BLOCK_FILENAME_FORMAT.format(block_number)
        with open(filename, "w") as file:
            file.write(data)
        self.block_filenames.append(filename)

    def get_block_filenames(self):
        return self.block_filenames

    def split(self, block_size, sort_key=None):
        i = 0
        with open(self.filename) as file:
            while True:
                # readlines() with a size hint reads roughly block_size bytes
                lines = file.readlines(block_size)

                if lines == []:
                    break

                if sort_key is None:
                    lines.sort()
                else:
                    lines.sort(key=sort_key)

                self.write_block("".join(lines), i)
                i += 1

    def cleanup(self):
        # A bare map() is lazy in Python 3 and would never delete anything;
        # iterate explicitly so the block files are actually removed.
        for filename in self.block_filenames:
            os.remove(filename)
FileSplitter
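The splitter implements the first phase of the external sort: cut the input into blocks small enough to sort in memory. A minimal usage sketch (the input file name is hypothetical):

```python
# Hedged sketch: split a large file into ~1 MiB sorted blocks, inspect the
# block file names, then remove the temporary blocks.
splitter = FileSplitter("unsorted.txt")
splitter.split(block_size=1024 * 1024)    # lexicographic line order
print(splitter.get_block_filenames())     # ['block_0.dat', 'block_1.dat', ...]
splitter.cleanup()
```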
python
django__django
tests/admin_views/admin.py
{ "start": 38151, "end": 38250 }
class ____(admin.ModelAdmin):
    list_display = ("title", "book")
    sortable_by = ()
ChapterAdmin6
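An empty `sortable_by` disables header sorting on every changelist column (supported since Django 2.1). A hedged sketch of the contrasting case, with a hypothetical class name and registration assumed to happen elsewhere in the test module:

```python
# Hedged sketch: listing fields in sortable_by restricts sorting to them.
class ChapterAdminSortableTitle(admin.ModelAdmin):
    list_display = ("title", "book")
    sortable_by = ("title",)  # only the "title" column header is sortable
```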
python
celery__celery
celery/backends/cache.py
{ "start": 2340, "end": 4831 }
class ____(KeyValueStoreBackend):
    """Cache result backend."""

    servers = None
    supports_autoexpire = True
    supports_native_join = True
    implements_incr = True

    def __init__(self, app, expires=None, backend=None,
                 options=None, url=None, **kwargs):
        options = {} if not options else options
        super().__init__(app, **kwargs)
        self.url = url

        self.options = dict(self.app.conf.cache_backend_options,
                            **options)

        self.backend = url or backend or self.app.conf.cache_backend
        if self.backend:
            self.backend, _, servers = self.backend.partition('://')
            self.servers = servers.rstrip('/').split(';')
        self.expires = self.prepare_expires(expires, type=int)
        try:
            self.Client, self.key_t = backends[self.backend]()
        except KeyError:
            raise ImproperlyConfigured(UNKNOWN_BACKEND.format(
                self.backend, ', '.join(backends)))
        self._encode_prefixes()  # re-encode the key prefixes

    def get(self, key):
        return self.client.get(key)

    def mget(self, keys):
        return self.client.get_multi(keys)

    def set(self, key, value):
        return self.client.set(key, value, self.expires)

    def delete(self, key):
        return self.client.delete(key)

    def _apply_chord_incr(self, header_result_args, body, **kwargs):
        chord_key = self.get_key_for_chord(header_result_args[0])
        self.client.set(chord_key, 0, time=self.expires)
        return super()._apply_chord_incr(
            header_result_args, body, **kwargs)

    def incr(self, key):
        return self.client.incr(key)

    def expire(self, key, value):
        return self.client.touch(key, value)

    @cached_property
    def client(self):
        return self.Client(self.servers, **self.options)

    def __reduce__(self, args=(), kwargs=None):
        kwargs = {} if not kwargs else kwargs
        servers = ';'.join(self.servers)
        backend = f'{self.backend}://{servers}/'
        kwargs.update(
            {'backend': backend,
             'expires': self.expires,
             'options': self.options})
        return super().__reduce__(args, kwargs)

    def as_uri(self, *args, **kwargs):
        """Return the backend as a URI.

        This properly handles the case of multiple servers.
        """
        servers = ';'.join(self.servers)
        return f'{self.backend}://{servers}/'
CacheBackend
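Per the parsing in `__init__`, the backend string is split on `'://'` into a client name and a `';'`-separated server list. A hedged configuration sketch; the `cache+memcached://` form follows Celery's documented result-backend syntax, and the server addresses are hypothetical:

```python
# Hedged configuration sketch for the cache backend above.
from celery import Celery

app = Celery('tasks')
app.conf.result_backend = 'cache+memcached://10.0.0.1:11211;10.0.0.2:11211/'
app.conf.cache_backend_options = {}  # merged into the client kwargs via self.options
```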
python
pytorch__pytorch
test/torch_np/test_ufuncs_basic.py
{ "start": 873, "end": 4259 }
class ____(TestCase):
    def get_x(self, ufunc):
        return np.arange(5, dtype="float64")

    @parametrize_unary_ufuncs
    def test_scalar(self, ufunc):
        # check that ufunc accepts a scalar and the result is convertible to scalar
        x = self.get_x(ufunc)[0]
        float(ufunc(x))

    @skip(True, reason="XXX: unary ufuncs ignore the dtype=... parameter")
    @parametrize_unary_ufuncs
    def test_x_and_dtype(self, ufunc):
        x = self.get_x(ufunc)
        res = ufunc(x, dtype="float")
        assert res.dtype == np.dtype("float")

    @skip(True, reason="XXX: unary ufuncs ignore the dtype=... parameter")
    @parametrize_casting
    @parametrize_unary_ufuncs
    @parametrize("dtype", ["float64", "complex128", "float32"])
    def test_x_and_dtype_casting(self, ufunc, casting, dtype):
        x = self.get_x(ufunc)
        if not np.can_cast(x, dtype, casting=casting):
            with assert_raises(TypeError):
                ufunc(x, dtype=dtype, casting=casting)
        else:
            assert ufunc(x, dtype=dtype, casting=casting).dtype == dtype

    @parametrize_casting
    @parametrize_unary_ufuncs
    @parametrize("out_dtype", ["float64", "complex128", "float32"])
    def test_x_and_out_casting(self, ufunc, casting, out_dtype):
        x = self.get_x(ufunc)
        out = np.empty_like(x, dtype=out_dtype)
        if not np.can_cast(x, out_dtype, casting=casting):
            with assert_raises(TypeError):
                ufunc(x, out=out, casting=casting)
        else:
            result = ufunc(x, out=out, casting=casting)
            assert result.dtype == out_dtype
            assert result is out

    @parametrize_unary_ufuncs
    def test_x_and_out_broadcast(self, ufunc):
        x = self.get_x(ufunc)
        out = np.empty((x.shape[0], x.shape[0]))

        x_b = np.broadcast_to(x, out.shape)

        res_out = ufunc(x, out=out)
        res_bcast = ufunc(x_b)
        # TODO: switching the order causes a graph break, failing the test.
        # See test/dynamo/test_misc.py -k test_numpy_graph_break
        assert res_out is out
        assert_equal(res_out, res_bcast)

        out = np.empty((1, x.shape[0]))
        x_b = np.broadcast_to(x, out.shape)

        res_out = ufunc(x, out=out)
        res_bcast = ufunc(x_b)
        assert res_out is out
        assert_equal(res_out, res_bcast)


ufunc_op_iop_numeric = [
    (np.add, operator.__add__, operator.__iadd__),
    (np.subtract, operator.__sub__, operator.__isub__),
    (np.multiply, operator.__mul__, operator.__imul__),
]


ufuncs_with_dunders = [ufunc for ufunc, _, _ in ufunc_op_iop_numeric]


numeric_binary_ufuncs = [
    np.float_power,
    np.power,
]


# these are not implemented for complex inputs
no_complex = [
    np.floor_divide,
    np.hypot,
    np.arctan2,
    np.copysign,
    np.fmax,
    np.fmin,
    np.fmod,
    np.heaviside,
    np.logaddexp,
    np.logaddexp2,
    np.maximum,
    np.minimum,
]


parametrize_binary_ufuncs = parametrize(
    "ufunc", ufuncs_with_dunders + numeric_binary_ufuncs + no_complex
)


# TODO: these snowflakes need special handling
"""
 'bitwise_and',
 'bitwise_or',
 'bitwise_xor',
 'equal',
 'lcm',
 'ldexp',
 'left_shift',
 'less',
 'less_equal',
 'gcd',
 'greater',
 'greater_equal',
 'logical_and',
 'logical_or',
 'logical_xor',
 'matmul',
 'not_equal',
"""


@instantiate_parametrized_tests
TestUnaryUfuncs
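The binary-ufunc parametrization built at the end of the record is consumed the same way as the unary one. A hedged sketch, mirroring the unary `test_scalar` pattern; the class name is hypothetical and the helper names (`TestCase`, `parametrize_binary_ufuncs`, `instantiate_parametrized_tests`, `np`) come from the same test module:

```python
# Hedged sketch of a consumer of parametrize_binary_ufuncs.
@instantiate_parametrized_tests
class TestBinaryUfuncScalar(TestCase):
    @parametrize_binary_ufuncs
    def test_scalar(self, ufunc):
        # each binary ufunc should accept two scalars and yield a scalar
        float(ufunc(np.float64(2.0), np.float64(3.0)))
```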
python
numba__numba
numba/np/arrayobj.py
{ "start": 33562, "end": 257882 }
class ____(object): """ Perform fancy indexing on the given array. """ def __init__(self, context, builder, aryty, ary, index_types, indices): self.context = context self.builder = builder self.aryty = aryty self.shapes = cgutils.unpack_tuple(builder, ary.shape, aryty.ndim) self.strides = cgutils.unpack_tuple(builder, ary.strides, aryty.ndim) self.ll_intp = self.context.get_value_type(types.intp) self.newaxes = [] indexers = [] num_newaxes = len([idx for idx in index_types if is_nonelike(idx)]) ax = 0 # keeps track of position of original axes new_ax = 0 # keeps track of position for inserting new axes for indexval, idxty in zip(indices, index_types): if idxty is types.ellipsis: # Fill up missing dimensions at the middle n_missing = aryty.ndim - len(indices) + 1 + num_newaxes for i in range(n_missing): indexer = EntireIndexer(context, builder, aryty, ary, ax) indexers.append(indexer) ax += 1 new_ax += 1 continue # Regular index value if isinstance(idxty, types.SliceType): slice = context.make_helper(builder, idxty, indexval) indexer = SliceIndexer(context, builder, aryty, ary, ax, idxty, slice) indexers.append(indexer) elif isinstance(idxty, types.Integer): ind = fix_integer_index(context, builder, idxty, indexval, self.shapes[ax]) indexer = IntegerIndexer(context, builder, ind) indexers.append(indexer) elif isinstance(idxty, types.Array): idxary = make_array(idxty)(context, builder, indexval) if isinstance(idxty.dtype, types.Integer): indexer = IntegerArrayIndexer(context, builder, idxty, idxary, self.shapes[ax]) elif isinstance(idxty.dtype, types.Boolean): indexer = BooleanArrayIndexer(context, builder, idxty, idxary) else: assert 0 indexers.append(indexer) elif is_nonelike(idxty): self.newaxes.append(new_ax) ax -= 1 else: raise AssertionError("unexpected index type: %s" % (idxty,)) ax += 1 new_ax += 1 # Fill up missing dimensions at the end assert ax <= aryty.ndim, (ax, aryty.ndim) while ax < aryty.ndim: indexer = EntireIndexer(context, builder, aryty, ary, ax) indexers.append(indexer) ax += 1 assert len(indexers) == aryty.ndim, (len(indexers), aryty.ndim) self.indexers = indexers def prepare(self): for i in self.indexers: i.prepare() one = self.context.get_constant(types.intp, 1) # Compute the resulting shape given by the indices res_shape = [i.get_shape() for i in self.indexers] # At every position where newaxis/None is present insert # one as a constant shape in the resulting list of shapes. for i in self.newaxes: res_shape.insert(i, (one,)) # Store the shape as a tuple, we can't do a simple # tuple(res_shape) here since res_shape is a list # of tuples which may be differently sized. self.indexers_shape = sum(res_shape, ()) def get_shape(self): """ Get the resulting data shape as Python tuple. """ return self.indexers_shape def get_offset_bounds(self, strides, itemsize): """ Get a half-open [lower, upper) range of byte offsets spanned by the indexer with the given strides and itemsize. The indexer is guaranteed to not go past those bounds. 
""" assert len(strides) == self.aryty.ndim builder = self.builder is_empty = cgutils.false_bit zero = self.ll_intp(0) one = self.ll_intp(1) lower = zero upper = zero for indexer, shape, stride in zip(self.indexers, self.indexers_shape, strides): is_empty = builder.or_(is_empty, builder.icmp_unsigned('==', shape, zero)) # Compute [lower, upper) indices on this dimension lower_index, upper_index = indexer.get_index_bounds() lower_offset = builder.mul(stride, lower_index) upper_offset = builder.mul(stride, builder.sub(upper_index, one)) # Adjust total interval is_downwards = builder.icmp_signed('<', stride, zero) lower = builder.add(lower, builder.select(is_downwards, upper_offset, lower_offset)) upper = builder.add(upper, builder.select(is_downwards, lower_offset, upper_offset)) # Make interval half-open upper = builder.add(upper, itemsize) # Adjust for empty shape lower = builder.select(is_empty, zero, lower) upper = builder.select(is_empty, zero, upper) return lower, upper def begin_loops(self): indices, counts = zip(*(i.loop_head() for i in self.indexers)) return indices, counts def end_loops(self): for i in reversed(self.indexers): i.loop_tail() def fancy_getitem(context, builder, sig, args, aryty, ary, index_types, indices): shapes = cgutils.unpack_tuple(builder, ary.shape) strides = cgutils.unpack_tuple(builder, ary.strides) data = ary.data indexer = FancyIndexer(context, builder, aryty, ary, index_types, indices) indexer.prepare() # Construct output array out_ty = sig.return_type out_shapes = indexer.get_shape() out = _empty_nd_impl(context, builder, out_ty, out_shapes) out_data = out.data out_idx = cgutils.alloca_once_value(builder, context.get_constant(types.intp, 0)) # Loop on source and copy to destination indices, _ = indexer.begin_loops() # No need to check for wraparound, as the indexers all ensure # a positive index is returned. ptr = cgutils.get_item_pointer2(context, builder, data, shapes, strides, aryty.layout, indices, wraparound=False, boundscheck=context.enable_boundscheck) val = load_item(context, builder, aryty, ptr) # Since the destination is C-contiguous, no need for multi-dimensional # indexing. cur = builder.load(out_idx) ptr = builder.gep(out_data, [cur]) store_item(context, builder, out_ty, val, ptr) next_idx = cgutils.increment_index(builder, cur) builder.store(next_idx, out_idx) indexer.end_loops() return impl_ret_new_ref(context, builder, out_ty, out._getvalue()) @lower_builtin(operator.getitem, types.Buffer, types.Array) def fancy_getitem_array(context, builder, sig, args): """ Advanced or basic indexing with an array. """ aryty, idxty = sig.args ary, idx = args ary = make_array(aryty)(context, builder, ary) if idxty.ndim == 0: # 0-d array index acts as a basic integer index idxty, idx = normalize_index(context, builder, idxty, idx) res = _getitem_array_generic(context, builder, sig.return_type, aryty, ary, (idxty,), (idx,)) return impl_ret_borrowed(context, builder, sig.return_type, res) else: # Advanced indexing return fancy_getitem(context, builder, sig, args, aryty, ary, (idxty,), (idx,)) def offset_bounds_from_strides(context, builder, arrty, arr, shapes, strides): """ Compute a half-open range [lower, upper) of byte offsets from the array's data pointer, that bound the in-memory extent of the array. 
This mimics offset_bounds_from_strides() from numpy/core/src/private/mem_overlap.c """ itemsize = arr.itemsize zero = itemsize.type(0) one = zero.type(1) if arrty.layout in 'CF': # Array is contiguous: contents are laid out sequentially # starting from arr.data and upwards lower = zero upper = builder.mul(itemsize, arr.nitems) else: # Non-contiguous array: need to examine strides lower = zero upper = zero for i in range(arrty.ndim): # Compute the largest byte offset on this dimension # max_axis_offset = strides[i] * (shapes[i] - 1) # (shapes[i] == 0 is catered for by the empty array case below) max_axis_offset = builder.mul(strides[i], builder.sub(shapes[i], one)) is_upwards = builder.icmp_signed('>=', max_axis_offset, zero) # Expand either upwards or downwards depending on stride upper = builder.select(is_upwards, builder.add(upper, max_axis_offset), upper) lower = builder.select(is_upwards, lower, builder.add(lower, max_axis_offset)) # Return a half-open range upper = builder.add(upper, itemsize) # Adjust for empty arrays is_empty = builder.icmp_signed('==', arr.nitems, zero) upper = builder.select(is_empty, zero, upper) lower = builder.select(is_empty, zero, lower) return lower, upper def compute_memory_extents(context, builder, lower, upper, data): """ Given [lower, upper) byte offsets and a base data pointer, compute the memory pointer bounds as pointer-sized integers. """ data_ptr_as_int = builder.ptrtoint(data, lower.type) start = builder.add(data_ptr_as_int, lower) end = builder.add(data_ptr_as_int, upper) return start, end def get_array_memory_extents(context, builder, arrty, arr, shapes, strides, data): """ Compute a half-open range [start, end) of pointer-sized integers which fully contain the array data. """ lower, upper = offset_bounds_from_strides(context, builder, arrty, arr, shapes, strides) return compute_memory_extents(context, builder, lower, upper, data) def extents_may_overlap(context, builder, a_start, a_end, b_start, b_end): """ Whether two memory extents [a_start, a_end) and [b_start, b_end) may overlap. """ # Comparisons are unsigned, since we are really comparing pointers may_overlap = builder.and_( builder.icmp_unsigned('<', a_start, b_end), builder.icmp_unsigned('<', b_start, a_end), ) return may_overlap def maybe_copy_source(context, builder, use_copy, srcty, src, src_shapes, src_strides, src_data): ptrty = src_data.type copy_layout = 'C' copy_data = cgutils.alloca_once_value(builder, src_data) copy_shapes = src_shapes copy_strides = None # unneeded for contiguous arrays with builder.if_then(use_copy, likely=False): # Allocate temporary scratchpad # XXX: should we use a stack-allocated array for very small # data sizes? 
allocsize = builder.mul(src.itemsize, src.nitems) data = context.nrt.allocate(builder, allocsize) voidptrty = data.type data = builder.bitcast(data, ptrty) builder.store(data, copy_data) # Copy source data into scratchpad intp_t = context.get_value_type(types.intp) with cgutils.loop_nest(builder, src_shapes, intp_t) as indices: src_ptr = cgutils.get_item_pointer2(context, builder, src_data, src_shapes, src_strides, srcty.layout, indices) dest_ptr = cgutils.get_item_pointer2(context, builder, data, copy_shapes, copy_strides, copy_layout, indices) builder.store(builder.load(src_ptr), dest_ptr) def src_getitem(source_indices): src_ptr = cgutils.alloca_once(builder, ptrty) with builder.if_else(use_copy, likely=False) as (if_copy, otherwise): with if_copy: builder.store( cgutils.get_item_pointer2(context, builder, builder.load(copy_data), copy_shapes, copy_strides, copy_layout, source_indices, wraparound=False), src_ptr) with otherwise: builder.store( cgutils.get_item_pointer2(context, builder, src_data, src_shapes, src_strides, srcty.layout, source_indices, wraparound=False), src_ptr) return load_item(context, builder, srcty, builder.load(src_ptr)) def src_cleanup(): # Deallocate memory with builder.if_then(use_copy, likely=False): data = builder.load(copy_data) data = builder.bitcast(data, voidptrty) context.nrt.free(builder, data) return src_getitem, src_cleanup def _bc_adjust_dimension(context, builder, shapes, strides, target_shape): """ Preprocess dimension for broadcasting. Returns (shapes, strides) such that the ndim match *target_shape*. When expanding to higher ndim, the returning shapes and strides are prepended with ones and zeros, respectively. When truncating to lower ndim, the shapes are checked (in runtime). All extra dimension must have size of 1. """ zero = context.get_constant(types.uintp, 0) one = context.get_constant(types.uintp, 1) # Adjust for broadcasting to higher dimension if len(target_shape) > len(shapes): nd_diff = len(target_shape) - len(shapes) # Fill missing shapes with one, strides with zeros shapes = [one] * nd_diff + shapes strides = [zero] * nd_diff + strides # Adjust for broadcasting to lower dimension elif len(target_shape) < len(shapes): # Accepted if all extra dims has shape 1 nd_diff = len(shapes) - len(target_shape) dim_is_one = [builder.icmp_unsigned('==', sh, one) for sh in shapes[:nd_diff]] accepted = functools.reduce(builder.and_, dim_is_one, cgutils.true_bit) # Check error with builder.if_then(builder.not_(accepted), likely=False): msg = "cannot broadcast source array for assignment" context.call_conv.return_user_exc(builder, ValueError, (msg,)) # Truncate extra shapes, strides shapes = shapes[nd_diff:] strides = strides[nd_diff:] return shapes, strides def _bc_adjust_shape_strides(context, builder, shapes, strides, target_shape): """ Broadcast shapes and strides to target_shape given that their ndim already matches. For each location where the shape is 1 and does not match the dim for target, it is set to the value at the target and the stride is set to zero. 
""" bc_shapes = [] bc_strides = [] zero = context.get_constant(types.uintp, 0) one = context.get_constant(types.uintp, 1) # Adjust all mismatching ones in shape mismatch = [builder.icmp_signed('!=', tar, old) for tar, old in zip(target_shape, shapes)] src_is_one = [builder.icmp_signed('==', old, one) for old in shapes] preds = [builder.and_(x, y) for x, y in zip(mismatch, src_is_one)] bc_shapes = [builder.select(p, tar, old) for p, tar, old in zip(preds, target_shape, shapes)] bc_strides = [builder.select(p, zero, old) for p, old in zip(preds, strides)] return bc_shapes, bc_strides def _broadcast_to_shape(context, builder, arrtype, arr, target_shape): """ Broadcast the given array to the target_shape. Returns (array_type, array) """ # Compute broadcasted shape and strides shapes = cgutils.unpack_tuple(builder, arr.shape) strides = cgutils.unpack_tuple(builder, arr.strides) shapes, strides = _bc_adjust_dimension(context, builder, shapes, strides, target_shape) shapes, strides = _bc_adjust_shape_strides(context, builder, shapes, strides, target_shape) new_arrtype = arrtype.copy(ndim=len(target_shape), layout='A') # Create new view new_arr = make_array(new_arrtype)(context, builder) populate_array(new_arr, data=arr.data, shape=cgutils.pack_array(builder, shapes), strides=cgutils.pack_array(builder, strides), itemsize=arr.itemsize, meminfo=arr.meminfo, parent=arr.parent) return new_arrtype, new_arr @intrinsic def _numpy_broadcast_to(typingctx, array, shape): ret = array.copy(ndim=shape.count, layout='A', readonly=True) sig = ret(array, shape) def codegen(context, builder, sig, args): src, shape_ = args srcty = sig.args[0] src = make_array(srcty)(context, builder, src) shape_ = cgutils.unpack_tuple(builder, shape_) _, dest = _broadcast_to_shape(context, builder, srcty, src, shape_,) # Hack to get np.broadcast_to to return a read-only array setattr(dest, 'parent', Constant( context.get_value_type(dest._datamodel.get_type('parent')), None)) res = dest._getvalue() return impl_ret_borrowed(context, builder, sig.return_type, res) return sig, codegen @intrinsic def get_readonly_array(typingctx, arr): # returns a copy of arr which is readonly ret = arr.copy(readonly=True) sig = ret(arr) def codegen(context, builder, sig, args): [src] = args srcty = sig.args[0] dest = make_array(srcty)(context, builder, src) # Hack to return a read-only array dest.parent = cgutils.get_null_value(dest.parent.type) res = dest._getvalue() return impl_ret_borrowed(context, builder, sig.return_type, res) return sig, codegen @register_jitable def _can_broadcast(array, dest_shape): src_shape = array.shape src_ndim = len(src_shape) dest_ndim = len(dest_shape) if src_ndim > dest_ndim: raise ValueError('input operand has more dimensions than allowed ' 'by the axis remapping') for size in dest_shape: if size < 0: raise ValueError('all elements of broadcast shape must be ' 'non-negative') # based on _broadcast_onto function in numba/np/npyimpl.py src_index = 0 dest_index = dest_ndim - src_ndim while src_index < src_ndim: src_dim = src_shape[src_index] dest_dim = dest_shape[dest_index] # possible cases for (src_dim, dest_dim): # * (1, 1) -> Ok # * (>1, 1) -> Error! # * (>1, >1) -> src_dim == dest_dim else error! 
# * (1, >1) -> Ok if src_dim == dest_dim or src_dim == 1: src_index += 1 dest_index += 1 else: raise ValueError('operands could not be broadcast together ' 'with remapped shapes') def _default_broadcast_to_impl(array, shape): array = np.asarray(array) _can_broadcast(array, shape) return _numpy_broadcast_to(array, shape) @overload(np.broadcast_to) def numpy_broadcast_to(array, shape): if not type_can_asarray(array): raise errors.TypingError('The first argument "array" must ' 'be array-like') if isinstance(shape, types.Integer): def impl(array, shape): return np.broadcast_to(array, (shape,)) return impl elif isinstance(shape, types.UniTuple): if not isinstance(shape.dtype, types.Integer): msg = 'The second argument "shape" must be a tuple of integers' raise errors.TypingError(msg) return _default_broadcast_to_impl elif isinstance(shape, types.Tuple) and shape.count > 0: # check if all types are integers if not all([isinstance(typ, types.IntegerLiteral) for typ in shape]): msg = f'"{shape}" object cannot be interpreted as an integer' raise errors.TypingError(msg) return _default_broadcast_to_impl elif isinstance(shape, types.Tuple) and shape.count == 0: is_scalar_array = isinstance(array, types.Array) and array.ndim == 0 if type_is_scalar(array) or is_scalar_array: def impl(array, shape): # broadcast_to(array, ()) # Array type must be supported by "type_can_asarray" # Quick note that unicode types are not supported! array = np.asarray(array) return get_readonly_array(array) return impl else: msg = 'Cannot broadcast a non-scalar to a scalar array' raise errors.TypingError(msg) else: msg = ('The argument "shape" must be a tuple or an integer. ' 'Got %s' % shape) raise errors.TypingError(msg) @register_jitable def numpy_broadcast_shapes_list(r, m, shape): for i in range(len(shape)): k = m - len(shape) + i tmp = shape[i] if tmp < 0: raise ValueError("negative dimensions are not allowed") if tmp == 1: continue if r[k] == 1: r[k] = tmp elif r[k] != tmp: raise ValueError("shape mismatch: objects" " cannot be broadcast" " to a single shape") @overload(np.broadcast_shapes) def ol_numpy_broadcast_shapes(*args): # Based on https://github.com/numpy/numpy/blob/f702b26fff3271ba6a6ba29a021fc19051d1f007/numpy/core/src/multiarray/iterators.c#L1129-L1212 # noqa for idx, arg in enumerate(args): is_int = isinstance(arg, types.Integer) is_int_tuple = isinstance(arg, types.UniTuple) and \ isinstance(arg.dtype, types.Integer) is_empty_tuple = isinstance(arg, types.Tuple) and len(arg.types) == 0 if not (is_int or is_int_tuple or is_empty_tuple): msg = (f'Argument {idx} must be either an int or tuple[int]. 
' f'Got {arg}') raise errors.TypingError(msg) # discover the number of dimensions m = 0 for arg in args: if isinstance(arg, types.Integer): m = max(m, 1) elif isinstance(arg, types.BaseTuple): m = max(m, len(arg)) if m == 0: return lambda *args: () else: tup_init = (1,) * m def impl(*args): # propagate args r = [1] * m tup = tup_init for arg in literal_unroll(args): if isinstance(arg, tuple) and len(arg) > 0: numpy_broadcast_shapes_list(r, m, arg) elif isinstance(arg, int): numpy_broadcast_shapes_list(r, m, (arg,)) for idx, elem in enumerate(r): tup = tuple_setitem(tup, idx, elem) return tup return impl @overload(np.broadcast_arrays) def numpy_broadcast_arrays(*args): for idx, arg in enumerate(args): if not type_can_asarray(arg): raise errors.TypingError(f'Argument "{idx}" must ' 'be array-like') unified_dtype = None dt = None for arg in args: if isinstance(arg, (types.Array, types.BaseTuple)): dt = arg.dtype else: dt = arg if unified_dtype is None: unified_dtype = dt elif unified_dtype != dt: raise errors.TypingError('Mismatch of argument types. Numba cannot ' 'broadcast arrays with different types. ' f'Got {args}') # number of dimensions m = 0 for idx, arg in enumerate(args): if isinstance(arg, types.ArrayCompatible): m = max(m, arg.ndim) elif isinstance(arg, (types.Number, types.Boolean, types.BaseTuple)): m = max(m, 1) else: raise errors.TypingError(f'Unhandled type {arg}') tup_init = (0,) * m def impl(*args): # find out the output shape # we can't call np.broadcast_shapes here since args may have arrays # with different shapes and it is not possible to create a list # with those shapes dynamically shape = [1] * m for array in literal_unroll(args): numpy_broadcast_shapes_list(shape, m, np.asarray(array).shape) tup = tup_init for i in range(m): tup = tuple_setitem(tup, i, shape[i]) # numpy checks if the input arrays have the same shape as `shape` outs = [] for array in literal_unroll(args): outs.append(np.broadcast_to(np.asarray(array), tup)) return outs return impl def raise_with_shape_context(src_shapes, index_shape): """Targets should implement this if they wish to specialize the error handling/messages. The overload implementation takes two tuples as arguments and should raise a ValueError.""" raise NotImplementedError @overload(raise_with_shape_context, target="generic") def ol_raise_with_shape_context_generic(src_shapes, index_shape): # This overload is for a "generic" target, which makes no assumption about # the NRT or string support, but does assume exceptions can be raised. 
if (isinstance(src_shapes, types.UniTuple) and isinstance(index_shape, types.UniTuple) and src_shapes.dtype == index_shape.dtype and isinstance(src_shapes.dtype, types.Integer)): def impl(src_shapes, index_shape): raise ValueError("cannot assign slice from input of different size") return impl @overload(raise_with_shape_context, target="CPU") def ol_raise_with_shape_context_cpu(src_shapes, index_shape): if (isinstance(src_shapes, types.UniTuple) and isinstance(index_shape, types.UniTuple) and src_shapes.dtype == index_shape.dtype and isinstance(src_shapes.dtype, types.Integer)): def impl(src_shapes, index_shape): if len(src_shapes) == 1: shape_str = f"({src_shapes[0]},)" else: shape_str = f"({', '.join([str(x) for x in src_shapes])})" if len(index_shape) == 1: index_str = f"({index_shape[0]},)" else: index_str = f"({', '.join([str(x) for x in index_shape])})" msg = (f"cannot assign slice of shape {shape_str} from input of " f"shape {index_str}") raise ValueError(msg) return impl def fancy_setslice(context, builder, sig, args, index_types, indices): """ Implement slice assignment for arrays. This implementation works for basic as well as fancy indexing, since there's no functional difference between the two for indexed assignment. """ aryty, _, srcty = sig.args ary, _, src = args ary = make_array(aryty)(context, builder, ary) dest_shapes = cgutils.unpack_tuple(builder, ary.shape) dest_strides = cgutils.unpack_tuple(builder, ary.strides) dest_data = ary.data indexer = FancyIndexer(context, builder, aryty, ary, index_types, indices) indexer.prepare() def raise_shape_mismatch_error(context, builder, src_shapes, index_shape): # This acts as the "trampoline" to raise a ValueError in the case # of the source and destination shapes mismatch at runtime. It resolves # the public overload stub `raise_with_shape_context` fnty = context.typing_context.resolve_value_type( raise_with_shape_context) argtys = (types.UniTuple(types.int64, len(src_shapes)), types.UniTuple(types.int64, len(index_shape))) raise_sig = fnty.get_call_type(context.typing_context, argtys, {}) func = context.get_function(fnty, raise_sig) func(builder, (context.make_tuple(builder, raise_sig.args[0], src_shapes), context.make_tuple(builder, raise_sig.args[1], index_shape))) if isinstance(srcty, types.Buffer): # Source is an array src_dtype = srcty.dtype index_shape = indexer.get_shape() src = make_array(srcty)(context, builder, src) # Broadcast source array to shape srcty, src = _broadcast_to_shape(context, builder, srcty, src, index_shape) src_shapes = cgutils.unpack_tuple(builder, src.shape) src_strides = cgutils.unpack_tuple(builder, src.strides) src_data = src.data # Check shapes are equal shape_error = cgutils.false_bit assert len(index_shape) == len(src_shapes) for u, v in zip(src_shapes, index_shape): shape_error = builder.or_(shape_error, builder.icmp_signed('!=', u, v)) with builder.if_then(shape_error, likely=False): raise_shape_mismatch_error(context, builder, src_shapes, index_shape) # Check for array overlap src_start, src_end = get_array_memory_extents(context, builder, srcty, src, src_shapes, src_strides, src_data) dest_lower, dest_upper = indexer.get_offset_bounds(dest_strides, ary.itemsize) dest_start, dest_end = compute_memory_extents(context, builder, dest_lower, dest_upper, dest_data) use_copy = extents_may_overlap(context, builder, src_start, src_end, dest_start, dest_end) src_getitem, src_cleanup = maybe_copy_source(context, builder, use_copy, srcty, src, src_shapes, src_strides, src_data) elif isinstance(srcty, 
types.Sequence):
        src_dtype = srcty.dtype

        # Check shape is equal to sequence length
        index_shape = indexer.get_shape()
        assert len(index_shape) == 1
        len_impl = context.get_function(len, signature(types.intp, srcty))
        seq_len = len_impl(builder, (src,))

        shape_error = builder.icmp_signed('!=', index_shape[0], seq_len)
        with builder.if_then(shape_error, likely=False):
            raise_shape_mismatch_error(context, builder, (seq_len,),
                                       (index_shape[0],))

        def src_getitem(source_indices):
            idx, = source_indices
            getitem_impl = context.get_function(
                operator.getitem,
                signature(src_dtype, srcty, types.intp),
            )
            return getitem_impl(builder, (src, idx))

        def src_cleanup():
            pass

    else:
        # Source is a scalar (broadcast or not, depending on destination
        # shape).
        src_dtype = srcty

        def src_getitem(source_indices):
            return src

        def src_cleanup():
            pass

    zero = context.get_constant(types.uintp, 0)

    # Loop on destination and copy from source to destination
    dest_indices, counts = indexer.begin_loops()

    # Source is iterated in natural order. Counts represent a counter for
    # the number of times a specified axis is being accessed; during
    # setitem they are used as source indices
    counts = list(counts)

    # We need to artificially introduce the index zero wherever a
    # newaxis is present within the indexer. These always remain
    # zero.
    for i in indexer.newaxes:
        counts.insert(i, zero)
    source_indices = [c for c in counts if c is not None]
    val = src_getitem(source_indices)

    # Cast to the destination dtype (cross-dtype slice assignment is allowed)
    val = context.cast(builder, val, src_dtype, aryty.dtype)

    # No need to check for wraparound, as the indexers all ensure
    # a positive index is returned.
    dest_ptr = cgutils.get_item_pointer2(context, builder, dest_data,
                                         dest_shapes, dest_strides,
                                         aryty.layout, dest_indices,
                                         wraparound=False,
                                         boundscheck=context.enable_boundscheck)
    store_item(context, builder, aryty, val, dest_ptr)

    indexer.end_loops()

    src_cleanup()

    return context.get_dummy_value()


# ------------------------------------------------------------------------------
# Shape / layout altering

def vararg_to_tuple(context, builder, sig, args):
    aryty = sig.args[0]
    dimtys = sig.args[1:]
    # values
    ary = args[0]
    dims = args[1:]
    # coerce all types to intp
    dims = [context.cast(builder, val, ty, types.intp)
            for ty, val in zip(dimtys, dims)]
    # make a tuple
    shape = cgutils.pack_array(builder, dims, dims[0].type)

    shapety = types.UniTuple(dtype=types.intp, count=len(dims))
    new_sig = typing.signature(sig.return_type, aryty, shapety)
    new_args = ary, shape
    return new_sig, new_args


@lower_builtin('array.transpose', types.Array)
def array_transpose(context, builder, sig, args):
    return array_T(context, builder, sig.args[0], args[0])


def permute_arrays(axis, shape, strides):
    if len(axis) != len(set(axis)):
        raise ValueError("repeated axis in transpose")
    dim = len(shape)
    for x in axis:
        if x >= dim or abs(x) > dim:
            raise ValueError("axis is out of bounds for array of "
                             "given dimension")

    shape[:] = shape[axis]
    strides[:] = strides[axis]


# Transposing an array involves permuting the shape and strides of the array
# based on the given axes.
@lower_builtin('array.transpose', types.Array, types.BaseTuple)
def array_transpose_tuple(context, builder, sig, args):
    aryty = sig.args[0]
    ary = make_array(aryty)(context, builder, args[0])

    axisty, axis = sig.args[1], args[1]
    num_axis, dtype = axisty.count, axisty.dtype

    ll_intp = context.get_value_type(types.intp)
    ll_ary_size = ir.ArrayType(ll_intp, num_axis)

    # Allocate memory for axes, shapes, and strides arrays.
arys = [axis, ary.shape, ary.strides] ll_arys = [cgutils.alloca_once(builder, ll_ary_size) for _ in arys] # Store axes, shapes, and strides arrays to the allocated memory. for src, dst in zip(arys, ll_arys): builder.store(src, dst) np_ary_ty = types.Array(dtype=dtype, ndim=1, layout='C') np_itemsize = context.get_constant(types.intp, context.get_abi_sizeof(ll_intp)) # Form NumPy arrays for axes, shapes, and strides arrays. np_arys = [make_array(np_ary_ty)(context, builder) for _ in arys] # Roughly, `np_ary = np.array(ll_ary)` for each of axes, shapes, and strides for np_ary, ll_ary in zip(np_arys, ll_arys): populate_array(np_ary, data=builder.bitcast(ll_ary, ll_intp.as_pointer()), shape=[context.get_constant(types.intp, num_axis)], strides=[np_itemsize], itemsize=np_itemsize, meminfo=None) # Pass NumPy arrays formed above to permute_arrays function that permutes # shapes and strides based on axis contents. context.compile_internal(builder, permute_arrays, typing.signature(types.void, np_ary_ty, np_ary_ty, np_ary_ty), [a._getvalue() for a in np_arys]) # Make a new array based on permuted shape and strides and return it. ret = make_array(sig.return_type)(context, builder) populate_array(ret, data=ary.data, shape=builder.load(ll_arys[1]), strides=builder.load(ll_arys[2]), itemsize=ary.itemsize, meminfo=ary.meminfo, parent=ary.parent) res = ret._getvalue() return impl_ret_borrowed(context, builder, sig.return_type, res) @lower_builtin('array.transpose', types.Array, types.VarArg(types.Any)) def array_transpose_vararg(context, builder, sig, args): new_sig, new_args = vararg_to_tuple(context, builder, sig, args) return array_transpose_tuple(context, builder, new_sig, new_args) @overload(np.transpose) def numpy_transpose(a, axes=None): if isinstance(a, types.BaseTuple): raise errors.TypingError("np.transpose does not accept tuples") if axes is None: def np_transpose_impl(a, axes=None): return a.transpose() else: def np_transpose_impl(a, axes=None): return a.transpose(axes) return np_transpose_impl @lower_getattr(types.Array, 'T') def array_T(context, builder, typ, value): if typ.ndim <= 1: res = value else: ary = make_array(typ)(context, builder, value) ret = make_array(typ)(context, builder) shapes = cgutils.unpack_tuple(builder, ary.shape, typ.ndim) strides = cgutils.unpack_tuple(builder, ary.strides, typ.ndim) populate_array(ret, data=ary.data, shape=cgutils.pack_array(builder, shapes[::-1]), strides=cgutils.pack_array(builder, strides[::-1]), itemsize=ary.itemsize, meminfo=ary.meminfo, parent=ary.parent) res = ret._getvalue() return impl_ret_borrowed(context, builder, typ, res) @overload(np.logspace) def numpy_logspace(start, stop, num=50): if not isinstance(start, types.Number): raise errors.TypingError('The first argument "start" must be a number') if not isinstance(stop, types.Number): raise errors.TypingError('The second argument "stop" must be a number') if not isinstance(num, (int, types.Integer)): raise errors.TypingError('The third argument "num" must be an integer') def impl(start, stop, num=50): y = np.linspace(start, stop, num) return np.power(10.0, y) return impl @overload(np.geomspace) def numpy_geomspace(start, stop, num=50): if not isinstance(start, types.Number): msg = 'The argument "start" must be a number' raise errors.TypingError(msg) if not isinstance(stop, types.Number): msg = 'The argument "stop" must be a number' raise errors.TypingError(msg) if not isinstance(num, (int, types.Integer)): msg = 'The argument "num" must be an integer' raise errors.TypingError(msg) if 
any(isinstance(arg, types.Complex) for arg in [start, stop]): result_dtype = from_dtype(np.result_type(as_dtype(start), as_dtype(stop), None)) def impl(start, stop, num=50): if start == 0 or stop == 0: raise ValueError('Geometric sequence cannot include zero') start = result_dtype(start) stop = result_dtype(stop) if numpy_version < (2, 0): both_imaginary = (start.real == 0) & (stop.real == 0) both_negative = (np.sign(start) == -1) & (np.sign(stop) == -1) out_sign = 1 if both_imaginary: start = start.imag stop = stop.imag out_sign = 1j if both_negative: start = -start stop = -stop out_sign = -out_sign else: out_sign = np.sign(start) start /= out_sign stop /= out_sign logstart = np.log10(start) logstop = np.log10(stop) result = np.logspace(logstart, logstop, num) # Make sure the endpoints match the start and stop arguments. # This is necessary because np.exp(np.log(x)) is not necessarily # equal to x. if num > 0: result[0] = start if num > 1: result[-1] = stop return out_sign * result else: def impl(start, stop, num=50): if start == 0 or stop == 0: raise ValueError('Geometric sequence cannot include zero') both_negative = (np.sign(start) == -1) & (np.sign(stop) == -1) out_sign = 1 if both_negative: start = -start stop = -stop out_sign = -out_sign logstart = np.log10(start) logstop = np.log10(stop) result = np.logspace(logstart, logstop, num) # Make sure the endpoints match the start and stop arguments. # This is necessary because np.exp(np.log(x)) is not necessarily # equal to x. if num > 0: result[0] = start if num > 1: result[-1] = stop return out_sign * result return impl @overload(np.rot90) def numpy_rot90(m, k=1): # supporting axes argument it needs to be included in np.flip if not isinstance(k, (int, types.Integer)): raise errors.TypingError('The second argument "k" must be an integer') if not isinstance(m, types.Array): raise errors.TypingError('The first argument "m" must be an array') if m.ndim < 2: raise errors.NumbaValueError('Input must be >= 2-d.') def impl(m, k=1): k = k % 4 if k == 0: return m[:] elif k == 1: return np.swapaxes(np.fliplr(m), 0, 1) elif k == 2: return np.flipud(np.fliplr(m)) elif k == 3: return np.fliplr(np.swapaxes(m, 0, 1)) else: raise AssertionError # unreachable return impl def _attempt_nocopy_reshape(context, builder, aryty, ary, newnd, newshape, newstrides): """ Call into Numba_attempt_nocopy_reshape() for the given array type and instance, and the specified new shape. Return value is non-zero if successful, and the array pointed to by *newstrides* will be filled up with the computed results. 
""" ll_intp = context.get_value_type(types.intp) ll_intp_star = ll_intp.as_pointer() ll_intc = context.get_value_type(types.intc) fnty = ir.FunctionType(ll_intc, [ # nd, *dims, *strides ll_intp, ll_intp_star, ll_intp_star, # newnd, *newdims, *newstrides ll_intp, ll_intp_star, ll_intp_star, # itemsize, is_f_order ll_intp, ll_intc]) fn = cgutils.get_or_insert_function(builder.module, fnty, "numba_attempt_nocopy_reshape") nd = ll_intp(aryty.ndim) shape = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('shape'), 0, 0) strides = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('strides'), 0, 0) newnd = ll_intp(newnd) newshape = cgutils.gep_inbounds(builder, newshape, 0, 0) newstrides = cgutils.gep_inbounds(builder, newstrides, 0, 0) is_f_order = ll_intc(0) res = builder.call(fn, [nd, shape, strides, newnd, newshape, newstrides, ary.itemsize, is_f_order]) return res def normalize_reshape_value(origsize, shape): num_neg_value = 0 known_size = 1 for ax, s in enumerate(shape): if s < 0: num_neg_value += 1 neg_ax = ax else: known_size *= s if num_neg_value == 0: if origsize != known_size: raise ValueError("total size of new array must be unchanged") elif num_neg_value == 1: # Infer negative dimension if known_size == 0: inferred = 0 ok = origsize == 0 else: inferred = origsize // known_size ok = origsize % known_size == 0 if not ok: raise ValueError("total size of new array must be unchanged") shape[neg_ax] = inferred else: raise ValueError("multiple negative shape values") @lower_builtin('array.reshape', types.Array, types.BaseTuple) def array_reshape(context, builder, sig, args): aryty = sig.args[0] retty = sig.return_type shapety = sig.args[1] shape = args[1] ll_intp = context.get_value_type(types.intp) ll_shape = ir.ArrayType(ll_intp, shapety.count) ary = make_array(aryty)(context, builder, args[0]) # We will change the target shape in this slot # (see normalize_reshape_value() below) newshape = cgutils.alloca_once(builder, ll_shape) builder.store(shape, newshape) # Create a shape array pointing to the value of newshape. 
# (roughly, `shape_ary = np.array(ary.shape)`) shape_ary_ty = types.Array(dtype=shapety.dtype, ndim=1, layout='C') shape_ary = make_array(shape_ary_ty)(context, builder) shape_itemsize = context.get_constant(types.intp, context.get_abi_sizeof(ll_intp)) populate_array(shape_ary, data=builder.bitcast(newshape, ll_intp.as_pointer()), shape=[context.get_constant(types.intp, shapety.count)], strides=[shape_itemsize], itemsize=shape_itemsize, meminfo=None) # Compute the original array size size = ary.nitems # Call our normalizer which will fix the shape array in case of negative # shape value context.compile_internal(builder, normalize_reshape_value, typing.signature(types.void, types.uintp, shape_ary_ty), [size, shape_ary._getvalue()]) # Perform reshape (nocopy) newnd = shapety.count newstrides = cgutils.alloca_once(builder, ll_shape) ok = _attempt_nocopy_reshape(context, builder, aryty, ary, newnd, newshape, newstrides) fail = builder.icmp_unsigned('==', ok, ok.type(0)) with builder.if_then(fail): msg = "incompatible shape for array" context.call_conv.return_user_exc(builder, NotImplementedError, (msg,)) ret = make_array(retty)(context, builder) populate_array(ret, data=ary.data, shape=builder.load(newshape), strides=builder.load(newstrides), itemsize=ary.itemsize, meminfo=ary.meminfo, parent=ary.parent) res = ret._getvalue() return impl_ret_borrowed(context, builder, sig.return_type, res) @lower_builtin('array.reshape', types.Array, types.VarArg(types.Any)) def array_reshape_vararg(context, builder, sig, args): new_sig, new_args = vararg_to_tuple(context, builder, sig, args) return array_reshape(context, builder, new_sig, new_args) if numpy_version < (2, 1): @overload(np.reshape) def np_reshape(a, newshape): def np_reshape_impl(a, newshape): return a.reshape(newshape) return np_reshape_impl else: @overload(np.reshape) def np_reshape(a, shape): def np_reshape_impl(a, shape): return a.reshape(shape) return np_reshape_impl @overload(np.resize) def numpy_resize(a, new_shape): if not type_can_asarray(a): msg = 'The argument "a" must be array-like' raise errors.TypingError(msg) if not ((isinstance(new_shape, types.UniTuple) and isinstance(new_shape.dtype, types.Integer)) or isinstance(new_shape, types.Integer)): msg = ('The argument "new_shape" must be an integer or ' 'a tuple of integers') raise errors.TypingError(msg) def impl(a, new_shape): a = np.asarray(a) a = np.ravel(a) if isinstance(new_shape, tuple): new_size = 1 for dim_length in np.asarray(new_shape): new_size *= dim_length if dim_length < 0: msg = 'All elements of `new_shape` must be non-negative' raise ValueError(msg) else: if new_shape < 0: msg2 = 'All elements of `new_shape` must be non-negative' raise ValueError(msg2) new_size = new_shape if a.size == 0: return np.zeros(new_shape).astype(a.dtype) repeats = -(-new_size // a.size) # ceil division res = a for i in range(repeats - 1): res = np.concatenate((res, a)) res = res[:new_size] return np.reshape(res, new_shape) return impl @overload(np.append) def np_append(arr, values, axis=None): if not type_can_asarray(arr): raise errors.TypingError('The first argument "arr" must be array-like') if not type_can_asarray(values): raise errors.TypingError('The second argument "values" must be ' 'array-like') if is_nonelike(axis): def impl(arr, values, axis=None): arr = np.ravel(np.asarray(arr)) values = np.ravel(np.asarray(values)) return np.concatenate((arr, values)) else: if not isinstance(axis, types.Integer): raise errors.TypingError('The third argument "axis" must be an ' 'integer') def 
impl(arr, values, axis=None): return np.concatenate((arr, values), axis=axis) return impl @lower_builtin('array.ravel', types.Array) def array_ravel(context, builder, sig, args): # Only support no argument version (default order='C') def imp_nocopy(ary): """No copy version""" return ary.reshape(ary.size) def imp_copy(ary): """Copy version""" return ary.flatten() # If the input array is C layout already, use the nocopy version if sig.args[0].layout == 'C': imp = imp_nocopy # otherwise, use flatten under-the-hood else: imp = imp_copy res = context.compile_internal(builder, imp, sig, args) res = impl_ret_new_ref(context, builder, sig.return_type, res) return res @lower_builtin(np.ravel, types.Array) def np_ravel(context, builder, sig, args): def np_ravel_impl(a): return a.ravel() return context.compile_internal(builder, np_ravel_impl, sig, args) @lower_builtin('array.flatten', types.Array) def array_flatten(context, builder, sig, args): # Only support flattening to C layout currently. def imp(ary): return ary.copy().reshape(ary.size) res = context.compile_internal(builder, imp, sig, args) res = impl_ret_new_ref(context, builder, sig.return_type, res) return res @register_jitable def _np_clip_impl(a, a_min, a_max, out): # Both a_min and a_max are numpy arrays ret = np.empty_like(a) if out is None else out a_b, a_min_b, a_max_b = np.broadcast_arrays(a, a_min, a_max) for index in np.ndindex(a_b.shape): val_a = a_b[index] val_a_min = a_min_b[index] val_a_max = a_max_b[index] ret[index] = min(max(val_a, val_a_min), val_a_max) return ret @register_jitable def _np_clip_impl_none(a, b, use_min, out): for index in np.ndindex(a.shape): val_a = a[index] val_b = b[index] if use_min: out[index] = min(val_a, val_b) else: out[index] = max(val_a, val_b) return out @overload(np.clip) def np_clip(a, a_min, a_max, out=None): if not type_can_asarray(a): raise errors.TypingError('The argument "a" must be array-like') if (not isinstance(a_min, types.NoneType) and not type_can_asarray(a_min)): raise errors.TypingError(('The argument "a_min" must be a number ' 'or an array-like')) if (not isinstance(a_max, types.NoneType) and not type_can_asarray(a_max)): raise errors.TypingError('The argument "a_max" must be a number ' 'or an array-like') if not (isinstance(out, types.Array) or is_nonelike(out)): msg = 'The argument "out" must be an array if it is provided' raise errors.TypingError(msg) # TODO: support scalar a (issue #3469) a_min_is_none = a_min is None or isinstance(a_min, types.NoneType) a_max_is_none = a_max is None or isinstance(a_max, types.NoneType) if a_min_is_none and a_max_is_none: # Raises value error when both a_min and a_max are None def np_clip_nn(a, a_min, a_max, out=None): raise ValueError("array_clip: must set either max or min") return np_clip_nn a_min_is_scalar = isinstance(a_min, types.Number) a_max_is_scalar = isinstance(a_max, types.Number) if a_min_is_scalar and a_max_is_scalar: def np_clip_ss(a, a_min, a_max, out=None): # a_min and a_max are scalars # since their shape will be empty # so broadcasting is not needed at all ret = np.empty_like(a) if out is None else out for index in np.ndindex(a.shape): val_a = a[index] ret[index] = min(max(val_a, a_min), a_max) return ret return np_clip_ss elif a_min_is_scalar and not a_max_is_scalar: if a_max_is_none: def np_clip_sn(a, a_min, a_max, out=None): # a_min is a scalar # since its shape will be empty # so broadcasting is not needed at all ret = np.empty_like(a) if out is None else out for index in np.ndindex(a.shape): val_a = a[index] ret[index] = 
max(val_a, a_min) return ret return np_clip_sn else: def np_clip_sa(a, a_min, a_max, out=None): # a_min is a scalar # since its shape will be empty # broadcast it to shape of a # by using np.full_like a_min_full = np.full_like(a, a_min) return _np_clip_impl(a, a_min_full, a_max, out) return np_clip_sa elif not a_min_is_scalar and a_max_is_scalar: if a_min_is_none: def np_clip_ns(a, a_min, a_max, out=None): # a_max is a scalar # since its shape will be empty # so broadcasting is not needed at all ret = np.empty_like(a) if out is None else out for index in np.ndindex(a.shape): val_a = a[index] ret[index] = min(val_a, a_max) return ret return np_clip_ns else: def np_clip_as(a, a_min, a_max, out=None): # a_max is a scalar # since its shape will be empty # broadcast it to shape of a # by using np.full_like a_max_full = np.full_like(a, a_max) return _np_clip_impl(a, a_min, a_max_full, out) return np_clip_as else: # Case where exactly one of a_min or a_max is None if a_min_is_none: def np_clip_na(a, a_min, a_max, out=None): # a_max is a numpy array but a_min is None ret = np.empty_like(a) if out is None else out a_b, a_max_b = np.broadcast_arrays(a, a_max) return _np_clip_impl_none(a_b, a_max_b, True, ret) return np_clip_na elif a_max_is_none: def np_clip_an(a, a_min, a_max, out=None): # a_min is a numpy array but a_max is None ret = np.empty_like(a) if out is None else out a_b, a_min_b = np.broadcast_arrays(a, a_min) return _np_clip_impl_none(a_b, a_min_b, False, ret) return np_clip_an else: def np_clip_aa(a, a_min, a_max, out=None): # Both a_min and a_max are clearly arrays # because none of the above branches # returned return _np_clip_impl(a, a_min, a_max, out) return np_clip_aa @overload_method(types.Array, 'clip') def array_clip(a, a_min=None, a_max=None, out=None): def impl(a, a_min=None, a_max=None, out=None): return np.clip(a, a_min, a_max, out) return impl def _change_dtype(context, builder, oldty, newty, ary): """ Attempt to fix up *ary* for switching from *oldty* to *newty*. See Numpy's array_descr_set() (np/core/src/multiarray/getset.c). Attempt to fix the array's shape and strides for a new dtype. False is returned on failure, True on success. """ assert oldty.ndim == newty.ndim assert oldty.layout == newty.layout new_layout = ord(newty.layout) any_layout = ord('A') c_layout = ord('C') f_layout = ord('F') int8 = types.int8 def imp(nd, dims, strides, old_itemsize, new_itemsize, layout): # Attempt to update the layout due to limitation of the numba # type system. 
if layout == any_layout: # Test rightmost stride to be contiguous if strides[-1] == old_itemsize: # Process this as if it is C contiguous layout = int8(c_layout) # Test leftmost stride to be F contiguous elif strides[0] == old_itemsize: # Process this as if it is F contiguous layout = int8(f_layout) if old_itemsize != new_itemsize and (layout == any_layout or nd == 0): return False if layout == c_layout: i = nd - 1 else: i = 0 if new_itemsize < old_itemsize: # If it is compatible, increase the size of the dimension # at the end (or at the front if F-contiguous) if (old_itemsize % new_itemsize) != 0: return False newdim = old_itemsize // new_itemsize dims[i] *= newdim strides[i] = new_itemsize elif new_itemsize > old_itemsize: # Determine if last (or first if F-contiguous) dimension # is compatible bytelength = dims[i] * old_itemsize if (bytelength % new_itemsize) != 0: return False dims[i] = bytelength // new_itemsize strides[i] = new_itemsize else: # Same item size: nothing to do (this also works for # non-contiguous arrays). pass return True old_itemsize = context.get_constant(types.intp, get_itemsize(context, oldty)) new_itemsize = context.get_constant(types.intp, get_itemsize(context, newty)) nd = context.get_constant(types.intp, newty.ndim) shape_data = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('shape'), 0, 0) strides_data = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('strides'), 0, 0) shape_strides_array_type = types.Array(dtype=types.intp, ndim=1, layout='C') arycls = context.make_array(shape_strides_array_type) shape_constant = cgutils.pack_array(builder, [context.get_constant(types.intp, newty.ndim)]) sizeof_intp = context.get_abi_sizeof(context.get_data_type(types.intp)) sizeof_intp = context.get_constant(types.intp, sizeof_intp) strides_constant = cgutils.pack_array(builder, [sizeof_intp]) shape_ary = arycls(context, builder) populate_array(shape_ary, data=shape_data, shape=shape_constant, strides=strides_constant, itemsize=sizeof_intp, meminfo=None) strides_ary = arycls(context, builder) populate_array(strides_ary, data=strides_data, shape=shape_constant, strides=strides_constant, itemsize=sizeof_intp, meminfo=None) shape = shape_ary._getvalue() strides = strides_ary._getvalue() args = [nd, shape, strides, old_itemsize, new_itemsize, context.get_constant(types.int8, new_layout)] sig = signature(types.boolean, types.intp, # nd shape_strides_array_type, # dims shape_strides_array_type, # strides types.intp, # old_itemsize types.intp, # new_itemsize types.int8, # layout ) res = context.compile_internal(builder, imp, sig, args) update_array_info(newty, ary) res = impl_ret_borrowed(context, builder, sig.return_type, res) return res @overload(np.shape) def np_shape(a): if not type_can_asarray(a): raise errors.TypingError("The argument to np.shape must be array-like") def impl(a): return np.asarray(a).shape return impl @overload(np.size) def np_size(a): if not type_can_asarray(a): raise errors.TypingError("The argument to np.size must be array-like") def impl(a): return np.asarray(a).size return impl # ------------------------------------------------------------------------------ @overload(np.unique) def np_unique(ar): def np_unique_impl(ar): def isnan(x): # instead of np.isnan because it can't handle non-numeric type return not (x == x) b = np.sort(ar.ravel()) head = list(b[:1]) tail = [ x for i, x in enumerate(b[1:]) if b[i] != x and not (isnan(b[i]) and isnan(x)) ] return np.array(head + tail) return np_unique_impl @overload(np.repeat) def np_repeat(a, repeats): # 
Implementation for repeats being a scalar is a module global function # (see below) because it might be called from the implementation below. def np_repeat_impl_repeats_array_like(a, repeats): # implementation if repeats is an array like repeats_array = np.asarray(repeats, dtype=np.int64) # if it is a singleton array, invoke the scalar implementation if repeats_array.shape[0] == 1: return np_repeat_impl_repeats_scaler(a, repeats_array[0]) if np.any(repeats_array < 0): raise ValueError("negative dimensions are not allowed") asa = np.asarray(a) aravel = asa.ravel() n = aravel.shape[0] if aravel.shape != repeats_array.shape: raise ValueError( "operands could not be broadcast together") to_return = np.empty(np.sum(repeats_array), dtype=asa.dtype) pos = 0 for i in range(n): to_return[pos : pos + repeats_array[i]] = aravel[i] pos += repeats_array[i] return to_return # type checking if isinstance(a, (types.Array, types.List, types.BaseTuple, types.Number, types.Boolean, ) ): if isinstance(repeats, types.Integer): return np_repeat_impl_repeats_scaler elif isinstance(repeats, (types.Array, types.List)): if isinstance(repeats.dtype, types.Integer): return np_repeat_impl_repeats_array_like raise errors.TypingError( "The repeats argument must be an integer " "or an array-like of integer dtype") @register_jitable def np_repeat_impl_repeats_scaler(a, repeats): if repeats < 0: raise ValueError("negative dimensions are not allowed") asa = np.asarray(a) aravel = asa.ravel() n = aravel.shape[0] if repeats == 0: return np.empty(0, dtype=asa.dtype) elif repeats == 1: return np.copy(aravel) else: to_return = np.empty(n * repeats, dtype=asa.dtype) for i in range(n): to_return[i * repeats : (i + 1) * repeats] = aravel[i] return to_return @extending.overload_method(types.Array, 'repeat') def array_repeat(a, repeats): def array_repeat_impl(a, repeats): return np.repeat(a, repeats) return array_repeat_impl @intrinsic def _intrin_get_itemsize(tyctx, dtype): """Computes the itemsize of the dtype""" sig = types.intp(dtype) def codegen(cgctx, builder, sig, llargs): llty = cgctx.get_data_type(sig.args[0].dtype) llintp = cgctx.get_data_type(sig.return_type) return llintp(cgctx.get_abi_sizeof(llty)) return sig, codegen def _compatible_view(a, dtype): pass @overload(_compatible_view, target='generic') def ol_compatible_view(a, dtype): """Determines if the array and dtype are compatible for forming a view.""" # NOTE: NumPy 1.23+ uses this check. 
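    # Illustration of the NumPy semantics this check mirrors: a C-contiguous
    # float64 array of shape (2, 3) viewed as int32 grows the last axis,
    # giving shape (2, 6), while the same view on a Fortran-ordered array
    # raises ValueError because its last axis is not contiguous.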
# Code based on: # https://github.com/numpy/numpy/blob/750ad21258cfc00663586d5a466e24f91b48edc7/numpy/core/src/multiarray/getset.c#L500-L555 # noqa: E501 def impl(a, dtype): dtype_size = _intrin_get_itemsize(dtype) if dtype_size != a.itemsize: # catch forbidden cases if a.ndim == 0: msg1 = ("Changing the dtype of a 0d array is only supported " "if the itemsize is unchanged") raise ValueError(msg1) else: # NumPy has a check here for subarray type conversion which # Numba doesn't support pass # Resize on last axis only axis = a.ndim - 1 p1 = a.shape[axis] != 1 p2 = a.size != 0 p3 = a.strides[axis] != a.itemsize if (p1 and p2 and p3): msg2 = ("To change to a dtype of a different size, the last " "axis must be contiguous") raise ValueError(msg2) if dtype_size < a.itemsize: if dtype_size == 0 or a.itemsize % dtype_size != 0: msg3 = ("When changing to a smaller dtype, its size must " "be a divisor of the size of original dtype") raise ValueError(msg3) else: newdim = a.shape[axis] * a.itemsize if newdim % dtype_size != 0: msg4 = ("When changing to a larger dtype, its size must be " "a divisor of the total size in bytes of the last " "axis of the array.") raise ValueError(msg4) return impl @lower_builtin('array.view', types.Array, types.DTypeSpec) def array_view(context, builder, sig, args): aryty = sig.args[0] retty = sig.return_type ary = make_array(aryty)(context, builder, args[0]) ret = make_array(retty)(context, builder) # Copy all fields, casting the "data" pointer appropriately fields = set(ret._datamodel._fields) for k in sorted(fields): val = getattr(ary, k) if k == 'data': ptrty = ret.data.type ret.data = builder.bitcast(val, ptrty) else: setattr(ret, k, val) if numpy_version >= (1, 23): # NumPy 1.23+ bans views using a dtype that is a different size to that # of the array when the last axis is not contiguous. For example, this # manifests at runtime when a dtype size altering view is requested # on a Fortran ordered array. 
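        # e.g. np.asfortranarray(np.ones((2, 3))).view(np.int32) raises
        # ValueError on NumPy 1.23+; the same check is replicated here
        # before the dtype change is attempted.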
tyctx = context.typing_context fnty = tyctx.resolve_value_type(_compatible_view) _compatible_view_sig = fnty.get_call_type(tyctx, (*sig.args,), {}) impl = context.get_function(fnty, _compatible_view_sig) impl(builder, args) ok = _change_dtype(context, builder, aryty, retty, ret) fail = builder.icmp_unsigned('==', ok, Constant(ok.type, 0)) with builder.if_then(fail): msg = "new type not compatible with array" context.call_conv.return_user_exc(builder, ValueError, (msg,)) res = ret._getvalue() return impl_ret_borrowed(context, builder, sig.return_type, res) # ------------------------------------------------------------------------------ # Array attributes @lower_getattr(types.Array, "dtype") def array_dtype(context, builder, typ, value): res = context.get_dummy_value() return impl_ret_untracked(context, builder, typ, res) @lower_getattr(types.Array, "shape") @lower_getattr(types.MemoryView, "shape") def array_shape(context, builder, typ, value): arrayty = make_array(typ) array = arrayty(context, builder, value) res = array.shape return impl_ret_untracked(context, builder, typ, res) @lower_getattr(types.Array, "strides") @lower_getattr(types.MemoryView, "strides") def array_strides(context, builder, typ, value): arrayty = make_array(typ) array = arrayty(context, builder, value) res = array.strides return impl_ret_untracked(context, builder, typ, res) @lower_getattr(types.Array, "ndim") @lower_getattr(types.MemoryView, "ndim") def array_ndim(context, builder, typ, value): res = context.get_constant(types.intp, typ.ndim) return impl_ret_untracked(context, builder, typ, res) @lower_getattr(types.Array, "size") def array_size(context, builder, typ, value): arrayty = make_array(typ) array = arrayty(context, builder, value) res = array.nitems return impl_ret_untracked(context, builder, typ, res) @lower_getattr(types.Array, "itemsize") @lower_getattr(types.MemoryView, "itemsize") def array_itemsize(context, builder, typ, value): arrayty = make_array(typ) array = arrayty(context, builder, value) res = array.itemsize return impl_ret_untracked(context, builder, typ, res) @lower_getattr(types.Array, "nbytes") @lower_getattr(types.MemoryView, "nbytes") def array_nbytes(context, builder, typ, value): """ nbytes = size * itemsize """ arrayty = make_array(typ) array = arrayty(context, builder, value) res = builder.mul(array.nitems, array.itemsize) return impl_ret_untracked(context, builder, typ, res) @lower_getattr(types.MemoryView, "contiguous") def array_contiguous(context, builder, typ, value): res = context.get_constant(types.boolean, typ.is_contig) return impl_ret_untracked(context, builder, typ, res) @lower_getattr(types.MemoryView, "c_contiguous") def array_c_contiguous(context, builder, typ, value): res = context.get_constant(types.boolean, typ.is_c_contig) return impl_ret_untracked(context, builder, typ, res) @lower_getattr(types.MemoryView, "f_contiguous") def array_f_contiguous(context, builder, typ, value): res = context.get_constant(types.boolean, typ.is_f_contig) return impl_ret_untracked(context, builder, typ, res) @lower_getattr(types.MemoryView, "readonly") def array_readonly(context, builder, typ, value): res = context.get_constant(types.boolean, not typ.mutable) return impl_ret_untracked(context, builder, typ, res) # array.ctypes @lower_getattr(types.Array, "ctypes") def array_ctypes(context, builder, typ, value): arrayty = make_array(typ) array = arrayty(context, builder, value) # Create new ArrayCType structure act = types.ArrayCTypes(typ) ctinfo = context.make_helper(builder, act) 
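    # The helper struct carries the data pointer plus the meminfo that keeps
    # it alive; jitted code reads the address back as an integer via the
    # "data" getattr below (e.g. arr.ctypes.data).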
    ctinfo.data = array.data
    ctinfo.meminfo = array.meminfo
    res = ctinfo._getvalue()
    return impl_ret_borrowed(context, builder, act, res)


@lower_getattr(types.ArrayCTypes, "data")
def array_ctypes_data(context, builder, typ, value):
    ctinfo = context.make_helper(builder, typ, value=value)
    res = ctinfo.data
    # Convert it to an integer
    res = builder.ptrtoint(res, context.get_value_type(types.intp))
    return impl_ret_untracked(context, builder, typ, res)


@lower_cast(types.ArrayCTypes, types.CPointer)
@lower_cast(types.ArrayCTypes, types.voidptr)
def array_ctypes_to_pointer(context, builder, fromty, toty, val):
    ctinfo = context.make_helper(builder, fromty, value=val)
    res = ctinfo.data
    res = builder.bitcast(res, context.get_value_type(toty))
    return impl_ret_untracked(context, builder, toty, res)


def _call_contiguous_check(checker, context, builder, aryty, ary):
    """Helper to invoke the contiguous checker function on an array

    Args
    ----
    checker : ``numba.np.numpy_support.is_contiguous``, or
        ``numba.np.numpy_support.is_fortran``.
    context : target context
    builder : llvm ir builder
    aryty : numba type
    ary : llvm value
    """
    ary = make_array(aryty)(context, builder, value=ary)
    tup_intp = types.UniTuple(types.intp, aryty.ndim)
    itemsize = context.get_abi_sizeof(context.get_value_type(aryty.dtype))
    check_sig = signature(types.bool_, tup_intp, tup_intp, types.intp)
    check_args = [ary.shape, ary.strides,
                  context.get_constant(types.intp, itemsize)]
    is_contig = context.compile_internal(builder, checker, check_sig,
                                         check_args)
    return is_contig


# array.flags

@lower_getattr(types.Array, "flags")
def array_flags(context, builder, typ, value):
    flagsobj = context.make_helper(builder, types.ArrayFlags(typ))
    flagsobj.parent = value
    res = flagsobj._getvalue()
    context.nrt.incref(builder, typ, value)
    return impl_ret_new_ref(context, builder, typ, res)


@lower_getattr(types.ArrayFlags, "contiguous")
@lower_getattr(types.ArrayFlags, "c_contiguous")
def array_flags_c_contiguous(context, builder, typ, value):
    if typ.array_type.layout != 'C':
        # any layout can still be contiguous
        flagsobj = context.make_helper(builder, typ, value=value)
        res = _call_contiguous_check(is_contiguous, context, builder,
                                     typ.array_type, flagsobj.parent)
    else:
        val = typ.array_type.layout == 'C'
        res = context.get_constant(types.boolean, val)
    return impl_ret_untracked(context, builder, typ, res)


@lower_getattr(types.ArrayFlags, "f_contiguous")
def array_flags_f_contiguous(context, builder, typ, value):
    if typ.array_type.layout != 'F':
        # any layout can still be contiguous
        flagsobj = context.make_helper(builder, typ, value=value)
        res = _call_contiguous_check(is_fortran, context, builder,
                                     typ.array_type, flagsobj.parent)
    else:
        layout = typ.array_type.layout
        val = layout == 'F' if typ.array_type.ndim > 1 else layout in 'CF'
        res = context.get_constant(types.boolean, val)
    return impl_ret_untracked(context, builder, typ, res)


# ------------------------------------------------------------------------------
# .real / .imag

@lower_getattr(types.Array, "real")
def array_real_part(context, builder, typ, value):
    if typ.dtype in types.complex_domain:
        return array_complex_attr(context, builder, typ, value, attr='real')
    elif typ.dtype in types.number_domain:
        # as an identity function
        return impl_ret_borrowed(context, builder, typ, value)
    else:
        raise NotImplementedError('unsupported .real for {}'.format(typ.dtype))


@lower_getattr(types.Array, "imag")
def array_imag_part(context, builder, typ, value):
    if typ.dtype in types.complex_domain:
        return array_complex_attr(context, builder,
                                  typ, value, attr='imag')
    elif typ.dtype in types.number_domain:
        # return a readonly zero array
        sig = signature(typ.copy(readonly=True), typ)
        arrtype, shapes = _parse_empty_like_args(context, builder, sig,
                                                 [value])
        ary = _empty_nd_impl(context, builder, arrtype, shapes)
        cgutils.memset(builder, ary.data,
                       builder.mul(ary.itemsize, ary.nitems), 0)
        return impl_ret_new_ref(context, builder, sig.return_type,
                                ary._getvalue())
    else:
        raise NotImplementedError('unsupported .imag for {}'.format(typ.dtype))


def array_complex_attr(context, builder, typ, value, attr):
    """
    Given a complex array, its memory layout is:

        R C R C R C
        ^   ^   ^

    (`R` indicates a float for the real part;
     `C` indicates a float for the imaginary part;
     the `^` indicates the start of each element)

    To get the real part, we can simply change the dtype and itemsize to
    that of the underlying float type.  The new layout is:

        R x R x R x
        ^   ^   ^

    (`x` indicates unused)

    A load operation will use the dtype to determine the number of bytes to
    load.

    To get the imaginary part, we shift the pointer by 1 float offset and
    change the dtype and itemsize.  The new layout is:

        x C x C x C
          ^   ^   ^
    """
    if attr not in ['real', 'imag'] or typ.dtype not in types.complex_domain:
        raise NotImplementedError("cannot get attribute `{}`".format(attr))

    arrayty = make_array(typ)
    array = arrayty(context, builder, value)

    # sizeof underlying float type
    flty = typ.dtype.underlying_float
    sizeof_flty = context.get_abi_sizeof(context.get_data_type(flty))
    itemsize = array.itemsize.type(sizeof_flty)

    # cast data pointer to float type
    llfltptrty = context.get_value_type(flty).as_pointer()
    dataptr = builder.bitcast(array.data, llfltptrty)

    # add offset
    if attr == 'imag':
        dataptr = builder.gep(dataptr, [ir.IntType(32)(1)])

    # make result
    resultty = typ.copy(dtype=flty, layout='A')
    result = make_array(resultty)(context, builder)
    repl = dict(data=dataptr, itemsize=itemsize)
    cgutils.copy_struct(result, array, repl)
    return impl_ret_borrowed(context, builder, resultty,
                             result._getvalue())


@overload_method(types.Array, 'conj')
@overload_method(types.Array, 'conjugate')
def array_conj(arr):
    def impl(arr):
        return np.conj(arr)
    return impl


# ------------------------------------------------------------------------------
# DType attribute

def dtype_type(context, builder, dtypety, dtypeval):
    # Just return a dummy opaque value
    return context.get_dummy_value()


lower_getattr(types.DType, 'type')(dtype_type)
lower_getattr(types.DType, 'kind')(dtype_type)

# ------------------------------------------------------------------------------
# static_getitem on Numba numerical types to create "array" types

@lower_builtin('static_getitem', types.NumberClass, types.Any)
def static_getitem_number_clazz(context, builder, sig, args):
    """This handles the "static_getitem" when a Numba type is subscripted
    e.g: var = typed.List.empty_list(float64[::1, :])
    It only allows this on simple numerical types. Compound types, like
    records, are not supported.
    """
    retty = sig.return_type
    if isinstance(retty, types.Array):
        # This isn't used or practically accessible, but has to exist, so just
        # put in a NULL of the right type.
        res = context.get_value_type(retty)(None)
        return impl_ret_untracked(context, builder, retty, res)
    else:
        # This should be unreachable unless the implementation on the Type
        # metaclass is changed.
msg = ("Unreachable; the definition of __getitem__ on the " "numba.types.abstract.Type metaclass should prevent access.") raise errors.LoweringError(msg) # ------------------------------------------------------------------------------ # Structured / record lookup @lower_getattr_generic(types.Array) def array_record_getattr(context, builder, typ, value, attr): """ Generic getattr() implementation for record arrays: fetch the given record member, i.e. a subarray. """ arrayty = make_array(typ) array = arrayty(context, builder, value) rectype = typ.dtype if not isinstance(rectype, types.Record): raise NotImplementedError("attribute %r of %s not defined" % (attr, typ)) dtype = rectype.typeof(attr) offset = rectype.offset(attr) if isinstance(dtype, types.NestedArray): resty = typ.copy( dtype=dtype.dtype, ndim=typ.ndim + dtype.ndim, layout='A') else: resty = typ.copy(dtype=dtype, layout='A') raryty = make_array(resty) rary = raryty(context, builder) constoffset = context.get_constant(types.intp, offset) newdataptr = cgutils.pointer_add( builder, array.data, constoffset, return_type=rary.data.type, ) if isinstance(dtype, types.NestedArray): # new shape = recarray shape + inner dimension from nestedarray shape = cgutils.unpack_tuple(builder, array.shape, typ.ndim) shape += [context.get_constant(types.intp, i) for i in dtype.shape] # new strides = recarray strides + strides of the inner nestedarray strides = cgutils.unpack_tuple(builder, array.strides, typ.ndim) strides += [context.get_constant(types.intp, i) for i in dtype.strides] # New datasize = size of elements of the nestedarray datasize = context.get_abi_sizeof(context.get_data_type(dtype.dtype)) else: # New shape, strides, and datasize match the underlying array shape = array.shape strides = array.strides datasize = context.get_abi_sizeof(context.get_data_type(dtype)) populate_array(rary, data=newdataptr, shape=shape, strides=strides, itemsize=context.get_constant(types.intp, datasize), meminfo=array.meminfo, parent=array.parent) res = rary._getvalue() return impl_ret_borrowed(context, builder, resty, res) @lower_builtin('static_getitem', types.Array, types.StringLiteral) def array_record_getitem(context, builder, sig, args): index = args[1] if not isinstance(index, str): # This will fallback to normal getitem raise NotImplementedError return array_record_getattr(context, builder, sig.args[0], args[0], index) @lower_getattr_generic(types.Record) def record_getattr(context, builder, typ, value, attr): """ Generic getattr() implementation for records: get the given record member. """ context.sentry_record_alignment(typ, attr) offset = typ.offset(attr) elemty = typ.typeof(attr) if isinstance(elemty, types.NestedArray): # Only a nested array's *data* is stored in a structured array, # so we create an array structure to point to that data. 
aryty = make_array(elemty) ary = aryty(context, builder) dtype = elemty.dtype newshape = [context.get_constant(types.intp, s) for s in elemty.shape] newstrides = [context.get_constant(types.intp, s) for s in elemty.strides] newdata = cgutils.get_record_member(builder, value, offset, context.get_data_type(dtype)) populate_array( ary, data=newdata, shape=cgutils.pack_array(builder, newshape), strides=cgutils.pack_array(builder, newstrides), itemsize=context.get_constant(types.intp, elemty.size), meminfo=None, parent=None, ) res = ary._getvalue() return impl_ret_borrowed(context, builder, typ, res) else: dptr = cgutils.get_record_member(builder, value, offset, context.get_data_type(elemty)) align = None if typ.aligned else 1 res = context.unpack_value(builder, elemty, dptr, align) return impl_ret_borrowed(context, builder, typ, res) @lower_setattr_generic(types.Record) def record_setattr(context, builder, sig, args, attr): """ Generic setattr() implementation for records: set the given record member. """ typ, valty = sig.args target, val = args context.sentry_record_alignment(typ, attr) offset = typ.offset(attr) elemty = typ.typeof(attr) if isinstance(elemty, types.NestedArray): # Copy the data from the RHS into the nested array val_struct = cgutils.create_struct_proxy(valty)(context, builder, value=args[1]) src = val_struct.data dest = cgutils.get_record_member(builder, target, offset, src.type.pointee) cgutils.memcpy(builder, dest, src, context.get_constant(types.intp, elemty.nitems)) else: # Set the given scalar record member dptr = cgutils.get_record_member(builder, target, offset, context.get_data_type(elemty)) val = context.cast(builder, val, valty, elemty) align = None if typ.aligned else 1 context.pack_value(builder, elemty, val, dptr, align=align) @lower_builtin('static_getitem', types.Record, types.StringLiteral) def record_static_getitem_str(context, builder, sig, args): """ Record.__getitem__ redirects to getattr() """ impl = context.get_getattr(sig.args[0], args[1]) return impl(context, builder, sig.args[0], args[0], args[1]) @lower_builtin('static_getitem', types.Record, types.IntegerLiteral) def record_static_getitem_int(context, builder, sig, args): """ Record.__getitem__ redirects to getattr() """ idx = sig.args[1].literal_value fields = list(sig.args[0].fields) ll_field = context.insert_const_string(builder.module, fields[idx]) impl = context.get_getattr(sig.args[0], ll_field) return impl(context, builder, sig.args[0], args[0], fields[idx]) @lower_builtin('static_setitem', types.Record, types.StringLiteral, types.Any) def record_static_setitem_str(context, builder, sig, args): """ Record.__setitem__ redirects to setattr() """ recty, _, valty = sig.args rec, idx, val = args getattr_sig = signature(sig.return_type, recty, valty) impl = context.get_setattr(idx, getattr_sig) assert impl is not None return impl(builder, (rec, val)) @lower_builtin('static_setitem', types.Record, types.IntegerLiteral, types.Any) def record_static_setitem_int(context, builder, sig, args): """ Record.__setitem__ redirects to setattr() """ recty, _, valty = sig.args rec, idx, val = args getattr_sig = signature(sig.return_type, recty, valty) fields = list(sig.args[0].fields) impl = context.get_setattr(fields[idx], getattr_sig) assert impl is not None return impl(builder, (rec, val)) # ------------------------------------------------------------------------------ # Constant arrays and records @lower_constant(types.Array) def constant_array(context, builder, ty, pyval): """ Create a constant array 
(mechanism is target-dependent). """ return context.make_constant_array(builder, ty, pyval) @lower_constant(types.Record) def constant_record(context, builder, ty, pyval): """ Create a record constant as a stack-allocated array of bytes. """ lty = ir.ArrayType(ir.IntType(8), pyval.nbytes) val = lty(bytearray(pyval.tobytes())) return cgutils.alloca_once_value(builder, val) @lower_constant(types.Bytes) def constant_bytes(context, builder, ty, pyval): """ Create a constant array from bytes (mechanism is target-dependent). """ buf = np.array(bytearray(pyval), dtype=np.uint8) return context.make_constant_array(builder, ty, buf) # ------------------------------------------------------------------------------ # Comparisons @lower_builtin(operator.is_, types.Array, types.Array) def array_is(context, builder, sig, args): aty, bty = sig.args if aty != bty: return cgutils.false_bit def array_is_impl(a, b): return (a.shape == b.shape and a.strides == b.strides and a.ctypes.data == b.ctypes.data) return context.compile_internal(builder, array_is_impl, sig, args) # ------------------------------------------------------------------------------ # Hash @overload_attribute(types.Array, "__hash__") def ol_array_hash(arr): return lambda arr: None # ------------------------------------------------------------------------------ # builtin `np.flat` implementation def make_array_flat_cls(flatiterty): """ Return the Structure representation of the given *flatiterty* (an instance of types.NumpyFlatType). """ return _make_flattening_iter_cls(flatiterty, 'flat') def make_array_ndenumerate_cls(nditerty): """ Return the Structure representation of the given *nditerty* (an instance of types.NumpyNdEnumerateType). """ return _make_flattening_iter_cls(nditerty, 'ndenumerate') def _increment_indices(context, builder, ndim, shape, indices, end_flag=None, loop_continue=None, loop_break=None): zero = context.get_constant(types.intp, 0) bbend = builder.append_basic_block('end_increment') if end_flag is not None: builder.store(cgutils.false_byte, end_flag) for dim in reversed(range(ndim)): idxptr = cgutils.gep_inbounds(builder, indices, dim) idx = cgutils.increment_index(builder, builder.load(idxptr)) count = shape[dim] in_bounds = builder.icmp_signed('<', idx, count) with cgutils.if_likely(builder, in_bounds): # New index is still in bounds builder.store(idx, idxptr) if loop_continue is not None: loop_continue(dim) builder.branch(bbend) # Index out of bounds => reset it and proceed it to outer index builder.store(zero, idxptr) if loop_break is not None: loop_break(dim) if end_flag is not None: builder.store(cgutils.true_byte, end_flag) builder.branch(bbend) builder.position_at_end(bbend) def _increment_indices_array(context, builder, arrty, arr, indices, end_flag=None): shape = cgutils.unpack_tuple(builder, arr.shape, arrty.ndim) _increment_indices(context, builder, arrty.ndim, shape, indices, end_flag) def make_nditer_cls(nditerty): """ Return the Structure representation of the given *nditerty* (an instance of types.NumpyNdIterType). """ ndim = nditerty.ndim layout = nditerty.layout narrays = len(nditerty.arrays) nshapes = ndim if nditerty.need_shaped_indexing else 1 class BaseSubIter(object): """ Base class for sub-iterators of a nditer() instance. 
""" def __init__(self, nditer, member_name, start_dim, end_dim): self.nditer = nditer self.member_name = member_name self.start_dim = start_dim self.end_dim = end_dim self.ndim = end_dim - start_dim def set_member_ptr(self, ptr): setattr(self.nditer, self.member_name, ptr) @functools.cached_property def member_ptr(self): return getattr(self.nditer, self.member_name) def init_specific(self, context, builder): pass def loop_continue(self, context, builder, logical_dim): pass def loop_break(self, context, builder, logical_dim): pass class FlatSubIter(BaseSubIter): """ Sub-iterator walking a contiguous array in physical order, with support for broadcasting (the index is reset on the outer dimension). """ def init_specific(self, context, builder): zero = context.get_constant(types.intp, 0) self.set_member_ptr(cgutils.alloca_once_value(builder, zero)) def compute_pointer(self, context, builder, indices, arrty, arr): index = builder.load(self.member_ptr) return builder.gep(arr.data, [index]) def loop_continue(self, context, builder, logical_dim): if logical_dim == self.ndim - 1: # Only increment index inside innermost logical dimension index = builder.load(self.member_ptr) index = cgutils.increment_index(builder, index) builder.store(index, self.member_ptr) def loop_break(self, context, builder, logical_dim): if logical_dim == 0: # At the exit of outermost logical dimension, reset index zero = context.get_constant(types.intp, 0) builder.store(zero, self.member_ptr) elif logical_dim == self.ndim - 1: # Inside innermost logical dimension, increment index index = builder.load(self.member_ptr) index = cgutils.increment_index(builder, index) builder.store(index, self.member_ptr) class TrivialFlatSubIter(BaseSubIter): """ Sub-iterator walking a contiguous array in physical order, *without* support for broadcasting. """ def init_specific(self, context, builder): assert not nditerty.need_shaped_indexing def compute_pointer(self, context, builder, indices, arrty, arr): assert len(indices) <= 1, len(indices) return builder.gep(arr.data, indices) class IndexedSubIter(BaseSubIter): """ Sub-iterator walking an array in logical order. """ def compute_pointer(self, context, builder, indices, arrty, arr): assert len(indices) == self.ndim return cgutils.get_item_pointer(context, builder, arrty, arr, indices, wraparound=False) class ZeroDimSubIter(BaseSubIter): """ Sub-iterator "walking" a 0-d array. """ def compute_pointer(self, context, builder, indices, arrty, arr): return arr.data class ScalarSubIter(BaseSubIter): """ Sub-iterator "walking" a scalar value. """ def compute_pointer(self, context, builder, indices, arrty, arr): return arr class NdIter(cgutils.create_struct_proxy(nditerty)): """ .nditer() implementation. Note: 'F' layout means the shape is iterated in reverse logical order, so indices and shapes arrays have to be reversed as well. """ @functools.cached_property def subiters(self): l = [] factories = {'flat': FlatSubIter if nditerty.need_shaped_indexing else TrivialFlatSubIter, 'indexed': IndexedSubIter, '0d': ZeroDimSubIter, 'scalar': ScalarSubIter, } for i, sub in enumerate(nditerty.indexers): kind, start_dim, end_dim, _ = sub member_name = 'index%d' % i factory = factories[kind] l.append(factory(self, member_name, start_dim, end_dim)) return l def init_specific(self, context, builder, arrtys, arrays): """ Initialize the nditer() instance for the specific array inputs. 
""" zero = context.get_constant(types.intp, 0) # Store inputs self.arrays = context.make_tuple(builder, types.Tuple(arrtys), arrays) # Create slots for scalars for i, ty in enumerate(arrtys): if not isinstance(ty, types.Array): member_name = 'scalar%d' % i # XXX as_data()? slot = cgutils.alloca_once_value(builder, arrays[i]) setattr(self, member_name, slot) arrays = self._arrays_or_scalars(context, builder, arrtys, arrays) # Extract iterator shape (the shape of the most-dimensional input) main_shape_ty = types.UniTuple(types.intp, ndim) main_shape = None main_nitems = None for i, arrty in enumerate(arrtys): if isinstance(arrty, types.Array) and arrty.ndim == ndim: main_shape = arrays[i].shape main_nitems = arrays[i].nitems break else: # Only scalar inputs => synthesize a dummy shape assert ndim == 0 main_shape = context.make_tuple(builder, main_shape_ty, ()) main_nitems = context.get_constant(types.intp, 1) # Validate shapes of array inputs def check_shape(shape, main_shape): n = len(shape) for i in range(n): if shape[i] != main_shape[len(main_shape) - n + i]: raise ValueError("nditer(): operands could not be " "broadcast together") for arrty, arr in zip(arrtys, arrays): if isinstance(arrty, types.Array) and arrty.ndim > 0: sig = signature(types.none, types.UniTuple(types.intp, arrty.ndim), main_shape_ty) context.compile_internal(builder, check_shape, sig, (arr.shape, main_shape)) # Compute shape and size shapes = cgutils.unpack_tuple(builder, main_shape) if layout == 'F': shapes = shapes[::-1] # If shape is empty, mark iterator exhausted shape_is_empty = builder.icmp_signed('==', main_nitems, zero) exhausted = builder.select(shape_is_empty, cgutils.true_byte, cgutils.false_byte) if not nditerty.need_shaped_indexing: # Flatten shape to make iteration faster on small innermost # dimensions (e.g. a (100000, 3) shape) shapes = (main_nitems,) assert len(shapes) == nshapes indices = cgutils.alloca_once(builder, zero.type, size=nshapes) for dim in range(nshapes): idxptr = cgutils.gep_inbounds(builder, indices, dim) builder.store(zero, idxptr) self.indices = indices self.shape = cgutils.pack_array(builder, shapes, zero.type) self.exhausted = cgutils.alloca_once_value(builder, exhausted) # Initialize subiterators for subiter in self.subiters: subiter.init_specific(context, builder) def iternext_specific(self, context, builder, result): """ Compute next iteration of the nditer() instance. 
""" bbend = builder.append_basic_block('end') # Branch early if exhausted exhausted = cgutils.as_bool_bit(builder, builder.load(self.exhausted)) with cgutils.if_unlikely(builder, exhausted): result.set_valid(False) builder.branch(bbend) arrtys = nditerty.arrays arrays = cgutils.unpack_tuple(builder, self.arrays) arrays = self._arrays_or_scalars(context, builder, arrtys, arrays) indices = self.indices # Compute iterated results result.set_valid(True) views = self._make_views(context, builder, indices, arrtys, arrays) views = [v._getvalue() for v in views] if len(views) == 1: result.yield_(views[0]) else: result.yield_(context.make_tuple(builder, nditerty.yield_type, views)) shape = cgutils.unpack_tuple(builder, self.shape) _increment_indices(context, builder, len(shape), shape, indices, self.exhausted, functools.partial(self._loop_continue, context, builder), functools.partial(self._loop_break, context, builder), ) builder.branch(bbend) builder.position_at_end(bbend) def _loop_continue(self, context, builder, dim): for sub in self.subiters: if sub.start_dim <= dim < sub.end_dim: sub.loop_continue(context, builder, dim - sub.start_dim) def _loop_break(self, context, builder, dim): for sub in self.subiters: if sub.start_dim <= dim < sub.end_dim: sub.loop_break(context, builder, dim - sub.start_dim) def _make_views(self, context, builder, indices, arrtys, arrays): """ Compute the views to be yielded. """ views = [None] * narrays indexers = nditerty.indexers subiters = self.subiters rettys = nditerty.yield_type if isinstance(rettys, types.BaseTuple): rettys = list(rettys) else: rettys = [rettys] indices = [builder.load(cgutils.gep_inbounds(builder, indices, i)) for i in range(nshapes)] for sub, subiter in zip(indexers, subiters): _, _, _, array_indices = sub sub_indices = indices[subiter.start_dim:subiter.end_dim] if layout == 'F': sub_indices = sub_indices[::-1] for i in array_indices: assert views[i] is None views[i] = self._make_view(context, builder, sub_indices, rettys[i], arrtys[i], arrays[i], subiter) assert all(v for v in views) return views def _make_view(self, context, builder, indices, retty, arrty, arr, subiter): """ Compute a 0d view for a given input array. """ assert isinstance(retty, types.Array) and retty.ndim == 0 ptr = subiter.compute_pointer(context, builder, indices, arrty, arr) view = context.make_array(retty)(context, builder) itemsize = get_itemsize(context, retty) shape = context.make_tuple(builder, types.UniTuple(types.intp, 0), ()) strides = context.make_tuple(builder, types.UniTuple(types.intp, 0), ()) # HACK: meminfo=None avoids expensive refcounting operations # on ephemeral views populate_array(view, ptr, shape, strides, itemsize, meminfo=None) return view def _arrays_or_scalars(self, context, builder, arrtys, arrays): # Return a list of either array structures or pointers to # scalar slots l = [] for i, (arrty, arr) in enumerate(zip(arrtys, arrays)): if isinstance(arrty, types.Array): l.append(context.make_array(arrty)(context, builder, value=arr)) else: l.append(getattr(self, "scalar%d" % i)) return l return NdIter def make_ndindex_cls(nditerty): """ Return the Structure representation of the given *nditerty* (an instance of types.NumpyNdIndexType). """ ndim = nditerty.ndim class NdIndexIter(cgutils.create_struct_proxy(nditerty)): """ .ndindex() implementation. 
""" def init_specific(self, context, builder, shapes): zero = context.get_constant(types.intp, 0) indices = cgutils.alloca_once(builder, zero.type, size=context.get_constant(types.intp, ndim)) exhausted = cgutils.alloca_once_value(builder, cgutils.false_byte) for dim in range(ndim): idxptr = cgutils.gep_inbounds(builder, indices, dim) builder.store(zero, idxptr) # 0-sized dimensions really indicate an empty array, # but we have to catch that condition early to avoid # a bug inside the iteration logic. dim_size = shapes[dim] dim_is_empty = builder.icmp_unsigned('==', dim_size, zero) with cgutils.if_unlikely(builder, dim_is_empty): builder.store(cgutils.true_byte, exhausted) self.indices = indices self.exhausted = exhausted self.shape = cgutils.pack_array(builder, shapes, zero.type) def iternext_specific(self, context, builder, result): zero = context.get_constant(types.intp, 0) bbend = builder.append_basic_block('end') exhausted = cgutils.as_bool_bit(builder, builder.load(self.exhausted)) with cgutils.if_unlikely(builder, exhausted): result.set_valid(False) builder.branch(bbend) indices = [builder.load(cgutils.gep_inbounds(builder, self.indices, dim)) for dim in range(ndim)] for load in indices: mark_positive(builder, load) result.yield_(cgutils.pack_array(builder, indices, zero.type)) result.set_valid(True) shape = cgutils.unpack_tuple(builder, self.shape, ndim) _increment_indices(context, builder, ndim, shape, self.indices, self.exhausted) builder.branch(bbend) builder.position_at_end(bbend) return NdIndexIter def _make_flattening_iter_cls(flatiterty, kind): assert kind in ('flat', 'ndenumerate') array_type = flatiterty.array_type if array_type.layout == 'C': class CContiguousFlatIter(cgutils.create_struct_proxy(flatiterty)): """ .flat() / .ndenumerate() implementation for C-contiguous arrays. """ def init_specific(self, context, builder, arrty, arr): zero = context.get_constant(types.intp, 0) self.index = cgutils.alloca_once_value(builder, zero) # We can't trust strides[-1] to always contain the right # step value, see # http://docs.scipy.org/doc/numpy-dev/release.html#npy-relaxed-strides-checking # noqa: E501 self.stride = arr.itemsize if kind == 'ndenumerate': # Zero-initialize the indices array. indices = cgutils.alloca_once( builder, zero.type, size=context.get_constant(types.intp, arrty.ndim)) for dim in range(arrty.ndim): idxptr = cgutils.gep_inbounds(builder, indices, dim) builder.store(zero, idxptr) self.indices = indices # NOTE: Using gep() instead of explicit pointer addition helps # LLVM vectorize the loop (since the stride is known and # constant). This is not possible in the non-contiguous case, # where the strides are unknown at compile-time. 
def iternext_specific(self, context, builder, arrty, arr, result): ndim = arrty.ndim nitems = arr.nitems index = builder.load(self.index) is_valid = builder.icmp_signed('<', index, nitems) result.set_valid(is_valid) with cgutils.if_likely(builder, is_valid): ptr = builder.gep(arr.data, [index]) value = load_item(context, builder, arrty, ptr) if kind == 'flat': result.yield_(value) else: # ndenumerate(): fetch and increment indices indices = self.indices idxvals = [builder.load(cgutils.gep_inbounds(builder, indices, dim)) for dim in range(ndim)] idxtuple = cgutils.pack_array(builder, idxvals, ty=context.get_data_type( types.intp)) result.yield_( cgutils.make_anonymous_struct(builder, [idxtuple, value])) _increment_indices_array(context, builder, arrty, arr, indices) index = cgutils.increment_index(builder, index) builder.store(index, self.index) def getitem(self, context, builder, arrty, arr, index): ptr = builder.gep(arr.data, [index]) return load_item(context, builder, arrty, ptr) def setitem(self, context, builder, arrty, arr, index, value): ptr = builder.gep(arr.data, [index]) store_item(context, builder, arrty, value, ptr) return CContiguousFlatIter else: class FlatIter(cgutils.create_struct_proxy(flatiterty)): """ Generic .flat() / .ndenumerate() implementation for non-contiguous arrays. It keeps track of pointers along each dimension in order to minimize computations. """ def init_specific(self, context, builder, arrty, arr): zero = context.get_constant(types.intp, 0) data = arr.data ndim = arrty.ndim shapes = cgutils.unpack_tuple(builder, arr.shape, ndim) indices = cgutils.alloca_once( builder, zero.type, size=context.get_constant(types.intp, arrty.ndim)) pointers = cgutils.alloca_once( builder, data.type, size=context.get_constant(types.intp, arrty.ndim)) exhausted = cgutils.alloca_once_value(builder, cgutils.false_byte) # Initialize indices and pointers with their start values. for dim in range(ndim): idxptr = cgutils.gep_inbounds(builder, indices, dim) ptrptr = cgutils.gep_inbounds(builder, pointers, dim) builder.store(data, ptrptr) builder.store(zero, idxptr) # 0-sized dimensions really indicate an empty array, # but we have to catch that condition early to avoid # a bug inside the iteration logic (see issue #846). 
dim_size = shapes[dim] dim_is_empty = builder.icmp_unsigned('==', dim_size, zero) with cgutils.if_unlikely(builder, dim_is_empty): builder.store(cgutils.true_byte, exhausted) self.indices = indices self.pointers = pointers self.exhausted = exhausted def iternext_specific(self, context, builder, arrty, arr, result): ndim = arrty.ndim shapes = cgutils.unpack_tuple(builder, arr.shape, ndim) strides = cgutils.unpack_tuple(builder, arr.strides, ndim) indices = self.indices pointers = self.pointers zero = context.get_constant(types.intp, 0) bbend = builder.append_basic_block('end') # Catch already computed iterator exhaustion is_exhausted = cgutils.as_bool_bit( builder, builder.load(self.exhausted)) with cgutils.if_unlikely(builder, is_exhausted): result.set_valid(False) builder.branch(bbend) result.set_valid(True) # Current pointer inside last dimension last_ptr = cgutils.gep_inbounds(builder, pointers, ndim - 1) ptr = builder.load(last_ptr) value = load_item(context, builder, arrty, ptr) if kind == 'flat': result.yield_(value) else: # ndenumerate() => yield (indices, value) idxvals = [builder.load(cgutils.gep_inbounds(builder, indices, dim)) for dim in range(ndim)] idxtuple = cgutils.pack_array(builder, idxvals) result.yield_( cgutils.make_anonymous_struct(builder, [idxtuple, value])) # Update indices and pointers by walking from inner # dimension to outer. for dim in reversed(range(ndim)): idxptr = cgutils.gep_inbounds(builder, indices, dim) idx = cgutils.increment_index(builder, builder.load(idxptr)) count = shapes[dim] stride = strides[dim] in_bounds = builder.icmp_signed('<', idx, count) with cgutils.if_likely(builder, in_bounds): # Index is valid => pointer can simply be incremented. builder.store(idx, idxptr) ptrptr = cgutils.gep_inbounds(builder, pointers, dim) ptr = builder.load(ptrptr) ptr = cgutils.pointer_add(builder, ptr, stride) builder.store(ptr, ptrptr) # Reset pointers in inner dimensions for inner_dim in range(dim + 1, ndim): ptrptr = cgutils.gep_inbounds(builder, pointers, inner_dim) builder.store(ptr, ptrptr) builder.branch(bbend) # Reset index and continue with next dimension builder.store(zero, idxptr) # End of array builder.store(cgutils.true_byte, self.exhausted) builder.branch(bbend) builder.position_at_end(bbend) def _ptr_for_index(self, context, builder, arrty, arr, index): ndim = arrty.ndim shapes = cgutils.unpack_tuple(builder, arr.shape, count=ndim) strides = cgutils.unpack_tuple(builder, arr.strides, count=ndim) # First convert the flattened index into a regular n-dim index indices = [] for dim in reversed(range(ndim)): indices.append(builder.urem(index, shapes[dim])) index = builder.udiv(index, shapes[dim]) indices.reverse() ptr = cgutils.get_item_pointer2(context, builder, arr.data, shapes, strides, arrty.layout, indices) return ptr def getitem(self, context, builder, arrty, arr, index): ptr = self._ptr_for_index(context, builder, arrty, arr, index) return load_item(context, builder, arrty, ptr) def setitem(self, context, builder, arrty, arr, index, value): ptr = self._ptr_for_index(context, builder, arrty, arr, index) store_item(context, builder, arrty, value, ptr) return FlatIter @lower_getattr(types.Array, "flat") def make_array_flatiter(context, builder, arrty, arr): flatitercls = make_array_flat_cls(types.NumpyFlatType(arrty)) flatiter = flatitercls(context, builder) flatiter.array = arr arrcls = context.make_array(arrty) arr = arrcls(context, builder, ref=flatiter._get_ptr_by_name('array')) flatiter.init_specific(context, builder, arrty, arr) res = 
flatiter._getvalue() return impl_ret_borrowed(context, builder, types.NumpyFlatType(arrty), res) @lower_builtin('iternext', types.NumpyFlatType) @iternext_impl(RefType.BORROWED) def iternext_numpy_flatiter(context, builder, sig, args, result): [flatiterty] = sig.args [flatiter] = args flatitercls = make_array_flat_cls(flatiterty) flatiter = flatitercls(context, builder, value=flatiter) arrty = flatiterty.array_type arrcls = context.make_array(arrty) arr = arrcls(context, builder, value=flatiter.array) flatiter.iternext_specific(context, builder, arrty, arr, result) @lower_builtin(operator.getitem, types.NumpyFlatType, types.Integer) def iternext_numpy_getitem(context, builder, sig, args): flatiterty = sig.args[0] flatiter, index = args flatitercls = make_array_flat_cls(flatiterty) flatiter = flatitercls(context, builder, value=flatiter) arrty = flatiterty.array_type arrcls = context.make_array(arrty) arr = arrcls(context, builder, value=flatiter.array) res = flatiter.getitem(context, builder, arrty, arr, index) return impl_ret_borrowed(context, builder, sig.return_type, res) @lower_builtin(operator.setitem, types.NumpyFlatType, types.Integer, types.Any) def iternext_numpy_getitem_any(context, builder, sig, args): flatiterty = sig.args[0] flatiter, index, value = args flatitercls = make_array_flat_cls(flatiterty) flatiter = flatitercls(context, builder, value=flatiter) arrty = flatiterty.array_type arrcls = context.make_array(arrty) arr = arrcls(context, builder, value=flatiter.array) flatiter.setitem(context, builder, arrty, arr, index, value) return context.get_dummy_value() @lower_builtin(len, types.NumpyFlatType) def iternext_numpy_getitem_flat(context, builder, sig, args): flatiterty = sig.args[0] flatitercls = make_array_flat_cls(flatiterty) flatiter = flatitercls(context, builder, value=args[0]) arrcls = context.make_array(flatiterty.array_type) arr = arrcls(context, builder, value=flatiter.array) return arr.nitems @lower_builtin(np.ndenumerate, types.Array) def make_array_ndenumerate(context, builder, sig, args): arrty, = sig.args arr, = args nditercls = make_array_ndenumerate_cls(types.NumpyNdEnumerateType(arrty)) nditer = nditercls(context, builder) nditer.array = arr arrcls = context.make_array(arrty) arr = arrcls(context, builder, ref=nditer._get_ptr_by_name('array')) nditer.init_specific(context, builder, arrty, arr) res = nditer._getvalue() return impl_ret_borrowed(context, builder, sig.return_type, res) @lower_builtin('iternext', types.NumpyNdEnumerateType) @iternext_impl(RefType.BORROWED) def iternext_numpy_nditer(context, builder, sig, args, result): [nditerty] = sig.args [nditer] = args nditercls = make_array_ndenumerate_cls(nditerty) nditer = nditercls(context, builder, value=nditer) arrty = nditerty.array_type arrcls = context.make_array(arrty) arr = arrcls(context, builder, value=nditer.array) nditer.iternext_specific(context, builder, arrty, arr, result) @lower_builtin(pndindex, types.VarArg(types.Integer)) @lower_builtin(np.ndindex, types.VarArg(types.Integer)) def make_array_ndindex(context, builder, sig, args): """ndindex(*shape)""" shape = [context.cast(builder, arg, argty, types.intp) for argty, arg in zip(sig.args, args)] nditercls = make_ndindex_cls(types.NumpyNdIndexType(len(shape))) nditer = nditercls(context, builder) nditer.init_specific(context, builder, shape) res = nditer._getvalue() return impl_ret_borrowed(context, builder, sig.return_type, res) @lower_builtin(pndindex, types.BaseTuple) @lower_builtin(np.ndindex, types.BaseTuple) def 
make_array_ndindex_tuple(context, builder, sig, args): """ndindex(shape)""" ndim = sig.return_type.ndim if ndim > 0: idxty = sig.args[0].dtype tup = args[0] shape = cgutils.unpack_tuple(builder, tup, ndim) shape = [context.cast(builder, idx, idxty, types.intp) for idx in shape] else: shape = [] nditercls = make_ndindex_cls(types.NumpyNdIndexType(len(shape))) nditer = nditercls(context, builder) nditer.init_specific(context, builder, shape) res = nditer._getvalue() return impl_ret_borrowed(context, builder, sig.return_type, res) @lower_builtin('iternext', types.NumpyNdIndexType) @iternext_impl(RefType.BORROWED) def iternext_numpy_ndindex(context, builder, sig, args, result): [nditerty] = sig.args [nditer] = args nditercls = make_ndindex_cls(nditerty) nditer = nditercls(context, builder, value=nditer) nditer.iternext_specific(context, builder, result) @lower_builtin(np.nditer, types.Any) def make_array_nditer(context, builder, sig, args): """ nditer(...) """ nditerty = sig.return_type arrtys = nditerty.arrays if isinstance(sig.args[0], types.BaseTuple): arrays = cgutils.unpack_tuple(builder, args[0]) else: arrays = [args[0]] nditer = make_nditer_cls(nditerty)(context, builder) nditer.init_specific(context, builder, arrtys, arrays) res = nditer._getvalue() return impl_ret_borrowed(context, builder, nditerty, res) @lower_builtin('iternext', types.NumpyNdIterType) @iternext_impl(RefType.BORROWED) def iternext_numpy_nditer2(context, builder, sig, args, result): [nditerty] = sig.args [nditer] = args nditer = make_nditer_cls(nditerty)(context, builder, value=nditer) nditer.iternext_specific(context, builder, result) @lower_builtin(operator.eq, types.DType, types.DType) def dtype_eq_impl(context, builder, sig, args): arg1, arg2 = sig.args res = ir.Constant(ir.IntType(1), int(arg1 == arg2)) return impl_ret_untracked(context, builder, sig.return_type, res) # ------------------------------------------------------------------------------ # Numpy array constructors def _empty_nd_impl(context, builder, arrtype, shapes): """Utility function used for allocating a new array during LLVM code generation (lowering). Given a target context, builder, array type, and a tuple or list of lowered dimension sizes, returns a LLVM value pointing at a Numba runtime allocated array. 
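    For example, for a C-ordered float64 array with shapes (m, n), the
    strides computed below are (n * 8, 8).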
""" arycls = make_array(arrtype) ary = arycls(context, builder) datatype = context.get_data_type(arrtype.dtype) itemsize = context.get_constant(types.intp, get_itemsize(context, arrtype)) # compute array length arrlen = context.get_constant(types.intp, 1) overflow = Constant(ir.IntType(1), 0) for s in shapes: arrlen_mult = builder.smul_with_overflow(arrlen, s) arrlen = builder.extract_value(arrlen_mult, 0) overflow = builder.or_( overflow, builder.extract_value(arrlen_mult, 1) ) if arrtype.ndim == 0: strides = () elif arrtype.layout == 'C': strides = [itemsize] for dimension_size in reversed(shapes[1:]): strides.append(builder.mul(strides[-1], dimension_size)) strides = tuple(reversed(strides)) elif arrtype.layout == 'F': strides = [itemsize] for dimension_size in shapes[:-1]: strides.append(builder.mul(strides[-1], dimension_size)) strides = tuple(strides) else: raise NotImplementedError( "Don't know how to allocate array with layout '{0}'.".format( arrtype.layout)) # Check overflow, numpy also does this after checking order allocsize_mult = builder.smul_with_overflow(arrlen, itemsize) allocsize = builder.extract_value(allocsize_mult, 0) overflow = builder.or_(overflow, builder.extract_value(allocsize_mult, 1)) with builder.if_then(overflow, likely=False): # Raise same error as numpy, see: # https://github.com/numpy/numpy/blob/2a488fe76a0f732dc418d03b452caace161673da/numpy/core/src/multiarray/ctors.c#L1095-L1101 # noqa: E501 context.call_conv.return_user_exc( builder, ValueError, ("array is too big; `arr.size * arr.dtype.itemsize` is larger than" " the maximum possible size.",) ) dtype = arrtype.dtype align_val = context.get_preferred_array_alignment(dtype) align = context.get_constant(types.uint32, align_val) args = (context.get_dummy_value(), allocsize, align) mip = types.MemInfoPointer(types.voidptr) arytypeclass = types.TypeRef(type(arrtype)) argtypes = signature(mip, arytypeclass, types.intp, types.uint32) meminfo = context.compile_internal(builder, _call_allocator, argtypes, args) data = context.nrt.meminfo_data(builder, meminfo) intp_t = context.get_value_type(types.intp) shape_array = cgutils.pack_array(builder, shapes, ty=intp_t) strides_array = cgutils.pack_array(builder, strides, ty=intp_t) populate_array(ary, data=builder.bitcast(data, datatype.as_pointer()), shape=shape_array, strides=strides_array, itemsize=itemsize, meminfo=meminfo) return ary @overload_classmethod(types.Array, "_allocate", target="CPU") def _ol_array_allocate(cls, allocsize, align): """Implements a Numba-only default target (cpu) classmethod on the array type. """ def impl(cls, allocsize, align): return intrin_alloc(allocsize, align) return impl def _call_allocator(arrtype, size, align): """Trampoline to call the intrinsic used for allocation """ return arrtype._allocate(size, align) @intrinsic def intrin_alloc(typingctx, allocsize, align): """Intrinsic to call into the allocator for Array """ def codegen(context, builder, signature, args): [allocsize, align] = args meminfo = context.nrt.meminfo_alloc_aligned(builder, allocsize, align) return meminfo mip = types.MemInfoPointer(types.voidptr) # return untyped pointer sig = signature(mip, allocsize, align) return sig, codegen def _parse_shape(context, builder, ty, val): """ Parse the shape argument to an array constructor. 
""" def safecast_intp(context, builder, src_t, src): """Cast src to intp only if value can be maintained""" intp_t = context.get_value_type(types.intp) intp_width = intp_t.width intp_ir = ir.IntType(intp_width) maxval = Constant(intp_ir, ((1 << intp_width - 1) - 1)) if src_t.width < intp_width: res = builder.sext(src, intp_ir) elif src_t.width >= intp_width: is_larger = builder.icmp_signed(">", src, maxval) with builder.if_then(is_larger, likely=False): context.call_conv.return_user_exc( builder, ValueError, ("Cannot safely convert value to intp",) ) if src_t.width > intp_width: res = builder.trunc(src, intp_ir) else: res = src return res if isinstance(ty, types.Integer): ndim = 1 passed_shapes = [context.cast(builder, val, ty, types.intp)] else: assert isinstance(ty, types.BaseTuple) ndim = ty.count passed_shapes = cgutils.unpack_tuple(builder, val, count=ndim) shapes = [] for s in passed_shapes: shapes.append(safecast_intp(context, builder, s.type, s)) zero = context.get_constant_generic(builder, types.intp, 0) for dim in range(ndim): is_neg = builder.icmp_signed('<', shapes[dim], zero) with cgutils.if_unlikely(builder, is_neg): context.call_conv.return_user_exc( builder, ValueError, ("negative dimensions not allowed",) ) return shapes def _parse_empty_args(context, builder, sig, args): """ Parse the arguments of a np.empty(), np.zeros() or np.ones() call. """ arrshapetype = sig.args[0] arrshape = args[0] arrtype = sig.return_type return arrtype, _parse_shape(context, builder, arrshapetype, arrshape) def _parse_empty_like_args(context, builder, sig, args): """ Parse the arguments of a np.empty_like(), np.zeros_like() or np.ones_like() call. """ arytype = sig.args[0] if isinstance(arytype, types.Array): ary = make_array(arytype)(context, builder, value=args[0]) shapes = cgutils.unpack_tuple(builder, ary.shape, count=arytype.ndim) return sig.return_type, shapes else: return sig.return_type, () def _check_const_str_dtype(fname, dtype): if isinstance(dtype, types.UnicodeType): msg = f"If np.{fname} dtype is a string it must be a string constant." 
raise errors.TypingError(msg) @intrinsic def numpy_empty_nd(tyctx, ty_shape, ty_dtype, ty_retty_ref): ty_retty = ty_retty_ref.instance_type sig = ty_retty(ty_shape, ty_dtype, ty_retty_ref) def codegen(cgctx, builder, sig, llargs): arrtype, shapes = _parse_empty_args(cgctx, builder, sig, llargs) ary = _empty_nd_impl(cgctx, builder, arrtype, shapes) return ary._getvalue() return sig, codegen @overload(np.empty) def ol_np_empty(shape, dtype=float): _check_const_str_dtype("empty", dtype) if (dtype is float or (isinstance(dtype, types.Function) and dtype.typing_key is float) or is_nonelike(dtype)): #default nb_dtype = types.double else: nb_dtype = ty_parse_dtype(dtype) ndim = ty_parse_shape(shape) if nb_dtype is not None and ndim is not None: retty = types.Array(dtype=nb_dtype, ndim=ndim, layout='C') def impl(shape, dtype=float): return numpy_empty_nd(shape, dtype, retty) return impl else: msg = f"Cannot parse input types to function np.empty({shape}, {dtype})" raise errors.TypingError(msg) @intrinsic def numpy_empty_like_nd(tyctx, ty_prototype, ty_dtype, ty_retty_ref): ty_retty = ty_retty_ref.instance_type sig = ty_retty(ty_prototype, ty_dtype, ty_retty_ref) def codegen(cgctx, builder, sig, llargs): arrtype, shapes = _parse_empty_like_args(cgctx, builder, sig, llargs) ary = _empty_nd_impl(cgctx, builder, arrtype, shapes) return ary._getvalue() return sig, codegen @overload(np.empty_like) def ol_np_empty_like(arr, dtype=None): _check_const_str_dtype("empty_like", dtype) if not is_nonelike(dtype): nb_dtype = ty_parse_dtype(dtype) elif isinstance(arr, types.Array): nb_dtype = arr.dtype else: nb_dtype = arr if nb_dtype is not None: if isinstance(arr, types.Array): layout = arr.layout if arr.layout != 'A' else 'C' retty = arr.copy(dtype=nb_dtype, layout=layout, readonly=False) else: retty = types.Array(nb_dtype, 0, 'C') else: msg = ("Cannot parse input types to function " f"np.empty_like({arr}, {dtype})") raise errors.TypingError(msg) def impl(arr, dtype=None): return numpy_empty_like_nd(arr, dtype, retty) return impl @intrinsic def _zero_fill_array_method(tyctx, self): sig = types.none(self) def codegen(cgctx, builder, sig, llargs): ary = make_array(sig.args[0])(cgctx, builder, llargs[0]) cgutils.memset(builder, ary.data, builder.mul(ary.itemsize, ary.nitems), 0) return sig, codegen @overload_method(types.Array, '_zero_fill') def ol_array_zero_fill(self): """Adds a `._zero_fill` method to zero fill an array using memset.""" def impl(self): _zero_fill_array_method(self) return impl @overload(np.zeros) def ol_np_zeros(shape, dtype=float): _check_const_str_dtype("zeros", dtype) def impl(shape, dtype=float): arr = np.empty(shape, dtype=dtype) arr._zero_fill() return arr return impl @overload(np.zeros_like) def ol_np_zeros_like(a, dtype=None): _check_const_str_dtype("zeros_like", dtype) # NumPy uses 'a' as the arg name for the array-like def impl(a, dtype=None): arr = np.empty_like(a, dtype=dtype) arr._zero_fill() return arr return impl @overload(np.ones_like) def ol_np_ones_like(a, dtype=None): _check_const_str_dtype("ones_like", dtype) # NumPy uses 'a' as the arg name for the array-like def impl(a, dtype=None): arr = np.empty_like(a, dtype=dtype) arr_flat = arr.flat for idx in range(len(arr_flat)): arr_flat[idx] = 1 return arr return impl @overload(np.full) def impl_np_full(shape, fill_value, dtype=None): _check_const_str_dtype("full", dtype) if not is_nonelike(dtype): nb_dtype = ty_parse_dtype(dtype) else: nb_dtype = fill_value def full(shape, fill_value, dtype=None): arr = np.empty(shape, nb_dtype) 
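        # Fill through .flat so a single loop covers any ndim; assigning to
        # the flat iterator writes into the underlying array.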
arr_flat = arr.flat for idx in range(len(arr_flat)): arr_flat[idx] = fill_value return arr return full @overload(np.full_like) def impl_np_full_like(a, fill_value, dtype=None): _check_const_str_dtype("full_like", dtype) def full_like(a, fill_value, dtype=None): arr = np.empty_like(a, dtype) arr_flat = arr.flat for idx in range(len(arr_flat)): arr_flat[idx] = fill_value return arr return full_like @overload(np.ones) def ol_np_ones(shape, dtype=None): # for some reason the NumPy default for dtype is None in the source but # ends up as np.float64 by definition. _check_const_str_dtype("ones", dtype) def impl(shape, dtype=None): arr = np.empty(shape, dtype=dtype) arr_flat = arr.flat for idx in range(len(arr_flat)): arr_flat[idx] = 1 return arr return impl @overload(np.identity) def impl_np_identity(n, dtype=None): _check_const_str_dtype("identity", dtype) if not is_nonelike(dtype): nb_dtype = ty_parse_dtype(dtype) else: nb_dtype = types.double def identity(n, dtype=None): arr = np.zeros((n, n), nb_dtype) for i in range(n): arr[i, i] = 1 return arr return identity def _eye_none_handler(N, M): pass @extending.overload(_eye_none_handler) def _eye_none_handler_impl(N, M): if isinstance(M, types.NoneType): def impl(N, M): return N else: def impl(N, M): return M return impl @extending.overload(np.eye) def numpy_eye(N, M=None, k=0, dtype=float): if dtype is None or isinstance(dtype, types.NoneType): dt = np.dtype(float) elif isinstance(dtype, (types.DTypeSpec, types.Number)): # dtype or instance of dtype dt = as_dtype(getattr(dtype, 'dtype', dtype)) else: dt = np.dtype(dtype) def impl(N, M=None, k=0, dtype=float): _M = _eye_none_handler(N, M) arr = np.zeros((N, _M), dt) if k >= 0: d = min(N, _M - k) for i in range(d): arr[i, i + k] = 1 else: d = min(N + k, _M) for i in range(d): arr[i - k, i] = 1 return arr return impl @overload(np.diag) def impl_np_diag(v, k=0): if not type_can_asarray(v): raise errors.TypingError('The argument "v" must be array-like') if isinstance(v, types.Array): if v.ndim not in (1, 2): raise errors.NumbaTypeError("Input must be 1- or 2-d.") def diag_impl(v, k=0): if v.ndim == 1: s = v.shape n = s[0] + abs(k) ret = np.zeros((n, n), v.dtype) if k >= 0: for i in range(n - k): ret[i, k + i] = v[i] else: for i in range(n + k): ret[i - k, i] = v[i] return ret else: # 2-d rows, cols = v.shape if k < 0: rows = rows + k if k > 0: cols = cols - k n = max(min(rows, cols), 0) ret = np.empty(n, v.dtype) if k >= 0: for i in range(n): ret[i] = v[i, k + i] else: for i in range(n): ret[i] = v[i - k, i] return ret return diag_impl @overload(np.indices) def numpy_indices(dimensions): if not isinstance(dimensions, types.UniTuple): msg = 'The argument "dimensions" must be a tuple of integers' raise errors.TypingError(msg) if not isinstance(dimensions.dtype, types.Integer): msg = 'The argument "dimensions" must be a tuple of integers' raise errors.TypingError(msg) N = len(dimensions) shape = (1,) * N def impl(dimensions): res = np.empty((N,) + dimensions, dtype=np.int64) i = 0 for dim in dimensions: idx = np.arange(dim, dtype=np.int64).reshape( tuple_setitem(shape, i, dim) ) res[i] = idx i += 1 return res return impl @overload(np.diagflat) def numpy_diagflat(v, k=0): if not type_can_asarray(v): msg = 'The argument "v" must be array-like' raise errors.TypingError(msg) if not isinstance(k, (int, types.Integer)): msg = 'The argument "k" must be an integer' raise errors.TypingError(msg) def impl(v, k=0): v = np.asarray(v) v = v.ravel() s = len(v) abs_k = abs(k) n = s + abs_k res = np.zeros((n, n), 
                       v.dtype)
        i = np.maximum(0, -k)
        j = np.maximum(0, k)
        for t in range(s):
            res[i + t, j + t] = v[t]
        return res
    return impl


def generate_getitem_setitem_with_axis(ndim, kind):
    assert kind in ('getitem', 'setitem')
    if kind == 'getitem':
        fn = '''
            def _getitem(a, idx, axis):
                if axis == 0:
                    return a[idx, ...]
        '''
        for i in range(1, ndim):
            lst = (':',) * i
            fn += f'''
                elif axis == {i}:
                    return a[{", ".join(lst)}, idx, ...]
            '''
    else:
        fn = '''
            def _setitem(a, idx, axis, vals):
                if axis == 0:
                    a[idx, ...] = vals
        '''
        for i in range(1, ndim):
            lst = (':',) * i
            fn += f'''
                elif axis == {i}:
                    a[{", ".join(lst)}, idx, ...] = vals
            '''
    fn = textwrap.dedent(fn)
    exec(fn, globals())
    fn = globals()[f'_{kind}']
    return register_jitable(fn)


@overload(np.take)
@overload_method(types.Array, 'take')
def numpy_take(a, indices, axis=None):
    if cgutils.is_nonelike(axis):
        if isinstance(a, types.Array) and isinstance(indices, types.Integer):
            def take_impl(a, indices, axis=None):
                if indices > (a.size - 1) or indices < -a.size:
                    raise IndexError("Index out of bounds")
                return a.ravel()[indices]
            return take_impl

        if isinstance(a, types.Array) and isinstance(indices, types.Array):
            F_order = indices.layout == 'F'

            def take_impl(a, indices, axis=None):
                ret = np.empty(indices.size, dtype=a.dtype)
                if F_order:
                    walker = indices.copy()  # get C order
                else:
                    walker = indices
                it = np.nditer(walker)
                i = 0
                flat = a.ravel()
                for x in it:
                    if x > (a.size - 1) or x < -a.size:
                        raise IndexError("Index out of bounds")
                    ret[i] = flat[x]
                    i = i + 1
                return ret.reshape(indices.shape)
            return take_impl

        if isinstance(a, types.Array) and \
                isinstance(indices, (types.List, types.BaseTuple)):
            def take_impl(a, indices, axis=None):
                convert = np.array(indices)
                return np.take(a, convert)
            return take_impl
    else:
        if isinstance(a, types.Array) and isinstance(indices, types.Integer):
            t = (0,) * (a.ndim - 1)

            # np.squeeze is too hard to implement in Numba: the tuple "t"
            # needs to be allocated beforehand, and we don't know its size
            # until the code gets executed.
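            # Worked example of the trick used below (for exposition only):
            # for r.shape == (4, 1, 5) and axis == 1, _squeeze copies every
            # extent except the 1-sized axis into `tup`, giving tup == (4, 5),
            # so r.reshape(tup) matches what np.squeeze(r, axis=1) would do.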
            @register_jitable
            def _squeeze(r, axis):
                tup = tuple(t)
                j = 0
                assert axis < len(r.shape) and r.shape[axis] == 1, r.shape
                for idx in range(len(r.shape)):
                    s = r.shape[idx]
                    if idx != axis:
                        tup = tuple_setitem(tup, j, s)
                        j += 1
                return r.reshape(tup)

            def take_impl(a, indices, axis=None):
                r = np.take(a, (indices,), axis=axis)
                if a.ndim == 1:
                    # caveats
                    # >>> isinstance(np.take(1d_arr, 0), int)
                    # True
                    # >>> isinstance(np.take(1d_arr, (0,)), int)
                    # False
                    # The latter returns an array
                    return r[0]
                if axis < 0:
                    axis += a.ndim
                return _squeeze(r, axis)
            return take_impl

        if isinstance(a, types.Array) and \
                isinstance(indices, (types.Array, types.List,
                                     types.BaseTuple)):
            ndim = a.ndim
            _getitem = generate_getitem_setitem_with_axis(ndim, 'getitem')
            _setitem = generate_getitem_setitem_with_axis(ndim, 'setitem')

            def take_impl(a, indices, axis=None):
                ax = axis
                if axis < 0:
                    axis += a.ndim
                if axis < 0 or axis >= a.ndim:
                    msg = (f"axis {ax} is out of bounds for array "
                           f"of dimension {a.ndim}")
                    raise ValueError(msg)
                shape = tuple_setitem(a.shape, axis, len(indices))
                out = np.empty(shape, dtype=a.dtype)
                for i in range(len(indices)):
                    y = _getitem(a, indices[i], axis)
                    _setitem(out, i, axis, y)
                return out
            return take_impl


def _arange_dtype(*args):
    bounds = [a for a in args if not isinstance(a, types.NoneType)]

    if any(isinstance(a, types.Complex) for a in bounds):
        dtype = types.complex128
    elif any(isinstance(a, types.Float) for a in bounds):
        dtype = types.float64
    else:
        # `np.arange(10).dtype` is always `np.dtype(int)`, aka `np.int_`, which
        # in all released versions of numpy corresponds to the C `long` type.
        # Windows 64 is broken by default here because Numba (as of 0.47) does
        # not differentiate between Python and NumPy integers, so a `typeof(1)`
        # on w64 is `int64`, i.e. `intp`. This means an arange(<some int>) will
        # be typed as arange(int64) and the following will yield int64 opposed
        # to int32. Example: without a load of analysis to work out if the args
        # were wrapped in NumPy int*() calls it's not possible to detect the
        # difference between `np.arange(10)` and `np.arange(np.int64(10))`.
        NPY_TY = getattr(types, "int%s" % (8 * np.dtype(int).itemsize))

        # unliteral these types such that `max` works.
        unliteral_bounds = [types.unliteral(x) for x in bounds]
        dtype = max(unliteral_bounds + [NPY_TY,])

    return dtype


@overload(np.arange)
def np_arange(start, /, stop=None, step=None, dtype=None):
    if isinstance(stop, types.Optional):
        stop = stop.type
    if isinstance(step, types.Optional):
        step = step.type
    if isinstance(dtype, types.Optional):
        dtype = dtype.type

    if stop is None:
        stop = types.none
    if step is None:
        step = types.none
    if dtype is None:
        dtype = types.none

    if (not isinstance(start, types.Number) or
            not isinstance(stop, (types.NoneType, types.Number)) or
            not isinstance(step, (types.NoneType, types.Number)) or
            not isinstance(dtype, (types.NoneType, types.DTypeSpec))):
        return

    if isinstance(dtype, types.NoneType):
        true_dtype = _arange_dtype(start, stop, step)
    else:
        true_dtype = dtype.dtype

    use_complex = any([isinstance(x, types.Complex)
                       for x in (start, stop, step)])

    start_value = getattr(start, "literal_value", None)
    stop_value = getattr(stop, "literal_value", None)
    step_value = getattr(step, "literal_value", None)

    def impl(start, /, stop=None, step=None, dtype=None):
        # Allow for improved performance if given literal arguments.
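        # Worked example of the dtype selection above (assuming a platform
        # where np.dtype(int).itemsize == 8): np.arange(5) -> int64,
        # np.arange(5.0) -> float64, np.arange(0, 1, 0.25 + 0j) -> complex128,
        # mirroring _arange_dtype's Complex > Float > Integer precedence.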
lit_start = start_value if start_value is not None else start lit_stop = stop_value if stop_value is not None else stop lit_step = step_value if step_value is not None else step _step = lit_step if lit_step is not None else 1 if lit_stop is None: _start, _stop = 0, lit_start else: _start, _stop = lit_start, lit_stop if _step == 0: raise ValueError("Maximum allowed size exceeded") nitems_c = (_stop - _start) / _step nitems_r = int(math.ceil(nitems_c.real)) # Binary operator needed for compiler branch pruning. if use_complex is True: nitems_i = int(math.ceil(nitems_c.imag)) nitems = max(min(nitems_i, nitems_r), 0) else: nitems = max(nitems_r, 0) arr = np.empty(nitems, true_dtype) val = _start for i in range(nitems): arr[i] = val + (i * _step) return arr return impl @overload(np.linspace) def numpy_linspace(start, stop, num=50): if not all(isinstance(arg, types.Number) for arg in [start, stop]): return if not isinstance(num, (int, types.Integer)): msg = 'The argument "num" must be an integer' raise errors.TypingError(msg) if any(isinstance(arg, types.Complex) for arg in [start, stop]): if config.USE_LEGACY_TYPE_SYSTEM: dtype = types.complex128 else: dtype = types.np_complex128 else: dtype = types.float64 # Implementation based on https://github.com/numpy/numpy/blob/v1.20.0/numpy/core/function_base.py#L24 # noqa: E501 def linspace(start, stop, num=50): arr = np.empty(num, dtype) # The multiply by 1.0 mirrors # https://github.com/numpy/numpy/blob/v1.20.0/numpy/core/function_base.py#L125-L128 # noqa: E501 # the side effect of this is important... start and stop become the same # type as `dtype` i.e. 64/128 bits wide (float/complex). This is # important later when used in the `np.divide`. start = start * 1.0 stop = stop * 1.0 if num == 0: return arr div = num - 1 if div > 0: delta = stop - start step = np.divide(delta, div) for i in range(0, num): arr[i] = start + (i * step) else: arr[0] = start if num > 1: arr[-1] = stop return arr return linspace def _array_copy(context, builder, sig, args): """ Array copy. 
""" arytype = sig.args[0] ary = make_array(arytype)(context, builder, value=args[0]) shapes = cgutils.unpack_tuple(builder, ary.shape) rettype = sig.return_type ret = _empty_nd_impl(context, builder, rettype, shapes) src_data = ary.data dest_data = ret.data assert rettype.layout in "CF" if arytype.layout == rettype.layout: # Fast path: memcpy cgutils.raw_memcpy(builder, dest_data, src_data, ary.nitems, ary.itemsize, align=1) else: src_strides = cgutils.unpack_tuple(builder, ary.strides) dest_strides = cgutils.unpack_tuple(builder, ret.strides) intp_t = context.get_value_type(types.intp) with cgutils.loop_nest(builder, shapes, intp_t) as indices: src_ptr = cgutils.get_item_pointer2(context, builder, src_data, shapes, src_strides, arytype.layout, indices) dest_ptr = cgutils.get_item_pointer2(context, builder, dest_data, shapes, dest_strides, rettype.layout, indices) builder.store(builder.load(src_ptr), dest_ptr) return impl_ret_new_ref(context, builder, sig.return_type, ret._getvalue()) @intrinsic def _array_copy_intrinsic(typingctx, a): assert isinstance(a, types.Array) layout = 'F' if a.layout == 'F' else 'C' ret = a.copy(layout=layout, readonly=False) sig = ret(a) return sig, _array_copy @lower_builtin("array.copy", types.Array) def array_copy(context, builder, sig, args): return _array_copy(context, builder, sig, args) @overload(np.copy) def impl_numpy_copy(a): if not type_can_asarray(a): raise errors.TypingError('The argument "a" must ' 'be array-like') if isinstance(a, types.Array): def numpy_copy(a): return _array_copy_intrinsic(a) else: def numpy_copy(a): # asarray automatically copies non-Array types return np.asarray(a) return numpy_copy def _as_layout_array(context, builder, sig, args, output_layout): """ Common logic for layout conversion function; e.g. ascontiguousarray and asfortranarray """ retty = sig.return_type aryty = sig.args[0] assert retty.layout == output_layout, 'return-type has incorrect layout' if aryty.ndim == 0: # 0-dim input => asfortranarray() returns a 1-dim array assert retty.ndim == 1 ary = make_array(aryty)(context, builder, value=args[0]) ret = make_array(retty)(context, builder) shape = context.get_constant_generic( builder, types.UniTuple(types.intp, 1), (1,), ) strides = context.make_tuple(builder, types.UniTuple(types.intp, 1), (ary.itemsize,)) populate_array(ret, ary.data, shape, strides, ary.itemsize, ary.meminfo, ary.parent) return impl_ret_borrowed(context, builder, retty, ret._getvalue()) elif (retty.layout == aryty.layout or (aryty.ndim == 1 and aryty.layout in 'CF')): # 1-dim contiguous input => return the same array return impl_ret_borrowed(context, builder, retty, args[0]) else: if aryty.layout == 'A': # There's still chance the array is in contiguous layout, # just that we don't know at compile time. # We can do a runtime check. 
# Prepare and call is_contiguous or is_fortran assert output_layout in 'CF' check_func = is_contiguous if output_layout == 'C' else is_fortran is_contig = _call_contiguous_check(check_func, context, builder, aryty, args[0]) with builder.if_else(is_contig) as (then, orelse): # If the array is already contiguous, just return it with then: out_then = impl_ret_borrowed(context, builder, retty, args[0]) then_blk = builder.block # Otherwise, copy to a new contiguous region with orelse: out_orelse = _array_copy(context, builder, sig, args) orelse_blk = builder.block # Phi node for the return value ret_phi = builder.phi(out_then.type) ret_phi.add_incoming(out_then, then_blk) ret_phi.add_incoming(out_orelse, orelse_blk) return ret_phi else: # Return a copy with the right layout return _array_copy(context, builder, sig, args) @intrinsic def _as_layout_array_intrinsic(typingctx, a, output_layout): if not isinstance(output_layout, types.StringLiteral): raise errors.RequireLiteralValue(output_layout) ret = a.copy(layout=output_layout.literal_value, ndim=max(a.ndim, 1)) sig = ret(a, output_layout) return sig, lambda c, b, s, a: _as_layout_array( c, b, s, a, output_layout=output_layout.literal_value) @overload(np.ascontiguousarray) def array_ascontiguousarray(a): if not type_can_asarray(a): raise errors.TypingError('The argument "a" must be array-like') if isinstance(a, (types.Number, types.Boolean,)): def impl(a): return np.ascontiguousarray(np.array(a)) elif isinstance(a, types.Array): def impl(a): return _as_layout_array_intrinsic(a, 'C') return impl @overload(np.asfortranarray) def array_asfortranarray(a): if not type_can_asarray(a): raise errors.TypingError('The argument "a" must be array-like') if isinstance(a, (types.Number, types.Boolean,)): def impl(a): return np.asfortranarray(np.array(a)) return impl elif isinstance(a, types.Array): def impl(a): return _as_layout_array_intrinsic(a, 'F') return impl @lower_builtin("array.astype", types.Array, types.DTypeSpec) @lower_builtin("array.astype", types.Array, types.StringLiteral) def array_astype(context, builder, sig, args): arytype = sig.args[0] ary = make_array(arytype)(context, builder, value=args[0]) shapes = cgutils.unpack_tuple(builder, ary.shape) rettype = sig.return_type ret = _empty_nd_impl(context, builder, rettype, shapes) src_data = ary.data dest_data = ret.data src_strides = cgutils.unpack_tuple(builder, ary.strides) dest_strides = cgutils.unpack_tuple(builder, ret.strides) intp_t = context.get_value_type(types.intp) with cgutils.loop_nest(builder, shapes, intp_t) as indices: src_ptr = cgutils.get_item_pointer2(context, builder, src_data, shapes, src_strides, arytype.layout, indices) dest_ptr = cgutils.get_item_pointer2(context, builder, dest_data, shapes, dest_strides, rettype.layout, indices) item = load_item(context, builder, arytype, src_ptr) item = context.cast(builder, item, arytype.dtype, rettype.dtype) store_item(context, builder, rettype, item, dest_ptr) return impl_ret_new_ref(context, builder, sig.return_type, ret._getvalue()) @intrinsic def _array_tobytes_intrinsic(typingctx, b): assert isinstance(b, types.Array) sig = bytes_type(b) def codegen(context, builder, sig, args): [ty] = sig.args arrty = make_array(ty) arr = arrty(context, builder, args[0]) itemsize = arr.itemsize nbytes = builder.mul(itemsize, arr.nitems) bstr = _make_constant_bytes(context, builder, nbytes) if (ty.is_c_contig and ty.layout == "C"): cgutils.raw_memcpy(builder, bstr.data, arr.data, arr.nitems, itemsize) else: shape = cgutils.unpack_tuple(builder, 
arr.shape) strides = cgutils.unpack_tuple(builder, arr.strides) layout = ty.layout intp_t = context.get_value_type(types.intp) byteidx = cgutils.alloca_once( builder, intp_t, name="byteptr", zfill=True ) with cgutils.loop_nest(builder, shape, intp_t) as indices: ptr = cgutils.get_item_pointer2( context, builder, arr.data, shape, strides, layout, indices ) srcptr = builder.bitcast(ptr, bstr.data.type) idx = builder.load(byteidx) destptr = builder.gep(bstr.data, [idx]) cgutils.memcpy(builder, destptr, srcptr, itemsize) builder.store(builder.add(idx, itemsize), byteidx) return bstr._getvalue() return sig, codegen @overload_method(types.Array, "tobytes") def impl_array_tobytes(arr): if isinstance(arr, types.Array): def impl(arr): return _array_tobytes_intrinsic(arr) return impl @intrinsic def np_frombuffer(typingctx, buffer, dtype, count, offset, retty): ty = retty.instance_type sig = ty(buffer, dtype, count, offset, retty) def codegen(context, builder, sig, args): bufty = sig.args[0] arg_count = args[2] arg_offset = args[3] aryty = sig.return_type buf = make_array(bufty)(context, builder, value=args[0]) out_ary_ty = make_array(aryty) out_ary = out_ary_ty(context, builder) out_datamodel = out_ary._datamodel itemsize = get_itemsize(context, aryty) ll_itemsize = Constant(buf.itemsize.type, itemsize) nbytes = builder.mul(buf.nitems, buf.itemsize) ll_offset_size = builder.mul(arg_offset, ll_itemsize) nbytes = builder.sub(nbytes, ll_offset_size) nbytes_is_negative = builder.icmp_signed( '<', nbytes, ir.Constant(arg_count.type, 0), ) msg = "offset must be non-negative and no greater than buffer length" with builder.if_then(nbytes_is_negative, likely=False): context.call_conv.return_user_exc(builder, ValueError, (msg,)) ll_count_is_negative = builder.icmp_signed( '<', arg_count, ir.Constant(arg_count.type, 0), ) # Check that the buffer size is compatible rem = builder.srem(nbytes, ll_itemsize) is_incompatible = cgutils.is_not_null(builder, rem) with builder.if_then(is_incompatible, likely=False): msg = "buffer size must be a multiple of element size" context.call_conv.return_user_exc(builder, ValueError, (msg,)) # Compute number of elements based on count with builder.if_else(ll_count_is_negative) as (then_block, else_block): with then_block: bb_if = builder.basic_block num_whole = builder.sdiv(nbytes, ll_itemsize) with else_block: bb_else = builder.basic_block ll_itemcount = builder.phi(arg_count.type) ll_itemcount.add_incoming(num_whole, bb_if) ll_itemcount.add_incoming(arg_count, bb_else) # Ensure we don’t exceed the buffer size ll_required_size = builder.mul(ll_itemcount, ll_itemsize) is_too_large = builder.icmp_unsigned('>', ll_required_size, nbytes) with builder.if_then(is_too_large, likely=False): msg = "buffer is smaller than requested size" context.call_conv.return_user_exc(builder, ValueError, (msg,)) # Set shape and strides shape = cgutils.pack_array(builder, [ll_itemcount]) strides = cgutils.pack_array(builder, [ll_itemsize]) data = builder.gep(buf.data, [arg_offset]) data = builder.bitcast( data, context.get_value_type(out_datamodel.get_type('data')) ) populate_array(out_ary, data=data, shape=shape, strides=strides, itemsize=ll_itemsize, meminfo=buf.meminfo, parent=buf.parent) res = out_ary._getvalue() return impl_ret_borrowed(context, builder, sig.return_type, res) return sig, codegen @overload(np.frombuffer) def impl_np_frombuffer(buffer, dtype=float, count=-1, offset=0): _check_const_str_dtype("frombuffer", dtype) if not isinstance(buffer, types.Buffer) or buffer.layout != 'C': msg = 
f'Argument "buffer" must be buffer-like. Got {buffer}' raise errors.TypingError(msg) if (dtype is float or (isinstance(dtype, types.Function) and dtype.typing_key is float) or is_nonelike(dtype)): # default nb_dtype = types.double else: nb_dtype = ty_parse_dtype(dtype) if nb_dtype is not None: retty = types.Array(dtype=nb_dtype, ndim=1, layout='C', readonly=not buffer.mutable) else: msg = ("Cannot parse input types to function " f"np.frombuffer({buffer}, {dtype})") raise errors.TypingError(msg) def impl(buffer, dtype=float, count=-1, offset=0): return np_frombuffer(buffer, dtype, count, offset, retty) return impl @overload(carray) def impl_carray(ptr, shape, dtype=None): if is_nonelike(dtype): intrinsic_cfarray = get_cfarray_intrinsic('C', None) def impl(ptr, shape, dtype=None): return intrinsic_cfarray(ptr, shape) return impl elif isinstance(dtype, types.DTypeSpec): intrinsic_cfarray = get_cfarray_intrinsic('C', dtype) def impl(ptr, shape, dtype=None): return intrinsic_cfarray(ptr, shape) return impl @overload(farray) def impl_farray(ptr, shape, dtype=None): if is_nonelike(dtype): intrinsic_cfarray = get_cfarray_intrinsic('F', None) def impl(ptr, shape, dtype=None): return intrinsic_cfarray(ptr, shape) return impl elif isinstance(dtype, types.DTypeSpec): intrinsic_cfarray = get_cfarray_intrinsic('F', dtype) def impl(ptr, shape, dtype=None): return intrinsic_cfarray(ptr, shape) return impl def get_cfarray_intrinsic(layout, dtype_): @intrinsic def intrinsic_cfarray(typingctx, ptr, shape): if ptr is types.voidptr: ptr_dtype = None elif isinstance(ptr, types.CPointer): ptr_dtype = ptr.dtype else: msg = f"pointer argument expected, got '{ptr}'" raise errors.NumbaTypeError(msg) if dtype_ is None: if ptr_dtype is None: msg = "explicit dtype required for void* argument" raise errors.NumbaTypeError(msg) dtype = ptr_dtype elif isinstance(dtype_, types.DTypeSpec): dtype = dtype_.dtype if ptr_dtype is not None and dtype != ptr_dtype: msg = f"mismatching dtype '{dtype}' for pointer type '{ptr}'" raise errors.NumbaTypeError(msg) else: msg = f"invalid dtype spec '{dtype_}'" raise errors.NumbaTypeError(msg) ndim = ty_parse_shape(shape) if ndim is None: msg = f"invalid shape '{shape}'" raise errors.NumbaTypeError(msg) retty = types.Array(dtype, ndim, layout) sig = signature(retty, ptr, shape) return sig, np_cfarray return intrinsic_cfarray def np_cfarray(context, builder, sig, args): """ numba.numpy_support.carray(...) and numba.numpy_support.farray(...). 
""" ptrty, shapety = sig.args[:2] ptr, shape = args[:2] aryty = sig.return_type assert aryty.layout in 'CF' out_ary = make_array(aryty)(context, builder) itemsize = get_itemsize(context, aryty) ll_itemsize = cgutils.intp_t(itemsize) if isinstance(shapety, types.BaseTuple): shapes = cgutils.unpack_tuple(builder, shape) else: shapety = (shapety,) shapes = (shape,) shapes = [context.cast(builder, value, fromty, types.intp) for fromty, value in zip(shapety, shapes)] off = ll_itemsize strides = [] if aryty.layout == 'F': for s in shapes: strides.append(off) off = builder.mul(off, s) else: for s in reversed(shapes): strides.append(off) off = builder.mul(off, s) strides.reverse() data = builder.bitcast(ptr, context.get_data_type(aryty.dtype).as_pointer()) populate_array(out_ary, data=data, shape=shapes, strides=strides, itemsize=ll_itemsize, # Array is not memory-managed meminfo=None, ) res = out_ary._getvalue() return impl_ret_new_ref(context, builder, sig.return_type, res) def _get_seq_size(context, builder, seqty, seq): if isinstance(seqty, types.BaseTuple): return context.get_constant(types.intp, len(seqty)) elif isinstance(seqty, types.Sequence): len_impl = context.get_function(len, signature(types.intp, seqty,)) return len_impl(builder, (seq,)) else: assert 0 def _get_borrowing_getitem(context, seqty): """ Return a getitem() implementation that doesn't incref its result. """ retty = seqty.dtype getitem_impl = context.get_function(operator.getitem, signature(retty, seqty, types.intp)) def wrap(builder, args): ret = getitem_impl(builder, args) if context.enable_nrt: context.nrt.decref(builder, retty, ret) return ret return wrap def compute_sequence_shape(context, builder, ndim, seqty, seq): """ Compute the likely shape of a nested sequence (possibly 0d). """ intp_t = context.get_value_type(types.intp) zero = Constant(intp_t, 0) def get_first_item(seqty, seq): if isinstance(seqty, types.BaseTuple): if len(seqty) == 0: return None, None else: return seqty[0], builder.extract_value(seq, 0) else: getitem_impl = _get_borrowing_getitem(context, seqty) return seqty.dtype, getitem_impl(builder, (seq, zero)) # Compute shape by traversing the first element of each nested # sequence shapes = [] innerty, inner = seqty, seq for i in range(ndim): if i > 0: innerty, inner = get_first_item(innerty, inner) shapes.append(_get_seq_size(context, builder, innerty, inner)) return tuple(shapes) def check_sequence_shape(context, builder, seqty, seq, shapes): """ Check the nested sequence matches the given *shapes*. """ def _fail(): context.call_conv.return_user_exc(builder, ValueError, ("incompatible sequence shape",)) def check_seq_size(seqty, seq, shapes): if len(shapes) == 0: return size = _get_seq_size(context, builder, seqty, seq) expected = shapes[0] mismatch = builder.icmp_signed('!=', size, expected) with builder.if_then(mismatch, likely=False): _fail() if len(shapes) == 1: return if isinstance(seqty, types.Sequence): getitem_impl = _get_borrowing_getitem(context, seqty) with cgutils.for_range(builder, size) as loop: innerty = seqty.dtype inner = getitem_impl(builder, (seq, loop.index)) check_seq_size(innerty, inner, shapes[1:]) elif isinstance(seqty, types.BaseTuple): for i in range(len(seqty)): innerty = seqty[i] inner = builder.extract_value(seq, i) check_seq_size(innerty, inner, shapes[1:]) else: assert 0, seqty check_seq_size(seqty, seq, shapes) def assign_sequence_to_array(context, builder, data, shapes, strides, arrty, seqty, seq): """ Assign a nested sequence contents to an array. 
The shape must match the sequence's structure. """ def assign_item(indices, valty, val): ptr = cgutils.get_item_pointer2(context, builder, data, shapes, strides, arrty.layout, indices, wraparound=False) val = context.cast(builder, val, valty, arrty.dtype) store_item(context, builder, arrty, val, ptr) def assign(seqty, seq, shapes, indices): if len(shapes) == 0: assert not isinstance(seqty, (types.Sequence, types.BaseTuple)) assign_item(indices, seqty, seq) return size = shapes[0] if isinstance(seqty, types.Sequence): getitem_impl = _get_borrowing_getitem(context, seqty) with cgutils.for_range(builder, size) as loop: innerty = seqty.dtype inner = getitem_impl(builder, (seq, loop.index)) assign(innerty, inner, shapes[1:], indices + (loop.index,)) elif isinstance(seqty, types.BaseTuple): for i in range(len(seqty)): innerty = seqty[i] inner = builder.extract_value(seq, i) index = context.get_constant(types.intp, i) assign(innerty, inner, shapes[1:], indices + (index,)) else: assert 0, seqty assign(seqty, seq, shapes, ()) def np_array_typer(typingctx, object, dtype): ndim, seq_dtype = _parse_nested_sequence(typingctx, object) if is_nonelike(dtype): dtype = seq_dtype else: dtype = ty_parse_dtype(dtype) if dtype is None: return return types.Array(dtype, ndim, 'C') @intrinsic def np_array(typingctx, obj, dtype): _check_const_str_dtype("array", dtype) ret = np_array_typer(typingctx, obj, dtype) sig = ret(obj, dtype) def codegen(context, builder, sig, args): arrty = sig.return_type ndim = arrty.ndim seqty = sig.args[0] seq = args[0] shapes = compute_sequence_shape(context, builder, ndim, seqty, seq) assert len(shapes) == ndim check_sequence_shape(context, builder, seqty, seq, shapes) arr = _empty_nd_impl(context, builder, arrty, shapes) assign_sequence_to_array(context, builder, arr.data, shapes, arr.strides, arrty, seqty, seq) return impl_ret_new_ref(context, builder, sig.return_type, arr._getvalue()) return sig, codegen @overload(np.array) def impl_np_array(object, dtype=None): _check_const_str_dtype("array", dtype) if not type_can_asarray(object): raise errors.TypingError('The argument "object" must ' 'be array-like') if not is_nonelike(dtype) and ty_parse_dtype(dtype) is None: msg = 'The argument "dtype" must be a data-type if it is provided' raise errors.TypingError(msg) def impl(object, dtype=None): return np_array(object, dtype) return impl def _normalize_axis(context, builder, func_name, ndim, axis): zero = axis.type(0) ll_ndim = axis.type(ndim) # Normalize negative axis is_neg_axis = builder.icmp_signed('<', axis, zero) axis = builder.select(is_neg_axis, builder.add(axis, ll_ndim), axis) # Check axis for bounds axis_out_of_bounds = builder.or_( builder.icmp_signed('<', axis, zero), builder.icmp_signed('>=', axis, ll_ndim)) with builder.if_then(axis_out_of_bounds, likely=False): msg = "%s(): axis out of bounds" % func_name context.call_conv.return_user_exc(builder, IndexError, (msg,)) return axis def _insert_axis_in_shape(context, builder, orig_shape, ndim, axis): """ Compute shape with the new axis inserted e.g. given original shape (2, 3, 4) and axis=2, the returned new shape is (2, 3, 1, 4). """ assert len(orig_shape) == ndim - 1 ll_shty = ir.ArrayType(cgutils.intp_t, ndim) shapes = cgutils.alloca_once(builder, ll_shty) one = cgutils.intp_t(1) # 1. 
copy original sizes at appropriate places for dim in range(ndim - 1): ll_dim = cgutils.intp_t(dim) after_axis = builder.icmp_signed('>=', ll_dim, axis) sh = orig_shape[dim] idx = builder.select(after_axis, builder.add(ll_dim, one), ll_dim) builder.store(sh, cgutils.gep_inbounds(builder, shapes, 0, idx)) # 2. insert new size (1) at axis dimension builder.store(one, cgutils.gep_inbounds(builder, shapes, 0, axis)) return cgutils.unpack_tuple(builder, builder.load(shapes)) def _insert_axis_in_strides(context, builder, orig_strides, ndim, axis): """ Same as _insert_axis_in_shape(), but with a strides array. """ assert len(orig_strides) == ndim - 1 ll_shty = ir.ArrayType(cgutils.intp_t, ndim) strides = cgutils.alloca_once(builder, ll_shty) one = cgutils.intp_t(1) zero = cgutils.intp_t(0) # 1. copy original strides at appropriate places for dim in range(ndim - 1): ll_dim = cgutils.intp_t(dim) after_axis = builder.icmp_signed('>=', ll_dim, axis) idx = builder.select(after_axis, builder.add(ll_dim, one), ll_dim) builder.store(orig_strides[dim], cgutils.gep_inbounds(builder, strides, 0, idx)) # 2. insert new stride at axis dimension # (the value is indifferent for a 1-sized dimension, we use 0) builder.store(zero, cgutils.gep_inbounds(builder, strides, 0, axis)) return cgutils.unpack_tuple(builder, builder.load(strides)) def expand_dims(context, builder, sig, args, axis): """ np.expand_dims() with the given axis. """ retty = sig.return_type ndim = retty.ndim arrty = sig.args[0] arr = make_array(arrty)(context, builder, value=args[0]) ret = make_array(retty)(context, builder) shapes = cgutils.unpack_tuple(builder, arr.shape) strides = cgutils.unpack_tuple(builder, arr.strides) new_shapes = _insert_axis_in_shape(context, builder, shapes, ndim, axis) new_strides = _insert_axis_in_strides(context, builder, strides, ndim, axis) populate_array(ret, data=arr.data, shape=new_shapes, strides=new_strides, itemsize=arr.itemsize, meminfo=arr.meminfo, parent=arr.parent) return ret._getvalue() @intrinsic def np_expand_dims(typingctx, a, axis): layout = a.layout if a.ndim <= 1 else 'A' ret = a.copy(ndim=a.ndim + 1, layout=layout) sig = ret(a, axis) def codegen(context, builder, sig, args): axis = context.cast(builder, args[1], sig.args[1], types.intp) axis = _normalize_axis(context, builder, "np.expand_dims", sig.return_type.ndim, axis) ret = expand_dims(context, builder, sig, args, axis) return impl_ret_borrowed(context, builder, sig.return_type, ret) return sig, codegen @overload(np.expand_dims) def impl_np_expand_dims(a, axis): if not isinstance(a, types.Array): msg = f'First argument "a" must be an array. Got {a}' raise errors.TypingError(msg) if not isinstance(axis, types.Integer): msg = f'Argument "axis" must be an integer. 
Got {axis}' raise errors.TypingError(msg) def impl(a, axis): return np_expand_dims(a, axis) return impl def _atleast_nd(minimum, axes): @intrinsic def impl(typingcontext, *args): arrtys = args rettys = [arg.copy(ndim=max(arg.ndim, minimum)) for arg in args] def codegen(context, builder, sig, args): transform = _atleast_nd_transform(minimum, axes) arrs = cgutils.unpack_tuple(builder, args[0]) rets = [transform(context, builder, arr, arrty, retty) for arr, arrty, retty in zip(arrs, arrtys, rettys)] if len(rets) > 1: ret = context.make_tuple(builder, sig.return_type, rets) else: ret = rets[0] return impl_ret_borrowed(context, builder, sig.return_type, ret) return signature(types.Tuple(rettys) if len(rettys) > 1 else rettys[0], types.StarArgTuple.from_types(args)), codegen return lambda *args: impl(*args) def _atleast_nd_transform(min_ndim, axes): """ Return a callback successively inserting 1-sized dimensions at the following axes. """ assert min_ndim == len(axes) def transform(context, builder, arr, arrty, retty): for i in range(min_ndim): ndim = i + 1 if arrty.ndim < ndim: axis = cgutils.intp_t(axes[i]) newarrty = arrty.copy(ndim=arrty.ndim + 1) arr = expand_dims(context, builder, typing.signature(newarrty, arrty), (arr,), axis) arrty = newarrty return arr return transform @overload(np.atleast_1d) def np_atleast_1d(*args): if all(isinstance(arg, types.Array) for arg in args): return _atleast_nd(1, [0]) @overload(np.atleast_2d) def np_atleast_2d(*args): if all(isinstance(arg, types.Array) for arg in args): return _atleast_nd(2, [0, 0]) @overload(np.atleast_3d) def np_atleast_3d(*args): if all(isinstance(arg, types.Array) for arg in args): return _atleast_nd(3, [0, 0, 2]) def _do_concatenate(context, builder, axis, arrtys, arrs, arr_shapes, arr_strides, retty, ret_shapes): """ Concatenate arrays along the given axis. """ assert len(arrtys) == len(arrs) == len(arr_shapes) == len(arr_strides) zero = cgutils.intp_t(0) # Allocate return array ret = _empty_nd_impl(context, builder, retty, ret_shapes) ret_strides = cgutils.unpack_tuple(builder, ret.strides) # Compute the offset by which to bump the destination pointer # after copying each input array. # Morally, we need to copy each input array at different start indices # into the destination array; bumping the destination pointer # is simply easier than offsetting all destination indices. 
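    # Worked example (illustrative): concatenating `a` with a.shape == (2, 3)
    # along axis == 0 into a C-ordered result, the offset computed for `a`
    # is ret_strides[0] * 2 bytes, i.e. the distance from ret[0, 0] to
    # ret[2, 0], which is exactly where the next input's data must start.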
copy_offsets = [] for arr_sh in arr_shapes: # offset = ret_strides[axis] * input_shape[axis] offset = zero for dim, (size, stride) in enumerate(zip(arr_sh, ret_strides)): is_axis = builder.icmp_signed('==', axis.type(dim), axis) addend = builder.mul(size, stride) offset = builder.select(is_axis, builder.add(offset, addend), offset) copy_offsets.append(offset) # Copy input arrays into the return array ret_data = ret.data for arrty, arr, arr_sh, arr_st, offset in zip(arrtys, arrs, arr_shapes, arr_strides, copy_offsets): arr_data = arr.data # Do the copy loop # Note the loop nesting is optimized for the destination layout loop_nest = cgutils.loop_nest(builder, arr_sh, cgutils.intp_t, order=retty.layout) with loop_nest as indices: src_ptr = cgutils.get_item_pointer2(context, builder, arr_data, arr_sh, arr_st, arrty.layout, indices) val = load_item(context, builder, arrty, src_ptr) val = context.cast(builder, val, arrty.dtype, retty.dtype) dest_ptr = cgutils.get_item_pointer2(context, builder, ret_data, ret_shapes, ret_strides, retty.layout, indices) store_item(context, builder, retty, val, dest_ptr) # Bump destination pointer ret_data = cgutils.pointer_add(builder, ret_data, offset) return ret def _np_concatenate(context, builder, arrtys, arrs, retty, axis): ndim = retty.ndim arrs = [make_array(aty)(context, builder, value=a) for aty, a in zip(arrtys, arrs)] axis = _normalize_axis(context, builder, "np.concatenate", ndim, axis) # Get input shapes arr_shapes = [cgutils.unpack_tuple(builder, arr.shape) for arr in arrs] arr_strides = [cgutils.unpack_tuple(builder, arr.strides) for arr in arrs] # Compute return shape: # - the dimension for the concatenation axis is summed over all inputs # - other dimensions must match exactly for each input ret_shapes = [cgutils.alloca_once_value(builder, sh) for sh in arr_shapes[0]] for dim in range(ndim): is_axis = builder.icmp_signed('==', axis.type(dim), axis) ret_shape_ptr = ret_shapes[dim] ret_sh = builder.load(ret_shape_ptr) other_shapes = [sh[dim] for sh in arr_shapes[1:]] with builder.if_else(is_axis) as (on_axis, on_other_dim): with on_axis: sh = functools.reduce( builder.add, other_shapes + [ret_sh]) builder.store(sh, ret_shape_ptr) with on_other_dim: is_ok = cgutils.true_bit for sh in other_shapes: is_ok = builder.and_(is_ok, builder.icmp_signed('==', sh, ret_sh)) with builder.if_then(builder.not_(is_ok), likely=False): context.call_conv.return_user_exc( builder, ValueError, ("np.concatenate(): input sizes over " "dimension %d do not match" % dim,)) ret_shapes = [builder.load(sh) for sh in ret_shapes] ret = _do_concatenate(context, builder, axis, arrtys, arrs, arr_shapes, arr_strides, retty, ret_shapes) return impl_ret_new_ref(context, builder, retty, ret._getvalue()) def _np_stack(context, builder, arrtys, arrs, retty, axis): ndim = retty.ndim zero = cgutils.intp_t(0) one = cgutils.intp_t(1) ll_narrays = cgutils.intp_t(len(arrs)) arrs = [make_array(aty)(context, builder, value=a) for aty, a in zip(arrtys, arrs)] axis = _normalize_axis(context, builder, "np.stack", ndim, axis) # Check input arrays have the same shape orig_shape = cgutils.unpack_tuple(builder, arrs[0].shape) for arr in arrs[1:]: is_ok = cgutils.true_bit for sh, orig_sh in zip(cgutils.unpack_tuple(builder, arr.shape), orig_shape): is_ok = builder.and_(is_ok, builder.icmp_signed('==', sh, orig_sh)) with builder.if_then(builder.not_(is_ok), likely=False): context.call_conv.return_user_exc( builder, ValueError, ("np.stack(): all input arrays must have the same shape",)) orig_strides = 
[cgutils.unpack_tuple(builder, arr.strides) for arr in arrs] # Compute input shapes and return shape with the new axis inserted # e.g. given 5 input arrays of shape (2, 3, 4) and axis=1, # corrected input shape is (2, 1, 3, 4) and return shape is (2, 5, 3, 4). ll_shty = ir.ArrayType(cgutils.intp_t, ndim) input_shapes = cgutils.alloca_once(builder, ll_shty) ret_shapes = cgutils.alloca_once(builder, ll_shty) # 1. copy original sizes at appropriate places for dim in range(ndim - 1): ll_dim = cgutils.intp_t(dim) after_axis = builder.icmp_signed('>=', ll_dim, axis) sh = orig_shape[dim] idx = builder.select(after_axis, builder.add(ll_dim, one), ll_dim) builder.store(sh, cgutils.gep_inbounds(builder, input_shapes, 0, idx)) builder.store(sh, cgutils.gep_inbounds(builder, ret_shapes, 0, idx)) # 2. insert new size at axis dimension builder.store(one, cgutils.gep_inbounds(builder, input_shapes, 0, axis)) builder.store(ll_narrays, cgutils.gep_inbounds(builder, ret_shapes, 0, axis)) input_shapes = cgutils.unpack_tuple(builder, builder.load(input_shapes)) input_shapes = [input_shapes] * len(arrs) ret_shapes = cgutils.unpack_tuple(builder, builder.load(ret_shapes)) # Compute input strides for each array with the new axis inserted input_strides = [cgutils.alloca_once(builder, ll_shty) for i in range(len(arrs))] # 1. copy original strides at appropriate places for dim in range(ndim - 1): ll_dim = cgutils.intp_t(dim) after_axis = builder.icmp_signed('>=', ll_dim, axis) idx = builder.select(after_axis, builder.add(ll_dim, one), ll_dim) for i in range(len(arrs)): builder.store(orig_strides[i][dim], cgutils.gep_inbounds(builder, input_strides[i], 0, idx)) # 2. insert new stride at axis dimension # (the value is indifferent for a 1-sized dimension, we put 0) for i in range(len(arrs)): builder.store(zero, cgutils.gep_inbounds(builder, input_strides[i], 0, axis)) input_strides = [cgutils.unpack_tuple(builder, builder.load(st)) for st in input_strides] # Create concatenated array ret = _do_concatenate(context, builder, axis, arrtys, arrs, input_shapes, input_strides, retty, ret_shapes) return impl_ret_new_ref(context, builder, retty, ret._getvalue()) def np_concatenate_typer(typingctx, arrays, axis): if axis is not None and not isinstance(axis, types.Integer): # Note Numpy allows axis=None, but it isn't documented: # https://github.com/numpy/numpy/issues/7968 return # does type checking dtype, ndim = _sequence_of_arrays(typingctx, "np.concatenate", arrays) if ndim == 0: msg = "zero-dimensional arrays cannot be concatenated" raise errors.NumbaTypeError(msg) layout = _choose_concatenation_layout(arrays) return types.Array(dtype, ndim, layout) @intrinsic def np_concatenate(typingctx, arrays, axis): ret = np_concatenate_typer(typingctx, arrays, axis) assert isinstance(ret, types.Array) sig = ret(arrays, axis) def codegen(context, builder, sig, args): axis = context.cast(builder, args[1], sig.args[1], types.intp) return _np_concatenate(context, builder, list(sig.args[0]), cgutils.unpack_tuple(builder, args[0]), sig.return_type, axis) return sig, codegen @overload(np.concatenate) def impl_np_concatenate(arrays, axis=0): if isinstance(arrays, types.BaseTuple): def impl(arrays, axis=0): return np_concatenate(arrays, axis) return impl def _column_stack_dims(context, func_name, arrays): # column_stack() allows stacking 1-d and 2-d arrays together for a in arrays: if a.ndim < 1 or a.ndim > 2: msg = "np.column_stack() is only defined on 1-d and 2-d arrays" raise errors.NumbaTypeError(msg) return 2 @intrinsic def 
np_column_stack(typingctx, tup): dtype, ndim = _sequence_of_arrays(typingctx, "np.column_stack", tup, dim_chooser=_column_stack_dims) layout = _choose_concatenation_layout(tup) ret = types.Array(dtype, ndim, layout) sig = ret(tup) def codegen(context, builder, sig, args): orig_arrtys = list(sig.args[0]) orig_arrs = cgutils.unpack_tuple(builder, args[0]) arrtys = [] arrs = [] axis = context.get_constant(types.intp, 1) for arrty, arr in zip(orig_arrtys, orig_arrs): if arrty.ndim == 2: arrtys.append(arrty) arrs.append(arr) else: # Convert 1d array to 2d column array: np.expand_dims(a, 1) assert arrty.ndim == 1 newty = arrty.copy(ndim=2) expand_sig = typing.signature(newty, arrty) newarr = expand_dims(context, builder, expand_sig, (arr,), axis) arrtys.append(newty) arrs.append(newarr) return _np_concatenate(context, builder, arrtys, arrs, sig.return_type, axis) return sig, codegen @overload(np.column_stack) def impl_column_stack(tup): if isinstance(tup, types.BaseTuple): def impl(tup): return np_column_stack(tup) return impl def _np_stack_common(context, builder, sig, args, axis): """ np.stack() with the given axis value. """ return _np_stack(context, builder, list(sig.args[0]), cgutils.unpack_tuple(builder, args[0]), sig.return_type, axis) @intrinsic def np_stack_common(typingctx, arrays, axis): # does type checking dtype, ndim = _sequence_of_arrays(typingctx, "np.stack", arrays) layout = 'F' if all(a.layout == 'F' for a in arrays) else 'C' ret = types.Array(dtype, ndim + 1, layout) sig = ret(arrays, axis) def codegen(context, builder, sig, args): axis = context.cast(builder, args[1], sig.args[1], types.intp) return _np_stack_common(context, builder, sig, args, axis) return sig, codegen @overload(np.stack) def impl_np_stack(arrays, axis=0): if isinstance(arrays, types.BaseTuple): def impl(arrays, axis=0): return np_stack_common(arrays, axis) return impl def NdStack_typer(typingctx, func_name, arrays, ndim_min): # does type checking dtype, ndim = _sequence_of_arrays(typingctx, func_name, arrays) ndim = max(ndim, ndim_min) layout = _choose_concatenation_layout(arrays) ret = types.Array(dtype, ndim, layout) return ret @intrinsic def _np_hstack(typingctx, tup): ret = NdStack_typer(typingctx, "np.hstack", tup, 1) sig = ret(tup) def codegen(context, builder, sig, args): tupty = sig.args[0] ndim = tupty[0].ndim if ndim == 0: # hstack() on 0-d arrays returns a 1-d array axis = context.get_constant(types.intp, 0) return _np_stack_common(context, builder, sig, args, axis) else: # As a special case, dimension 0 of 1-dimensional arrays # is "horizontal" axis = 0 if ndim == 1 else 1 def np_hstack_impl(arrays): return np.concatenate(arrays, axis=axis) return context.compile_internal(builder, np_hstack_impl, sig, args) return sig, codegen @overload(np.hstack) def impl_np_hstack(tup): if isinstance(tup, types.BaseTuple): def impl(tup): return _np_hstack(tup) return impl @intrinsic def _np_vstack(typingctx, tup): ret = NdStack_typer(typingctx, "np.vstack", tup, 2) sig = ret(tup) def codegen(context, builder, sig, args): tupty = sig.args[0] ndim = tupty[0].ndim if ndim == 0: def np_vstack_impl(arrays): return np.expand_dims(np.hstack(arrays), 1) elif ndim == 1: # np.stack(arrays, axis=0) axis = context.get_constant(types.intp, 0) return _np_stack_common(context, builder, sig, args, axis) else: def np_vstack_impl(arrays): return np.concatenate(arrays, axis=0) return context.compile_internal(builder, np_vstack_impl, sig, args) return sig, codegen @overload(np.vstack) def impl_np_vstack(tup): if isinstance(tup, 
                      types.BaseTuple):
        def impl(tup):
            return _np_vstack(tup)
        return impl


if numpy_version < (2, 0):
    # np.row_stack was removed in NumPy 2.0, so only register the alias on
    # older versions.
    overload(np.row_stack)(impl_np_vstack)


@intrinsic
def _np_dstack(typingctx, tup):
    ret = NdStack_typer(typingctx, "np.dstack", tup, 3)
    sig = ret(tup)

    def codegen(context, builder, sig, args):
        tupty = sig.args[0]
        retty = sig.return_type
        ndim = tupty[0].ndim

        if ndim == 0:
            def np_vstack_impl(arrays):
                return np.hstack(arrays).reshape(1, 1, -1)

            return context.compile_internal(builder, np_vstack_impl,
                                            sig, args)
        elif ndim == 1:
            # np.expand_dims(np.stack(arrays, axis=1), axis=0)
            axis = context.get_constant(types.intp, 1)

            stack_retty = retty.copy(ndim=retty.ndim - 1)
            stack_sig = typing.signature(stack_retty, *sig.args)
            stack_ret = _np_stack_common(context, builder, stack_sig,
                                         args, axis)

            axis = context.get_constant(types.intp, 0)
            expand_sig = typing.signature(retty, stack_retty)
            return expand_dims(context, builder, expand_sig,
                               (stack_ret,), axis)
        elif ndim == 2:
            # np.stack(arrays, axis=2)
            axis = context.get_constant(types.intp, 2)
            return _np_stack_common(context, builder, sig, args, axis)
        else:
            def np_vstack_impl(arrays):
                return np.concatenate(arrays, axis=2)

            return context.compile_internal(builder, np_vstack_impl,
                                            sig, args)

    return sig, codegen


@overload(np.dstack)
def impl_np_dstack(tup):
    if isinstance(tup, types.BaseTuple):
        def impl(tup):
            return _np_dstack(tup)
        return impl


@extending.overload_method(types.Array, 'fill')
def arr_fill(arr, val):
    def fill_impl(arr, val):
        arr[:] = val
        return None
    return fill_impl


@extending.overload_method(types.Array, 'dot')
def array_dot(arr, other):
    def dot_impl(arr, other):
        return np.dot(arr, other)
    return dot_impl


@overload(np.fliplr)
def np_flip_lr(m):
    if not type_can_asarray(m):
        raise errors.TypingError("Cannot np.fliplr on %s type" % m)

    def impl(m):
        A = np.asarray(m)
        # This handling is superfluous/dead, as a < 2d array cannot be indexed
        # as below and so typing fails. If the typing doesn't fail due to some
        # future change, this will catch it.
        if A.ndim < 2:
            raise ValueError('Input must be >= 2-d.')
        return A[::, ::-1, ...]
    return impl


@overload(np.flipud)
def np_flip_ud(m):
    if not type_can_asarray(m):
        raise errors.TypingError("Cannot np.flipud on %s type" % m)

    def impl(m):
        A = np.asarray(m)
        # This handling is superfluous/dead, as a 0d array cannot be indexed
        # as below and so typing fails. If the typing doesn't fail due to some
        # future change, this will catch it.
        if A.ndim < 1:
            raise ValueError('Input must be >= 1-d.')
        return A[::-1, ...]
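        # Reference behaviour this mirrors (illustrative):
        #   np.flipud([[1, 2], [3, 4]]) -> [[3, 4], [1, 2]]
        # i.e. only the first axis is reversed; trailing axes are untouched.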
    return impl


@intrinsic
def _build_flip_slice_tuple(tyctx, sz):
    """ Creates a tuple of slices for np.flip indexing like
    `(slice(None, None, -1),) * sz` """
    if not isinstance(sz, types.IntegerLiteral):
        raise errors.RequireLiteralValue(sz)

    size = int(sz.literal_value)
    tuple_type = types.UniTuple(dtype=types.slice3_type, count=size)
    sig = tuple_type(sz)

    def codegen(context, builder, signature, args):
        def impl(length, empty_tuple):
            out = empty_tuple
            for i in range(length):
                out = tuple_setitem(out, i, slice(None, None, -1))
            return out

        inner_argtypes = [types.intp, tuple_type]
        inner_sig = typing.signature(tuple_type, *inner_argtypes)
        ll_idx_type = context.get_value_type(types.intp)
        # Allocate an empty tuple
        empty_tuple = context.get_constant_undef(tuple_type)
        inner_args = [ll_idx_type(size), empty_tuple]
        res = context.compile_internal(builder, impl, inner_sig, inner_args)
        return res

    return sig, codegen


@overload(np.flip)
def np_flip(m):
    # A constant value is needed for the tuple slice; types.Array.ndim can
    # provide this, so at present only types.Array is supported.
    if not isinstance(m, types.Array):
        raise errors.TypingError("Cannot np.flip on %s type" % m)

    def impl(m):
        sl = _build_flip_slice_tuple(m.ndim)
        return m[sl]
    return impl


@overload(np.array_split)
def np_array_split(ary, indices_or_sections, axis=0):
    if isinstance(ary, (types.UniTuple, types.ListType, types.List)):
        def impl(ary, indices_or_sections, axis=0):
            return np.array_split(
                np.asarray(ary), indices_or_sections, axis=axis
            )
        return impl

    if isinstance(indices_or_sections, types.Integer):
        def impl(ary, indices_or_sections, axis=0):
            l, rem = divmod(ary.shape[axis], indices_or_sections)
            indices = np.cumsum(np.array(
                [l + 1] * rem + [l] * (indices_or_sections - rem - 1)
            ))
            return np.array_split(ary, indices, axis=axis)
        return impl
    elif (
        isinstance(indices_or_sections, types.IterableType)
        and isinstance(
            indices_or_sections.iterator_type.yield_type, types.Integer
        )
    ):
        def impl(ary, indices_or_sections, axis=0):
            slice_tup = build_full_slice_tuple(ary.ndim)
            axis = normalize_axis("np.split", "axis", ary.ndim, axis)
            out = []
            prev = 0
            for cur in indices_or_sections:
                idx = tuple_setitem(slice_tup, axis, slice(prev, cur))
                out.append(ary[idx])
                prev = cur
            out.append(ary[tuple_setitem(slice_tup, axis, slice(cur, None))])
            return out
        return impl
    elif (
        isinstance(indices_or_sections, types.Tuple)
        and all(isinstance(t, types.Integer)
                for t in indices_or_sections.types)
    ):
        def impl(ary, indices_or_sections, axis=0):
            slice_tup = build_full_slice_tuple(ary.ndim)
            axis = normalize_axis("np.split", "axis", ary.ndim, axis)
            out = []
            prev = 0
            for cur in literal_unroll(indices_or_sections):
                idx = tuple_setitem(slice_tup, axis, slice(prev, cur))
                out.append(ary[idx])
                prev = cur
            out.append(ary[tuple_setitem(slice_tup, axis, slice(cur, None))])
            return out
        return impl


@overload(np.split)
def np_split(ary, indices_or_sections, axis=0):
    # This is just a wrapper of array_split, but with an extra error if
    # indices is an int.
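    # Behavioural contrast preserved by this wrapper (illustrative):
    #   np.array_split(np.arange(5), 2) -> [array([0, 1, 2]), array([3, 4])]
    #   np.split(np.arange(5), 2)       -> ValueError, since 5 % 2 != 0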
    if isinstance(ary, (types.UniTuple, types.ListType, types.List)):
        def impl(ary, indices_or_sections, axis=0):
            return np.split(np.asarray(ary), indices_or_sections, axis=axis)
        return impl

    if isinstance(indices_or_sections, types.Integer):
        def impl(ary, indices_or_sections, axis=0):
            _, rem = divmod(ary.shape[axis], indices_or_sections)
            if rem != 0:
                raise ValueError(
                    "array split does not result in an equal division"
                )
            return np.array_split(ary, indices_or_sections, axis=axis)
        return impl
    else:
        return np_array_split(ary, indices_or_sections, axis=axis)


@overload(np.vsplit)
def numpy_vsplit(ary, indices_or_sections):
    if not isinstance(ary, types.Array):
        msg = 'The argument "ary" must be an array'
        raise errors.TypingError(msg)
    if not isinstance(indices_or_sections,
                      (types.Integer, types.Array, types.List,
                       types.UniTuple)):
        msg = ('The argument "indices_or_sections" must be int or 1d-array')
        raise errors.TypingError(msg)

    def impl(ary, indices_or_sections):
        if ary.ndim < 2:
            raise ValueError(('vsplit only works on '
                              'arrays of 2 or more dimensions'))
        return np.split(ary, indices_or_sections, axis=0)
    return impl


@overload(np.hsplit)
def numpy_hsplit(ary, indices_or_sections):
    if not isinstance(ary, types.Array):
        msg = 'The argument "ary" must be an array'
        raise errors.TypingError(msg)
    if not isinstance(indices_or_sections,
                      (types.Integer, types.Array, types.List,
                       types.UniTuple)):
        msg = ('The argument "indices_or_sections" must be int or 1d-array')
        raise errors.TypingError(msg)

    def impl(ary, indices_or_sections):
        if ary.ndim == 0:
            raise ValueError(('hsplit only works on '
                              'arrays of 1 or more dimensions'))
        if ary.ndim > 1:
            return np.split(ary, indices_or_sections, axis=1)
        return np.split(ary, indices_or_sections, axis=0)
    return impl


@overload(np.dsplit)
def numpy_dsplit(ary, indices_or_sections):
    if not isinstance(ary, types.Array):
        msg = 'The argument "ary" must be an array'
        raise errors.TypingError(msg)
    if not isinstance(indices_or_sections,
                      (types.Integer, types.Array, types.List,
                       types.UniTuple)):
        msg = ('The argument "indices_or_sections" must be int or 1d-array')
        raise errors.TypingError(msg)

    def impl(ary, indices_or_sections):
        if ary.ndim < 3:
            raise ValueError('dsplit only works on arrays of 3 or more '
                             'dimensions')
        return np.split(ary, indices_or_sections, axis=2)
    return impl


# -----------------------------------------------------------------------------
# Sorting

_sorts = {}


def default_lt(a, b):
    """
    Trivial comparison function between two keys.
    """
    return a < b


def get_sort_func(kind, lt_impl, is_argsort=False):
    """
    Get a sort implementation of the given kind.
    """
    key = kind, lt_impl.__name__, is_argsort
    try:
        return _sorts[key]
    except KeyError:
        if kind == 'quicksort':
            sort = quicksort.make_jit_quicksort(
                lt=lt_impl, is_argsort=is_argsort, is_np_array=True)
            func = sort.run_quicksort
        elif kind == 'mergesort':
            sort = mergesort.make_jit_mergesort(
                lt=lt_impl, is_argsort=is_argsort)
            func = sort.run_mergesort
        _sorts[key] = func
        return func


def lt_implementation(dtype):
    if isinstance(dtype, types.Float):
        return lt_floats
    elif isinstance(dtype, types.Complex):
        return lt_complex
    else:
        return default_lt


@lower_builtin("array.sort", types.Array)
def array_sort(context, builder, sig, args):
    arytype = sig.args[0]
    sort_func = get_sort_func(kind='quicksort',
                              lt_impl=lt_implementation(arytype.dtype))

    def array_sort_impl(arr):
        # Note we clobber the return value
        sort_func(arr)

    return context.compile_internal(builder, array_sort_impl, sig, args)


@overload(np.sort)
def impl_np_sort(a):
    if not type_can_asarray(a):
        raise errors.TypingError('Argument "a" must '
                                 'be array-like')

    def np_sort_impl(a):
        res = a.copy()
        res.sort()
        return res

    return np_sort_impl


@lower_builtin("array.argsort", types.Array, types.StringLiteral)
@lower_builtin(np.argsort, types.Array, types.StringLiteral)
def array_argsort(context, builder, sig, args):
    arytype, kind = sig.args
    sort_func = get_sort_func(kind=kind.literal_value,
                              lt_impl=lt_implementation(arytype.dtype),
                              is_argsort=True)

    def array_argsort_impl(arr):
        return sort_func(arr)

    innersig = sig.replace(args=sig.args[:1])
    innerargs = args[:1]
    return context.compile_internal(builder, array_argsort_impl,
                                    innersig, innerargs)


# ------------------------------------------------------------------------------
# Implicit cast

@lower_cast(types.Array, types.Array)
def array_to_array(context, builder, fromty, toty, val):
    # Type inference should have prevented illegal array casting.
    assert fromty.mutable != toty.mutable or toty.layout == 'A'
    return val


@lower_cast(types.Array, types.UnicodeCharSeq)
@lower_cast(types.Array, types.Float)
@lower_cast(types.Array, types.Integer)
@lower_cast(types.Array, types.Complex)
@lower_cast(types.Array, types.Boolean)
@lower_cast(types.Array, types.NPTimedelta)
@lower_cast(types.Array, types.NPDatetime)
def array0d_to_scalar(context, builder, fromty, toty, val):
    def impl(a):
        # a is an array(T, 0d, O), T is type, O is order
        return a.take(0)

    sig = signature(toty, fromty)
    res = context.compile_internal(builder, impl, sig, [val])
    return impl_ret_untracked(context, builder, sig.return_type, res)


@lower_cast(types.Array, types.UnicodeCharSeq)
def array_to_unichrseq(context, builder, fromty, toty, val):
    def impl(a):
        return str(a[()])

    sig = signature(toty, fromty)
    res = context.compile_internal(builder, impl, sig, [val])
    return impl_ret_borrowed(context, builder, sig.return_type, res)


# ------------------------------------------------------------------------------
# Stride tricks

def reshape_unchecked(a, shape, strides):
    """
    An intrinsic returning a derived array with the given shape and strides.
    """
    raise NotImplementedError


@extending.type_callable(reshape_unchecked)
def type_reshape_unchecked(context):
    def check_shape(shape):
        return (isinstance(shape, types.BaseTuple) and
                all(isinstance(v, types.Integer) for v in shape))

    def typer(a, shape, strides):
        if not isinstance(a, types.Array):
            return
        if not check_shape(shape) or not check_shape(strides):
            return
        if len(shape) != len(strides):
            return
        return a.copy(ndim=len(shape), layout='A')

    return typer


@lower_builtin(reshape_unchecked, types.Array,
               types.BaseTuple, types.BaseTuple)
def impl_shape_unchecked(context, builder, sig, args):
    aryty = sig.args[0]
    retty = sig.return_type

    ary = make_array(aryty)(context, builder, args[0])
    out = make_array(retty)(context, builder)
    shape = cgutils.unpack_tuple(builder, args[1])
    strides = cgutils.unpack_tuple(builder, args[2])

    populate_array(out,
                   data=ary.data,
                   shape=shape,
                   strides=strides,
                   itemsize=ary.itemsize,
                   meminfo=ary.meminfo,
                   )

    res = out._getvalue()
    return impl_ret_borrowed(context, builder, retty, res)


@extending.overload(np.lib.stride_tricks.as_strided)
def as_strided(x, shape=None, strides=None):
    if shape in (None, types.none):
        @register_jitable
        def get_shape(x, shape):
            return x.shape
    else:
        @register_jitable
        def get_shape(x, shape):
            return shape

    if strides in (None, types.none):
        # When *strides* is not passed, as_strided() does a non-size-checking
        # reshape(), possibly changing the original strides.  This is too
        # cumbersome to support right now, and a Web search shows all example
        # use cases of as_strided() pass explicit *strides*.
        raise errors.TypingError("as_strided() strides argument cannot be None")
    else:
        @register_jitable
        def get_strides(x, strides):
            return strides

    def as_strided_impl(x, shape=None, strides=None):
        x = reshape_unchecked(x, get_shape(x, shape), get_strides(x, strides))
        return x

    return as_strided_impl


@extending.overload(np.lib.stride_tricks.sliding_window_view)
def sliding_window_view(x, window_shape, axis=None):
    # Window shape must be given as either an integer or tuple of integers.
    # We also need to generate buffer tuples we can modify to contain the
    # final shape and strides (reshape_unchecked does not accept lists).
    if isinstance(window_shape, types.Integer):
        shape_buffer = tuple(range(x.ndim + 1))
        stride_buffer = tuple(range(x.ndim + 1))

        @register_jitable
        def get_window_shape(window_shape):
            return (window_shape,)
    elif (isinstance(window_shape, types.UniTuple) and
          isinstance(window_shape.dtype, types.Integer)):
        shape_buffer = tuple(range(x.ndim + len(window_shape)))
        stride_buffer = tuple(range(x.ndim + len(window_shape)))

        @register_jitable
        def get_window_shape(window_shape):
            return window_shape
    else:
        raise errors.TypingError(
            "window_shape must be an integer or tuple of integers"
        )

    # Axis must be integer, tuple of integers, or None for all axes.
    if is_nonelike(axis):
        @register_jitable
        def get_axis(window_shape, axis, ndim):
            return list(range(ndim))
    elif isinstance(axis, types.Integer):
        @register_jitable
        def get_axis(window_shape, axis, ndim):
            return [
                normalize_axis("sliding_window_view", "axis", ndim, axis)
            ]
    elif (isinstance(axis, types.UniTuple) and
          isinstance(axis.dtype, types.Integer)):
        @register_jitable
        def get_axis(window_shape, axis, ndim):
            return [normalize_axis("sliding_window_view", "axis", ndim, a)
                    for a in axis]
    else:
        raise errors.TypingError(
            "axis must be None, an integer or tuple of integers"
        )

    def sliding_window_view_impl(x, window_shape, axis=None):
        window_shape = get_window_shape(window_shape)
        axis = get_axis(window_shape, axis, x.ndim)
        if len(window_shape) != len(axis):
            raise ValueError(
                "Must provide matching length window_shape and axis"
            )

        # Initialise view details with shape and strides of x.
        out_shape = shape_buffer
        out_strides = stride_buffer
        for i in range(x.ndim):
            out_shape = tuple_setitem(out_shape, i, x.shape[i])
            out_strides = tuple_setitem(out_strides, i, x.strides[i])

        # Trim the dimensions being windowed and set the window shape and
        # strides. Note: the same axis can be windowed repeatedly.
        i = x.ndim
        for ax, dim in zip(axis, window_shape):
            if dim < 0:
                raise ValueError(
                    "`window_shape` cannot contain negative values"
                )
            if out_shape[ax] < dim:
                raise ValueError(
                    "window_shape cannot be larger than input array shape"
                )
            trimmed = out_shape[ax] - dim + 1
            out_shape = tuple_setitem(out_shape, ax, trimmed)
            out_shape = tuple_setitem(out_shape, i, dim)
            out_strides = tuple_setitem(out_strides, i, x.strides[ax])
            i += 1

        # The NumPy version calls as_strided, but our implementation of
        # as_strided is effectively a wrapper for reshape_unchecked.
        view = reshape_unchecked(x, out_shape, out_strides)
        return view

    return sliding_window_view_impl


@overload(bool)
def ol_bool(arr):
    if isinstance(arr, types.Array):
        def impl(arr):
            if arr.size == 0:
                if numpy_version < (2, 2):
                    return False  # this is deprecated
                else:
                    raise ValueError(("The truth value of an empty array is "
                                      "ambiguous. Use `array.size > 0` to "
                                      "check that an array is not empty."))
            elif arr.size == 1:
                return bool(arr.take(0))
            else:
                raise ValueError(("The truth value of an array with more than"
                                  " one element is ambiguous. Use a.any() or"
                                  " a.all()"))
        return impl


@overload(np.swapaxes)
def numpy_swapaxes(a, axis1, axis2):
    if not isinstance(axis1, (int, types.Integer)):
        raise errors.TypingError('The second argument "axis1" must be an '
                                 'integer')
    if not isinstance(axis2, (int, types.Integer)):
        raise errors.TypingError('The third argument "axis2" must be an '
                                 'integer')
    if not isinstance(a, types.Array):
        raise errors.TypingError('The first argument "a" must be an array')

    # create tuple list for transpose
    ndim = a.ndim
    axes_list = tuple(range(ndim))

    def impl(a, axis1, axis2):
        axis1 = normalize_axis("np.swapaxes", "axis1", ndim, axis1)
        axis2 = normalize_axis("np.swapaxes", "axis2", ndim, axis2)
        # to ensure tuple_setitem support of negative values
        if axis1 < 0:
            axis1 += ndim
        if axis2 < 0:
            axis2 += ndim

        axes_tuple = tuple_setitem(axes_list, axis1, axis2)
        axes_tuple = tuple_setitem(axes_tuple, axis2, axis1)
        return np.transpose(a, axes_tuple)

    return impl


@register_jitable
def _take_along_axis_impl(
    arr, indices, axis, Ni_orig, Nk_orig, indices_broadcast_shape
):
    # Based on example code in
    # https://github.com/numpy/numpy/blob/623bc1fae1d47df24e7f1e29321d0c0ba2771ce0/numpy/lib/shape_base.py#L90-L103
    # With addition of pre-broadcasting:
    # https://github.com/numpy/numpy/issues/19704

    # Wrap axis, it's used in tuple_setitem so must be (axis >= 0) to ensure
    # the GEP is in bounds.
    axis = normalize_axis("np.take_along_axis", "axis", arr.ndim, axis)

    # Broadcast the two arrays to matching shapes:
    arr_shape = list(arr.shape)
    arr_shape[axis] = 1
    for i, (d1, d2) in enumerate(zip(arr_shape, indices.shape)):
        if d1 == 1:
            new_val = d2
        elif d2 == 1:
            new_val = d1
        else:
            if d1 != d2:
                raise ValueError(
                    "`arr` and `indices` dimensions don't match"
                )
            new_val = d1
        indices_broadcast_shape = tuple_setitem(
            indices_broadcast_shape, i, new_val
        )
    arr_broadcast_shape = tuple_setitem(
        indices_broadcast_shape, axis, arr.shape[axis]
    )
    arr = np.broadcast_to(arr, arr_broadcast_shape)
    indices = np.broadcast_to(indices, indices_broadcast_shape)

    Ni = Ni_orig
    if len(Ni_orig) > 0:
        for i in range(len(Ni)):
            Ni = tuple_setitem(Ni, i, arr.shape[i])
    Nk = Nk_orig
    if len(Nk_orig) > 0:
        for i in range(len(Nk)):
            Nk = tuple_setitem(Nk, i, arr.shape[axis + 1 + i])

    J = indices.shape[axis]  # Need not equal M
    out = np.empty(Ni + (J,) + Nk, arr.dtype)

    np_s_ = (slice(None, None, None),)
    for ii in np.ndindex(Ni):
        for kk in np.ndindex(Nk):
            a_1d = arr[ii + np_s_ + kk]
            indices_1d = indices[ii + np_s_ + kk]
            out_1d = out[ii + np_s_ + kk]
            for j in range(J):
                out_1d[j] = a_1d[indices_1d[j]]
    return out


@overload(np.take_along_axis)
def arr_take_along_axis(arr, indices, axis):
    if not isinstance(arr, types.Array):
        raise errors.TypingError('The first argument "arr" must be an array')
    if not isinstance(indices, types.Array):
        raise errors.TypingError(
            'The second argument "indices" must be an array')
    if not isinstance(indices.dtype, types.Integer):
        raise errors.TypingError('The indices array must contain integers')

    if is_nonelike(axis):
        arr_ndim = 1
    else:
        arr_ndim = arr.ndim
    if arr_ndim != indices.ndim:
        # Matches NumPy error:
        raise errors.TypingError(
            "`indices` and `arr` must have the same number of dimensions"
        )

    indices_broadcast_shape = tuple(range(indices.ndim))

    if is_nonelike(axis):
        def take_along_axis_impl(arr, indices, axis):
            return _take_along_axis_impl(arr.flatten(), indices, 0, (), (),
                                         indices_broadcast_shape)
    else:
        check_is_integer(axis, "axis")
        if not isinstance(axis, types.IntegerLiteral):
            raise errors.NumbaValueError("axis must be a literal value")
        axis = axis.literal_value
        if axis < 0:
            axis = arr.ndim + axis
        if axis < 0 or axis >= arr.ndim:
            raise errors.NumbaValueError("axis is out of bounds")

        Ni = tuple(range(axis))
        Nk = tuple(range(axis + 1, arr.ndim))

        def take_along_axis_impl(arr, indices, axis):
            return _take_along_axis_impl(arr, indices, axis, Ni, Nk,
                                         indices_broadcast_shape)

    return take_along_axis_impl


@overload(np.nan_to_num)
def nan_to_num_impl(x, copy=True, nan=0.0, posinf=None, neginf=None):
    if isinstance(x, types.Number):
        if isinstance(x, types.Integer):
            # Integers do not have nans or infs
            def impl(x, copy=True, nan=0.0, posinf=None, neginf=None):
                return x
        elif isinstance(x, types.Float):
            def impl(x, copy=True, nan=0.0, posinf=None, neginf=None):
                min_inf = (
                    neginf if neginf is not None else np.finfo(type(x)).min
                )
                max_inf = (
                    posinf if posinf is not None else np.finfo(type(x)).max
                )
                if np.isnan(x):
                    return nan
                elif np.isneginf(x):
                    return min_inf
                elif np.isposinf(x):
                    return max_inf
                return x
        elif isinstance(x, types.Complex):
            def impl(x, copy=True, nan=0.0, posinf=None, neginf=None):
                r = np.nan_to_num(x.real, nan=nan, posinf=posinf,
                                  neginf=neginf)
                c = np.nan_to_num(x.imag, nan=nan, posinf=posinf,
                                  neginf=neginf)
                return complex(r, c)
        else:
            raise errors.TypingError(
                "Only Integer, Float, and Complex values are accepted"
            )
    elif type_can_asarray(x):
        if isinstance(x.dtype, types.Integer):
            # Integers do not have nans or infs
            def impl(x, copy=True, nan=0.0, posinf=None, neginf=None):
                return x
        elif isinstance(x.dtype, types.Float):
            def impl(x, copy=True, nan=0.0, posinf=None, neginf=None):
                min_inf = (
                    neginf if neginf is not None else np.finfo(x.dtype).min
                )
                max_inf = (
                    posinf if posinf is not None else np.finfo(x.dtype).max
                )
                x_ = np.asarray(x)
                output = np.copy(x_) if copy else x_
                output_flat = output.flat
                for i in range(output.size):
                    if np.isnan(output_flat[i]):
                        output_flat[i] = nan
                    elif np.isneginf(output_flat[i]):
                        output_flat[i] = min_inf
                    elif np.isposinf(output_flat[i]):
                        output_flat[i] = max_inf
                return output
        elif isinstance(x.dtype, types.Complex):
            def impl(x, copy=True, nan=0.0, posinf=None, neginf=None):
                x_ = np.asarray(x)
                output = np.copy(x_) if copy else x_
                np.nan_to_num(
                    output.real, copy=False, nan=nan, posinf=posinf,
                    neginf=neginf
                )
                np.nan_to_num(
                    output.imag, copy=False, nan=nan, posinf=posinf,
                    neginf=neginf
                )
                return output
        else:
            raise errors.TypingError(
                "Only Integer, Float, and Complex values are accepted"
            )
    else:
        raise errors.TypingError("The first argument must be a scalar or an "
                                 "array-like")

    return impl
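# --- Added usage sketch (illustrative only, not part of the original cell) ---
# A minimal, hedged example of exercising two of the overloads implemented
# above (`sliding_window_view` and `nan_to_num`) from jitted code; assumes a
# working numba + numpy install where these overloads are registered. The
# function name below is hypothetical.
import numpy as np
from numba import njit

@njit
def _demo_window_means(a):
    # Length-3 windows over a 1d array, via the sliding_window_view overload.
    windows = np.lib.stride_tricks.sliding_window_view(a, 3)
    out = np.empty(windows.shape[0])
    for i in range(windows.shape[0]):
        # nan_to_num overload: NaNs contribute 0.0 to each window mean.
        out[i] = np.nan_to_num(windows[i]).mean()
    return out

# _demo_window_means(np.array([1.0, np.nan, 3.0, 4.0]))  # one mean per window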
FancyIndexer
python
django__django
tests/generic_views/views.py
{ "start": 1667, "end": 1787 }
class ____(generic.ListView):
    template_name = "generic_views/list.html"
    queryset = Artist.objects.all()
ArtistList
python
h5py__h5py
h5py/tests/test_group.py
{ "start": 14847, "end": 15397 }
class ____(BaseMapping):

    """
        Feature: You can iterate over group members via "for x in y", etc.
    """

    def test_iter(self):
        """ "for x in y" iteration """
        lst = [x for x in self.f]
        self.assertSameElements(lst, self.groups)

    def test_iter_zero(self):
        """ Iteration works properly for the case with no group members """
        hfile = File(self.mktemp(), 'w')
        try:
            lst = [x for x in hfile]
            self.assertEqual(lst, [])
        finally:
            hfile.close()
TestIter
python
sympy__sympy
sympy/physics/quantum/kind.py
{ "start": 1191, "end": 1484 }
class ____(Kind):
    """A kind for quantum bras."""

    def __new__(cls):
        obj = super().__new__(cls)
        return obj

    def __repr__(self):
        return "BraKind"


# Create an instance as many situations need this.
BraKind = _BraKind()

from sympy.core.kind import Kind
_BraKind
python
numba__llvmlite
llvmlite/binding/ffi.py
{ "start": 2356, "end": 3622 }
class ____:
    """A Lock to guarantee thread-safety for the LLVM C-API.

    This class implements __enter__ and __exit__ for acquiring and releasing
    the lock as a context manager.

    Also, callbacks can be attached so that every time the lock is acquired
    and released the corresponding callbacks will be invoked.
    """

    def __init__(self):
        # The reentrant lock is needed for callbacks that re-enter
        # the Python interpreter.
        self._lock = threading.RLock()
        self._cblist = []

    def register(self, acq_fn, rel_fn):
        """Register callbacks that are invoked immediately after the lock
        is acquired (``acq_fn()``) and immediately before the lock is
        released (``rel_fn()``).
        """
        self._cblist.append((acq_fn, rel_fn))

    def unregister(self, acq_fn, rel_fn):
        """Remove the registered callbacks.
        """
        self._cblist.remove((acq_fn, rel_fn))

    def __enter__(self):
        self._lock.acquire()
        # Invoke all callbacks
        for acq_fn, rel_fn in self._cblist:
            acq_fn()

    def __exit__(self, *exc_details):
        # Invoke all callbacks
        for acq_fn, rel_fn in self._cblist:
            rel_fn()
        self._lock.release()
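# --- Added usage sketch (assumes the placeholder class above resolves to
# --- `_LLVMLock`, per this row's target; the callback functions below are
# --- hypothetical stand-ins) ---
# The lock is taken as a context manager around LLVM C-API calls; the
# registered callbacks bracket the critical section.
def _before():
    pass  # e.g. prepare per-thread state before entering LLVM

def _after():
    pass  # e.g. restore per-thread state after leaving LLVM

lock = _LLVMLock()
lock.register(_before, _after)
with lock:
    pass  # call into the LLVM C-API here
lock.unregister(_before, _after)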
_LLVMLock
python
dask__distributed
distributed/active_memory_manager.py
{ "start": 20045, "end": 21743 }
class ____(ActiveMemoryManagerPolicy):
    """Make sure that in-memory tasks are not replicated on more workers than
    desired; drop the excess replicas.
    """

    def run(self) -> SuggestionGenerator:
        nkeys = 0
        ndrop = 0

        for ts in self.manager.scheduler.replicated_tasks:
            desired_replicas = 1  # TODO have a marker on TaskState
            assert ts.who_has

            nwaiters = len(ts.waiters or ())
            if desired_replicas < nwaiters < 20:
                # If a dependent task has not been assigned to a worker yet, err on the
                # side of caution and preserve an additional replica for it.
                # However, if two dependent tasks have been already assigned to the same
                # worker, don't double count them.
                # This calculation is quite CPU-intensive, so it's disabled for tasks
                # with lots of waiters.
                nwaiters = len(
                    {waiter.processing_on or waiter for waiter in ts.waiters or ()}
                )

            ndrop_key = len(ts.who_has) - max(desired_replicas, nwaiters)
            if ts in self.manager.pending:
                pending_repl, pending_drop = self.manager.pending[ts]
                ndrop_key += len(pending_repl) - len(pending_drop)

            if ndrop_key > 0:
                nkeys += 1
                ndrop += ndrop_key
                for _ in range(ndrop_key):
                    yield Suggestion("drop", ts)

        if ndrop:
            logger.debug(
                "ReduceReplicas: Dropping %d superfluous replicas of %d tasks",
                ndrop,
                nkeys,
            )
ReduceReplicas
python
altair-viz__altair
tools/markup.py
{ "start": 1454, "end": 1941 }
class ____(_RSTRenderer):
    def __init__(self) -> None:
        super().__init__()

    def inline_html(self, token: Token, state: BlockState) -> str:
        html = token["raw"]
        if html == "<br/>":
            return "\n"
        # HACK: https://github.com/vega/altair/pull/3787#discussion_r1939885356
        elif re.match(r"<span style=\"color: #.+;\">", html) or html == "</span>":
            return ""
        else:
            return rf" :raw-html:`{html}` "
RSTRenderer
python
jina-ai__jina
jina/proto/docarray_v1/pb/jina_pb2_grpc.py
{ "start": 5808, "end": 6900 }
class ____(object):
    """*
    jina gRPC service for DataRequests.
    This is used to send requests to Executors when a list of requests is not needed
    """

    def stream_doc(self, request, context):
        """Used for streaming one document to the Executors"""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_JinaSingleDocumentRequestRPCServicer_to_server(servicer, server):
    rpc_method_handlers = {
        'stream_doc': grpc.unary_stream_rpc_method_handler(
            servicer.stream_doc,
            request_deserializer=jina__pb2.SingleDocumentRequestProto.FromString,
            response_serializer=jina__pb2.SingleDocumentRequestProto.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'jina.JinaSingleDocumentRequestRPC', rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
JinaSingleDocumentRequestRPCServicer
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B024.py
{ "start": 766, "end": 849 }
class ____(ABC):
    @abstractaoeuaoeuaoeu
    def method(self):
        foo()
Base_6
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeVarDefault2.py
{ "start": 548, "end": 585 }
class ____[T: int = float]: ...
ClassT3
python
sqlalchemy__sqlalchemy
test/engine/test_ddlevents.py
{ "start": 17402, "end": 17972 }
class ____(DDLEventWCreateHarness, fixtures.TestBase):
    creates_implicitly_with_table = False
    drops_implicitly_with_table = True
    supports_standalone_create = False

    @testing.fixture
    def produce_subject(self):
        return Index("my_idx", "key")

    @testing.fixture
    def produce_table_integrated_subject(self, metadata, produce_subject):
        return Table(
            "t",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("key", String(50)),
            produce_subject,
        )
IndexDDLEventTest
python
dagster-io__dagster
python_modules/libraries/dagster-dbt/dagster_dbt/cloud/resources.py
{ "start": 915, "end": 1240 }
class ____(str, Enum):
    QUEUED = "Queued"
    STARTING = "Starting"
    RUNNING = "Running"
    SUCCESS = "Success"
    ERROR = "Error"
    CANCELLED = "Cancelled"


# TODO: This resource should be a wrapper over an existing client for accessing dbt Cloud,
# rather than using requests to the API directly.
DbtCloudRunStatus
python
scipy__scipy
scipy/sparse/tests/test_base.py
{ "start": 101207, "end": 102051 }
class ____:
    def test_solve(self):
        # Test whether the lu_solve command segfaults, as reported by Nils
        # Wagner for a 64-bit machine, 02 March 2005 (EJS)
        n = 20
        np.random.seed(0)  # make tests repeatable
        A = zeros((n, n), dtype=complex)
        x = np.random.rand(n)
        y = np.random.rand(n - 1) + 1j * np.random.rand(n - 1)
        r = np.random.rand(n)
        for i in range(len(x)):
            A[i, i] = x[i]
        for i in range(len(y)):
            A[i, i + 1] = y[i]
            A[i + 1, i] = conjugate(y[i])
        A = self.spcreator(A)
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", "splu converted its input to CSC format",
                SparseEfficiencyWarning)
            x = splu(A).solve(r)
        assert_almost_equal(A @ x, r)
_TestSolve
python
walkccc__LeetCode
solutions/2767. Partition String Into Minimum Beautiful Substrings/2767.py
{ "start": 0, "end": 619 }
class ____:
    def minimumBeautifulSubstrings(self, s: str) -> int:
        n = len(s)
        # dp[i] := the minimum number of beautiful substrings for the first i chars
        dp = [0] + [n + 1] * n

        for i in range(1, n + 1):
            if s[i - 1] == '0':
                continue
            num = 0  # the number of s[i - 1..j - 1]
            for j in range(i, n + 1):
                num = (num << 1) + int(s[j - 1])
                if self._isPowerOfFive(num):
                    dp[j] = min(dp[j], dp[i - 1] + 1)

        return -1 if dp[n] == n + 1 else dp[n]

    def _isPowerOfFive(self, num: int) -> bool:
        while num % 5 == 0:
            num //= 5
        return num == 1
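# --- Added worked example (assumes the placeholder class above is `Solution`,
# --- per this row's target) ---
# For s = "1011": starting at i = 1, the substring "101" is 5 (a power of
# five), so dp[3] = 1; the final '1' then gives dp[4] = 2. The minimum
# beautiful partition is ["101", "1"]:
#
#     Solution().minimumBeautifulSubstrings("1011")  # -> 2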
Solution
python
chroma-core__chroma
sample_apps/generative_benchmarking/functions/types.py
{ "start": 470, "end": 534 }
class ____:
    lookup: Dict[str, QueryItem]


@dataclass
QueryLookup
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-clickhouse/llama_index/vector_stores/clickhouse/base.py
{ "start": 1469, "end": 3344 }
class ____:
    """
    ClickHouse Client Configuration.

    Args:
        table (str): Table name to operate on.
        database (str): Database name to find the table.
        engine (str): Engine. Options are "MergeTree" and "Memory".
            Default is "MergeTree".
        index_type (str): Index type string.
        metric (str): Metric type to compute distance e.g., cosine or l2
        batch_size (int): The size of documents to insert.
        index_params (dict, optional): Index build parameter.
        search_params (dict, optional): Index search parameters for ClickHouse query.
    """

    def __init__(
        self,
        table: str,
        database: str,
        engine: str,
        index_type: str,
        metric: str,
        batch_size: int,
        dimension: Optional[int] = None,
        index_params: Optional[dict] = None,
        search_params: Optional[dict] = None,
        **kwargs: Any,
    ) -> None:
        self.table = table
        self.database = database
        self.engine = engine
        self.index_type = index_type
        self.metric = metric
        self.batch_size = batch_size
        self.dimension = dimension
        self.index_params = index_params
        self.search_params = search_params

    def build_query_statement(
        self,
        query_embed: List[float],
        where_str: Optional[str] = None,
        limit: Optional[int] = None,
    ) -> str:
        query_embed_str = format_list_to_string(query_embed)
        where_str = f"WHERE {where_str}" if where_str else ""
        distance = DISTANCE_MAPPING[self.metric]
        return f"""
            SELECT id, doc_id, text, node_info, metadata,
            {distance}(vector, {query_embed_str}) AS score
            FROM {self.database}.{self.table} {where_str}
            ORDER BY score ASC
            LIMIT {limit}
            """
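# --- Added usage sketch (assumes the placeholder class above is
# --- `ClickHouseSettings`, per this row's target; all argument values are
# --- illustrative) ---
settings = ClickHouseSettings(
    table="documents",
    database="default",
    engine="MergeTree",
    index_type="NONE",
    metric="cosine",
    batch_size=1000,
)
# Renders a SELECT ordered by ascending distance score, roughly:
#   SELECT id, doc_id, text, node_info, metadata,
#          <distance fn>(vector, [0.1,0.2]) AS score
#   FROM default.documents ORDER BY score ASC LIMIT 5
# (the distance function name comes from the module's DISTANCE_MAPPING).
sql = settings.build_query_statement([0.1, 0.2], limit=5)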
ClickHouseSettings
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/dlp.py
{ "start": 107744, "end": 111851 }
class ____(GoogleCloudBaseOperator):
    """
    Updates the InspectTemplate.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudDLPUpdateInspectTemplateOperator`

    :param template_id: The ID of the inspect template to be updated.
    :param organization_id: (Optional) The organization ID. Required to set this
        field if parent resource is an organization.
    :param project_id: (Optional) Google Cloud project ID where the DLP Instance
        exists. Only set this field if the parent resource is a project instead
        of an organization.
    :param inspect_template: New InspectTemplate value.
    :param update_mask: Mask to control which fields get updated.
    :param retry: (Optional) A retry object used to retry requests.
        If None is specified, requests will not be retried.
    :param timeout: (Optional) The amount of time, in seconds, to wait for the
        request to complete. Note that if retry is specified, the timeout applies
        to each individual attempt.
    :param metadata: (Optional) Additional metadata that is provided to the method.
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using
        short-term credentials, or chained list of accounts required to get the
        access_token of the last account in the list, which will be impersonated
        in the request. If set as a string, the account must grant the originating
        account the Service Account Token Creator IAM role. If set as a sequence,
        the identities from the list must grant Service Account Token Creator IAM
        role to the directly preceding identity, with first account from the list
        granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        "template_id",
        "organization_id",
        "project_id",
        "inspect_template",
        "update_mask",
        "gcp_conn_id",
        "impersonation_chain",
    )
    operator_extra_links = (CloudDLPInspectTemplateDetailsLink(),)

    def __init__(
        self,
        *,
        template_id: str,
        organization_id: str | None = None,
        project_id: str = PROVIDE_PROJECT_ID,
        inspect_template: dict | InspectTemplate | None = None,
        update_mask: dict | FieldMask | None = None,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.template_id = template_id
        self.organization_id = organization_id
        self.project_id = project_id
        self.inspect_template = inspect_template
        self.update_mask = update_mask
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context):
        hook = CloudDLPHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        template = hook.update_inspect_template(
            template_id=self.template_id,
            organization_id=self.organization_id,
            project_id=self.project_id,
            inspect_template=self.inspect_template,
            update_mask=self.update_mask,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        project_id = self.project_id or hook.project_id
        if project_id:
            CloudDLPInspectTemplateDetailsLink.persist(
                context=context,
                project_id=project_id,
                template_name=self.template_id,
            )
        return InspectTemplate.to_dict(template)
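# --- Added usage sketch (hypothetical DAG wiring; assumes the class above is
# --- `CloudDLPUpdateInspectTemplateOperator`, per this row's target) ---
# update_template = CloudDLPUpdateInspectTemplateOperator(
#     task_id="update_inspect_template",
#     template_id="my-template",
#     project_id="my-project",
#     inspect_template={"display_name": "updated name"},
#     update_mask={"paths": ["display_name"]},
# )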
CloudDLPUpdateInspectTemplateOperator
python
scipy__scipy
scipy/stats/tests/test_stats.py
{ "start": 86310, "end": 98837 }
class ____:
    def test_linregressBIGX(self):
        # W.II.F. Regress BIG on X.
        result = stats.linregress(X, BIG)
        assert_almost_equal(result.intercept, 99999990)
        assert_almost_equal(result.rvalue, 1.0)
        # The uncertainty ought to be almost zero
        # since all points lie on a line
        assert_almost_equal(result.stderr, 0.0)
        assert_almost_equal(result.intercept_stderr, 0.0)

    def test_regressXX(self):
        # W.IV.B. Regress X on X.
        # The constant should be exactly 0 and the regression coefficient
        # should be 1. This is a perfectly valid regression and the
        # program should not complain.
        result = stats.linregress(X, X)
        assert_almost_equal(result.intercept, 0.0)
        assert_almost_equal(result.rvalue, 1.0)
        # The uncertainly on regression through two points ought to be 0
        assert_almost_equal(result.stderr, 0.0)
        assert_almost_equal(result.intercept_stderr, 0.0)

        # W.IV.C. Regress X on BIG and LITTLE (two predictors). The program
        # should tell you that this model is "singular" because BIG and
        # LITTLE are linear combinations of each other. Cryptic error
        # messages are unacceptable here. Singularity is the most
        # fundamental regression error.
        #
        # Need to figure out how to handle multiple linear regression.
        # This is not obvious

    def test_regressZEROX(self):
        # W.IV.D. Regress ZERO on X.
        # The program should inform you that ZERO has no variance or it should
        # go ahead and compute the regression and report a correlation and
        # total sum of squares of exactly 0.
        result = stats.linregress(X, ZERO)
        assert_almost_equal(result.intercept, 0.0)
        with pytest.warns(stats.ConstantInputWarning, match="An input array..."):
            ref_rvalue = stats.pearsonr(X, ZERO).statistic
        assert_almost_equal(result.rvalue, ref_rvalue)

    def test_regress_simple(self):
        # Regress a line with sinusoidal noise.
        x = np.linspace(0, 100, 100)
        y = 0.2 * np.linspace(0, 100, 100) + 10
        y += np.sin(np.linspace(0, 20, 100))

        result = stats.linregress(x, y)
        lr = LinregressResult
        assert_(isinstance(result, lr))
        assert_almost_equal(result.stderr, 2.3957814497838803e-3)

    def test_regress_alternative(self):
        # test alternative parameter
        x = np.linspace(0, 100, 100)
        y = 0.2 * np.linspace(0, 100, 100) + 10  # slope is greater than zero
        y += np.sin(np.linspace(0, 20, 100))

        with pytest.raises(ValueError, match="`alternative` must be 'less'..."):
            stats.linregress(x, y, alternative="ekki-ekki")

        res1 = stats.linregress(x, y, alternative="two-sided")

        # slope is greater than zero, so "less" p-value should be large
        res2 = stats.linregress(x, y, alternative="less")
        assert_allclose(res2.pvalue, 1 - (res1.pvalue / 2))

        # slope is greater than zero, so "greater" p-value should be small
        res3 = stats.linregress(x, y, alternative="greater")
        assert_allclose(res3.pvalue, res1.pvalue / 2)

        assert res1.rvalue == res2.rvalue == res3.rvalue

    def test_regress_against_R(self):
        # test against R `lm`
        # options(digits=16)
        # x <- c(151, 174, 138, 186, 128, 136, 179, 163, 152, 131)
        # y <- c(63, 81, 56, 91, 47, 57, 76, 72, 62, 48)
        # relation <- lm(y~x)
        # print(summary(relation))
        x = [151, 174, 138, 186, 128, 136, 179, 163, 152, 131]
        y = [63, 81, 56, 91, 47, 57, 76, 72, 62, 48]
        res = stats.linregress(x, y, alternative="two-sided")
        # expected values from R's `lm` above
        assert_allclose(res.slope, 0.6746104491292)
        assert_allclose(res.intercept, -38.4550870760770)
        assert_allclose(res.rvalue, np.sqrt(0.95478224775))
        assert_allclose(res.pvalue, 1.16440531074e-06)
        assert_allclose(res.stderr, 0.0519051424731)
        assert_allclose(res.intercept_stderr, 8.0490133029927)

    def test_linregress(self):
        # compared with multivariate ols with pinv
        x = np.arange(11)
        y = np.arange(5, 16)
        y[[(1), (-2)]] -= 1
        y[[(0), (-1)]] += 1

        result = stats.linregress(x, y)

        # This test used to use 'assert_array_almost_equal' but its
        # formulation got confusing since LinregressResult became
        # _lib._bunch._make_tuple_bunch instead of namedtuple
        # (for backwards compatibility, see PR #12983)
        def assert_ae(x, y):
            return assert_almost_equal(x, y, decimal=14)
        assert_ae(result.slope, 1.0)
        assert_ae(result.intercept, 5.0)
        assert_ae(result.rvalue, 0.98229948625750)
        assert_ae(result.pvalue, 7.45259691e-008)
        assert_ae(result.stderr, 0.063564172616372733)
        assert_ae(result.intercept_stderr, 0.37605071654517686)

    def test_regress_simple_negative_cor(self):
        # If the slope of the regression is negative the factor R tend
        # to -1 not 1. Sometimes rounding errors makes it < -1
        # leading to stderr being NaN.
        a, n = 1e-71, 100000
        x = np.linspace(a, 2 * a, n)
        y = np.linspace(2 * a, a, n)
        result = stats.linregress(x, y)

        # Make sure propagated numerical errors
        # did not bring rvalue below -1 (or were coerced)
        assert_(result.rvalue >= -1)
        assert_almost_equal(result.rvalue, -1)

        # slope and intercept stderror should stay numeric
        assert_(not np.isnan(result.stderr))
        assert_(not np.isnan(result.intercept_stderr))

    def test_linregress_result_attributes(self):
        x = np.linspace(0, 100, 100)
        y = 0.2 * np.linspace(0, 100, 100) + 10
        y += np.sin(np.linspace(0, 20, 100))
        result = stats.linregress(x, y)

        # Result is of a correct class
        lr = LinregressResult
        assert_(isinstance(result, lr))

        # LinregressResult elements have correct names
        attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
        check_named_results(result, attributes)
        # Also check that the extra attribute (intercept_stderr) is present
        assert 'intercept_stderr' in dir(result)

    def test_regress_two_inputs(self):
        # Regress a simple line formed by two points.
        x = np.arange(2)
        y = np.arange(3, 5)
        result = stats.linregress(x, y)

        # Non-horizontal line
        assert_almost_equal(result.pvalue, 0.0)

        # Zero error through two points
        assert_almost_equal(result.stderr, 0.0)
        assert_almost_equal(result.intercept_stderr, 0.0)

    def test_regress_two_inputs_horizontal_line(self):
        # Regress a horizontal line formed by two points.
        x = np.arange(2)
        y = np.ones(2)
        result = stats.linregress(x, y)

        # Horizontal line
        assert_almost_equal(result.pvalue, 1.0)

        # Zero error through two points
        assert_almost_equal(result.stderr, 0.0)
        assert_almost_equal(result.intercept_stderr, 0.0)

    def test_nist_norris(self):
        # If this causes a lint failure in the future, please note the history
        # of requests to allow extra whitespace in table formatting
        # (e.g. gh-12367).
        # Also see https://github.com/scipy/scipy/wiki/Why-do-we-not-use-an-auto%E2%80%90formatter%3F  # noqa: E501
        x = [0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6,
             777.0, 558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6,
             556.0, 228.1, 995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1,
             887.2, 999.0, 779.0, 11.1, 118.3, 229.2, 669.1, 448.9, 0.5]
        y = [0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1,
             778.9, 559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8,
             557.7, 228.3, 998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3,
             888.0, 998.5, 778.9, 10.2, 117.6, 228.9, 668.4, 449.2, 0.2]
        result = stats.linregress(x, y)

        assert_almost_equal(result.slope, 1.00211681802045)
        assert_almost_equal(result.intercept, -0.262323073774029)
        assert_almost_equal(result.rvalue**2, 0.999993745883712)
        assert_almost_equal(result.pvalue, 0.0)
        assert_almost_equal(result.stderr, 0.00042979684820)
        assert_almost_equal(result.intercept_stderr, 0.23281823430153)

    def test_compare_to_polyfit(self):
        x = np.linspace(0, 100, 100)
        y = 0.2 * np.linspace(0, 100, 100) + 10
        y += np.sin(np.linspace(0, 20, 100))
        result = stats.linregress(x, y)
        poly = np.polyfit(x, y, 1)  # Fit 1st degree polynomial

        # Make sure linear regression slope and intercept
        # match with results from numpy polyfit
        assert_almost_equal(result.slope, poly[0])
        assert_almost_equal(result.intercept, poly[1])

    def test_empty_input(self):
        with pytest.warns(SmallSampleWarning, match="One or more sample..."):
            res = stats.linregress([], [])
        assert np.all(np.isnan(res))

    def test_nan_input(self):
        x = np.arange(10.)
        x[9] = np.nan

        with np.errstate(invalid="ignore"):
            result = stats.linregress(x, x)

        # Make sure the result still comes back as `LinregressResult`
        lr = LinregressResult
        assert_(isinstance(result, lr))
        assert_array_equal(result, (np.nan,) * 5)
        assert_equal(result.intercept_stderr, np.nan)

    def test_identical_x(self):
        rng = np.random.default_rng(7872425088)
        x = np.zeros(10)
        y = rng.random(10)
        msg = "Cannot calculate a linear regression"
        with assert_raises(ValueError, match=msg):
            stats.linregress(x, y)


def test_theilslopes():
    # Basic slope test.
    slope, intercept, lower, upper = stats.theilslopes([0, 1, 1])
    assert_almost_equal(slope, 0.5)
    assert_almost_equal(intercept, 0.5)

    msg = ("method must be either 'joint' or 'separate'."
           "'joint_separate' is invalid.")
    with pytest.raises(ValueError, match=msg):
        stats.theilslopes([0, 1, 1], method='joint_separate')

    slope, intercept, lower, upper = stats.theilslopes([0, 1, 1],
                                                       method='joint')
    assert_almost_equal(slope, 0.5)
    assert_almost_equal(intercept, 0.0)

    # Test of confidence intervals.
    x = [1, 2, 3, 4, 10, 12, 18]
    y = [9, 15, 19, 20, 45, 55, 78]
    slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07,
                                                       method='separate')
    assert_almost_equal(slope, 4)
    assert_almost_equal(intercept, 4.0)
    assert_almost_equal(upper, 4.38, decimal=2)
    assert_almost_equal(lower, 3.71, decimal=2)

    slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07,
                                                       method='joint')
    assert_almost_equal(slope, 4)
    assert_almost_equal(intercept, 6.0)
    assert_almost_equal(upper, 4.38, decimal=2)
    assert_almost_equal(lower, 3.71, decimal=2)


def test_cumfreq():
    x = [1, 4, 2, 1, 3, 1]
    cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4)
    assert_array_almost_equal(cumfreqs, np.array([3., 4., 5., 6.]))
    cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(
        x, numbins=4, defaultreallimits=(1.5, 5))
    assert_(extrapoints == 3)

    # test for namedtuple attribute results
    attributes = ('cumcount', 'lowerlimit', 'binsize', 'extrapoints')
    res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
    check_named_results(res, attributes)


def test_relfreq():
    a = np.array([1, 4, 2, 1, 3, 1])
    relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4)
    assert_array_almost_equal(relfreqs,
                              array([0.5, 0.16666667, 0.16666667, 0.16666667]))

    # test for namedtuple attribute results
    attributes = ('frequency', 'lowerlimit', 'binsize', 'extrapoints')
    res = stats.relfreq(a, numbins=4)
    check_named_results(res, attributes)

    # check array_like input is accepted
    relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1],
                                                            numbins=4)
    assert_array_almost_equal(relfreqs, relfreqs2)
TestRegression
python
walkccc__LeetCode
solutions/230. Kth Smallest Element in a BST/230-2.py
{ "start": 0, "end": 390 }
class ____:
    def kthSmallest(self, root: TreeNode | None, k: int) -> int:
        rank = 0
        ans = 0

        def traverse(root: TreeNode | None) -> None:
            nonlocal rank
            nonlocal ans
            if not root:
                return
            traverse(root.left)
            rank += 1
            if rank == k:
                ans = root.val
                return
            traverse(root.right)

        traverse(root)
        return ans
Solution
python
pytorch__pytorch
test/quantization/core/experimental/test_floatx.py
{ "start": 15120, "end": 16750 }
class ____(TestCase):
    """
    Test of mul implementation

    NOTE: this is CPU-only for now because adding it to CUDA requires adding
    yet another C++ dtype macro, and there is no use case yet for unscaled
    float8 multiplication - doesn't seem worth it.
    """

    @dtypes(*CUDA_FLOAT8_DTYPES)
    def test_mul(self, dtype):
        # TODO(#113663): remove arithmetic support from all float8 dtypes
        if dtype is torch.float8_e8m0fnu:
            return unittest.skip("arithmetic not supported for torch.float8_e8m0fnu")
        shape = (10, 10)
        a = torch.randn(shape)
        a8_simulated = simulate_fp8_precision(a, dtype)
        a8 = a.to(dtype)
        b = torch.randn(shape)
        b8_simulated = simulate_fp8_precision(b, dtype)
        b8 = b.to(dtype)
        mul8 = a8 * b8
        mul8_simulated = (a8_simulated * b8_simulated).to(dtype)
        self.assertEqual(mul8, mul8_simulated)

    @unittest.skipIf(IS_WINDOWS, "torch.compile not supported on Windows yet")
    @dtypes(*CUDA_FLOAT8_DTYPES)
    def test_pt2_traceable_aot_eager(self, dtype):
        if dtype is torch.float8_e8m0fnu:
            return unittest.skip(
                "PT2 support for torch.float8_e8m0fnu is not implemented yet"
            )

        @torch.compile(backend="aot_eager", fullgraph=True)
        def f(x):
            x = x.to(dtype)
            x = x.float()
            return x

        x = torch.randn(1).requires_grad_()
        f(x).sum().backward()


instantiate_device_type_tests(TestFloat8DtypeCPUOnly, globals(), only_for="cpu")

if __name__ == "__main__":
    run_tests()
TestFloat8DtypeCPUOnly
python
GoogleCloudPlatform__python-docs-samples
appengine/standard_python3/bundled-services/deferred/wsgi/main.py
{ "start": 2131, "end": 2637 }
class ____:
    def __call__(self, environ, start_response):
        path = environ.get("PATH_INFO", "").lstrip("/")
        for regex, handler in routes.items():
            match = re.search(regex, path)
            if match is not None:
                return handler(environ, start_response)

        start_response("404 Not Found", [("Content-Type", "text/plain")])
        return [b"Not found"]


app = wrap_wsgi_app(WSGIApplication(), use_deferred=True)
# [END gae_deferred_handler_wsgi]
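# --- Added usage sketch (the `routes` mapping is defined elsewhere in this
# --- sample; the regexes and handlers below are hypothetical stand-ins) ---
# routes = {
#     r"^$": home_handler,             # dispatches the root path
#     r"^run-deferred$": run_handler,  # dispatches /run-deferred
# }
# Each handler is a WSGI callable: handler(environ, start_response) -> iterable.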
WSGIApplication
python
getsentry__sentry
src/sentry/relocation/api/serializers/relocation.py
{ "start": 605, "end": 1395 }
class ____:
    """
    Some useful info to collect about a relocation when serving it.
    """

    # Maps the creator/owner's (aka "meta" users) `id`s to their respective `username`.
    meta_users: Mapping[int, RpcUser]

    # List the ids of the imported `User` models.
    imported_user_ids: list[int]

    # List the ids of the imported `Organization` models.
    imported_org_ids: list[int]


def get_all_imported_ids_of_model(chunks: QuerySet[BaseImportChunk]) -> list[int]:
    all_imported_ids = set()
    for chunk in chunks:
        all_imported_ids |= (
            set(chunk.inserted_map.values())
            | set(chunk.existing_map.values())
            | set(chunk.overwrite_map.values())
        )
    return list(all_imported_ids)


@register(Relocation)
RelocationMetadata
python
gevent__gevent
src/greentest/3.12/test_socket.py
{ "start": 201880, "end": 206379 }
class ____(FileObjectClassTestCase):

    """Repeat the tests from FileObjectClassTestCase with bufsize==0.

    In this case (and in this case only), it should be possible to
    create a file object, read a line from it, create another file
    object, read another line from it, without loss of data in the
    first file object's buffer.  Note that http.client relies on this
    when reading multiple requests from the same socket."""

    bufsize = 0  # Use unbuffered mode

    def testUnbufferedReadline(self):
        # Read a line, create a new file object, read another line with it
        line = self.read_file.readline()  # first line
        self.assertEqual(line, b"A. " + self.write_msg)  # first line
        self.read_file = self.cli_conn.makefile('rb', 0)
        line = self.read_file.readline()  # second line
        self.assertEqual(line, b"B. " + self.write_msg)  # second line

    def _testUnbufferedReadline(self):
        self.write_file.write(b"A. " + self.write_msg)
        self.write_file.write(b"B. " + self.write_msg)
        self.write_file.flush()

    def testMakefileClose(self):
        # The file returned by makefile should keep the socket open...
        self.cli_conn.close()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, self.read_msg)
        # ...until the file is itself closed
        self.read_file.close()
        self.assertRaises(OSError, self.cli_conn.recv, 1024)

    def _testMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    @unittest.skipUnless(hasattr(sys, 'getrefcount'),
                         'test needs sys.getrefcount()')
    def testMakefileCloseSocketDestroy(self):
        refcount_before = sys.getrefcount(self.cli_conn)
        self.read_file.close()
        refcount_after = sys.getrefcount(self.cli_conn)
        self.assertEqual(refcount_before - 1, refcount_after)

    def _testMakefileCloseSocketDestroy(self):
        pass

    # Non-blocking ops
    # NOTE: to set `read_file` as non-blocking, we must call
    # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).

    def testSmallReadNonBlocking(self):
        self.cli_conn.setblocking(False)
        self.assertEqual(self.read_file.readinto(bytearray(10)), None)
        self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
        self.evt1.set()
        self.evt2.wait(1.0)
        first_seg = self.read_file.read(len(self.read_msg) - 3)
        if first_seg is None:
            # Data not arrived (can happen under Windows), wait a bit
            time.sleep(0.5)
            first_seg = self.read_file.read(len(self.read_msg) - 3)
        buf = bytearray(10)
        n = self.read_file.readinto(buf)
        self.assertEqual(n, 3)
        msg = first_seg + buf[:n]
        self.assertEqual(msg, self.read_msg)
        self.assertEqual(self.read_file.readinto(bytearray(16)), None)
        self.assertEqual(self.read_file.read(1), None)

    def _testSmallReadNonBlocking(self):
        self.evt1.wait(1.0)
        self.write_file.write(self.write_msg)
        self.write_file.flush()
        self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise system recv() will return 0 instead of EWOULDBLOCK.
        self.serv_finished.wait(5.0)

    def testWriteNonBlocking(self):
        self.cli_finished.wait(5.0)
        # The client thread can't skip directly - the SkipTest exception
        # would appear as a failure.
        if self.serv_skipped:
            self.skipTest(self.serv_skipped)

    def _testWriteNonBlocking(self):
        self.serv_skipped = None
        self.serv_conn.setblocking(False)
        # Try to saturate the socket buffer pipe with repeated large writes.
        BIG = b"x" * support.SOCK_MAX_SIZE
        LIMIT = 10
        # The first write() succeeds since a chunk of data can be buffered
        n = self.write_file.write(BIG)
        self.assertGreater(n, 0)
        for i in range(LIMIT):
            n = self.write_file.write(BIG)
            if n is None:
                # Succeeded
                break
            self.assertGreater(n, 0)
        else:
            # Let us know that this test didn't manage to establish
            # the expected conditions. This is not a failure in itself but,
            # if it happens repeatedly, the test should be fixed.
            self.serv_skipped = "failed to saturate the socket buffer"
UnbufferedFileObjectClassTestCase
python
python-openxml__python-docx
src/docx/opc/constants.py
{ "start": 132, "end": 8549 }
class ____:
    """Content type URIs (like MIME-types) that specify a part's format."""

    BMP = "image/bmp"
    DML_CHART = "application/vnd.openxmlformats-officedocument.drawingml.chart+xml"
    DML_CHARTSHAPES = "application/vnd.openxmlformats-officedocument.drawingml.chartshapes+xml"
    DML_DIAGRAM_COLORS = "application/vnd.openxmlformats-officedocument.drawingml.diagramColors+xml"
    DML_DIAGRAM_DATA = "application/vnd.openxmlformats-officedocument.drawingml.diagramData+xml"
    DML_DIAGRAM_LAYOUT = "application/vnd.openxmlformats-officedocument.drawingml.diagramLayout+xml"
    DML_DIAGRAM_STYLE = "application/vnd.openxmlformats-officedocument.drawingml.diagramStyle+xml"
    GIF = "image/gif"
    JPEG = "image/jpeg"
    MS_PHOTO = "image/vnd.ms-photo"
    OFC_CUSTOM_PROPERTIES = "application/vnd.openxmlformats-officedocument.custom-properties+xml"
    OFC_CUSTOM_XML_PROPERTIES = "application/vnd.openxmlformats-officedocument.customXmlProperties+xml"
    OFC_DRAWING = "application/vnd.openxmlformats-officedocument.drawing+xml"
    OFC_EXTENDED_PROPERTIES = "application/vnd.openxmlformats-officedocument.extended-properties+xml"
    OFC_OLE_OBJECT = "application/vnd.openxmlformats-officedocument.oleObject"
    OFC_PACKAGE = "application/vnd.openxmlformats-officedocument.package"
    OFC_THEME = "application/vnd.openxmlformats-officedocument.theme+xml"
    OFC_THEME_OVERRIDE = "application/vnd.openxmlformats-officedocument.themeOverride+xml"
    OFC_VML_DRAWING = "application/vnd.openxmlformats-officedocument.vmlDrawing"
    OPC_CORE_PROPERTIES = "application/vnd.openxmlformats-package.core-properties+xml"
    OPC_DIGITAL_SIGNATURE_CERTIFICATE = "application/vnd.openxmlformats-package.digital-signature-certificate"
    OPC_DIGITAL_SIGNATURE_ORIGIN = "application/vnd.openxmlformats-package.digital-signature-origin"
    OPC_DIGITAL_SIGNATURE_XMLSIGNATURE = "application/vnd.openxmlformats-package.digital-signature-xmlsignature+xml"
    OPC_RELATIONSHIPS = "application/vnd.openxmlformats-package.relationships+xml"
    PML_COMMENTS = "application/vnd.openxmlformats-officedocument.presentationml.comments+xml"
    PML_COMMENT_AUTHORS = "application/vnd.openxmlformats-officedocument.presentationml.commentAuthors+xml"
    PML_HANDOUT_MASTER = "application/vnd.openxmlformats-officedocument.presentationml.handoutMaster+xml"
    PML_NOTES_MASTER = "application/vnd.openxmlformats-officedocument.presentationml.notesMaster+xml"
    PML_NOTES_SLIDE = "application/vnd.openxmlformats-officedocument.presentationml.notesSlide+xml"
    PML_PRESENTATION_MAIN = "application/vnd.openxmlformats-officedocument.presentationml.presentation.main+xml"
    PML_PRES_PROPS = "application/vnd.openxmlformats-officedocument.presentationml.presProps+xml"
    PML_PRINTER_SETTINGS = "application/vnd.openxmlformats-officedocument.presentationml.printerSettings"
    PML_SLIDE = "application/vnd.openxmlformats-officedocument.presentationml.slide+xml"
    PML_SLIDESHOW_MAIN = "application/vnd.openxmlformats-officedocument.presentationml.slideshow.main+xml"
    PML_SLIDE_LAYOUT = "application/vnd.openxmlformats-officedocument.presentationml.slideLayout+xml"
    PML_SLIDE_MASTER = "application/vnd.openxmlformats-officedocument.presentationml.slideMaster+xml"
    PML_SLIDE_UPDATE_INFO = "application/vnd.openxmlformats-officedocument.presentationml.slideUpdateInfo+xml"
    PML_TABLE_STYLES = "application/vnd.openxmlformats-officedocument.presentationml.tableStyles+xml"
    PML_TAGS = "application/vnd.openxmlformats-officedocument.presentationml.tags+xml"
    PML_TEMPLATE_MAIN = "application/vnd.openxmlformats-officedocument.presentationml.template.main+xml"
    PML_VIEW_PROPS = "application/vnd.openxmlformats-officedocument.presentationml.viewProps+xml"
    PNG = "image/png"
    SML_CALC_CHAIN = "application/vnd.openxmlformats-officedocument.spreadsheetml.calcChain+xml"
    SML_CHARTSHEET = "application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml"
    SML_COMMENTS = "application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml"
    SML_CONNECTIONS = "application/vnd.openxmlformats-officedocument.spreadsheetml.connections+xml"
    SML_CUSTOM_PROPERTY = "application/vnd.openxmlformats-officedocument.spreadsheetml.customProperty"
    SML_DIALOGSHEET = "application/vnd.openxmlformats-officedocument.spreadsheetml.dialogsheet+xml"
    SML_EXTERNAL_LINK = "application/vnd.openxmlformats-officedocument.spreadsheetml.externalLink+xml"
    SML_PIVOT_CACHE_DEFINITION = "application/vnd.openxmlformats-officedocument.spreadsheetml.pivotCacheDefinition+xml"
    SML_PIVOT_CACHE_RECORDS = "application/vnd.openxmlformats-officedocument.spreadsheetml.pivotCacheRecords+xml"
    SML_PIVOT_TABLE = "application/vnd.openxmlformats-officedocument.spreadsheetml.pivotTable+xml"
    SML_PRINTER_SETTINGS = "application/vnd.openxmlformats-officedocument.spreadsheetml.printerSettings"
    SML_QUERY_TABLE = "application/vnd.openxmlformats-officedocument.spreadsheetml.queryTable+xml"
    SML_REVISION_HEADERS = "application/vnd.openxmlformats-officedocument.spreadsheetml.revisionHeaders+xml"
    SML_REVISION_LOG = "application/vnd.openxmlformats-officedocument.spreadsheetml.revisionLog+xml"
    SML_SHARED_STRINGS = "application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml"
    SML_SHEET = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
    SML_SHEET_MAIN = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml"
    SML_SHEET_METADATA = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheetMetadata+xml"
    SML_STYLES = "application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml"
    SML_TABLE = "application/vnd.openxmlformats-officedocument.spreadsheetml.table+xml"
    SML_TABLE_SINGLE_CELLS = "application/vnd.openxmlformats-officedocument.spreadsheetml.tableSingleCells+xml"
    SML_TEMPLATE_MAIN = "application/vnd.openxmlformats-officedocument.spreadsheetml.template.main+xml"
    SML_USER_NAMES = "application/vnd.openxmlformats-officedocument.spreadsheetml.userNames+xml"
    SML_VOLATILE_DEPENDENCIES = "application/vnd.openxmlformats-officedocument.spreadsheetml.volatileDependencies+xml"
    SML_WORKSHEET = "application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml"
    TIFF = "image/tiff"
    WML_COMMENTS = "application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml"
    WML_DOCUMENT = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
    WML_DOCUMENT_GLOSSARY = "application/vnd.openxmlformats-officedocument.wordprocessingml.document.glossary+xml"
    WML_DOCUMENT_MAIN = "application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml"
    WML_ENDNOTES = "application/vnd.openxmlformats-officedocument.wordprocessingml.endnotes+xml"
    WML_FONT_TABLE = "application/vnd.openxmlformats-officedocument.wordprocessingml.fontTable+xml"
    WML_FOOTER = "application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml"
    WML_FOOTNOTES = "application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml"
    WML_HEADER = "application/vnd.openxmlformats-officedocument.wordprocessingml.header+xml"
    WML_NUMBERING = "application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml"
    WML_PRINTER_SETTINGS = "application/vnd.openxmlformats-officedocument.wordprocessingml.printerSettings"
    WML_SETTINGS = "application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml"
    WML_STYLES = "application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml"
    WML_WEB_SETTINGS = "application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml"
    XML = "application/xml"
    X_EMF = "image/x-emf"
    X_FONTDATA = "application/x-fontdata"
    X_FONT_TTF = "application/x-font-ttf"
    X_WMF = "image/x-wmf"
CONTENT_TYPE
python
Lightning-AI__lightning
tests/tests_pytorch/callbacks/test_spike.py
{ "start": 381, "end": 1346 }
class ____(LightningModule):
    def __init__(self, spike_global_rank: int, spike_value):
        super().__init__()
        self.layer = torch.nn.Linear(1, 1, bias=False)
        self.spike_global_rank = spike_global_rank
        self.spike_value = spike_value

    def training_step(self, batch, batch_idx: int):
        # initialize it all to weights one so that input = output but with gradients
        with torch.no_grad():
            self.layer.weight.data = torch.ones_like(self.layer.weight.data)

        curr_loss_val = 1 / (batch_idx + 1)
        if self.trainer.global_rank == self.spike_global_rank and batch_idx == 4:
            curr_loss_val = self.spike_value

        if curr_loss_val is None:
            curr_loss_val = batch_idx

        return self.layer(
            torch.tensor(curr_loss_val, device=self.device, dtype=self.dtype).view(1, 1)
        )

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=1e-3)
IdentityModule
python
Textualize__textual
src/textual/widgets/_header.py
{ "start": 2644, "end": 6295 }
class ____(Widget):
    """A header widget with icon and clock."""

    DEFAULT_CSS = """
    Header {
        dock: top;
        width: 100%;
        background: $panel;
        color: $foreground;
        height: 1;
    }
    Header.-tall {
        height: 3;
    }
    """

    DEFAULT_CLASSES = ""

    tall: Reactive[bool] = Reactive(False)
    """Set to `True` for a taller header or `False` for a single line header."""

    icon: Reactive[str] = Reactive("⭘")
    """A character for the icon at the top left."""

    time_format: Reactive[str] = Reactive("%X")
    """Time format of the clock."""

    def __init__(
        self,
        show_clock: bool = False,
        *,
        name: str | None = None,
        id: str | None = None,
        classes: str | None = None,
        icon: str | None = None,
        time_format: str | None = None,
    ):
        """Initialise the header widget.

        Args:
            show_clock: ``True`` if the clock should be shown on the right of the header.
            name: The name of the header widget.
            id: The ID of the header widget in the DOM.
            classes: The CSS classes of the header widget.
            icon: Single character to use as an icon, or `None` for default.
            time_format: Time format (used by strftime) for clock, or `None` for default.
        """
        super().__init__(name=name, id=id, classes=classes)
        self._show_clock = show_clock
        if icon is not None:
            self.icon = icon
        if time_format is not None:
            self.time_format = time_format

    def compose(self) -> ComposeResult:
        yield HeaderIcon().data_bind(Header.icon)
        yield HeaderTitle()
        yield (
            HeaderClock().data_bind(Header.time_format)
            if self._show_clock
            else HeaderClockSpace()
        )

    def watch_tall(self, tall: bool) -> None:
        self.set_class(tall, "-tall")

    def _on_click(self):
        self.toggle_class("-tall")

    def format_title(self) -> Content:
        """Format the title and subtitle.

        Defers to [App.format_title][textual.app.App.format_title] by default.
        Override this method if you want to customize how the title is
        displayed in the header.

        Returns:
            Content for title display.
        """
        return self.app.format_title(self.screen_title, self.screen_sub_title)

    @property
    def screen_title(self) -> str:
        """The title that this header will display.

        This depends on [`Screen.title`][textual.screen.Screen.title] and
        [`App.title`][textual.app.App.title].
        """
        screen_title = self.screen.title
        title = screen_title if screen_title is not None else self.app.title
        return title

    @property
    def screen_sub_title(self) -> str:
        """The sub-title that this header will display.

        This depends on [`Screen.sub_title`][textual.screen.Screen.sub_title] and
        [`App.sub_title`][textual.app.App.sub_title].
        """
        screen_sub_title = self.screen.sub_title
        sub_title = (
            screen_sub_title if screen_sub_title is not None else self.app.sub_title
        )
        return sub_title

    def _on_mount(self, _: Mount) -> None:
        async def set_title() -> None:
            try:
                self.query_one(HeaderTitle).update(self.format_title())
            except NoScreen:
                pass

        self.watch(self.app, "title", set_title)
        self.watch(self.app, "sub_title", set_title)
        self.watch(self.screen, "title", set_title)
        self.watch(self.screen, "sub_title", set_title)
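# --- Added usage sketch (assumes the placeholder class above is `Header`,
# --- per this row's target; the app subclass below is a hypothetical
# --- stand-in written from user code, not from inside the widget module) ---
from textual.app import App, ComposeResult

class _DemoApp(App):
    TITLE = "Demo"

    def compose(self) -> ComposeResult:
        # Clicking the header toggles the taller (-tall) variant; the clock
        # on the right uses the strftime-style time_format.
        yield Header(show_clock=True, time_format="%H:%M")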
Header
python
google__pytype
pytype/abstract/_classes.py
{ "start": 4350, "end": 13323 }
class ____(_instance_base.SimpleValue, class_mixin.Class):
  """An abstract wrapper for user-defined class objects.

  These are the abstract value for class objects that are implemented in the
  program.
  """

  def __init__(
      self,
      name: str,
      bases: list[cfg.Variable],
      members: dict[str, cfg.Variable],
      cls: class_mixin.Class,
      first_opcode: opcodes.Opcode | None,
      undecorated_methods: class_mixin.FunctionMapType | None,
      ctx: "context.Context",
  ):
    self._bases = bases
    super().__init__(name, ctx)
    self.members = datatypes.MonitorDict(members)
    class_mixin.Class.init_mixin(self, cls)
    self.instances = set()  # filled through register_instance
    # instances created by analyze.py for the purpose of analyzing this class,
    # a subset of 'instances'. Filled through register_canonical_instance.
    self.canonical_instances = set()
    self.slots = self._convert_str_tuple("__slots__")
    self.match_args = self._convert_str_tuple("__match_args__") or ()
    self.is_dynamic = self.compute_is_dynamic()
    self._undecorated_methods = undecorated_methods or {}
    log.info("Created class: %r", self)
    self._type_param_check()
    self._override_check()
    self._first_opcode = first_opcode

  def _get_class(self) -> "ParameterizedClass":
    return ParameterizedClass(
        self.ctx.convert.type_type, {abstract_utils.T: self}, self.ctx
    )

  def get_first_opcode(self) -> opcodes.Opcode | None:
    return self._first_opcode

  def update_method_type_params(self) -> None:
    # For function type parameters check
    methods = []
    # members of self._undecorated_methods that will be ignored for updating
    # signature scope.
    skip = set()
    for mbr in self.members.values():
      for m in mbr.data:
        if not isinstance(m, _abstract.Function):
          continue
        methods.append(m)
        # We don't need to update the same method twice.
        skip.add(m)
        if m.__class__.__name__ == "StaticMethodInstance":
          # TypeVars in staticmethods should not be treated as bound to the
          # current class.
          skip.update(m.func.data)
    for undecorated_methods in self._undecorated_methods.values():
      methods.extend(m for m in undecorated_methods if m not in skip)
    for m in methods:
      m.update_signature_scope(self)

  def _type_param_check(self) -> None:
    """Throw exception for invalid type parameters."""
    self.update_method_type_params()
    if self.template:
      # nested class can not use the same type parameter
      # in current generic class
      inner_cls_types = self.collect_inner_cls_types()
      for cls, item in inner_cls_types:
        nitem = item.with_scope(self.full_name)  # pytype: disable=attribute-error
        if nitem in self.template:
          raise abstract_utils.GenericTypeError(
              self,
              (
                  "Generic class [%s] and its nested generic class [%s] "
                  "cannot use the same type variable %s."
              )
              % (self.full_name, cls.full_name, item.name),
          )

    self._load_all_formal_type_parameters()  # Throw exception if there is error
    for t in self.template:
      if t.full_name in self.all_formal_type_parameters:
        raise abstract_utils.GenericTypeError(
            self, f"Conflicting value for TypeVar {t.full_name}"
        )

  def _override_check(self) -> None:
    """Checks for @typing.override errors."""
    for name, member in self.members.items():
      member_data = [
          m
          for m in member.data
          if isinstance(
              m, (_abstract.InterpreterClass, _abstract.InterpreterFunction)
          )
      ]
      if not member_data:
        continue
      # Get line number for error reporting.
      member = member_data[0]
      if isinstance(member, InterpreterClass):
        opcode = member.get_first_opcode()
      else:
        opcode = member.def_opcode
      stack = self.ctx.vm.simple_stack(opcode)
      if any(
          "override" in m.decorators or "typing.override" in m.decorators
          for m in member_data
      ):
        base = self._get_defining_base_class(name)
        if not base:
          # 'name' is marked as an override but not defined in a base class.
          self.ctx.errorlog.no_overridden_attribute(stack, name)
      elif self.ctx.options.require_override_decorator:
        base = self._get_defining_base_class(name)
        if base:
          # 'name' is defined in a base class but not marked as an override.
          self.ctx.errorlog.missing_override_decorator(
              stack, name, base.full_name
          )

  def _get_defining_base_class(self, attr: str) -> _base.BaseValue | None:
    """Gets first base class, if any, that defines the given attribute."""
    for base in self.mro[1:]:
      if isinstance(base, class_mixin.Class) and attr in base:
        return base
    return None

  def collect_inner_cls_types(
      self, max_depth: int = 5
  ) -> set[tuple[_base.BaseValue, _base.BaseValue]]:
    """Collect all the type parameters from nested classes."""
    templates = set()
    if max_depth > 0:
      for mbr in self.members.values():
        mbr = abstract_utils.get_atomic_value(
            mbr, default=self.ctx.convert.unsolvable
        )
        if isinstance(mbr, InterpreterClass) and mbr.template:
          templates.update(
              [(mbr, item.with_scope(None)) for item in mbr.template]
          )
          templates.update(mbr.collect_inner_cls_types(max_depth - 1))
    return templates

  def get_inner_classes(self) -> list[_base.BaseValue]:
    """Return the list of top-level nested classes."""
    inner_classes = []
    for member in self.members.values():
      try:
        value = abstract_utils.get_atomic_value(member)
      except abstract_utils.ConversionError:
        continue
      if not isinstance(value, class_mixin.Class) or value.module:
        # Skip non-classes and imported classes.
        continue
      if value.official_name is None or (
          self.official_name
          and value.official_name.startswith(f"{self.official_name}.")
      ):
        inner_classes.append(value)
    return inner_classes

  def get_own_attributes(self) -> set[str]:
    attributes = set(self.members)
    annotations_dict = abstract_utils.get_annotations_dict(self.members)
    if annotations_dict:
      attributes.update(annotations_dict.annotated_locals)
    return attributes - abstract_utils.CLASS_LEVEL_IGNORE

  def get_own_abstract_methods(self) -> set[str]:
    def _can_be_abstract(var):
      return any(
          isinstance(v, _abstract.Function) and v.is_abstract for v in var.data
      )

    return {name for name, var in self.members.items() if _can_be_abstract(var)}

  def register_instance(self, instance: _instance_base.Instance) -> None:
    self.instances.add(instance)

  def register_canonical_instance(
      self, instance: _instance_base.Instance
  ) -> None:
    self.canonical_instances.add(instance)

  # TODO: b/350643999 - Type mismatch due to superclass not having type
  # annotation. Remove the suppression once type is on the superclass method.
  def bases(self) -> list[cfg.Variable]:  # pytype: disable=signature-mismatch
    return self._bases

  def metaclass(self, node: cfg.CFGNode) -> _base.BaseValue | None:
    if (
        self.cls.full_name != "builtins.type"
        and self.cls is not self._get_inherited_metaclass()
    ):
      return self.ctx.convert.merge_classes([self])
    else:
      return None

  def instantiate(
      self,
      node: cfg.CFGNode,
      container: (
          _instance_base.SimpleValue | abstract_utils.DummyContainer | None
      ) = None,
  ) -> cfg.Variable:
    if self.ctx.vm.current_opcode:
      return self._new_instance(container, node, None).to_variable(node)
    else:
      # When the analyze_x methods in CallTracer instantiate classes in
      # preparation for analysis, often there is no frame on the stack yet, or
      # the frame is a SimpleFrame with no opcode.
      return super().instantiate(node, container)

  def __repr__(self) -> str:
    return f"InterpreterClass({self.name})"

  def __contains__(self, name):
    if name in self.members:
      return True
    annotations_dict = abstract_utils.get_annotations_dict(self.members)
    return annotations_dict and name in annotations_dict.annotated_locals

  def has_protocol_base(self) -> bool:
    for base_var in self._bases:
      for base in base_var.data:
        if isinstance(base, PyTDClass) and base.full_name == "typing.Protocol":
          return True
    return False

  def get_undecorated_method(
      self, name: str, node: cfg.CFGNode
  ) -> cfg.Variable | None:
    if name not in self._undecorated_methods:
      return None
    return self.ctx.program.NewVariable(
        self._undecorated_methods[name], (), node
    )
InterpreterClass
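Both records follow the same shape: language, repo, path, a class_span giving offsets into that file, a source with the defining class name masked as `____`, and the target name. The sketch below, under the assumption that this is the full record layout and that the span offsets index into the file's text, shows how a consumer might rebuild and sanity-check a record; the `record` dict and its toy `source` are stand-ins, not an official loader for this dump.

# Reconstruction sketch (assumed record layout; toy stand-in for the real span).
import ast

record = {
    "language": "python",
    "repo": "google__pytype",
    "path": "pytype/abstract/_classes.py",
    "class_span": {"start": 4350, "end": 13323},  # assumed offsets into `path`
    "source": "class ____:\n    pass\n",  # toy stand-in for the masked span
    "target": "InterpreterClass",
}

# Substituting the target for the first `____` should yield syntactically
# valid Python; ast.parse confirms that without importing anything.
code = record["source"].replace("____", record["target"], 1)
ast.parse(code)  # raises SyntaxError if the reconstruction is malformed

# Given a checkout of the repo, the span would recover the original class text:
#     text = open(record["path"], encoding="utf-8").read()
#     span = record["class_span"]
#     original = text[span["start"]:span["end"]]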