language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
apache__airflow
providers/redis/tests/integration/redis/operators/test_redis_publish.py
{ "start": 1166, "end": 2257 }
class ____: def setup_method(self): args = {"owner": "airflow", "start_date": DEFAULT_DATE} self.dag = DAG("test_redis_dag_id", schedule=None, default_args=args) self.mock_context = MagicMock() self.channel = "test" def test_execute_hello(self): operator = RedisPublishOperator( task_id="test_task", dag=self.dag, message="hello", channel=self.channel, redis_conn_id="redis_default", ) hook = RedisHook(redis_conn_id="redis_default") pubsub = hook.get_conn().pubsub() pubsub.subscribe(self.channel) operator.execute(self.mock_context) context_calls = [] assert self.mock_context["ti"].method_calls == context_calls, "context calls should be same" message = pubsub.get_message() assert message["type"] == "subscribe" message = pubsub.get_message() assert message["type"] == "message" assert message["data"] == b"hello" pubsub.unsubscribe(self.channel)
TestRedisPublishOperator
python
pytorch__pytorch
torch/profiler/_memory_profiler.py
{ "start": 14826, "end": 15104 }
class ____: input_version: Optional[int] = None mutated: Optional[bool] = False @property def is_allocation(self) -> bool: return self.input_version is None @property def is_deletion(self) -> bool: return self.mutated is None
DataFlowEdge
python
django__django
django/contrib/sitemaps/__init__.py
{ "start": 206, "end": 5990 }
class ____: # This limit is defined by Google. See the index documentation at # https://www.sitemaps.org/protocol.html#index. limit = 50000 # If protocol is None, the URLs in the sitemap will use the protocol # with which the sitemap was requested. protocol = None # Enables generating URLs for all languages. i18n = False # Override list of languages to use. languages = None # Enables generating alternate/hreflang links. alternates = False # Add an alternate/hreflang link with value 'x-default'. x_default = False def _get(self, name, item, default=None): try: attr = getattr(self, name) except AttributeError: return default if callable(attr): if self.i18n: # Split the (item, lang_code) tuples again for the location, # priority, lastmod and changefreq method calls. item, lang_code = item return attr(item) return attr def get_languages_for_item(self, item): """Languages for which this item is displayed.""" return self._languages() def _languages(self): if self.languages is not None: return self.languages return [lang_code for lang_code, _ in settings.LANGUAGES] def _items(self): if self.i18n: # Create (item, lang_code) tuples for all items and languages. # This is necessary to paginate with all languages already # considered. items = [ (item, lang_code) for item in self.items() for lang_code in self.get_languages_for_item(item) ] return items return self.items() def _location(self, item, force_lang_code=None): if self.i18n: obj, lang_code = item # Activate language from item-tuple or forced one before calling # location. 
with translation.override(force_lang_code or lang_code): return self._get("location", item) return self._get("location", item) @property def paginator(self): return paginator.Paginator(self._items(), self.limit) def items(self): return [] def location(self, item): return item.get_absolute_url() def get_protocol(self, protocol=None): # Determine protocol return self.protocol or protocol or "https" def get_domain(self, site=None): # Determine domain if site is None: if django_apps.is_installed("django.contrib.sites"): Site = django_apps.get_model("sites.Site") try: site = Site.objects.get_current() except Site.DoesNotExist: pass if site is None: raise ImproperlyConfigured( "To use sitemaps, either enable the sites framework or pass " "a Site/RequestSite object in your view." ) return site.domain def get_urls(self, page=1, site=None, protocol=None): protocol = self.get_protocol(protocol) domain = self.get_domain(site) return self._urls(page, protocol, domain) def get_latest_lastmod(self): if not hasattr(self, "lastmod"): return None if callable(self.lastmod): try: return max([self.lastmod(item) for item in self.items()], default=None) except TypeError: return None else: return self.lastmod def _urls(self, page, protocol, domain): urls = [] latest_lastmod = None all_items_lastmod = True # track if all items have a lastmod paginator_page = self.paginator.page(page) for item in paginator_page.object_list: loc = f"{protocol}://{domain}{self._location(item)}" priority = self._get("priority", item) lastmod = self._get("lastmod", item) if all_items_lastmod: all_items_lastmod = lastmod is not None if all_items_lastmod and ( latest_lastmod is None or lastmod > latest_lastmod ): latest_lastmod = lastmod url_info = { "item": item, "location": loc, "lastmod": lastmod, "changefreq": self._get("changefreq", item), "priority": str(priority if priority is not None else ""), "alternates": [], } if self.i18n and self.alternates: item_languages = self.get_languages_for_item(item[0]) for 
lang_code in item_languages: loc = f"{protocol}://{domain}{self._location(item, lang_code)}" url_info["alternates"].append( { "location": loc, "lang_code": lang_code, } ) if self.x_default and settings.LANGUAGE_CODE in item_languages: lang_code = settings.LANGUAGE_CODE loc = f"{protocol}://{domain}{self._location(item, lang_code)}" loc = loc.replace(f"/{lang_code}/", "/", 1) url_info["alternates"].append( { "location": loc, "lang_code": "x-default", } ) urls.append(url_info) if all_items_lastmod and latest_lastmod: self.latest_lastmod = latest_lastmod return urls
Sitemap
python
tornadoweb__tornado
tornado/util.py
{ "start": 1405, "end": 1761 }
class ____(Dict[str, Any]): """Makes a dictionary behave like an object, with attribute-style access.""" def __getattr__(self, name: str) -> Any: try: return self[name] except KeyError: raise AttributeError(name) def __setattr__(self, name: str, value: Any) -> None: self[name] = value
ObjectDict
python
pypa__warehouse
tests/unit/admin/views/test_prohibited_email_domains.py
{ "start": 5278, "end": 7376 }
class ____: def test_no_domain_name(self, db_request): db_request.method = "POST" db_request.route_path = lambda a: "/admin/prohibited_email_domains/remove/" db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.POST = {} with pytest.raises(HTTPSeeOther): views.remove_prohibited_email_domain(db_request) assert db_request.session.flash.calls == [ pretend.call("Domain name is required.", queue="error") ] def test_domain_not_found(self, db_request): db_request.method = "POST" db_request.route_path = lambda a: "/admin/prohibited_email_domains/remove/" db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.POST = {"domain_name": "example.com"} with pytest.raises(HTTPSeeOther): views.remove_prohibited_email_domain(db_request) assert db_request.session.flash.calls == [ pretend.call("Domain not found.", queue="error") ] def test_success(self, db_request): domain = ProhibitedEmailDomainFactory.create() db_request.method = "POST" db_request.route_path = lambda a: "/admin/prohibited_email_domains/list/" db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.POST = {"domain_name": domain.domain} response = views.remove_prohibited_email_domain(db_request) assert response.status_code == 303 assert response.headers["Location"] == "/admin/prohibited_email_domains/list/" assert db_request.session.flash.calls == [ pretend.call( f"Prohibited email domain '{domain.domain}' removed.", queue="success" ) ] query = db_request.db.query(ProhibitedEmailDomain).filter( ProhibitedEmailDomain.domain == domain.domain ) assert query.count() == 0
TestProhibitedEmailDomainsRemove
python
getsentry__sentry
tests/sentry/notifications/notification_action/test_issue_alert_registry_handlers.py
{ "start": 18716, "end": 19960 }
class ____(BaseWorkflowTest): def setUp(self) -> None: super().setUp() self.detector = self.create_detector(project=self.project) self.handler: TicketingIssueAlertHandler def _test_build_rule_action_blob(self, expected, action_type: Action.Type): action_data = pop_keys_from_data_blob(expected, action_type) action = self.create_action( type=action_type, integration_id=expected["integration"], data=self._form_ticketing_action_blob(action_data), ) blob = self.handler.build_rule_action_blob(action, self.organization.id) # pop uuid from blob # (we don't store it anymore since its a legacy artifact when we didn't have the action model) expected.pop("uuid") assert blob == { "id": expected["id"], "integration": expected["integration"], **expected, } def _form_ticketing_action_blob(self, expected): dynamic_form_fields, additional_fields = TicketingActionDataBlobHelper.separate_fields( expected ) return {"dynamic_form_fields": dynamic_form_fields, "additional_fields": additional_fields}
TestTicketingIssueAlertHandlerBase
python
apache__airflow
airflow-core/src/airflow/plugins_manager.py
{ "start": 4644, "end": 24288 }
class ____: """Class used to define AirflowPlugin.""" name: str | None = None source: AirflowPluginSource | None = None macros: list[Any] = [] admin_views: list[Any] = [] flask_blueprints: list[Any] = [] fastapi_apps: list[Any] = [] fastapi_root_middlewares: list[Any] = [] external_views: list[Any] = [] react_apps: list[Any] = [] menu_links: list[Any] = [] appbuilder_views: list[Any] = [] appbuilder_menu_items: list[Any] = [] # A list of global operator extra links that can redirect users to # external systems. These extra links will be available on the # task page in the form of buttons. # # Note: the global operator extra link can be overridden at each # operator level. global_operator_extra_links: list[Any] = [] # A list of operator extra links to override or add operator links # to existing Airflow Operators. # These extra links will be available on the task page in form of # buttons. operator_extra_links: list[Any] = [] # A list of timetable classes that can be used for DAG scheduling. timetables: list[type[Timetable]] = [] # A list of listeners that can be used for tracking task and DAG states. listeners: list[ModuleType | object] = [] # A list of hook lineage reader classes that can be used for reading lineage information from a hook. hook_lineage_readers: list[type[HookLineageReader]] = [] # A list of priority weight strategy classes that can be used for calculating tasks weight priority. priority_weight_strategies: list[type[PriorityWeightStrategy]] = [] @classmethod def validate(cls): """Validate if plugin has a name.""" if not cls.name: raise AirflowPluginException("Your plugin needs a name.") @classmethod def on_load(cls, *args, **kwargs): """ Execute when the plugin is loaded; This method is only called once during runtime. :param args: If future arguments are passed in on call. :param kwargs: If future arguments are passed in on call. """ def is_valid_plugin(plugin_obj): """ Check whether a potential object is a subclass of the AirflowPlugin class. 
:param plugin_obj: potential subclass of AirflowPlugin :return: Whether or not the obj is a valid subclass of AirflowPlugin """ if ( inspect.isclass(plugin_obj) and issubclass(plugin_obj, AirflowPlugin) and (plugin_obj is not AirflowPlugin) ): plugin_obj.validate() return plugin_obj not in plugins return False def register_plugin(plugin_instance): """ Start plugin load and register it after success initialization. If plugin is already registered, do nothing. :param plugin_instance: subclass of AirflowPlugin """ if plugin_instance.name in loaded_plugins: return loaded_plugins.add(plugin_instance.name) plugin_instance.on_load() plugins.append(plugin_instance) def load_entrypoint_plugins(): """ Load and register plugins AirflowPlugin subclasses from the entrypoints. The entry_point group should be 'airflow.plugins'. """ log.debug("Loading plugins from entrypoints") for entry_point, dist in entry_points_with_dist("airflow.plugins"): log.debug("Importing entry_point plugin %s", entry_point.name) try: plugin_class = entry_point.load() if not is_valid_plugin(plugin_class): continue plugin_instance = plugin_class() plugin_instance.source = EntryPointSource(entry_point, dist) register_plugin(plugin_instance) except Exception as e: log.exception("Failed to import plugin %s", entry_point.name) import_errors[entry_point.module] = str(e) def load_plugins_from_plugin_directory(): """Load and register Airflow Plugins from plugins directory.""" log.debug("Loading plugins from directory: %s", settings.PLUGINS_FOLDER) files = find_path_from_directory(settings.PLUGINS_FOLDER, ".airflowignore") plugin_search_locations: list[tuple[str, Generator[str, None, None]]] = [("", files)] if conf.getboolean("core", "LOAD_EXAMPLES"): log.debug("Note: Loading plugins from examples as well: %s", settings.PLUGINS_FOLDER) from airflow.example_dags import plugins example_plugins_folder = next(iter(plugins.__path__)) example_files = find_path_from_directory(example_plugins_folder, ".airflowignore") 
plugin_search_locations.append((plugins.__name__, example_files)) for module_prefix, plugin_files in plugin_search_locations: for file_path in plugin_files: path = Path(file_path) if not path.is_file() or path.suffix != ".py": continue mod_name = f"{module_prefix}.{path.stem}" if module_prefix else path.stem try: loader = importlib.machinery.SourceFileLoader(mod_name, file_path) spec = importlib.util.spec_from_loader(mod_name, loader) mod = importlib.util.module_from_spec(spec) sys.modules[spec.name] = mod loader.exec_module(mod) for mod_attr_value in (m for m in mod.__dict__.values() if is_valid_plugin(m)): plugin_instance = mod_attr_value() plugin_instance.source = PluginsDirectorySource(file_path) register_plugin(plugin_instance) except Exception as e: log.exception("Failed to import plugin %s", file_path) import_errors[file_path] = str(e) def load_providers_plugins(): from airflow.providers_manager import ProvidersManager log.debug("Loading plugins from providers") providers_manager = ProvidersManager() providers_manager.initialize_providers_plugins() for plugin in providers_manager.plugins: log.debug("Importing plugin %s from class %s", plugin.name, plugin.plugin_class) try: plugin_instance = import_string(plugin.plugin_class) if is_valid_plugin(plugin_instance): register_plugin(plugin_instance) else: log.warning("Plugin %s is not a valid plugin", plugin.name) except ImportError: log.exception("Failed to load plugin %s from class name %s", plugin.name, plugin.plugin_class) def make_module(name: str, objects: list[Any]): """Create new module.""" if not objects: return None log.debug("Creating module %s", name) name = name.lower() module = types.ModuleType(name) module._name = name.split(".")[-1] # type: ignore module._objects = objects # type: ignore module.__dict__.update((o.__name__, o) for o in objects) return module def ensure_plugins_loaded(): """ Load plugins from plugins directory and entrypoints. 
Plugins are only loaded if they have not been previously loaded. """ from airflow.stats import Stats global plugins if plugins is not None: log.debug("Plugins are already loaded. Skipping.") return if not settings.PLUGINS_FOLDER: raise ValueError("Plugins folder is not set") log.debug("Loading plugins") with Stats.timer() as timer: plugins = [] load_plugins_from_plugin_directory() load_entrypoint_plugins() if not settings.LAZY_LOAD_PROVIDERS: load_providers_plugins() if plugins: log.debug("Loading %d plugin(s) took %.2f seconds", len(plugins), timer.duration) def initialize_ui_plugins(): """Collect extension points for the UI.""" global external_views global react_apps if external_views is not None and react_apps is not None: return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Initialize UI plugin") seen_url_route = {} external_views = [] react_apps = [] def _remove_list_item(lst, item): # Mutate in place the plugin's external views and react apps list to remove the invalid items # because some function still access these plugin's attribute and not the # global variables `external_views` `react_apps`. (get_plugin_info, for example) lst.remove(item) for plugin in plugins: external_views_to_remove = [] react_apps_to_remove = [] for external_view in plugin.external_views: if not isinstance(external_view, dict): log.warning( "Plugin '%s' has an external view that is not a dictionary. The view will not be loaded.", plugin.name, ) external_views_to_remove.append(external_view) continue url_route = external_view.get("url_route") if url_route is None: continue if url_route in seen_url_route: log.warning( "Plugin '%s' has an external view with an URL route '%s' " "that conflicts with another plugin '%s'. 
The view will not be loaded.", plugin.name, url_route, seen_url_route[url_route], ) external_views_to_remove.append(external_view) continue external_views.append(external_view) seen_url_route[url_route] = plugin.name for react_app in plugin.react_apps: if not isinstance(react_app, dict): log.warning( "Plugin '%s' has a React App that is not a dictionary. The React App will not be loaded.", plugin.name, ) react_apps_to_remove.append(react_app) continue url_route = react_app.get("url_route") if url_route is None: continue if url_route in seen_url_route: log.warning( "Plugin '%s' has a React App with an URL route '%s' " "that conflicts with another plugin '%s'. The React App will not be loaded.", plugin.name, url_route, seen_url_route[url_route], ) react_apps_to_remove.append(react_app) continue react_apps.append(react_app) seen_url_route[url_route] = plugin.name for item in external_views_to_remove: _remove_list_item(plugin.external_views, item) for item in react_apps_to_remove: _remove_list_item(plugin.react_apps, item) def initialize_flask_plugins(): """Collect flask extension points for WEB UI (legacy).""" global flask_blueprints global flask_appbuilder_views global flask_appbuilder_menu_links if ( flask_blueprints is not None and flask_appbuilder_views is not None and flask_appbuilder_menu_links is not None ): return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Initialize legacy Web UI plugin") flask_blueprints = [] flask_appbuilder_views = [] flask_appbuilder_menu_links = [] for plugin in plugins: flask_appbuilder_views.extend(plugin.appbuilder_views) flask_appbuilder_menu_links.extend(plugin.appbuilder_menu_items) flask_blueprints.extend([{"name": plugin.name, "blueprint": bp} for bp in plugin.flask_blueprints]) if (plugin.admin_views and not plugin.appbuilder_views) or ( plugin.menu_links and not plugin.appbuilder_menu_items ): log.warning( "Plugin '%s' may not be compatible with the current Airflow 
version. " "Please contact the author of the plugin.", plugin.name, ) def initialize_fastapi_plugins(): """Collect extension points for the API.""" global fastapi_apps global fastapi_root_middlewares if fastapi_apps is not None and fastapi_root_middlewares is not None: return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Initialize FastAPI plugins") fastapi_apps = [] fastapi_root_middlewares = [] for plugin in plugins: fastapi_apps.extend(plugin.fastapi_apps) fastapi_root_middlewares.extend(plugin.fastapi_root_middlewares) def initialize_extra_operators_links_plugins(): """Create modules for loaded extension from extra operators links plugins.""" global global_operator_extra_links global operator_extra_links global registered_operator_link_classes if ( global_operator_extra_links is not None and operator_extra_links is not None and registered_operator_link_classes is not None ): return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Initialize extra operators links plugins") global_operator_extra_links = [] operator_extra_links = [] registered_operator_link_classes = {} for plugin in plugins: global_operator_extra_links.extend(plugin.global_operator_extra_links) operator_extra_links.extend(list(plugin.operator_extra_links)) registered_operator_link_classes.update( {qualname(link.__class__): link.__class__ for link in plugin.operator_extra_links} ) def initialize_timetables_plugins(): """Collect timetable classes registered by plugins.""" global timetable_classes if timetable_classes is not None: return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Initialize extra timetables plugins") timetable_classes = { qualname(timetable_class): timetable_class for plugin in plugins for timetable_class in plugin.timetables } def initialize_hook_lineage_readers_plugins(): """Collect hook lineage 
reader classes registered by plugins.""" global hook_lineage_reader_classes if hook_lineage_reader_classes is not None: return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Initialize hook lineage readers plugins") hook_lineage_reader_classes = [] for plugin in plugins: hook_lineage_reader_classes.extend(plugin.hook_lineage_readers) def integrate_macros_plugins() -> None: """Integrates macro plugins.""" global macros_modules from airflow.sdk.execution_time import macros if macros_modules is not None: return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Integrate Macros plugins") macros_modules = [] for plugin in plugins: if plugin.name is None: raise AirflowPluginException("Invalid plugin name") macros_module = make_module(f"airflow.sdk.execution_time.macros.{plugin.name}", plugin.macros) if macros_module: macros_modules.append(macros_module) sys.modules[macros_module.__name__] = macros_module # Register the newly created module on airflow.macros such that it # can be accessed when rendering templates. setattr(macros, plugin.name, macros_module) def integrate_listener_plugins(listener_manager: ListenerManager) -> None: """Add listeners from plugins.""" ensure_plugins_loaded() if plugins: for plugin in plugins: if plugin.name is None: raise AirflowPluginException("Invalid plugin name") for listener in plugin.listeners: listener_manager.add_listener(listener) def get_plugin_info(attrs_to_dump: Iterable[str] | None = None) -> list[dict[str, Any]]: """ Dump plugins attributes. 
:param attrs_to_dump: A list of plugin attributes to dump """ ensure_plugins_loaded() integrate_macros_plugins() initialize_flask_plugins() initialize_fastapi_plugins() initialize_ui_plugins() initialize_extra_operators_links_plugins() if not attrs_to_dump: attrs_to_dump = PLUGINS_ATTRIBUTES_TO_DUMP plugins_info = [] if plugins: for plugin in plugins: info: dict[str, Any] = {"name": plugin.name} for attr in attrs_to_dump: if attr in ("global_operator_extra_links", "operator_extra_links"): info[attr] = [f"<{qualname(d.__class__)} object>" for d in getattr(plugin, attr)] elif attr in ("macros", "timetables", "priority_weight_strategies"): info[attr] = [qualname(d) for d in getattr(plugin, attr)] elif attr == "listeners": # listeners may be modules or class instances info[attr] = [ d.__name__ if inspect.ismodule(d) else qualname(d) for d in getattr(plugin, attr) ] elif attr == "appbuilder_views": info[attr] = [ {**d, "view": qualname(d["view"].__class__) if "view" in d else None} for d in getattr(plugin, attr) ] elif attr == "flask_blueprints": info[attr] = [ f"<{qualname(d.__class__)}: name={d.name!r} import_name={d.import_name!r}>" for d in getattr(plugin, attr) ] elif attr == "fastapi_apps": info[attr] = [ {**d, "app": qualname(d["app"].__class__) if "app" in d else None} for d in getattr(plugin, attr) ] elif attr == "fastapi_root_middlewares": # remove args and kwargs from plugin info to hide potentially sensitive info. 
info[attr] = [ { k: (v if k != "middleware" else qualname(middleware_dict["middleware"])) for k, v in middleware_dict.items() if k not in ("args", "kwargs") } for middleware_dict in getattr(plugin, attr) ] else: info[attr] = getattr(plugin, attr) plugins_info.append(info) return plugins_info def initialize_priority_weight_strategy_plugins(): """Collect priority weight strategy classes registered by plugins.""" global priority_weight_strategy_classes if priority_weight_strategy_classes is not None: return ensure_plugins_loaded() if plugins is None: raise AirflowPluginException("Can't load plugins.") log.debug("Initialize extra priority weight strategy plugins") plugins_priority_weight_strategy_classes = { qualname(priority_weight_strategy_class): priority_weight_strategy_class for plugin in plugins for priority_weight_strategy_class in plugin.priority_weight_strategies } priority_weight_strategy_classes = { **airflow_priority_weight_strategies, **plugins_priority_weight_strategy_classes, }
AirflowPlugin
python
walkccc__LeetCode
solutions/3544. Subtree Inversion Sum/3544.py
{ "start": 0, "end": 970 }
class ____: def subtreeInversionSum( self, edges: list[list[int]], nums: list[int], k: int ) -> int: n = len(edges) + 1 parent = [-1] * n graph = [[] for _ in range(n)] for u, v in edges: graph[u].append(v) graph[v].append(u) @functools.lru_cache(None) def dp(u: int, stepsSinceInversion: int, inverted: bool) -> int: """ Returns the maximum sum for subtree rooted at u, with `stepsSinceInversion` steps of inversion and `inverted` is true if the subtree is inverted. """ num = -nums[u] if inverted else nums[u] negNum = -num for v in graph[u]: if v == parent[u]: continue parent[v] = u num += dp(v, min(k, stepsSinceInversion + 1), inverted) if stepsSinceInversion == k: negNum += dp(v, 1, not inverted) return max(num, negNum) if stepsSinceInversion == k else num return dp(0, k, False)
Solution
python
huggingface__transformers
src/transformers/models/evolla/modeling_evolla.py
{ "start": 27512, "end": 27820 }
class ____(ModelOutput): sequence_compressor_output: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
EvollaProteinEncoderModelOutput
python
huggingface__transformers
src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
{ "start": 2174, "end": 4386 }
class ____(nn.Module): def __init__( self, config: MobileViTV2Config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, groups: int = 1, bias: bool = False, dilation: int = 1, use_normalization: bool = True, use_activation: Union[bool, str] = True, ) -> None: super().__init__() padding = int((kernel_size - 1) / 2) * dilation if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") self.convolution = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode="zeros", ) if use_normalization: self.normalization = nn.BatchNorm2d( num_features=out_channels, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, ) else: self.normalization = None if use_activation: if isinstance(use_activation, str): self.activation = ACT2FN[use_activation] elif isinstance(config.hidden_act, str): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act else: self.activation = None def forward(self, features: torch.Tensor) -> torch.Tensor: features = self.convolution(features) if self.normalization is not None: features = self.normalization(features) if self.activation is not None: features = self.activation(features) return features # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTInvertedResidual with MobileViT->MobileViTV2
MobileViTV2ConvLayer
python
matplotlib__matplotlib
galleries/users_explain/text/annotations.py
{ "start": 12228, "end": 34164 }
class ____: """A simple box.""" def __init__(self, pad=0.3): """ The arguments must be floats and have default values. Parameters ---------- pad : float amount of padding """ self.pad = pad super().__init__() def __call__(self, x0, y0, width, height, mutation_size): """ Given the location and size of the box, return the path of the box around it. Rotation is automatically taken care of. Parameters ---------- x0, y0, width, height : float Box location and size. mutation_size : float Reference scale for the mutation, typically the text font size. """ # padding pad = mutation_size * self.pad # width and height with padding added width = width + 2 * pad height = height + 2 * pad # boundary of the padded box x0, y0 = x0 - pad, y0 - pad x1, y1 = x0 + width, y0 + height # return the new path return Path([(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0-pad, (y0+y1)/2), (x0, y0), (x0, y0)], closed=True) BoxStyle._style_list["angled"] = MyStyle # Register the custom style. fig, ax = plt.subplots(figsize=(3, 3)) ax.text(0.5, 0.5, "Test", size=30, va="center", ha="center", rotation=30, bbox=dict(boxstyle="angled,pad=0.5", alpha=0.2)) del BoxStyle._style_list["angled"] # Unregister it. # %% # Similarly, you can define a custom `.ConnectionStyle` and a custom `.ArrowStyle`. View # the source code at `.patches` to learn how each class is defined. # # .. _annotation_with_custom_arrow: # # Customizing annotation arrows # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # An arrow connecting *xy* to *xytext* can be optionally drawn by # specifying the *arrowprops* argument. To draw only an arrow, use # empty string as the first argument: fig, ax = plt.subplots(figsize=(3, 3)) ax.annotate("", xy=(0.2, 0.2), xycoords='data', xytext=(0.8, 0.8), textcoords='data', arrowprops=dict(arrowstyle="->", connectionstyle="arc3")) # %% # The arrow is drawn as follows: # # 1. A path connecting the two points is created, as specified by the # *connectionstyle* parameter. # 2. 
The path is clipped to avoid patches *patchA* and *patchB*, if these are # set. # 3. The path is further shrunk by *shrinkA* and *shrinkB* (in pixels). # 4. The path is transmuted to an arrow patch, as specified by the *arrowstyle* # parameter. # # .. plot:: # :show-source-link: False # # import matplotlib.patches as mpatches # # x1, y1 = 0.3, 0.3 # x2, y2 = 0.7, 0.7 # arrowprops = { # "1. connect with connectionstyle": # dict(arrowstyle="-", patchB=False, shrinkB=0), # "2. clip against patchB": dict(arrowstyle="-", patchB=True, shrinkB=0), # "3. shrink by shrinkB": dict(arrowstyle="-", patchB=True, shrinkB=5), # "4. mutate with arrowstyle": dict(arrowstyle="fancy", patchB=True, shrinkB=5), # } # # fig, axs = plt.subplots(2, 2, figsize=(6, 6), layout='compressed') # for ax, (name, props) in zip(axs.flat, arrowprops.items()): # ax.plot([x1, x2], [y1, y2], ".") # # el = mpatches.Ellipse((x1, y1), 0.3, 0.4, angle=30, alpha=0.2) # ax.add_artist(el) # # props["patchB"] = el if props["patchB"] else None # # ax.annotate( # "", # xy=(x1, y1), xycoords='data', # xytext=(x2, y2), textcoords='data', # arrowprops={"color": "0.5", "connectionstyle": "arc3,rad=0.3", **props}) # ax.text(.05, .95, name, transform=ax.transAxes, ha="left", va="top") # # ax.set(xlim=(0, 1), ylim=(0, 1), xticks=[], yticks=[], aspect=1) # # fig.get_layout_engine().set(wspace=0, hspace=0, w_pad=0, h_pad=0) # # The creation of the connecting path between two points is controlled by # ``connectionstyle`` key and the following styles are available: # # ========== ============================================= # Name Attrs # ========== ============================================= # ``angle`` angleA=90,angleB=0,rad=0.0 # ``angle3`` angleA=90,angleB=0 # ``arc`` angleA=0,angleB=0,armA=None,armB=None,rad=0.0 # ``arc3`` rad=0.0 # ``bar`` armA=0.0,armB=0.0,fraction=0.3,angle=None # ========== ============================================= # # Note that "3" in ``angle3`` and ``arc3`` is meant to indicate that the # 
resulting path is a quadratic spline segment (three control # points). As will be discussed below, some arrow style options can only # be used when the connecting path is a quadratic spline. # # The behavior of each connection style is (limitedly) demonstrated in the # example below. (Warning: The behavior of the ``bar`` style is currently not # well-defined and may be changed in the future). # # .. plot:: # :caption: Connection styles for annotations # # def demo_con_style(ax, connectionstyle): # x1, y1 = 0.3, 0.2 # x2, y2 = 0.8, 0.6 # # ax.plot([x1, x2], [y1, y2], ".") # ax.annotate("", # xy=(x1, y1), xycoords='data', # xytext=(x2, y2), textcoords='data', # arrowprops=dict(arrowstyle="->", color="0.5", # shrinkA=5, shrinkB=5, # patchA=None, patchB=None, # connectionstyle=connectionstyle, # ), # ) # # ax.text(.05, .95, connectionstyle.replace(",", ",\n"), # transform=ax.transAxes, ha="left", va="top") # # ax.set(xlim=(0, 1), ylim=(0, 1.25), xticks=[], yticks=[], aspect=1.25) # # fig, axs = plt.subplots(3, 5, figsize=(7, 6.3), layout="compressed") # demo_con_style(axs[0, 0], "angle3,angleA=90,angleB=0") # demo_con_style(axs[1, 0], "angle3,angleA=0,angleB=90") # demo_con_style(axs[0, 1], "arc3,rad=0.") # demo_con_style(axs[1, 1], "arc3,rad=0.3") # demo_con_style(axs[2, 1], "arc3,rad=-0.3") # demo_con_style(axs[0, 2], "angle,angleA=-90,angleB=180,rad=0") # demo_con_style(axs[1, 2], "angle,angleA=-90,angleB=180,rad=5") # demo_con_style(axs[2, 2], "angle,angleA=-90,angleB=10,rad=5") # demo_con_style(axs[0, 3], "arc,angleA=-90,angleB=0,armA=30,armB=30,rad=0") # demo_con_style(axs[1, 3], "arc,angleA=-90,angleB=0,armA=30,armB=30,rad=5") # demo_con_style(axs[2, 3], "arc,angleA=-90,angleB=0,armA=0,armB=40,rad=0") # demo_con_style(axs[0, 4], "bar,fraction=0.3") # demo_con_style(axs[1, 4], "bar,fraction=-0.3") # demo_con_style(axs[2, 4], "bar,angle=180,fraction=-0.2") # # axs[2, 0].remove() # fig.get_layout_engine().set(wspace=0, hspace=0, w_pad=0, h_pad=0) # # The connecting 
path (after clipping and shrinking) is then mutated to # an arrow patch, according to the given ``arrowstyle``: # # ========== ============================================= # Name Attrs # ========== ============================================= # ``-`` None # ``->`` head_length=0.4,head_width=0.2 # ``-[`` widthB=1.0,lengthB=0.2,angleB=None # ``|-|`` widthA=1.0,widthB=1.0 # ``-|>`` head_length=0.4,head_width=0.2 # ``<-`` head_length=0.4,head_width=0.2 # ``<->`` head_length=0.4,head_width=0.2 # ``<|-`` head_length=0.4,head_width=0.2 # ``<|-|>`` head_length=0.4,head_width=0.2 # ``fancy`` head_length=0.4,head_width=0.4,tail_width=0.4 # ``simple`` head_length=0.5,head_width=0.5,tail_width=0.2 # ``wedge`` tail_width=0.3,shrink_factor=0.5 # ========== ============================================= # # .. figure:: /gallery/text_labels_and_annotations/images/sphx_glr_fancyarrow_demo_001.png # :target: /gallery/text_labels_and_annotations/fancyarrow_demo.html # :align: center # # Some arrowstyles only work with connection styles that generate a # quadratic-spline segment. They are ``fancy``, ``simple``, and ``wedge``. # For these arrow styles, you must use the "angle3" or "arc3" connection # style. # # If the annotation string is given, the patch is set to the bbox patch # of the text by default. fig, ax = plt.subplots(figsize=(3, 3)) ax.annotate("Test", xy=(0.2, 0.2), xycoords='data', xytext=(0.8, 0.8), textcoords='data', size=20, va="center", ha="center", arrowprops=dict(arrowstyle="simple", connectionstyle="arc3,rad=-0.2")) # %% # As with `~.Axes.text`, a box around the text can be drawn using the *bbox* # argument. 
fig, ax = plt.subplots(figsize=(3, 3)) ann = ax.annotate("Test", xy=(0.2, 0.2), xycoords='data', xytext=(0.8, 0.8), textcoords='data', size=20, va="center", ha="center", bbox=dict(boxstyle="round4", fc="w"), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3,rad=-0.2", fc="w")) # %% # By default, the starting point is set to the center of the text # extent. This can be adjusted with ``relpos`` key value. The values # are normalized to the extent of the text. For example, (0, 0) means # lower-left corner and (1, 1) means top-right. fig, ax = plt.subplots(figsize=(3, 3)) ann = ax.annotate("Test", xy=(0.2, 0.2), xycoords='data', xytext=(0.8, 0.8), textcoords='data', size=20, va="center", ha="center", bbox=dict(boxstyle="round4", fc="w"), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3,rad=0.2", relpos=(0., 0.), fc="w")) ann = ax.annotate("Test", xy=(0.2, 0.2), xycoords='data', xytext=(0.8, 0.8), textcoords='data', size=20, va="center", ha="center", bbox=dict(boxstyle="round4", fc="w"), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3,rad=-0.2", relpos=(1., 0.), fc="w")) # %% # Placing Artist at anchored Axes locations # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # There are classes of artists that can be placed at an anchored # location in the Axes. A common example is the legend. This type # of artist can be created by using the `.OffsetBox` class. A few # predefined classes are available in :mod:`matplotlib.offsetbox` and in # :mod:`mpl_toolkits.axes_grid1.anchored_artists`. from matplotlib.offsetbox import AnchoredText fig, ax = plt.subplots(figsize=(3, 3)) at = AnchoredText("Figure 1a", prop=dict(size=15), frameon=True, loc='upper left') at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2") ax.add_artist(at) # %% # The *loc* keyword has same meaning as in the legend command. # # A simple application is when the size of the artist (or collection of # artists) is known in pixel size during the time of creation. 
For # example, If you want to draw a circle with fixed size of 20 pixel x 20 # pixel (radius = 10 pixel), you can utilize # `~mpl_toolkits.axes_grid1.anchored_artists.AnchoredDrawingArea`. The instance # is created with a size of the drawing area (in pixels), and arbitrary artists # can be added to the drawing area. Note that the extents of the artists that are # added to the drawing area are not related to the placement of the drawing # area itself. Only the initial size matters. # # The artists that are added to the drawing area should not have a # transform set (it will be overridden) and the dimensions of those # artists are interpreted as a pixel coordinate, i.e., the radius of the # circles in above example are 10 pixels and 5 pixels, respectively. from matplotlib.patches import Circle from mpl_toolkits.axes_grid1.anchored_artists import AnchoredDrawingArea fig, ax = plt.subplots(figsize=(3, 3)) ada = AnchoredDrawingArea(40, 20, 0, 0, loc='upper right', pad=0., frameon=False) p1 = Circle((10, 10), 10) ada.drawing_area.add_artist(p1) p2 = Circle((30, 10), 5, fc="r") ada.drawing_area.add_artist(p2) ax.add_artist(ada) # %% # Sometimes, you want your artists to scale with the data coordinate (or # coordinates other than canvas pixels). You can use # `~mpl_toolkits.axes_grid1.anchored_artists.AnchoredAuxTransformBox` class. # This is similar to # `~mpl_toolkits.axes_grid1.anchored_artists.AnchoredDrawingArea` except that # the extent of the artist is determined during the drawing time respecting the # specified transform. # # The ellipse in the example below will have width and height # corresponding to 0.1 and 0.4 in data coordinates and will be # automatically scaled when the view limits of the Axes change. 
from matplotlib.patches import Ellipse from mpl_toolkits.axes_grid1.anchored_artists import AnchoredAuxTransformBox fig, ax = plt.subplots(figsize=(3, 3)) box = AnchoredAuxTransformBox(ax.transData, loc='upper left') el = Ellipse((0, 0), width=0.1, height=0.4, angle=30) # in data coordinates! box.drawing_area.add_artist(el) ax.add_artist(box) # %% # Another method of anchoring an artist relative to a parent Axes or anchor # point is via the *bbox_to_anchor* argument of `.AnchoredOffsetbox`. This # artist can then be automatically positioned relative to another artist using # `.HPacker` and `.VPacker`: from matplotlib.offsetbox import (AnchoredOffsetbox, DrawingArea, HPacker, TextArea) fig, ax = plt.subplots(figsize=(3, 3)) box1 = TextArea(" Test: ", textprops=dict(color="k")) box2 = DrawingArea(60, 20, 0, 0) el1 = Ellipse((10, 10), width=16, height=5, angle=30, fc="r") el2 = Ellipse((30, 10), width=16, height=5, angle=170, fc="g") el3 = Ellipse((50, 10), width=16, height=5, angle=230, fc="b") box2.add_artist(el1) box2.add_artist(el2) box2.add_artist(el3) box = HPacker(children=[box1, box2], align="center", pad=0, sep=5) anchored_box = AnchoredOffsetbox(loc='lower left', child=box, pad=0., frameon=True, bbox_to_anchor=(0., 1.02), bbox_transform=ax.transAxes, borderpad=0.,) ax.add_artist(anchored_box) fig.subplots_adjust(top=0.8) # %% # Note that, unlike in `.Legend`, the ``bbox_transform`` is set to # `.IdentityTransform` by default # # .. _annotating_coordinate_systems: # # Coordinate systems for annotations # ---------------------------------- # # Matplotlib Annotations support several types of coordinate systems. The # examples in :ref:`annotations-tutorial` used the ``data`` coordinate system; # Some others more advanced options are: # # `.Transform` instance # ^^^^^^^^^^^^^^^^^^^^^ # # Transforms map coordinates into different coordinate systems, usually the # display coordinate system. See :ref:`transforms_tutorial` for a detailed # explanation. 
Here Transform objects are used to identify the coordinate # system of the corresponding points. For example, the ``Axes.transAxes`` # transform positions the annotation relative to the Axes coordinates; therefore # using it is identical to setting the coordinate system to "axes fraction": fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(6, 3)) ax1.annotate("Test", xy=(0.2, 0.2), xycoords=ax1.transAxes) ax2.annotate("Test", xy=(0.2, 0.2), xycoords="axes fraction") # %% # Another commonly used `.Transform` instance is ``Axes.transData``. This # transform is the coordinate system of the data plotted in the Axes. In this # example, it is used to draw an arrow between related data points in two # Axes. We have passed an empty text because in this case, the annotation # connects data points. x = np.linspace(-1, 1) fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(6, 3)) ax1.plot(x, -x**3) ax2.plot(x, -3*x**2) ax2.annotate("", xy=(0, 0), xycoords=ax1.transData, xytext=(0, 0), textcoords=ax2.transData, arrowprops=dict(arrowstyle="<->")) # %% # .. _artist_annotation_coord: # # `.Artist` instance # ^^^^^^^^^^^^^^^^^^ # # The *xy* value (or *xytext*) is interpreted as a fractional coordinate of the # bounding box (bbox) of the artist: fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(3, 3)) an1 = ax.annotate("Test 1", xy=(0.5, 0.5), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w")) an2 = ax.annotate("Test 2", xy=(1, 0.5), xycoords=an1, # (1, 0.5) of an1's bbox xytext=(30, 0), textcoords="offset points", va="center", ha="left", bbox=dict(boxstyle="round", fc="w"), arrowprops=dict(arrowstyle="->")) # %% # Note that you must ensure that the extent of the coordinate artist (*an1* in # this example) is determined before *an2* gets drawn. Usually, this means # that *an2* needs to be drawn after *an1*. 
The base class for all bounding # boxes is `.BboxBase` # # Callable that returns `.Transform` of `.BboxBase` # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # A callable object that takes the renderer instance as single argument, and # returns either a `.Transform` or a `.BboxBase`. For example, the return # value of `.Artist.get_window_extent` is a bbox, so this method is identical # to (2) passing in the artist: fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(3, 3)) an1 = ax.annotate("Test 1", xy=(0.5, 0.5), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w")) an2 = ax.annotate("Test 2", xy=(1, 0.5), xycoords=an1.get_window_extent, xytext=(30, 0), textcoords="offset points", va="center", ha="left", bbox=dict(boxstyle="round", fc="w"), arrowprops=dict(arrowstyle="->")) # %% # `.Artist.get_window_extent` is the bounding box of the Axes object and is # therefore identical to setting the coordinate system to axes fraction: fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(6, 3)) an1 = ax1.annotate("Test1", xy=(0.5, 0.5), xycoords="axes fraction") an2 = ax2.annotate("Test 2", xy=(0.5, 0.5), xycoords=ax2.get_window_extent) # %% # Blended coordinate specification # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # A blended pair of coordinate specifications -- the first for the # x-coordinate, and the second is for the y-coordinate. For example, x=0.5 is # in data coordinates, and y=1 is in normalized axes coordinates: fig, ax = plt.subplots(figsize=(3, 3)) ax.annotate("Test", xy=(0.5, 1), xycoords=("data", "axes fraction")) ax.axvline(x=.5, color='lightgray') ax.set(xlim=(0, 2), ylim=(1, 2)) # %% # Any of the supported coordinate systems can be used in a blended # specification. 
For example, the text "Anchored to 1 & 2" is positioned # relative to the two `.Text` Artists: fig, ax = plt.subplots(figsize=(3, 3)) t1 = ax.text(0.05, .05, "Text 1", va='bottom', ha='left') t2 = ax.text(0.90, .90, "Text 2", ha='right') t3 = ax.annotate("Anchored to 1 & 2", xy=(0, 0), xycoords=(t1, t2), va='bottom', color='tab:orange',) # %% # `.text.OffsetFrom` # ^^^^^^^^^^^^^^^^^^ # # Sometimes, you want your annotation with some "offset points", not from the # annotated point but from some other point or artist. `.text.OffsetFrom` is # a helper for such cases. from matplotlib.text import OffsetFrom fig, ax = plt.subplots(figsize=(3, 3)) an1 = ax.annotate("Test 1", xy=(0.5, 0.5), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w")) offset_from = OffsetFrom(an1, (0.5, 0)) an2 = ax.annotate("Test 2", xy=(0.1, 0.1), xycoords="data", xytext=(0, -10), textcoords=offset_from, # xytext is offset points from "xy=(0.5, 0), xycoords=an1" va="top", ha="center", bbox=dict(boxstyle="round", fc="w"), arrowprops=dict(arrowstyle="->")) # %% # Non-text annotations # -------------------- # # .. _using_connectionpatch: # # Using ConnectionPatch # ^^^^^^^^^^^^^^^^^^^^^ # # `.ConnectionPatch` is like an annotation without text. While `~.Axes.annotate` # is sufficient in most situations, `.ConnectionPatch` is useful when you want # to connect points in different Axes. For example, here we connect the point # *xy* in the data coordinates of ``ax1`` to point *xy* in the data coordinates # of ``ax2``: from matplotlib.patches import ConnectionPatch fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(6, 3)) xy = (0.3, 0.2) con = ConnectionPatch(xyA=xy, coordsA=ax1.transData, xyB=xy, coordsB=ax2.transData) fig.add_artist(con) # %% # Here, we added the `.ConnectionPatch` to the *figure* # (with `~.Figure.add_artist`) rather than to either Axes. 
This ensures that # the ConnectionPatch artist is drawn on top of both Axes, and is also necessary # when using :ref:`constrained_layout <constrainedlayout_guide>` # for positioning the Axes. # # Zoom effect between Axes # ^^^^^^^^^^^^^^^^^^^^^^^^ # # `mpl_toolkits.axes_grid1.inset_locator` defines some patch classes useful for # interconnecting two Axes. # # .. figure:: /gallery/subplots_axes_and_figures/images/sphx_glr_axes_zoom_effect_001.png # :target: /gallery/subplots_axes_and_figures/axes_zoom_effect.html # :align: center # # The code for this figure is at # :doc:`/gallery/subplots_axes_and_figures/axes_zoom_effect` and # familiarity with :ref:`transforms_tutorial` # is recommended.
MyStyle
python
pandas-dev__pandas
pandas/tests/generic/test_frame.py
{ "start": 255, "end": 5215 }
class ____: @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"]) def test_set_axis_name(self, func): df = DataFrame([[1, 2], [3, 4]]) result = methodcaller(func, "foo")(df) assert df.index.name is None assert result.index.name == "foo" result = methodcaller(func, "cols", axis=1)(df) assert df.columns.name is None assert result.columns.name == "cols" @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"]) def test_set_axis_name_mi(self, func): df = DataFrame( np.empty((3, 3)), index=MultiIndex.from_tuples([("A", x) for x in list("aBc")]), columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]), ) level_names = ["L1", "L2"] result = methodcaller(func, level_names)(df) assert result.index.names == level_names assert result.columns.names == [None, None] result = methodcaller(func, level_names, axis=1)(df) assert result.columns.names == ["L1", "L2"] assert result.index.names == [None, None] def test_nonzero_single_element(self): df = DataFrame([[False, False]]) msg_err = "The truth value of a DataFrame is ambiguous" with pytest.raises(ValueError, match=msg_err): bool(df) def test_metadata_propagation_indiv_groupby(self): # groupby df = DataFrame( { "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], "B": ["one", "one", "two", "three", "two", "two", "one", "three"], "C": np.random.default_rng(2).standard_normal(8), "D": np.random.default_rng(2).standard_normal(8), } ) result = df.groupby("A").sum() tm.assert_metadata_equivalent(df, result) def test_metadata_propagation_indiv_resample(self): # resample df = DataFrame( np.random.default_rng(2).standard_normal((1000, 2)), index=date_range("20130101", periods=1000, freq="s"), ) result = df.resample("1min") tm.assert_metadata_equivalent(df, result) def test_metadata_propagation_indiv(self, monkeypatch): # merging with override # GH 6923 def finalize( self: DataFrame, other: DataFrame, method: Literal["merge", "concat"] | None = None, **kwargs, ): for name in self._metadata: if 
method == "merge": left, right = other.input_objs value = getattr(left, name, "") + "|" + getattr(right, name, "") object.__setattr__(self, name, value) elif method == "concat": value = "+".join( [ getattr(o, name) for o in other.input_objs if getattr(o, name, None) ] ) object.__setattr__(self, name, value) else: object.__setattr__(self, name, getattr(other, name, "")) return self with monkeypatch.context() as m: m.setattr(DataFrame, "_metadata", ["filename"]) m.setattr(DataFrame, "__finalize__", finalize) df1 = DataFrame( np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["a", "b"] ) df2 = DataFrame( np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["c", "d"] ) DataFrame._metadata = ["filename"] df1.filename = "fname1.csv" df2.filename = "fname2.csv" result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner") assert result.filename == "fname1.csv|fname2.csv" # concat # GH#6927 df1 = DataFrame( np.random.default_rng(2).integers(0, 4, (3, 2)), columns=list("ab") ) df1.filename = "foo" result = pd.concat([df1, df1]) assert result.filename == "foo+foo" def test_set_attribute(self): # Test for consistent setattr behavior when an attribute and a column # have the same name (Issue #8994) df = DataFrame({"x": [1, 2, 3]}) df.y = 2 df["y"] = [2, 4, 6] df.y = 5 assert df.y == 5 tm.assert_series_equal(df["y"], Series([2, 4, 6], name="y")) def test_deepcopy_empty(self): # This test covers empty frame copying with non-empty column sets # as reported in issue GH15370 empty_frame = DataFrame(data=[], index=[], columns=["A"]) empty_frame_copy = deepcopy(empty_frame) tm.assert_frame_equal(empty_frame_copy, empty_frame) # formerly in Generic but only test DataFrame
TestDataFrame
python
ansible__ansible
test/units/parsing/test_dataloader.py
{ "start": 5258, "end": 5991 }
class ____(unittest.TestCase): def setUp(self): self._loader = DataLoader() def test_all_slash(self): self.assertEqual(self._loader.path_dwim_relative('/', '/', '/'), '/') def test_path_endswith_role(self): self.assertEqual(self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='/'), '/') def test_path_endswith_role_main_yml(self): self.assertIn('main.yml', self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='main.yml')) def test_path_endswith_role_source_tilde(self): self.assertEqual(self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='~/'), os.path.expanduser('~'))
TestPathDwimRelativeDataLoader
python
ray-project__ray
doc/source/serve/doc_code/http_guide/streaming_example.py
{ "start": 1385, "end": 2282 }
class ____: async def generate_forever(self) -> AsyncGenerator[str, None]: try: i = 0 while True: yield str(i) i += 1 await asyncio.sleep(0.1) except asyncio.CancelledError: print("Cancelled! Exiting.") def __call__(self, request: Request) -> StreamingResponse: gen = self.generate_forever() return StreamingResponse(gen, status_code=200, media_type="text/plain") serve.run(StreamingResponder.bind()) r = requests.get("http://localhost:8000?max=10", stream=True) start = time.time() r.raise_for_status() for i, chunk in enumerate(r.iter_content(chunk_size=None, decode_unicode=True)): print(f"Got result {round(time.time()-start, 1)}s after start: '{chunk}'") if i == 10: print("Client disconnecting") break # __end_cancellation__
StreamingResponder
python
allegroai__clearml
clearml/utilities/requests_toolbelt/multipart/encoder.py
{ "start": 14987, "end": 16697 }
class ____(object): def __init__(self, headers, body): self.headers = headers self.body = body self.headers_unread = True self.len = len(self.headers) + total_len(self.body) @classmethod def from_field(cls, field, encoding): """Create a part from a Request Field generated by urllib3.""" headers = encode_with(field.render_headers(), encoding) body = coerce_data(field.data, encoding) return cls(headers, body) def bytes_left_to_write(self): """Determine if there are bytes left to write. :returns: bool -- ``True`` if there are bytes left to write, otherwise ``False`` """ to_read = 0 if self.headers_unread: to_read += len(self.headers) return (to_read + total_len(self.body)) > 0 def write_to(self, buffer, size): """Write the requested amount of bytes to the buffer provided. The number of bytes written may exceed size on the first read since we load the headers ambitiously. :param CustomBytesIO buffer: buffer we want to write bytes to :param int size: number of bytes requested to be written to the buffer :returns: int -- number of bytes actually written """ written = 0 if self.headers_unread: written += buffer.append(self.headers) self.headers_unread = False while total_len(self.body) > 0 and (size == -1 or written < size): amount_to_read = size if size != -1: amount_to_read = size - written written += buffer.append(self.body.read(amount_to_read)) return written
Part
python
kamyu104__LeetCode-Solutions
Python/design-browser-history.py
{ "start": 103, "end": 926 }
class ____(object): def __init__(self, homepage): """ :type homepage: str """ self.__history = [homepage] self.__curr = 0 def visit(self, url): """ :type url: str :rtype: None """ while len(self.__history) > self.__curr+1: self.__history.pop() self.__history.append(url) self.__curr += 1 def back(self, steps): """ :type steps: int :rtype: str """ self.__curr = max(self.__curr-steps, 0) return self.__history[self.__curr] def forward(self, steps): """ :type steps: int :rtype: str """ self.__curr = min(self.__curr+steps, len(self.__history)-1) return self.__history[self.__curr]
BrowserHistory
python
pdm-project__pdm
src/pdm/models/serializers.py
{ "start": 850, "end": 1390 }
class ____: UnpackValueError = json.JSONDecodeError @staticmethod def packb(data: dict, use_bin_type: bool = True) -> bytes: return json.dumps(data, cls=Encoder).encode() @staticmethod def loads(data: bytes, raw: bool = False) -> Any: return json.loads(data, object_hook=Encoder.object_hook) def _get_msgpack_implementation() -> type[JSONMsgPack]: try: import msgpack except ImportError: return JSONMsgPack else: return cast("type[JSONMsgPack]", msgpack)
JSONMsgPack
python
huggingface__transformers
src/transformers/models/upernet/modeling_upernet.py
{ "start": 1096, "end": 2212 }
class ____(nn.Module): """ A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU). """ def __init__( self, in_channels: int, out_channels: int, kernel_size: Union[int, tuple[int, int]], padding: Union[int, tuple[int, int], str] = 0, bias: bool = False, dilation: Union[int, tuple[int, int]] = 1, ) -> None: super().__init__() self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation, ) self.batch_norm = nn.BatchNorm2d(out_channels) self.activation = nn.ReLU() def forward(self, input: torch.Tensor) -> torch.Tensor: output = self.conv(input) output = self.batch_norm(output) output = self.activation(output) return output
UperNetConvModule
python
kamyu104__LeetCode-Solutions
Python/number-of-spaces-cleaning-robot-cleaned.py
{ "start": 33, "end": 638 }
class ____(object): def numberOfCleanRooms(self, room): """ :type room: List[List[int]] :rtype: int """ directions = [(0, 1), (1, 0), (0, -1), (-1, 0)] result = r = c = d = 0 while not room[r][c]&(1<<(d+1)): result += (room[r][c]>>1) == 0 room[r][c] |= (1<<(d+1)) dr, dc = directions[d] nr, nc = r+dr, c+dc if 0 <= nr < len(room) and 0 <= nc < len(room[0]) and not (room[nr][nc]&1): r, c = nr, nc else: d = (d+1)%4 return result
Solution
python
GoogleCloudPlatform__python-docs-samples
appengine/standard/urlfetch/snippets/main.py
{ "start": 1438, "end": 2036 }
class ____(webapp2.RequestHandler): """Demonstrates an HTTP query using urlfetch.""" def get(self): # [START gae_urlfetch_snippets_urlfetch_get] url = "http://www.google.com/humans.txt" try: result = urlfetch.fetch(url) if result.status_code == 200: self.response.write(result.content) else: self.response.status_code = result.status_code except urlfetch.Error: logging.exception("Caught exception fetching url") # [END gae_urlfetch_snippets_urlfetch_get]
UrlFetchHandler
python
pytorch__pytorch
test/test_mobile_optimizer.py
{ "start": 783, "end": 26755 }
class ____(TestCase): @skipIfNoXNNPACK def test_optimize_for_mobile(self): batch_size = 2 input_channels_per_group = 6 height = 16 width = 16 output_channels_per_group = 6 groups = 4 kernel_h = kernel_w = 3 stride_h = stride_w = 1 pad_h = pad_w = 1 dilation = 1 input_channels = input_channels_per_group * groups output_channels = output_channels_per_group * groups strides = (stride_h, stride_w) paddings = (pad_h, pad_w) dilations = (dilation, dilation) conv_weight_shape = (output_channels, input_channels_per_group, kernel_h, kernel_w) conv_bias_shape = (output_channels) input_data = torch.rand((batch_size, input_channels, height, width)) conv_weight = torch.rand((output_channels, input_channels_per_group, kernel_h, kernel_w)) conv_bias = torch.rand(output_channels) result = F.conv2d(input_data, conv_weight, conv_bias, strides, paddings, dilations, groups) weight_output_dim = 24 linear_input_shape = result.shape[1] linear_weight_shape = (weight_output_dim, linear_input_shape) class MyTestModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv_weight = torch.nn.Parameter(torch.rand(conv_weight_shape)) self.conv_bias = torch.nn.Parameter(torch.rand(conv_bias_shape)) self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape)) self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim)) self.strides = strides self.paddings = paddings self.dilations = dilations self.groups = groups def forward(self, x): o = F.conv2d(x, self.conv_weight, self.conv_bias, self.strides, self.paddings, self.dilations, self.groups) o = F.relu(o) x = o.permute([0, 2, 3, 1]) o = F.linear(x, self.linear_weight, self.linear_bias) o = o + x return F.relu(o) @torch.jit.export def foo(self, x): o = F.conv2d(x, self.conv_weight, self.conv_bias, self.strides, self.paddings, self.dilations, self.groups) o = F.relu(o) x = o.permute([0, 2, 3, 1]) o = F.linear(x, self.linear_weight, self.linear_bias) o = o + x return F.relu(o) class 
BNTestModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 20, 5, 1) self.bn = torch.nn.BatchNorm2d(num_features=20) self.bn.eps = 0.0023 def forward(self, x): x = self.conv(x) x = self.bn(x) return x data_shape = (batch_size, input_channels, height, width) input_data = torch.normal(1, 20, size=data_shape) scripted_model = torch.jit.script(MyTestModule()) scripted_model.eval() initial_result = scripted_model(input_data) initial_foo_result = scripted_model.foo(input_data) optimized_scripted_model = optimize_for_mobile(scripted_model, preserved_methods=['foo']) optimized_result = optimized_scripted_model(input_data) optimized_foo_result = optimized_scripted_model.foo(input_data) FileCheck().check_not("Tensor = aten::conv2d") \ .check_not("Tensor = prim::CallFunction") \ .check_not("prepacked::conv2d_clamp_prepack") \ .check_count("prepacked::conv2d_clamp_run", 1, exactly=True) \ .check_not("prepacked::linear_clamp_prepack") \ .check_count("prepacked::linear_clamp_run", 1, exactly=True) \ .check_not("aten::add(") \ .check_not("aten::relu(") \ .check_count("aten::_add_relu(", 1, exactly=True) \ .run(optimized_scripted_model.graph) torch.testing.assert_close(initial_result, optimized_result, rtol=1e-2, atol=1e-3) FileCheck().check_not("Tensor = aten::conv2d") \ .check_not("Tensor = prim::CallFunction") \ .check_not("prepacked::conv2d_clamp_prepack") \ .check_count("prepacked::conv2d_clamp_run", 1, exactly=True) \ .check_not("prepacked::linear_clamp_prepack") \ .check_count("prepacked::linear_clamp_run", 1, exactly=True) \ .check_not("aten::add(") \ .check_not("aten::relu(") \ .check_count("aten::_add_relu(", 1, exactly=True) \ .run(optimized_scripted_model.foo.graph) torch.testing.assert_close(initial_foo_result, optimized_foo_result, rtol=1e-2, atol=1e-3) optimization_blocklist_no_prepack = {MobileOptimizerType.INSERT_FOLD_PREPACK_OPS} optimized_scripted_model_no_prepack = optimize_for_mobile(scripted_model, 
optimization_blocklist_no_prepack) optimized_result_no_prepack = optimized_scripted_model_no_prepack(input_data) FileCheck().check_count("Tensor = aten::conv2d", 1, exactly=True) \ .check_not("prepacked::linear_clamp_run") \ .check_not("prepacked::conv2d_clamp_run") \ .run(optimized_scripted_model_no_prepack.graph) torch.testing.assert_close(initial_result, optimized_result_no_prepack, rtol=1e-2, atol=1e-3) bn_test_module = BNTestModule() bn_scripted_module = torch.jit.script(bn_test_module) bn_scripted_module.eval() self.assertEqual(len(torch.jit.export_opnames(bn_scripted_module)), 11) FileCheck().check_count('prim::CallMethod[name="forward"]', 2, exactly=True) \ .run(str(get_forward(bn_scripted_module._c).graph)) optimization_blocklist_no_prepack = {MobileOptimizerType.INSERT_FOLD_PREPACK_OPS} bn_fold_scripted_module = optimize_for_mobile(bn_scripted_module, optimization_blocklist_no_prepack) self.assertEqual(len(torch.jit.export_opnames(bn_fold_scripted_module)), 1) bn_input = torch.rand(1, 1, 6, 6) torch.testing.assert_close(bn_scripted_module(bn_input), bn_fold_scripted_module(bn_input), rtol=1e-2, atol=1e-3) optimization_blocklist_no_fold_bn = {MobileOptimizerType.CONV_BN_FUSION} no_bn_fold_scripted_module = optimize_for_mobile(bn_scripted_module, optimization_blocklist_no_fold_bn) FileCheck().check_count("aten::batch_norm", 1, exactly=True) \ .run(str(get_forward_graph(no_bn_fold_scripted_module._c))) bn_input = torch.rand(1, 1, 6, 6) torch.testing.assert_close(bn_scripted_module(bn_input), no_bn_fold_scripted_module(bn_input), rtol=1e-2, atol=1e-3) class MyMobileOptimizedTagTest(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape)) self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim)) def forward(self, x): o = F.linear(x, self.linear_weight, self.linear_bias) return F.relu(o) mobile_optimized_tag_module = MyMobileOptimizedTagTest() m = 
torch.jit.script(mobile_optimized_tag_module) m.eval() opt_m = optimize_for_mobile(m) tag = getattr(opt_m, "mobile_optimized", None) self.assertTrue(tag) class MyPreserveMethodsTest(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear_weight = torch.nn.Parameter(torch.rand(linear_weight_shape)) self.linear_bias = torch.nn.Parameter(torch.rand(weight_output_dim)) def forward(self, x): o = F.linear(x, self.linear_weight, self.linear_bias) return F.relu(o) @torch.jit.export def preserveThis(self): pass preserve_method_module = MyPreserveMethodsTest() m = torch.jit.script(preserve_method_module) m.eval() opt_m = optimize_for_mobile(m) no_preserveThis = getattr(opt_m, "preserveThis", None) self.assertEqual(no_preserveThis, None) opt_m = optimize_for_mobile(m, preserved_methods=["preserveThis"]) preserveThis = getattr(opt_m, "preserveThis", None) self.assertNotEqual(preserveThis, None) class OptimizeNoForwardTest(torch.nn.Module): def __init__(self) -> None: super().__init__() self.l = nn.Linear(10, 100) self.l2 = nn.Linear(100, 1) self.d = nn.Dropout(p=0.2) @torch.jit.export def foo(self, x): x = self.d(F.relu(self.l(x))) x = self.l2(x) x = x + torch.ones(1, 100) return F.relu(x) input_data = torch.ones(1, 10) m = torch.jit.script(OptimizeNoForwardTest()) m.eval() initial_result = m.foo(input_data) optimized_scripted_model = optimize_for_mobile(m, preserved_methods=['foo']) optimized_result = optimized_scripted_model.foo(input_data) FileCheck().check_not("dropout.__") \ .check_count("aten::_add_relu(", 1, exactly=True) \ .run(optimized_scripted_model.foo.graph) torch.testing.assert_close(initial_result, optimized_result, rtol=1e-2, atol=1e-3) class BNTestNoForwardModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(1, 20, 5, 1) self.bn = torch.nn.BatchNorm2d(num_features=20) self.bn.eps = 0.0023 @torch.jit.export def foo(self, x): x = self.conv(x) x = self.bn(x) return x bn_test_no_forward_module = 
BNTestNoForwardModule() bn_no_forward_scripted_module = torch.jit.script(bn_test_no_forward_module) bn_no_forward_scripted_module.eval() self.assertEqual(len(torch.jit.export_opnames(bn_no_forward_scripted_module)), 11) FileCheck().check_count('prim::CallMethod[name="forward"]', 2, exactly=True) \ .run(bn_no_forward_scripted_module.foo.graph) bn_fold_no_forward_scripted_module = optimize_for_mobile(bn_no_forward_scripted_module, preserved_methods=['foo']) self.assertEqual(len(torch.jit.export_opnames(bn_fold_no_forward_scripted_module)), 1) bn_input = torch.rand(1, 1, 6, 6) torch.testing.assert_close( bn_no_forward_scripted_module.foo(bn_input), bn_fold_no_forward_scripted_module.foo(bn_input), rtol=1e-2, atol=1e-3) @skipIfNoXNNPACK def test_quantized_conv_no_asan_failures(self): # There were ASAN failures when fold_conv_bn was run on # already quantized conv modules. Verifying that this does # not happen again. if 'qnnpack' not in torch.backends.quantized.supported_engines: return class Child(nn.Module): def __init__(self) -> None: super().__init__() self.conv2 = nn.Conv2d(1, 1, 1) def forward(self, x): x = self.conv2(x) return x class Parent(nn.Module): def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.conv1 = nn.Conv2d(1, 1, 1) self.child = Child() self.dequant = torch.ao.quantization.DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv1(x) x = self.child(x) x = self.dequant(x) return x with override_quantized_engine('qnnpack'): model = Parent() model.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack') torch.ao.quantization.prepare(model, inplace=True) model(torch.randn(4, 1, 4, 4)) torch.ao.quantization.convert(model, inplace=True) model = torch.jit.script(model) # this line should not have ASAN failures optimize_for_mobile(model) def test_generate_mobile_module_lints(self): class MyTestModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(4, 
4) self.dropout = torch.nn.Dropout(p=0.5) def forward(self, inputs): out = self.fc(inputs) out = self.dropout(out) return out class MyBNModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.bn = torch.nn.BatchNorm2d(4, affine=True) def forward(self, inputs): bn = self.bn(inputs) return bn class MyBundledInputModule(torch.nn.Module): def forward(self, inputs): return inputs def get_lint_count_by_type(lint_type, module_lint_List): return len([lint_dict for lint_dict in module_lint_List if lint_dict['name'] == lint_type.name]) test_module = torch.jit.script(MyTestModule()) test_module_lint_list = generate_mobile_module_lints(test_module) self.assertEqual(len(test_module_lint_list), 4) self.assertEqual(get_lint_count_by_type(LintCode.BUNDLED_INPUT, test_module_lint_list), 1) self.assertEqual(get_lint_count_by_type(LintCode.DROPOUT, test_module_lint_list), 1) self.assertEqual(get_lint_count_by_type(LintCode.REQUIRES_GRAD, test_module_lint_list), 2) bn_module = torch.jit.script(MyBNModule()) bn_module_lint_list = generate_mobile_module_lints(bn_module) self.assertEqual(len(bn_module_lint_list), 4) self.assertEqual(get_lint_count_by_type(LintCode.BUNDLED_INPUT, bn_module_lint_list), 1) self.assertEqual(get_lint_count_by_type(LintCode.BATCHNORM, bn_module_lint_list), 1) self.assertEqual(get_lint_count_by_type(LintCode.REQUIRES_GRAD, bn_module_lint_list), 2) bi_module = torch.jit.script(MyBundledInputModule()) torch.utils.bundled_inputs.augment_model_with_bundled_inputs( bi_module, [(torch.tensor([1]),)], []) bi_module_lint_list = generate_mobile_module_lints(bi_module) self.assertEqual(len(bi_module_lint_list), 0) @skipIfNoXNNPACK def test_preserve_bundled_inputs_methods(self): class MyBundledInputModule(torch.nn.Module): def forward(self, inputs): return inputs class MyIncompleteBundledInputModule(torch.nn.Module): def forward(self, inputs): return inputs @torch.jit.export def get_all_bundled_inputs(self): pass bi_module = 
torch.jit.script(MyBundledInputModule()) module_optim_bi_not_preserved = optimize_for_mobile(bi_module) # Expected to be False since no bundled inputs methods were added self.assertFalse( hasattr(module_optim_bi_not_preserved, 'get_all_bundled_inputs') or hasattr(module_optim_bi_not_preserved, 'get_num_bundled_inputs') ) # Add bundled inputs methods to the module torch.utils.bundled_inputs.augment_model_with_bundled_inputs( bi_module, [(torch.tensor([1]),)], []) # Now they should be preserved module_optim_bi_preserved = optimize_for_mobile(bi_module) # All of the bundled inputs methods were preserved self.assertTrue( hasattr(module_optim_bi_preserved, 'get_all_bundled_inputs') and hasattr(module_optim_bi_preserved, 'get_num_bundled_inputs') ) bundled_input = module_optim_bi_preserved.get_all_bundled_inputs()[0] module_optim_bi_preserved(*bundled_input) # If not all 3 bundled inputs methods are present in the module, # we will not try to preserve them unless specified by the user. incomplete_bi_module = torch.jit.script(MyIncompleteBundledInputModule()) incomplete_bi_module_optim = optimize_for_mobile(incomplete_bi_module) self.assertFalse(hasattr(incomplete_bi_module_optim, 'get_all_bundled_inputs')) # Specifically preserve get_all_bundled_inputs even if it's the only one # bundled inputs method available. 
incomplete_bi_module_optim = optimize_for_mobile(incomplete_bi_module, preserved_methods=['get_all_bundled_inputs']) self.assertTrue(hasattr(incomplete_bi_module_optim, 'get_all_bundled_inputs')) @skipIfNoXNNPACK def test_hoist_conv_packed_params(self): if 'qnnpack' not in torch.backends.quantized.supported_engines: return class Standalone(nn.Module): def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.conv1 = nn.Conv2d(1, 1, 1) self.conv2 = nn.Conv2d(1, 1, 1) self.relu = nn.ReLU() self.dequant = torch.ao.quantization.DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv1(x) x = self.conv2(x) x = self.relu(x) x = self.dequant(x) return x def fuse_model(self): torch.ao.quantization.fuse_modules(self, [['conv2', 'relu']], inplace=True) class Child(nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(1, 1, 1) def forward(self, x): x = self.conv1(x) return x class Parent(nn.Module): def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.conv1 = nn.Conv2d(1, 1, 1) self.child = Child() # TODO: test nn.Sequential after #42039 is fixed self.dequant = torch.ao.quantization.DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv1(x) x = self.child(x) x = self.dequant(x) return x def fuse_model(self): pass with override_quantized_engine('qnnpack'): def _quant_script_and_optimize(model): model.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack') model.fuse_model() torch.ao.quantization.prepare(model, inplace=True) model(torch.randn(4, 1, 4, 4)) torch.ao.quantization.convert(model, inplace=True) model = torch.jit.script(model) model_optim = optimize_for_mobile(model) return model, model_optim # basic case m, m_optim = _quant_script_and_optimize(Standalone()) FileCheck().check_not('Conv2d = prim::GetAttr[name="conv1"]') \ .check_count("__torch__.torch.classes.quantized.Conv2dPackedParamsBase = prim::Constant", 2, 
exactly=True) \ .run(m_optim.graph) self.assertFalse(hasattr(m_optim, "conv1")) self.assertFalse(hasattr(m_optim, "conv2")) data = torch.randn(4, 1, 4, 4) m_res = m(data) m_optim_res = m_optim(data) torch.testing.assert_close(m_res, m_optim_res, rtol=1e-2, atol=1e-3) # generic case m, m_optim = _quant_script_and_optimize(Parent()) FileCheck().check_not('Conv2d = prim::GetAttr[name="conv1"]') \ .check_count("__torch__.torch.classes.quantized.Conv2dPackedParamsBase = prim::Constant", 2, exactly=True) \ .run(m_optim.graph) self.assertFalse(hasattr(m_optim, "conv1")) self.assertFalse(hasattr(m_optim, "child")) data = torch.randn(4, 1, 4, 4) m_res = m(data) m_optim_res = m_optim(data) torch.testing.assert_close(m_res, m_optim_res, rtol=1e-2, atol=1e-3) @skipIfNoXNNPACK @unittest.skipUnless(HAS_TORCHVISION, "Needs torchvision") def test_mobilenet_optimize_for_mobile(self): m = torchvision.models.mobilenet_v3_small() m = torch.jit.script(m) m = optimize_for_mobile(m) # run forward 3 times until segfault, see https://github.com/pytorch/pytorch/issues/52463 x = torch.zeros(1, 3, 56, 56) self.assertEqual(m(x).numel(), 1000) self.assertEqual(m(x).numel(), 1000) self.assertEqual(m(x).numel(), 1000) def test_clone_module_with_class(self): class MyInnerTestModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.pqr = torch.Tensor([10., 20., 30.]) def forward(self, inputs): return inputs @torch.jit.export def dummy_method_not_cloned(self): return 20 class MyTestModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.abc = 23 self.pqr = torch.Tensor([1., 2., 3.]) self.inner = MyInnerTestModule() def forward(self, inputs): x = self.dummy_method_cloned() # The call to self.inner.dummy_method_not_cloned should not raise an error y = self.inner.dummy_method_not_cloned() # The call to self.inner.pqr should not raise an error z = self.inner.pqr return (inputs, x, y, z) @torch.jit.export def dummy_method_not_cloned2(self): # The call to 
self.inner.dummy_method_not_cloned should not raise an error y = self.inner.dummy_method_not_cloned() # The call to self.inner.pqr should not raise an error z = self.inner.pqr return self.pqr, self.dummy_method_not_cloned(), y, z @torch.jit.export def dummy_method_not_cloned(self): return None @torch.jit.export def dummy_method_cloned(self): return None @torch.jit.export def dummy_method_ref_attr_pqr(self): return self.pqr, self.inner.pqr m = torch.jit.script(MyTestModule()) # Check that the methods exist on the original model. self.assertEqual(hasattr(m, "dummy_method_not_cloned"), True) self.assertEqual(hasattr(m, "dummy_method_cloned"), True) self.assertEqual(hasattr(m, "dummy_method_not_cloned2"), True) self.assertEqual(hasattr(m, "pqr"), True) # Case-1: Successfully clone, ignoring 2 methods, keeping all attributes. cloned = torch._C._hack_do_not_use_clone_module_with_class( m._c, ["dummy_method_not_cloned", "dummy_method_not_cloned2"], # ignored_methods [], # ignored_attributes ) # Check that the ignored methods don't exist on the cloned model. self.assertEqual(hasattr(cloned, "dummy_method_not_cloned"), False) self.assertEqual(hasattr(cloned, "dummy_method_cloned"), True) self.assertEqual(hasattr(cloned, "dummy_method_not_cloned2"), False) self.assertEqual(hasattr(cloned, "pqr"), True) # Check that the cloned class has a classname that starts with __torch__. self.assertTrue( cloned.qualified_name.startswith('__torch__.'), ("Expected the cloned module's name to start with the string " f"'__torch__.', but got: {cloned.qualified_name}"), ) # Case-2: Successfully clone the module, ignoring the attribute pqr, and the method that references it. cloned = torch._C._hack_do_not_use_clone_module_with_class( m._c, ["dummy_method_not_cloned", "dummy_method_not_cloned2", "dummy_method_ref_attr_pqr"], ["pqr"], ) # Check that the ignored methods don't exist on the cloned model. 
self.assertEqual(hasattr(cloned, "dummy_method_not_cloned"), False) self.assertEqual(hasattr(cloned, "dummy_method_cloned"), True) self.assertEqual(hasattr(cloned, "dummy_method_not_cloned2"), False) self.assertEqual(hasattr(cloned, "dummy_method_ref_attr_pqr"), False) self.assertEqual(hasattr(cloned, "pqr"), False) # Case-3: The statement below will throw since dummy_method_cloned2 is preserved, # and references dummy_method_not_cloned, which is not cloned. with self.assertRaises(RuntimeError): cloned = torch._C._hack_do_not_use_clone_module_with_class(m._c, ["dummy_method_not_cloned"], []) # Case-4: The statement below will throw since dummy_method_ref_attr_pqr # is preserved, and references "pqr", which is not cloned. with self.assertRaises(RuntimeError): cloned = torch._C._hack_do_not_use_clone_module_with_class( m._c, ["dummy_method_not_cloned", "dummy_method_not_cloned2"], ["pqr"], ) if __name__ == '__main__': run_tests()
TestOptimizer
python
psf__black
tests/data/cases/comments9.py
{ "start": 4634, "end": 5279 }
class ____: # First method has no empty lines between bare class def. # More comments. def first_method(self): pass # Regression test for https://github.com/psf/black/issues/3454. def foo(): pass # Trailing comment that belongs to this function @decorator1 @decorator2 # fmt: skip def bar(): pass # Regression test for https://github.com/psf/black/issues/3454. def foo(): pass # Trailing comment that belongs to this function. # NOTE this comment only has one empty line below, and the formatter # should enforce two blank lines. @decorator1 # A standalone comment def bar(): pass
MyClass
python
realpython__materials
python-import/finders_and_loaders/debug_importer.py
{ "start": 34, "end": 215 }
class ____: @classmethod def find_spec(cls, name, path, target=None): print(f"Importing {name!r}") return None sys.meta_path.insert(0, DebugFinder)
DebugFinder
python
tensorflow__tensorflow
tensorflow/python/compiler/tensorrt/test/tf_trt_integration_test_base.py
{ "start": 4243, "end": 49631 }
class ____(test_util.TensorFlowTestCase): """Class to test Tensorflow-TensorRT integration.""" @property def trt_incompatible_op(self): return math_ops.erfc @property def trt_incompatible_binary_op(self): return math_ops.igamma @property def precision_modes(self): return ["FP32", "FP16", "INT8"] # str is bytes in py2, but unicode in py3. def _ToUnicode(self, s): if isinstance(s, str): return s return s.decode("utf-8") def _ToBytes(self, s): if isinstance(s, str): return s.encode("utf-8") return s def _ToString(self, s): if isinstance(s, str): return s return s.decode("utf-8") def __init__(self, methodName="runTest"): # pylint: disable=invalid-name super(TfTrtIntegrationTestBase, self).__init__(methodName) self._trt_test_params = None self._disable_non_trt_optimizers = False self._profile_strategy = "ImplicitBatchModeCompatible" def setUp(self): """Setup method.""" super().setUp() warnings.simplefilter("always") if not is_tensorrt_enabled(): self.skipTest("Test requires TensorRT") def tearDown(self): """Making sure to clean artifact.""" idx = 0 while gc.garbage: gc.collect() # Force GC to destroy the TRT engine cache. idx += 1 if idx >= 10: # After 10 iterations, break to avoid infinite collect. break def _GetTensorSpec(self, shape, mask, dtype, name): # Set dimension i to None if mask[i] == False assert len(shape) == len(mask), ( f"len(shape): {len(shape)} == len(mask): {len(mask)}") new_shape = [s if m else None for s, m in zip(shape, mask)] return tensor_spec.TensorSpec(new_shape, dtype, name) def BuildParams(self, graph_fn, dtype, input_shapes, output_shapes): """Build test parameters. The input_shapes and output_shapes arguments are known (static) shapes that can be used to generate test data. To define the model, we also specify corresponding input/output TensorSpecs. These are defined using the shape arguments. For each input tensor we define: input_spec = [None] + input_shape[1:] and similarly for output shapes. 
This means that we leave the first (batch) dimension unknown, the rest is just copied from the shapes arg. Args: graph_fn: The function to build the graph. dtype: The element type. input_shapes: The input shapes. output_shapes: The output shapes. Returns: The test parameters. """ input_mask = [[False] + [True] * (len(shape) - 1) for shape in input_shapes] output_mask = [[False] + [True] * (len(shape) - 1) if shape else [] for shape in output_shapes] return self.BuildParamsWithMask(graph_fn, dtype, input_shapes, output_shapes, input_mask, output_mask, [], []) def BuildParamsWithMask(self, graph_fn, dtype, input_shapes, output_shapes, input_mask, output_mask, extra_inputs, extra_outputs): """Build test parameters with static or dynamic input shapes. To define dynamic shapes give a boolean mask that describes which dimensions to treat as known. The values in input_mask are interpreted the following way: - True: known dim (use the corresponding value from input_shapes) - False: unknown dim (replace the corresponding value from input_shapes with None) For example, to define the first two dimension with unknown size use input_shapes=[[1,2,1,8]], input_mask=[[False, False, True, True]]. Args: graph_fn: The function to build the graph. dtype: The element type. input_shapes: The input shapes. output_shapes: The output shapes. input_mask: The input shape masks. output_mask: the output shape masks. extra_inputs: list of additional input shapes extra_outputs: list of additional outputs shapes Returns: The test parameters. """ def _ValidateShapes(shapes): # Make sure all the shapes are fully specified. 
for shape in shapes: assert all(shape), f"Shape unspecified: {shape}" _ValidateShapes(input_shapes) _ValidateShapes(output_shapes) assert len(input_mask) == len(input_shapes), ( f"Inconsistent input_mask and input_shapes: len({input_mask}) != " f"len({input_shapes}).") assert len(output_mask) == len(output_shapes), ( f"Inconsistent output_mask and output_shapes: len({output_mask}) != " f"len({output_shapes}).") for extra_in_shape, extra_out_shape in zip(extra_inputs, extra_outputs): assert len(input_shapes) == len(extra_in_shape), ( f"Inconsistent input_shapes and extra_in_shape: len({input_shapes}) " f"!= len({extra_in_shape}).") assert len(output_shapes) == len(extra_out_shape), ( f"Inconsistent output_shapes and extra_out_shape: " f"len({output_shapes}) != len({extra_out_shape}).") return TfTrtIntegrationTestParams( graph_fn=graph_fn, input_specs=[ self._GetTensorSpec(shape, mask, dtype, "input_%d" % i) for i, (shape, mask) in enumerate(zip(input_shapes, input_mask)) ], output_specs=[ self._GetTensorSpec(shape, mask, dtype, "output_%d" % i) for i, (shape, mask) in enumerate(zip(output_shapes, output_mask)) ], input_dims=[input_shapes] + extra_inputs, expected_output_dims=[output_shapes] + extra_outputs) def DisableNonTrtOptimizers(self): self._disable_non_trt_optimizers = True def GetParams(self): """Returns a TfTrtIntegrationTestParams for the test.""" raise NotImplementedError() def GetConversionParams(self, run_params): """Returns a TrtConversionParams for test.""" conversion_params = trt_convert.TrtConversionParams( # We use the minimum of all the batch sizes, so when multiple different # input shapes are provided it'll always create new engines in the # cache, and we can therefore test the cache behavior. 
max_workspace_size_bytes=( trt_convert.DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES), precision_mode=run_params.precision_mode, minimum_segment_size=2, maximum_cached_engines=1, use_calibration=run_params.use_calibration) return conversion_params def GetMaxBatchSize(self, run_params): """Returns the max_batch_size that the converter should use for tests.""" if run_params.dynamic_engine: return None batch_list = [] for dims_list in self._GetParamsCached().input_dims: assert dims_list, f"Expect non-empty `dim_list` but got: {dims_list}" # Each list of shapes should have same batch size. input_batches = [dims[0] for dims in dims_list] assert max(input_batches) == min(input_batches), ( f"Inconsistent batch_size: max({input_batches}) != " f"min({input_batches}).") batch_list.append(input_batches[0]) return max(batch_list) def ShouldRunTest(self, run_params): """Whether to run the test.""" # Ensure use_calibration=True in case of INT8 precision return (run_params.use_calibration or not IsQuantizationMode( run_params.precision_mode)), "test either calibration or non-INT8" def ExpectedEnginesToBuild(self, run_params): """Returns the expected engines to build, implemented by subclass.""" raise NotImplementedError() def ExpectedConnections(self, run_params): """Returns the expected edges or an empty dict to skip the check.""" return {} def ExpectedMaxBatchSizes(self, run_params): """Returns the expected maximum batch sizes of the build engines.""" return None def ExpectedAbsoluteTolerance(self, run_params): """The absolute tolerance to compare floating point results.""" if run_params.precision_mode == "INT8": return 3e-1 return 1.e-05 if run_params.precision_mode == "FP32" else 2.e-02 def ExpectedRelativeTolerance(self, run_params): """The relative tolerance to compare floating point results.""" if run_params.precision_mode == "INT8": return 1e-1 return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-02 def _GetParamsCached(self): if self._trt_test_params is None: 
self._trt_test_params = self.GetParams() return self._trt_test_params def _GetGPUOptions(self): gpu_options = config_pb2.GPUOptions() gpu_options.allow_growth = True return gpu_options def _GetConfigProto(self, run_params, graph_state): """Get config proto based on specific settings.""" conversion_params = self.GetConversionParams(run_params) max_batch_size = self.GetMaxBatchSize(run_params) if graph_state == GraphState.INFERENCE and run_params.convert_online: rewriter_cfg = trt_convert.get_tensorrt_rewriter_config( conversion_params, is_dynamic_op=run_params.dynamic_engine, max_batch_size=max_batch_size, disable_non_trt_optimizers=self._disable_non_trt_optimizers) else: rewriter_cfg = rewriter_config_pb2.RewriterConfig() if self._disable_non_trt_optimizers: trt_utils.disable_non_trt_optimizers_in_rewriter_config(rewriter_cfg) config = config_pb2.ConfigProto( gpu_options=self._GetGPUOptions(), graph_options=config_pb2.GraphOptions(rewrite_options=rewriter_cfg)) return config def _GetFeedNames(self): params = self._GetParamsCached() # Construct the feeds tensor names by appending :0 to the node names. return [spec.name + ":0" for spec in params.input_specs] def _GetFetchNames(self): params = self._GetParamsCached() # Construct the fetches tensor names by appending :0 to the node names. 
return [spec.name + ":0" for spec in params.output_specs] def _GetFeedDict(self, inputs_data): return {name: data for name, data in zip(self._GetFeedNames(), inputs_data)} def _RunGraphV1(self, saved_model_dir, inputs_data, config, num_runs=2): """Run given graphdef multiple times using TF 1.x runtime.""" params = self._GetParamsCached() fetches = self._GetFetchNames() g = ops.Graph() with g.as_default(): with self.session(graph=g, config=config, use_gpu=True) as sess: loader.load(sess, [tag_constants.SERVING], saved_model_dir) vals = [] # Run for each input(s) shape for expected_shapes, current_input_data in zip( params.expected_output_dims, inputs_data): val = None for _ in range(num_runs): new_val = sess.run(fetches, self._GetFeedDict(current_input_data)) self.assertEqual(len(expected_shapes), len(new_val)) for expected_shape, actual_val in zip(expected_shapes, new_val): self.assertEqual(list(expected_shape), list(actual_val.shape)) if val is not None: # Some ops may have nondeterministic output. E.g. Conv2D may use # winograd algorithm. So we set atol/rtol be larger than 1.e-06. self.assertAllClose(val, new_val, atol=1.e-05, rtol=1.e-05) val = new_val vals.append(val) return vals def _RunGraphV2(self, saved_model_dir, inputs_data, graph_state, num_runs=2): """Run given graphdef multiple times using TF 2.0 runtime.""" params = self._GetParamsCached() root = load.load(saved_model_dir) func = root.signatures[ signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] results = [] for expected_shapes, current_input_data in zip(params.expected_output_dims, inputs_data): val = None for _ in range(num_runs): feed_dict = { params.input_specs[i].name: current_input_data[i] for i in range(len(params.input_specs)) } new_val = func(**feed_dict) assert isinstance( new_val, dict), (f"Invalid type for `new_val`, expected `dict`. " f"Got: {type(new_val)}.") # The key of the output map is always like output_i. 
new_val = [new_val[key] for key in sorted(new_val)] # Each element is an eager Tensor, and accessing individual elements is # very expensive, so we convert them to a numpy array first. new_val = [v.numpy() for v in new_val] self.assertEqual(len(expected_shapes), len(new_val)) for expected_shape, actual_val in zip(expected_shapes, new_val): self.assertEqual(list(expected_shape), list(actual_val.shape)) if val is not None: # Some ops may have nondeterministic output. E.g. Conv2D may use # winograd algorithm. So we set atol/rtol be larger than 1.e-06. self.assertAllClose(val, new_val, atol=1.e-05, rtol=1.e-05) val = new_val results.append(val) return results def _RunGraph(self, run_params, saved_model_dir, inputs_data, graph_state, num_runs=2): params = self._GetParamsCached() for data in inputs_data: assert len(params.input_specs) == len(data), ( f"Inconsistent params.input_specs and data: " f"len({params.input_specs}) != len({data}).") if run_params.is_v2: results = self._RunGraphV2(saved_model_dir, inputs_data, graph_state, num_runs) gc.collect() # Force GC to destroy the TRT engine cache. return results # The default config for tf.session is None. Create a config with # TensorRTOptimizer enabled to support convert_online for inference. config = None # TODO(b/170220818): use the default session config to run inferenence # graphs for the offline conversion case after fixing the bug. 
if graph_state == GraphState.INFERENCE: config = self._GetConfigProto(run_params, GraphState.INFERENCE) return self._RunGraphV1(saved_model_dir, inputs_data, config, num_runs) def _CreateConverter(self, run_params, saved_model_dir, conversion_params): """Returns a TrtGraphConverter.""" if run_params.is_v2: converter_v2 = trt_convert.TrtGraphConverterV2( input_saved_model_dir=saved_model_dir, use_dynamic_shape=run_params.dynamic_shape, dynamic_shape_profile_strategy=self._profile_strategy, **conversion_params._asdict()) if self._disable_non_trt_optimizers: converter_v2._test_only_disable_non_trt_optimizers = True # pylint: disable=protected-access return converter_v2 converter_v1 = trt_convert.TrtGraphConverter( input_saved_model_dir=saved_model_dir, max_batch_size=self.GetMaxBatchSize(run_params), max_workspace_size_bytes=conversion_params.max_workspace_size_bytes, precision_mode=conversion_params.precision_mode, minimum_segment_size=conversion_params.minimum_segment_size, is_dynamic_op=run_params.dynamic_engine, maximum_cached_engines=conversion_params.maximum_cached_engines, use_calibration=conversion_params.use_calibration) if self._disable_non_trt_optimizers: converter_v1._test_only_disable_non_trt_optimizers = True # pylint: disable=protected-access return converter_v1 def _GetCalibratedInferGraph(self, run_params, saved_model_dir, inputs_data): """Return trt converted graphdef in INT8 mode.""" conversion_params = self.GetConversionParams(run_params) logging.info(conversion_params) assert conversion_params.precision_mode == "INT8", ( f"Incorrect precision mode, expected INT8 but got: " f"{conversion_params.precision_mode}.") assert run_params.dynamic_engine, "dynamic_engine parameter must be True." assert conversion_params.maximum_cached_engines == 1, ( f"maximum_cached_engines: {conversion_params.maximum_cached_engines} " f"== 1") assert conversion_params.use_calibration, "use_calibration must be True." # We only support calibrating single engine. 
# TODO(aaroey): fix this. assert len(inputs_data) == 1, (f"len(inputs_data): {len(inputs_data)} == 1") converter = self._CreateConverter(run_params, saved_model_dir, conversion_params) if run_params.is_v2: def CalibrationInputFn(): for data_tensors in inputs_data: yield data_tensors converter.convert(calibration_input_fn=CalibrationInputFn) else: int8_gdef = converter.convert() self._VerifyGraphDef(run_params, saved_model_dir, int8_gdef, GraphState.CALIBRATE) converter.calibrate( fetch_names=self._GetFetchNames(), num_runs=5, feed_dict_fn=lambda: self._GetFeedDict(inputs_data[0])) if run_params.dynamic_shape and self._ShouldConverterBuild(run_params): logging.info("Using build mode") def _BuildInputFn(): for shapes in self._GetParamsCached().input_dims: yield [ array_ops.zeros(x, dtype=spec.dtype) for (x, spec) in zip(shapes, self._GetParamsCached().input_specs) ] converter.build(input_fn=_BuildInputFn) trt_saved_model_dir = self._GetSavedModelDir(run_params, GraphState.CALIBRATE) converter.save(trt_saved_model_dir) return trt_saved_model_dir def _ShouldConverterBuild(self, run_params): return True def _GetInferGraph(self, run_params, saved_model_dir): """Return trt converted graphdef.""" conversion_params = self.GetConversionParams(run_params) logging.info(conversion_params) converter = self._CreateConverter(run_params, saved_model_dir, conversion_params) converter.convert() if run_params.is_v2: try: line_length = max(160, os.get_terminal_size().columns) except OSError: line_length = 160 converter.summary(line_length=line_length, detailed=True) if run_params.dynamic_shape and self._ShouldConverterBuild(run_params): logging.info("Using build mode") def _BuildInputFn(): for shapes in self._GetParamsCached().input_dims: yield [ array_ops.zeros(x, dtype=spec.dtype) for (x, spec) in zip(shapes, self._GetParamsCached().input_specs) ] converter.build(input_fn=_BuildInputFn) trt_saved_model_dir = self._GetSavedModelDir(run_params, GraphState.INFERENCE) 
converter.save(trt_saved_model_dir) return trt_saved_model_dir def _GetGraphStateLabel(self, graph_state): if graph_state == GraphState.ORIGINAL: return "Original" elif graph_state == GraphState.CALIBRATE: return "CalibEngine" elif graph_state == GraphState.INFERENCE: return "InferEngine" else: return "UnknownState" def _WriteGraph(self, run_params, gdef, graph_state): temp_dir = os.getenv("TRT_TEST_TMPDIR") if not temp_dir: return graph_name = ( self.__class__.__name__ + "_" + run_params.test_name + "_" + self._GetGraphStateLabel(graph_state) + ".pbtxt") logging.info("Writing graph to %s/%s", temp_dir, graph_name) graph_io.write_graph(gdef, temp_dir, graph_name) # Removes the prefix(s) of function name(s). # The input value can be a string or a sequence of string. def _Canonicalize(self, value): if isinstance(value, str): return self._ToString(value.split("/")[-1]) elif isinstance(value, collections.abc.Iterable): return set(self._Canonicalize(nm) for nm in value) else: raise TypeError( "'_Canonicalize' can only be used on strings or sequence of strings!") # Removes the graph sequence number prefix from the name(s) only if the # name(s) has a prefix TRTEngineOp_n_. When expecting_prefix is true, asserts # such a prefix exists. # The input value can be a string or a sequence of string. 
def _RemoveGraphSequenceNumberImpl(self, value, expecting_prefix): if isinstance(value, str): match = re.search(r"TRTEngineOp_\d{3,}_", value) has_prefix = match and value.startswith(match.group(0)) assert (not expecting_prefix) or has_prefix, ( f"Expect (not expecting_prefix) or has_prefix but got: " f"- expecting_prefix = {expecting_prefix}\n" f"- has_prefix = {has_prefix}") if has_prefix: parts = value.split("_", maxsplit=2) assert len(parts) == 3, ( f"Incorrect `parts` of length == 3, but got: len({parts}).") return parts[0] + "_" + parts[2] return value elif isinstance(value, collections.abc.Iterable): return set( self._RemoveGraphSequenceNumberImpl(nm, expecting_prefix) for nm in value) else: raise TypeError( "'_RemoveGraphSequenceNumberImpl' can only be used on strings " "or sequence of strings!") def _RemoveGraphSequenceNumber(self, name): return self._RemoveGraphSequenceNumberImpl(name, True) def _MayRemoveGraphSequenceNumber(self, name): return self._RemoveGraphSequenceNumberImpl(name, False) def _VerifyConnections(self, expected_engines, expected_input_map, original_gdef, converted_gdef): """Checks that the converted graph contains the expected connections.""" old_to_new_node_map = { self._ToString(node.name): self._ToString(node.name) for node in original_gdef.node } for engine_name, node_names in expected_engines.items(): for node_name in node_names: old_to_new_node_map[node_name] = engine_name def _InputName(inp): inp = self._ToString(inp) prefix = "" if inp[0] == "^": prefix = "^" inp = inp[1:] parts = inp.split(":") if len(parts) > 1 and parts[-1].isdigit(): inp = inp[:-len(parts[-1]) - 1] return (prefix, inp) # Compute the actual mapping from each node to its input nodes. If a cast # op doesn't exist in the original graph, we replace the use of the cast op # with the input of the op. This allows the verification to handle the case # where the TF-TRT bridge splits a cast op into a chain of two cast ops. 
new_cast_op_name_to_node_map = { node.name: node for node in converted_gdef.node if (node.name not in old_to_new_node_map and node.op == "Cast") } actual_input_map = {} for node in converted_gdef.node: name_str = node.name # Only nodes from the original graph or TRTEngineOp nodes are added as # keys to the map. if node.op == "TRTEngineOp": name_str = self._RemoveGraphSequenceNumber(name_str) elif name_str not in old_to_new_node_map: continue actual_input_map[name_str] = set() input_set = actual_input_map[name_str] for inp in node.input: (prefix, node_name) = _InputName(inp) node_name = self._MayRemoveGraphSequenceNumber(node_name) if node_name in new_cast_op_name_to_node_map: (prefix, node_name) = _InputName( new_cast_op_name_to_node_map[node_name].input[0]) input_set.add(prefix + node_name) self.assertEqual( expected_input_map, actual_input_map, msg="\nexpected:\n%s\nvs actual:\n%s" % (sorted(expected_input_map.items()), sorted(actual_input_map.items()))) def _VerifyMaxBatchSizeAnnotations( self, expected_engines, original_gdef, converted_gdef, default_max_batch_size, expected_max_batch_sizes=None, ): """Verifies the max batch size annotations in the original and converted GraphDef. Args: expected_engines: A sequence of engines names. original_gdef: GraphDef. The graph def before TensorRT conversion. converted_gdef: GraphDef. The graph def after TensorRT conversion. default_max_batch_size: The default maximum batch size to use if no node inside a segment is annotated with a customized max batch size. This value is None when the graph is converted to TF-TRT with dynamic engines. expected_max_batch_sizes: Optional. A sequence of max batch sizes for all the engines. `None` if does not check enforce max batch sizes. 
""" if isinstance(expected_max_batch_sizes, collections.abc.Collection): self.assertEqual(len(expected_max_batch_sizes), len(expected_engines)) else: self.assertIsNone( expected_max_batch_sizes, "'expected_max_batch_sizes' shall only be a sequence " "of integers or `None`.") def _ChainAllNodes(graph_def): return itertools.chain( graph_def.node, itertools.chain( *[func.node_def for func in graph_def.library.function])) old_name_to_node_map = { self._ToString(node.name): node for node in _ChainAllNodes(original_gdef) } new_name_to_func_map = { self._ToString(func.signature.name): func for func in converted_gdef.library.function } def _DetectStaticBatchSize(node_def): """Returns the static batch size of an operation or None. It is incorrect to use the output shapes to find the batch size of an operation, as the segmenter actually uses the input shapes. However, it is a simplification and works for most of the cases for the test purposes. Args: node_def: `tf.NodeDef`. The target node for analysis. Returns: If all the outputs of the node have the same static batch size, returns the int value for the batch size. Otherwise returns None. """ shapes = node_def.attr["_output_shapes"].list.shape batch_size = set( list(s.dim)[0].size if len(s.dim) >= 2 else None for s in shapes) if len(batch_size) == 1 and list(batch_size)[0] >= 1: return list(batch_size)[0] return None name_to_engines_map = {} actual_max_batch_sizes = [] for node in _ChainAllNodes(converted_gdef): if node.op == "TRTEngineOp": engine = node engine_name = self._RemoveGraphSequenceNumber( self._Canonicalize(self._ToString(engine.name))) self.assertIn(engine_name, expected_engines) name_to_engines_map[engine_name] = engine # The input nodes shall not have the conflicting annotation (no # annotation or the same annotation) with the maximum batch size # annotation. 
If the engine has maximum batch size annotation as the # non-default maximum batch size, then at least one input node shall # have the same annotation to be the source. self.assertIn("max_batch_size", node.attr) engine_max_batch_size = node.attr["max_batch_size"].i self.assertIsInstance(engine_max_batch_size, int) actual_max_batch_sizes.append(engine_max_batch_size) seg_func = node.attr["segment_func"].func self.assertIsNotNone(seg_func) self.assertIn(seg_func.name, new_name_to_func_map) seg_func_def = new_name_to_func_map[seg_func.name] logging.info("Segment function name: %s. Including %d nodes.", seg_func.name, len(seg_func_def.node_def)) node_max_batch_size_all_none = True # Use the native segment to search for replaced nodes for alternative_node in seg_func_def.node_def: node_name = self._Canonicalize(self._ToString(alternative_node.name)) if node_name not in old_name_to_node_map: continue original_node = old_name_to_node_map[node_name] node_max_batch_size = None if "_tftrt_op_max_batch_size" in original_node.attr: node_max_batch_size = original_node.attr[ "_tftrt_op_max_batch_size"].i elif (original_node.op != "Const" and alternative_node.op != "Const" and "_output_shapes" in original_node.attr): node_max_batch_size = _DetectStaticBatchSize(original_node) logging.info( "'{%s}(%s)'s max batch size annotation is %s. 
" "'{%s}'s max batch size is %s.", node_name, original_node.op, str(node_max_batch_size), engine_name, str(engine_max_batch_size)) node_max_batch_size_all_none &= node_max_batch_size is None self.assertTrue(engine_max_batch_size == node_max_batch_size or node_max_batch_size is None) logging.info("'{%s}'s max batch size is %d.", engine_name, engine_max_batch_size) self.assertTrue(default_max_batch_size is None or engine_max_batch_size == default_max_batch_size or not node_max_batch_size_all_none) self.assertCountEqual(expected_engines, tuple(name_to_engines_map.keys())) if expected_max_batch_sizes is not None: self.assertCountEqual(expected_max_batch_sizes, actual_max_batch_sizes) def _GetGraphDef(self, run_params, gdef_or_saved_model_dir): if isinstance(gdef_or_saved_model_dir, str): if run_params.is_v2: root = load.load(gdef_or_saved_model_dir) func = root.signatures[ signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] gdef = func.graph.as_graph_def() # Manually unref the loaded saved model and force GC to destroy the TRT # engine cache after load(). There is currently a reference cycle in 2.0 # which prevents auto deletion of the resource. # TODO(laigd): fix this. 
del func del root gc.collect() return gdef return saved_model_utils.get_meta_graph_def( gdef_or_saved_model_dir, tag_constants.SERVING).graph_def assert isinstance(gdef_or_saved_model_dir, graph_pb2.GraphDef), ( f"Incorrect `gdef_or_saved_model_dir` type, expected " f"`graph_pb2.GraphDef`, but got: {type(gdef_or_saved_model_dir)}.") return gdef_or_saved_model_dir def _VerifyGraphDefV1(self, run_params, original_gdef, gdef_to_verify, graph_state): expected_engines = self.ExpectedEnginesToBuild(run_params) num_engines = 0 functions = [f.signature.name for f in gdef_to_verify.library.function] for node in gdef_to_verify.node: if node.op == "TRTEngineOp": logging.info("Found TRTEngineOp: " + node.name) num_engines += 1 segment_funcdef_name = node.attr["segment_func"].func.name function_name = node.name + "_native_segment" is_dynamic_engine = not node.attr["static_engine"].b self.assertNotEmpty(segment_funcdef_name, node.name) self.assertIn(function_name, functions) if (not IsQuantizationWithCalibration(run_params) and not is_dynamic_engine): self.assertTrue(len(node.attr["serialized_segment"].s), node.name) self.assertIn( self._RemoveGraphSequenceNumber(node.name), expected_engines) if IsQuantizationWithoutCalibration(run_params): # TODO(bixia): Refine this check by inspecting nodes in the engine. 
if self._ToBytes("INT8") != node.attr["precision_mode"].s: self.assertEqual( self._ToBytes("FP16"), node.attr["precision_mode"].s, node.name) else: self.assertEqual( self._ToBytes(run_params.precision_mode), node.attr["precision_mode"].s, node.name) self.assertEqual(run_params.dynamic_engine, is_dynamic_engine, node.name) self.assertEqual(node.attr["use_calibration"].b, run_params.use_calibration, node.name) has_calibration_data = len(node.attr["calibration_data"].s) if (IsQuantizationWithCalibration(run_params) and graph_state == GraphState.INFERENCE): self.assertTrue(has_calibration_data, node.name) else: self.assertFalse(has_calibration_data, node.name) if graph_state == GraphState.ORIGINAL: self.assertEqual(0, num_engines) else: self.assertEqual(num_engines, len(expected_engines)) expected_connections = self.ExpectedConnections(run_params) if expected_connections: self._VerifyConnections(expected_engines, expected_connections, original_gdef, gdef_to_verify) self._VerifyMaxBatchSizeAnnotations( expected_engines=expected_engines, original_gdef=original_gdef, converted_gdef=gdef_to_verify, expected_max_batch_sizes=self.ExpectedMaxBatchSizes(run_params), default_max_batch_size=self.GetMaxBatchSize(run_params)) def _VerifyGraphDefV2(self, run_params, original_gdef, gdef_to_verify, graph_state): if graph_state == GraphState.ORIGINAL: return expected_engines = self.ExpectedEnginesToBuild(run_params) all_op_names = [node.name for node in gdef_to_verify.node] trt_op_names = [] for func in gdef_to_verify.library.function: if not re.search(r"TRTEngineOp_\d{3,}_\d{3,}_native_segment", func.signature.name): for node in func.node_def: all_op_names.append(node.name) if node.op == "TRTEngineOp": trt_op_names.append(node.name) if run_params.dynamic_shape: self.assertEqual( self._ToString(node.attr["profile_strategy"].s).lower(), self._profile_strategy.lower()) all_op_names = self._Canonicalize(all_op_names) trt_op_names = self._RemoveGraphSequenceNumber( 
self._Canonicalize(trt_op_names)) if isinstance(expected_engines, dict): # For simplicity we don't verify the connections inside the engine in # 2.0, but we still make sure that the converted ops are gone from the # graph. unexpected_names = set(nest.flatten(expected_engines.values())) self.assertEmpty( [name for name in unexpected_names if name in all_op_names]) expected_engines = set(expected_engines.keys()) self.assertEqual(set(expected_engines), trt_op_names) def _VerifyGraphDef(self, run_params, original_gdef_or_saved_model_dir, gdef_or_saved_model_dir_to_verify, graph_state): original_gdef = self._GetGraphDef(run_params, original_gdef_or_saved_model_dir) gdef_to_verify = self._GetGraphDef(run_params, gdef_or_saved_model_dir_to_verify) self._WriteGraph(run_params, gdef_to_verify, graph_state) if run_params.is_v2: self._VerifyGraphDefV2(run_params, original_gdef, gdef_to_verify, graph_state) else: self._VerifyGraphDefV1(run_params, original_gdef, gdef_to_verify, graph_state) def _GetSavedModelDir(self, run_params, graph_state): test_tmpdir = os.getenv("TRT_TEST_TMPDIR") if test_tmpdir: saved_model_dir = os.path.join( test_tmpdir, self.__class__.__name__ + "_" + run_params.test_name + "_" + self._GetGraphStateLabel(graph_state)) try: # For TF 1.x we need to make sure the output directory doesn't exist # before exporting the saved model. 
shutil.rmtree(saved_model_dir) except OSError as e: if e.errno != errno.ENOENT: raise return saved_model_dir return tempfile.mkdtemp(dir=self.get_temp_dir()) def _MakeSavedModelV1(self, run_params): """Write the saved model as an input for testing.""" params = self._GetParamsCached() g = ops.Graph() with g.as_default(): inputs = [] for spec in params.input_specs: inp = array_ops.placeholder( dtype=spec.dtype, shape=spec.shape, name=spec.name) inputs.append(inp) outputs = params.graph_fn(*inputs) if not isinstance(outputs, list) and not isinstance(outputs, tuple): outputs = [outputs] signature_def = signature_def_utils.build_signature_def( inputs={inp.op.name: utils.build_tensor_info(inp) for inp in inputs}, outputs={out.op.name: utils.build_tensor_info(out) for out in outputs}, method_name=signature_constants.PREDICT_METHOD_NAME) saved_model_dir = self._GetSavedModelDir(run_params, GraphState.ORIGINAL) saved_model_builder = builder.SavedModelBuilder(saved_model_dir) with self.session( graph=g, config=self._GetConfigProto(run_params, GraphState.ORIGINAL)) as sess: saved_model_builder.add_meta_graph_and_variables( sess, [tag_constants.SERVING], signature_def_map={ signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_def }) saved_model_builder.save() return saved_model_dir def _MakeSavedModelV2(self, run_params): params = self._GetParamsCached() root = autotrackable.AutoTrackable() root.run = def_function.function( params.graph_fn, input_signature=params.input_specs) saved_model_dir = self._GetSavedModelDir(run_params, GraphState.ORIGINAL) logging.info("Saving input SavedModel to %s", saved_model_dir) save.save(root, saved_model_dir, {signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: root.run}) return saved_model_dir def _MakeSavedModel(self, run_params): if run_params.is_v2: return self._MakeSavedModelV2(run_params) return self._MakeSavedModelV1(run_params) def RunTest(self, run_params): with disable_tensorfloat32(): with 
trace.Trace(run_params.test_name): should_run, reason_for_skipping = self.ShouldRunTest(run_params) if not should_run: return self.skipTest(reason_for_skipping) saved_model_dir = self._MakeSavedModel(run_params) np.random.seed(12345) # Fix the seed so the test is deterministic. inputs_data = [] input_specs = self._GetParamsCached().input_specs for dim_list in self._GetParamsCached().input_dims: assert len(input_specs) == len(dim_list), ( f"Inconsistent input_specs and dim_list: len({input_specs}) != " f"len({dim_list}).") current_input_data = [] for spec, np_shape in zip(input_specs, dim_list): np_dtype = spec.dtype.as_numpy_dtype() if not np.issubdtype(np_dtype, np.bool_): # Multiply the input by some constant to avoid all zeros input for # integer types. scale = 10.0 if np.issubdtype(np_dtype, np.integer) else 1.0 data = (scale * np.random.random_sample(np_shape)).astype(np_dtype) else: data = np.random.choice(a=[False, True], size=np_shape) if run_params.is_v2: with ops.device("/GPU:0"): data = ops.convert_to_tensor(data) current_input_data.append(data) inputs_data.append(current_input_data) # Verify the original graph. self._VerifyGraphDef(run_params, saved_model_dir, saved_model_dir, GraphState.ORIGINAL) # Run the original graph without TensorRT to get the reference result. logging.info("Running original graph w/o TensorRT\n") ref_result = self._RunGraph( run_params, saved_model_dir, inputs_data, GraphState.ORIGINAL, num_runs=1) # Run calibration if necessary. 
if IsQuantizationWithCalibration(run_params): infer_saved_model_dir = self._GetCalibratedInferGraph( run_params, saved_model_dir, inputs_data) self._VerifyGraphDef(run_params, saved_model_dir, infer_saved_model_dir, GraphState.INFERENCE) elif not run_params.convert_online: infer_saved_model_dir = self._GetInferGraph(run_params, saved_model_dir) self._VerifyGraphDef(run_params, saved_model_dir, infer_saved_model_dir, GraphState.INFERENCE) else: infer_saved_model_dir = saved_model_dir # Run the inference graph, either using the converted graph or the # original graph with convert_online == True. logging.info("Running final inference graph\n") result = self._RunGraph(run_params, infer_saved_model_dir, inputs_data, GraphState.INFERENCE) self.assertAllClose( ref_result, result, atol=self.ExpectedAbsoluteTolerance(run_params), rtol=self.ExpectedRelativeTolerance(run_params)) def testIdempotence(self): # Test that applying tensorrt optimizer or offline conversion tools multiple # times to the same graph will result in same graph. # # TODO(aaroey): implement this. pass def _GetTestConfigsV1(): """Returns the config combinations to run the test.""" convert_online, convert_offline = True, False dynamic_engine, static_engine = True, False use_calibration, no_calibration = True, False implicit_batch = False # Add all possible test cases and let the derived test class to decide # whether to run specific ones with ShouldRunTest(). # # Note: INT8 without calibration behaves like FP32/FP16. opts = list( itertools.product([FP32, FP16, INT8], [convert_online, convert_offline], [dynamic_engine, static_engine], [no_calibration], [implicit_batch])) # We always run calibration with offline tool. # TODO(aaroey): static calibration engine is not supported yet. 
opts.append( (INT8, convert_offline, dynamic_engine, use_calibration, implicit_batch)) return opts def _GetTestConfigsV2(): """Returns the config combinations to run the test.""" convert_offline = False # TODO(laigd): add support for static_engine. dynamic_engine = True # TODO(laigd): add support for calibration. no_calibration = False use_calibration = True # Add all possible test cases and let the derived test class to decide # whether to run specific ones with ShouldRunTest(). # # Note: # - In TF2.0 the conversion always produce dynamic engine, and we don't test # the offline mode here. # - For simplicity we don't test online conversion which requires setting the # Grappler config in default eager context. # - INT8 without calibration behaves like FP32/FP16. opts = list( itertools.product([FP32, FP16], [convert_offline], [dynamic_engine], [no_calibration], [False, True])) # We always run calibration with offline tool. opts.append((INT8, convert_offline, dynamic_engine, use_calibration, False)) opts.append((INT8, convert_offline, dynamic_engine, use_calibration, True)) return opts def _GetTest(run_params): """Gets a single test method based on the parameters.""" def _Test(self): logging.info(f"Running test `{run_params.test_name}` with parameters: " f"convert_online={run_params.convert_online}, " f"precision_mode={run_params.precision_mode}, " f"dynamic_engine={run_params.dynamic_engine}, " f"dynamic_shape={run_params.dynamic_shape}") self.RunTest(run_params) return _Test def _AddTestsFor(test_class, is_v2): """Adds test methods to TfTrtIntegrationTestBase for specific TF version.""" opts = _GetTestConfigsV2() if is_v2 else _GetTestConfigsV1() for (precision_mode, convert_online, dynamic_engine, use_calibration, dynamic_shape) in opts: conversion = "OnlineConversion" if convert_online else "OfflineConversion" engine_type = "DynamicEngine" if dynamic_engine else "StaticEngine" calibration_type = "UseCalibration" if use_calibration else "NoCalibration" 
dynamic_shape_type = "DynamicShape" if dynamic_shape else "ImplicitBatch" test_name = "%s_%s_%s_%s_%s_%s" % ("testTfTrtV2" if is_v2 else "testTfTrt", conversion, engine_type, precision_mode, calibration_type, dynamic_shape_type) run_params = RunParams( convert_online=convert_online, precision_mode=precision_mode, dynamic_engine=dynamic_engine, test_name=test_name, use_calibration=use_calibration, is_v2=is_v2, dynamic_shape=dynamic_shape) if is_v2: setattr(test_class, test_name, test_util.run_v2_only(_GetTest(run_params))) else: setattr(test_class, test_name, test_util.run_v1_only("", _GetTest(run_params))) def _AddTests(test_class): """Adds test methods to TfTrtIntegrationTestBase.""" _AddTestsFor(test_class, is_v2=False) _AddTestsFor(test_class, is_v2=True) if is_tensorrt_enabled(): os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "False" _AddTests(TfTrtIntegrationTestBase)
TfTrtIntegrationTestBase
python
eventlet__eventlet
eventlet/greenpool.py
{ "start": 7355, "end": 9322 }
class ____: """GreenPile is an abstraction representing a bunch of I/O-related tasks. Construct a GreenPile with an existing GreenPool object. The GreenPile will then use that pool's concurrency as it processes its jobs. There can be many GreenPiles associated with a single GreenPool. A GreenPile can also be constructed standalone, not associated with any GreenPool. To do this, construct it with an integer size parameter instead of a GreenPool. It is not advisable to iterate over a GreenPile in a different greenthread than the one which is calling spawn. The iterator will exit early in that situation. """ def __init__(self, size_or_pool=1000): if isinstance(size_or_pool, GreenPool): self.pool = size_or_pool else: self.pool = GreenPool(size_or_pool) self.waiters = queue.LightQueue() self.counter = 0 def spawn(self, func, *args, **kw): """Runs *func* in its own green thread, with the result available by iterating over the GreenPile object.""" self.counter += 1 try: gt = self.pool.spawn(func, *args, **kw) self.waiters.put(gt) except: self.counter -= 1 raise def __iter__(self): return self def next(self): """Wait for the next result, suspending the current greenthread until it is available. Raises StopIteration when there are no more results.""" if self.counter == 0: raise StopIteration() return self._next() __next__ = next def _next(self): try: return self.waiters.get().wait() finally: self.counter -= 1 # this is identical to GreenPile but it blocks on spawn if the results # aren't consumed, and it doesn't generate its own StopIteration exception, # instead relying on the spawning process to send one in when it's done
GreenPile
python
kamyu104__LeetCode-Solutions
Python/maximum-ice-cream-bars.py
{ "start": 33, "end": 357 }
class ____(object): def maxIceCream(self, costs, coins): """ :type costs: List[int] :type coins: int :rtype: int """ costs.sort() for i, c in enumerate(costs): coins -= c if coins < 0: return i return len(costs)
Solution
python
getsentry__sentry
src/sentry/similarity/features.py
{ "start": 1215, "end": 8436 }
class ____: def __init__( self, index, encoder, aliases, features, expected_extraction_errors, expected_encoding_errors, ): self.index = index self.encoder = encoder self.aliases = aliases self.features = features self.expected_extraction_errors = expected_extraction_errors self.expected_encoding_errors = expected_encoding_errors assert set(self.aliases) == set(self.features) def __get_scope(self, project) -> str: return f"{project.id}" def __get_key(self, group) -> str: return f"{group.id}" def extract(self, event): results = {} for label, strategy in self.features.items(): try: results[label] = strategy.extract(event) except Exception as error: log = ( logger.debug if isinstance(error, self.expected_extraction_errors) else logger.warning ) log( "Could not extract features from %r for %r due to error: %r", event, label, error, exc_info=True, ) return results def record(self, events): if not events: return [] scope: str | None = None key: str | None = None items = [] for event in events: if not event.group_id: continue for label, features in self.extract(event).items(): if scope is None: scope = self.__get_scope(event.project) else: assert ( self.__get_scope(event.project) == scope ), "all events must be associated with the same project" if key is None: key = self.__get_key(event.group) else: assert ( self.__get_key(event.group) == key ), "all events must be associated with the same group" try: features = [self.encoder.dumps(feature) for feature in features] except Exception as error: log = ( logger.debug if isinstance(error, self.expected_encoding_errors) else logger.warning ) log( "Could not encode features from %r for %r due to error: %r", event, label, error, exc_info=True, ) else: if features: items.append((self.aliases[label], features)) return self.index.record(scope, key, items, timestamp=int(event.datetime.timestamp())) def classify(self, events, limit=None, thresholds=None): if not events: return [] if thresholds is None: thresholds = {} scope: str | None 
= None labels = [] items = [] for event in events: for label, features in self.extract(event).items(): if scope is None: scope = self.__get_scope(event.project) else: assert ( self.__get_scope(event.project) == scope ), "all events must be associated with the same project" try: features = [self.encoder.dumps(feature) for feature in features] except Exception as error: log = ( logger.debug if isinstance(error, self.expected_encoding_errors) else logger.warning ) log( "Could not encode features from %r for %r due to error: %r", event, label, error, exc_info=True, ) else: if features: items.append((self.aliases[label], thresholds.get(label, 0), features)) labels.append(label) return [ (int(key), dict(zip(labels, scores))) for key, scores in self.index.classify( scope, items, limit=limit, timestamp=int(event.datetime.timestamp()), ) ] def compare(self, group, limit=None, thresholds=None): if thresholds is None: thresholds = {} features = list(self.features.keys()) items = [(self.aliases[label], thresholds.get(label, 0)) for label in features] return [ (int(key), dict(zip(features, scores))) for key, scores in self.index.compare( self.__get_scope(group.project), self.__get_key(group), items, limit=limit ) ] def merge(self, destination, sources, allow_unsafe=False): def add_index_aliases_to_key(key): return [(self.aliases[label], key) for label in self.features.keys()] # Collect all of the sources by the scope that they are contained # within so that we can make the most efficient queries possible and # reject queries that cross scopes if we haven't explicitly allowed # unsafe actions. 
scopes: dict[str, set[str]] = {} for source in sources: scopes.setdefault(self.__get_scope(source.project), set()).add(source) unsafe_scopes = set(scopes.keys()) - {self.__get_scope(destination.project)} if unsafe_scopes and not allow_unsafe: raise ValueError( "all groups must belong to same project if unsafe merges are not allowed" ) destination_scope = self.__get_scope(destination.project) destination_key = self.__get_key(destination) for source_scope, sources in scopes.items(): items = [] for source in sources: items.extend(add_index_aliases_to_key(self.__get_key(source))) if source_scope != destination_scope: imports = [ (alias, destination_key, data) for (alias, _), data in zip(items, self.index.export(source_scope, items)) ] self.index.delete(source_scope, items) self.index.import_(destination_scope, imports) else: self.index.merge(destination_scope, destination_key, items) def delete(self, group): key = self.__get_key(group) return self.index.delete( self.__get_scope(group.project), [(self.aliases[label], key) for label in self.features.keys()], ) def flush(self, project): return self.index.flush(self.__get_scope(project), list(self.aliases.values()))
FeatureSet
python
doocs__leetcode
solution/0100-0199/0140.Word Break II/Solution.py
{ "start": 607, "end": 1115 }
class ____: def wordBreak(self, s: str, wordDict: List[str]) -> List[str]: def dfs(s): if not s: return [[]] res = [] for i in range(1, len(s) + 1): if trie.search(s[:i]): for v in dfs(s[i:]): res.append([s[:i]] + v) return res trie = Trie() for w in wordDict: trie.insert(w) ans = dfs(s) return [' '.join(v) for v in ans]
Solution
python
Pylons__pyramid
src/pyramid/response.py
{ "start": 2813, "end": 6421 }
class ____: """Decorator activated via a :term:`scan` which treats the function being decorated as a :term:`response adapter` for the set of types or interfaces passed as ``*types_or_ifaces`` to the decorator constructor. For example, if you scan the following response adapter: .. code-block:: python from pyramid.response import Response from pyramid.response import response_adapter @response_adapter(int) def myadapter(i): return Response(status=i) You can then return an integer from your view callables, and it will be converted into a response with the integer as the status code. More than one type or interface can be passed as a constructor argument. The decorated response adapter will be called for each type or interface. .. code-block:: python import json from pyramid.response import Response from pyramid.response import response_adapter @response_adapter(dict, list) def myadapter(ob): return Response(json.dumps(ob)) This method will have no effect until a :term:`scan` is performed agains the package or module which contains it, ala: .. code-block:: python from pyramid.config import Configurator config = Configurator() config.scan('somepackage_containing_adapters') Two additional keyword arguments which will be passed to the :term:`venusian` ``attach`` function are ``_depth`` and ``_category``. ``_depth`` is provided for people who wish to reuse this class from another decorator. The default value is ``0`` and should be specified relative to the ``response_adapter`` invocation. It will be passed in to the :term:`venusian` ``attach`` function as the depth of the callstack when Venusian checks if the decorator is being used in a class or module context. It's not often used, but it can be useful in this circumstance. ``_category`` sets the decorator category name. It can be useful in combination with the ``category`` argument of ``scan`` to control which views should be processed. 
See the :py:func:`venusian.attach` function in Venusian for more information about the ``_depth`` and ``_category`` arguments. .. versionchanged:: 1.9.1 Added the ``_depth`` and ``_category`` arguments. """ venusian = venusian # for unit testing def __init__(self, *types_or_ifaces, **kwargs): self.types_or_ifaces = types_or_ifaces self.depth = kwargs.pop('_depth', 0) self.category = kwargs.pop('_category', 'pyramid') self.kwargs = kwargs def register(self, scanner, name, wrapped): config = scanner.config for type_or_iface in self.types_or_ifaces: config.add_response_adapter(wrapped, type_or_iface, **self.kwargs) def __call__(self, wrapped): self.venusian.attach( wrapped, self.register, category=self.category, depth=self.depth + 1, ) return wrapped def _get_response_factory(registry): """Obtain a :class: `pyramid.response.Response` using the `pyramid.interfaces.IResponseFactory`. """ response_factory = registry.queryUtility( IResponseFactory, default=lambda r: Response() ) return response_factory def _guess_type(path): content_type, content_encoding = mimetypes.guess_type(path, strict=False) if content_type is None: content_type = 'application/octet-stream' return content_type, content_encoding
response_adapter
python
django__django
django/contrib/gis/db/models/fields.py
{ "start": 12248, "end": 12603 }
class ____(Field): "Used as a return value from an extent aggregate" description = _("Extent Aggregate Field") def get_internal_type(self): return "ExtentField" def select_format(self, compiler, sql, params): select = compiler.connection.ops.select_extent return select % sql if select else sql, params
ExtentField
python
doocs__leetcode
lcci/17.16.The Masseuse/Solution.py
{ "start": 0, "end": 165 }
class ____: def massage(self, nums: List[int]) -> int: f = g = 0 for x in nums: f, g = g + x, max(f, g) return max(f, g)
Solution
python
gevent__gevent
src/gevent/tests/test__pywsgi.py
{ "start": 44756, "end": 45239 }
class ____(TestCase): # testing for this: # File "/home/denis/work/gevent/gevent/pywsgi.py", line 70, in _do_read # if length and length > self.content_length - self.position: # TypeError: unsupported operand type(s) for -: 'NoneType' and 'int' validator = None def application(self, environ, start_response): environ['wsgi.input'].read(5) start_response('200 OK', []) return [] def test(self): self.urlopen()
TestInputN
python
run-llama__llama_index
llama-index-core/llama_index/core/indices/query/query_transform/base.py
{ "start": 977, "end": 2390 }
class ____(PromptMixin, DispatcherSpanMixin): """ Base class for query transform. A query transform augments a raw query string with associated transformations to improve index querying. The query transformation is performed before the query is sent to the index. """ def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" # TODO: keep this for now since response synthesizers don't generally have sub-modules return {} @abstractmethod def _run(self, query_bundle: QueryBundle, metadata: Dict) -> QueryBundle: """Run query transform.""" def run( self, query_bundle_or_str: QueryType, metadata: Optional[Dict] = None, ) -> QueryBundle: """Run query transform.""" metadata = metadata or {} if isinstance(query_bundle_or_str, str): query_bundle = QueryBundle( query_str=query_bundle_or_str, custom_embedding_strs=[query_bundle_or_str], ) else: query_bundle = query_bundle_or_str return self._run(query_bundle, metadata=metadata) def __call__( self, query_bundle_or_str: QueryType, metadata: Optional[Dict] = None, ) -> QueryBundle: """Run query processor.""" return self.run(query_bundle_or_str, metadata=metadata)
BaseQueryTransform
python
scipy__scipy
scipy/signal/tests/test_spectral.py
{ "start": 33916, "end": 34476 }
class ____: def test_identical_input(self): x = np.random.randn(20) y = np.copy(x) # So `y is x` -> False f = np.linspace(0, 0.5, 6) C = np.ones(6) f1, C1 = coherence(x, y, nperseg=10) assert_allclose(f, f1) assert_allclose(C, C1) def test_phase_shifted_input(self): x = np.random.randn(20) y = -x f = np.linspace(0, 0.5, 6) C = np.ones(6) f1, C1 = coherence(x, y, nperseg=10) assert_allclose(f, f1) assert_allclose(C, C1)
TestCoherence
python
pytorch__pytorch
torch/fx/experimental/symbolic_shapes.py
{ "start": 70878, "end": 77084 }
class ____(Constraint): """ Represent and decide various kinds of equality constraints between input sources. A "source pair" is a pair of input sources for dynamic dimensions that are specified equal. We represent `source_pairs` in a union-find forest so that we can efficiently check whether two such sources are transitively equal. A "derived equality" relates an input source to an expression over a root. The root can be another input source, corresponding to some dynamic dimension, or a phantom symbol that does not directly represent any dynamic dimension. We represent `derived_equalities` involving input sources in a transitively-closed map so that we can efficiently check whether an input source is transitively equal to a given expression over another input source. (NOTE: In contrast, it is easy to decide whether an input source is transitively equal to a given expression over a phantom symbol; such expressions are already in canonical form and so the problem reduces to symbolic expression equality.) """ source_pairs: list[tuple[Source, Source]] derived_equalities: list[ tuple[Source, Union[Source, sympy.Symbol], Callable[[sympy.Expr], sympy.Expr]] ] phantom_symbols: list[sympy.Symbol] relaxed_sources: set[Source] _parents: dict[Source, Source] = field(init=False) _defs: dict[Source, sympy.Expr] = field(init=False) def __post_init__(self) -> None: """ Pre-processing to answer queries `is_equal` and `is_derived` below. 
Example: Suppose we are given: source_pairs [a = b, b = c] derived_equalities [d = c + 1, e = d - 1] We first construct a union find with source_pairs: _parents = {a: a, b: a, c: a} Then we compute canonical symbolic expressions, recursively applying derived_equalities until we bottom out: _defs = {d: c + 1, e: (c + 1) - 1 aka c} """ # self._parents is a map from input sources to input sources where, conceptually, # these are directed edges in a union-find forest _parents: dict[Source, Source] = {} object.__setattr__(self, "_parents", _parents) # self._defs is a map from input sources to "canonical" symbolic expressions, # i.e., unary expressions with symbols that corresponds to regular Dims (i.e., # not derived Dims) _defs: dict[Source, sympy.Expr] = {} object.__setattr__(self, "_defs", _defs) for source1, source2 in self.source_pairs: # preprocess into a union-find forest self._union(self._find(source1), self._find(source2)) for source, root, fn in self.derived_equalities: # preprocess into a transitively-closed map # NOTE(avik): we reuse the union-find forest for canonicalizing input sources if isinstance(root, (sympy.Symbol, sympy.Integer)): self._defs[self._find(source)] = fn(root) else: self._defs[self._find(source)] = fn(self._rewrite(root)) def _find(self, source: Source) -> Source: # chase edges to find the root of this equivalence class if source in self._parents: return self._find(self._parents[source]) else: return source def _union(self, root1: Source, root2: Source) -> None: # merge two equivalence classes by adding an edge from one root to the other if root1 != root2: self._parents[root1] = root2 def _rewrite(self, src: Source) -> sympy.Expr: # always represent the given source by the root of its equivalence class src = self._find(src) if src in self._defs: # simply look up the definition if it exists # NOTE(avik): This works because definitions are always transitively-closed; # otherwise we would have to do recursive rewriting. 
return self._defs[src] else: # otherwise, create a symbol representing the source return sympy.Symbol(src.name()) def is_equal(self, source1: Source, source2: Source) -> bool: return ( # check whether source1 and source2 have the same root # or are relaxed (src1 := self._find(source1)) in self.relaxed_sources or (src2 := self._find(source2)) in self.relaxed_sources or src1 == src2 # check whether source1 is derived equal to source2 or self.is_derived(source1, source2, lambda x: x) ) def is_derived( self, src: Source, symbol_src: Source, fn: Callable[[sympy.Expr], sympy.Expr] ) -> bool: # check whether both src and symbol_src have the same definition return self._rewrite(src) == fn(self._rewrite(symbol_src)) def _assert_symbol_context(symbolic_context: object) -> TypeGuard[SymbolicContext]: assert isinstance(symbolic_context, SymbolicContext), ( "Invalid symbolic_context object" ) assert type(symbolic_context) is not SymbolicContext, ( "Illegal usage of symbolic_context ABC" ) return True def _is_supported_equivalence(expr: sympy.Expr) -> bool: # Currently supported Dim ops are linear expressions with integer coefficients. # So check that expr only contains +, *, ints, and a single occurrence of a symbol. # (See also documentation of dynamic_shapes._DerivedDim.) if isinstance(expr, (sympy.Add, sympy.Mul)): if len(expr.args) > 2: return False lhs, rhs = expr.args return (_is_supported_equivalence(lhs) and isinstance(rhs, sympy.Integer)) or ( isinstance(lhs, sympy.Integer) and _is_supported_equivalence(rhs) ) return isinstance(expr, sympy.Symbol) def _has_uninterpretable_sympy_function(expr: sympy.Basic) -> bool: """ Add functions that our sympy interpreter can't reify into FX nodes """ return expr.has( torch.utils._sympy.functions.ToFloat, torch.utils._sympy.functions.TruncToInt, torch.utils._sympy.functions.CeilToInt, ) @dataclass(frozen=True)
EqualityConstraint
python
django__django
tests/model_forms/tests.py
{ "start": 3805, "end": 3956 }
class ____(forms.ModelForm): class Meta: model = CustomFieldForExclusionModel fields = ["name", "markup"]
CustomFieldForExclusionForm
python
allegroai__clearml
clearml/backend_api/services/v2_20/projects.py
{ "start": 152577, "end": 154445 }
class ____(Response): """ Response of projects.update endpoint. :param updated: Number of projects updated (0 or 1) :type updated: int :param fields: Updated fields names and values :type fields: dict """ _service = "projects" _action = "update" _version = "2.20" _schema = { "definitions": {}, "properties": { "fields": { "additionalProperties": True, "description": "Updated fields names and values", "type": ["object", "null"], }, "updated": { "description": "Number of projects updated (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, }, "type": "object", } def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None: super(UpdateResponse, self).__init__(**kwargs) self.updated = updated self.fields = fields @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value @schema_property("fields") def fields(self) -> Optional[dict]: return self._property_fields @fields.setter def fields(self, value: Optional[dict]) -> None: if value is None: self._property_fields = None return self.assert_isinstance(value, "fields", (dict,)) self._property_fields = value
UpdateResponse
python
numpy__numpy
numpy/_core/tests/test_overrides.py
{ "start": 10724, "end": 16015 }
class ____: def test_one_arg(self): MyArray, implements = _new_duck_type_and_implements() @implements(dispatched_one_arg) def _(array): return 'myarray' assert_equal(dispatched_one_arg(1), 'original') assert_equal(dispatched_one_arg(MyArray()), 'myarray') def test_optional_args(self): MyArray, implements = _new_duck_type_and_implements() @array_function_dispatch(lambda array, option=None: (array,)) def func_with_option(array, option='default'): return option @implements(func_with_option) def my_array_func_with_option(array, new_option='myarray'): return new_option # we don't need to implement every option on __array_function__ # implementations assert_equal(func_with_option(1), 'default') assert_equal(func_with_option(1, option='extra'), 'extra') assert_equal(func_with_option(MyArray()), 'myarray') with assert_raises(TypeError): func_with_option(MyArray(), option='extra') # but new options on implementations can't be used result = my_array_func_with_option(MyArray(), new_option='yes') assert_equal(result, 'yes') with assert_raises(TypeError): func_with_option(MyArray(), new_option='no') def test_not_implemented(self): MyArray, implements = _new_duck_type_and_implements() @array_function_dispatch(lambda array: (array,), module='my') def func(array): return array array = np.array(1) assert_(func(array) is array) assert_equal(func.__module__, 'my') with assert_raises_regex( TypeError, "no implementation found for 'my.func'"): func(MyArray()) @pytest.mark.parametrize("name", ["concatenate", "mean", "asarray"]) def test_signature_error_message_simple(self, name): func = getattr(np, name) try: # all of these functions need an argument: func() except TypeError as e: exc = e assert exc.args[0].startswith(f"{name}()") def test_signature_error_message(self): # The lambda function will be named "<lambda>", but the TypeError # should show the name as "func" def _dispatcher(): return () @array_function_dispatch(_dispatcher) def func(): pass try: func._implementation(bad_arg=3) 
except TypeError as e: expected_exception = e try: func(bad_arg=3) raise AssertionError("must fail") except TypeError as exc: if exc.args[0].startswith("_dispatcher"): # We replace the qualname currently, but it used `__name__` # (relevant functions have the same name and qualname anyway) pytest.skip("Python version is not using __qualname__ for " "TypeError formatting.") assert exc.args == expected_exception.args @pytest.mark.parametrize("value", [234, "this func is not replaced"]) def test_dispatcher_error(self, value): # If the dispatcher raises an error, we must not attempt to mutate it error = TypeError(value) def dispatcher(): raise error @array_function_dispatch(dispatcher) def func(): return 3 try: func() raise AssertionError("must fail") except TypeError as exc: assert exc is error # unmodified exception def test_properties(self): # Check that str and repr are sensible func = dispatched_two_arg assert str(func) == str(func._implementation) repr_no_id = repr(func).split("at ")[0] repr_no_id_impl = repr(func._implementation).split("at ")[0] assert repr_no_id == repr_no_id_impl @pytest.mark.parametrize("func", [ lambda x, y: 0, # no like argument lambda like=None: 0, # not keyword only lambda *, like=None, a=3: 0, # not last (not that it matters) ]) def test_bad_like_sig(self, func): # We sanity check the signature, and these should fail. 
with pytest.raises(RuntimeError): array_function_dispatch()(func) def test_bad_like_passing(self): # Cover internal sanity check for passing like as first positional arg def func(*, like=None): pass func_with_like = array_function_dispatch()(func) with pytest.raises(TypeError): func_with_like() with pytest.raises(TypeError): func_with_like(like=234) def test_too_many_args(self): # Mainly a unit-test to increase coverage objs = [] for i in range(80): class MyArr: def __array_function__(self, *args, **kwargs): return NotImplemented objs.append(MyArr()) def _dispatch(*args): return args @array_function_dispatch(_dispatch) def func(*args): pass with pytest.raises(TypeError, match="maximum number"): func(*objs)
TestArrayFunctionImplementation
python
ZoranPandovski__al-go-rithms
data_structures/Linked_list/Python/merge_two_sorted_lists.py
{ "start": 720, "end": 835 }
class ____: def __init__(self, val=0, next=None): self.val = val self.next = next
ListNode
python
dagster-io__dagster
python_modules/libraries/dagster-databricks/dagster_databricks/components/databricks_asset_bundle/configs.py
{ "start": 7860, "end": 9535 }
class ____(DatabricksBaseTask[jobs.SparkPythonTask]): @property def task_type(self) -> str: return "spark_python" @property def task_config_metadata(self) -> Mapping[str, Any]: task_config_metadata = {} python_config = self.task_config["spark_python_task"] task_config_metadata["python_file"] = python_config["python_file"] task_config_metadata["parameters"] = self.task_parameters return task_config_metadata @classmethod def from_job_task_config( cls, job_task_config: Mapping[str, Any] ) -> "DatabricksSparkPythonTask": spark_python_task = job_task_config["spark_python_task"] task_config = {"spark_python_task": spark_python_task} # Spark Python tasks use parameters differently task_parameters = spark_python_task.get("parameters", []) return cls( task_key=job_task_config["task_key"], task_config=task_config, task_parameters=task_parameters, depends_on=parse_depends_on(job_task_config.get("depends_on", [])), job_name=job_task_config["job_name"], libraries=job_task_config.get("libraries", []), ) @property def needs_cluster(self) -> bool: return True @property def submit_task_key(self) -> str: return "spark_python_task" def to_databricks_sdk_task(self) -> jobs.SparkPythonTask: python_config = self.task_config["spark_python_task"] return jobs.SparkPythonTask( python_file=python_config["python_file"], parameters=check.is_list(self.task_parameters) ) @record
DatabricksSparkPythonTask
python
falconry__falcon
tests/test_hello.py
{ "start": 203, "end": 500 }
class ____: def __init__(self, file_like, block_size=8192): self.file_like = file_like self.block_size = block_size def __getitem__(self, key): data = self.file_like.read(self.block_size) if data: return data raise IndexError
FileWrapper
python
pypa__virtualenv
src/virtualenv/activation/powershell/__init__.py
{ "start": 106, "end": 823 }
class ____(ViaTemplateActivator): def templates(self): yield "activate.ps1" @staticmethod def quote(string): """ This should satisfy PowerShell quoting rules [1], unless the quoted string is passed directly to Windows native commands [2]. [1]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_quoting_rules [2]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_parsing#passing-arguments-that-contain-quote-characters """ # noqa: D205 string = string.replace("'", "''") return f"'{string}'" __all__ = [ "PowerShellActivator", ]
PowerShellActivator
python
doocs__leetcode
solution/3500-3599/3555.Smallest Subarray to Sort in Every Sliding Window/Solution.py
{ "start": 0, "end": 602 }
class ____: def minSubarraySort(self, nums: List[int], k: int) -> List[int]: def f(i: int, j: int) -> int: mi, mx = inf, -inf l = r = -1 for k in range(i, j + 1): if mx > nums[k]: r = k else: mx = nums[k] p = j - k + i if mi < nums[p]: l = p else: mi = nums[p] return 0 if r == -1 else r - l + 1 n = len(nums) return [f(i, i + k - 1) for i in range(n - k + 1)]
Solution
python
getsentry__sentry
src/sentry/sentry_apps/services/app/model.py
{ "start": 2109, "end": 2346 }
class ____(RpcModel): """ A `SentryAppService` (a notification service) wrapped up and serializable via the rpc interface. """ title: str = "" slug: str = "" service_type: str = "sentry_app"
RpcSentryAppService
python
getsentry__sentry
src/sentry/api/endpoints/project_servicehook_stats.py
{ "start": 509, "end": 1511 }
class ____(ProjectEndpoint, StatsMixin): owner = ApiOwner.INTEGRATIONS publish_status = { "GET": ApiPublishStatus.UNKNOWN, } def get(self, request: Request, project, hook_id) -> Response: try: hook = ServiceHook.objects.get(project_id=project.id, guid=hook_id) except ServiceHook.DoesNotExist: raise ResourceDoesNotExist stat_args = self._parse_args(request) stats: dict[int, dict[str, int]] = {} for model, name in ((TSDBModel.servicehook_fired, "total"),): result = tsdb.backend.get_range( model=model, keys=[hook.id], **stat_args, tenant_ids={"organization_id": project.organization_id}, )[hook.id] for ts, count in result: stats.setdefault(int(ts), {})[name] = count return self.respond([{"ts": ts, "total": data["total"]} for ts, data in stats.items()])
ProjectServiceHookStatsEndpoint
python
readthedocs__readthedocs.org
readthedocs/rtd_tests/tests/test_projects_tasks.py
{ "start": 530, "end": 1938 }
class ____(TestCase): def setUp(self): self.project = get(Project) self.internal_version = get(Version, project=self.project) self.external_version = get(Version, project=self.project, type=EXTERNAL) self.external_build = get( Build, project=self.project, version=self.external_version ) self.internal_build = get( Build, project=self.project, version=self.internal_version ) @patch("readthedocs.projects.tasks.utils.send_build_status") def test_send_external_build_status_with_external_version(self, send_build_status): send_external_build_status( self.external_version.type, self.external_build.id, self.external_build.commit, BUILD_STATUS_SUCCESS, ) send_build_status.delay.assert_called_once_with( self.external_build.id, self.external_build.commit, BUILD_STATUS_SUCCESS, ) @patch("readthedocs.projects.tasks.utils.send_build_status") def test_send_external_build_status_with_internal_version(self, send_build_status): send_external_build_status( self.internal_version.type, self.internal_build.id, self.external_build.commit, BUILD_STATUS_SUCCESS, ) send_build_status.delay.assert_not_called()
SendBuildStatusTests
python
huggingface__transformers
src/transformers/models/perception_lm/modeling_perception_lm.py
{ "start": 7209, "end": 14457 }
class ____(PerceptionLMPreTrainedModel): _checkpoint_conversion_mapping = {} def __init__(self, config: PerceptionLMConfig): super().__init__(config) self.vision_tower = AutoModel.from_config(config.vision_config) self.multi_modal_projector = PerceptionLMMultiModalProjector(config) self.language_model = AutoModel.from_config(config.text_config) self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def get_image_features( self, pixel_values: torch.FloatTensor, **kwargs, ): """ Obtains image last hidden states from the vision tower and apply multimodal projection. Args: pixel_values (`torch.FloatTensor]` of shape `(batch_size, num_tiles, channels, height, width)`) The tensors corresponding to the input images. Returns: image_features (`torch.Tensor`): Image feature tensor of shape `(num_tiles, num_patches, embed_dim)`). """ image_outputs = self.vision_tower(pixel_values.flatten(0, 1)) image_outputs = image_outputs.last_hidden_state if self.config.vision_use_cls_token: image_outputs = image_outputs[:, 1:, :] image_features = self.multi_modal_projector(image_outputs) return image_features def get_placeholder_mask( self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: Optional[torch.FloatTensor] = None, video_features: Optional[torch.FloatTensor] = None, ): """ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is equal to the length of multimodal features. If the lengths are different, an error is raised. 
""" if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) special_video_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_video_mask = special_video_mask.all(-1) else: special_image_mask = input_ids == self.config.image_token_id special_video_mask = input_ids == self.config.video_token_id n_image_tokens = special_image_mask.sum() special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel(): raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.size()[:-1].numel()}" ) n_video_tokens = special_video_mask.sum() special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel(): raise ValueError( f"Videos features and image tokens do not match: tokens: {n_video_tokens}, features {video_features.size()[:-1].numel()}" ) return special_image_mask, special_video_mask @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 
0, **lm_kwargs, ) -> Union[tuple, PerceptionLMModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if (pixel_values is not None or pixel_values_videos is not None) and inputs_embeds is not None: raise ValueError( "You cannot specify both (pixel_values or pixel_values_videos) and inputs_embeds at the same time, and must specify either one" ) if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) image_features = None if pixel_values is not None: image_features = self.get_image_features(pixel_values=pixel_values) image_features = image_features.to(inputs_embeds.device, dtype=inputs_embeds.dtype) special_image_mask, _ = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_features ) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) video_features = None if pixel_values_videos is not None: video_features = self.get_image_features(pixel_values=pixel_values_videos) video_features = video_features.to(inputs_embeds.device, dtype=inputs_embeds.dtype) _, special_video_mask = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, video_features=video_features ) inputs_embeds = inputs_embeds.masked_scatter(special_video_mask, video_features) outputs = self.language_model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, logits_to_keep=logits_to_keep, **lm_kwargs, ) return PerceptionLMModelOutputWithPast( 
last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, past_key_values=outputs.past_key_values, attentions=outputs.attentions, image_hidden_states=image_features if pixel_values is not None else None, video_hidden_states=(video_features if pixel_values_videos is not None else None), ) @auto_docstring
PerceptionLMModel
python
kamyu104__LeetCode-Solutions
Python/zigzag-iterator.py
{ "start": 50, "end": 602 }
class ____(object): def __init__(self, v1, v2): """ Initialize your q structure here. :type v1: List[int] :type v2: List[int] """ self.q = collections.deque([(len(v), iter(v)) for v in (v1, v2) if v]) def next(self): """ :rtype: int """ len, iter = self.q.popleft() if len > 1: self.q.append((len-1, iter)) return next(iter) def hasNext(self): """ :rtype: bool """ return bool(self.q)
ZigzagIterator
python
Textualize__textual
docs/examples/styles/box_sizing.py
{ "start": 65, "end": 336 }
class ____(App): CSS_PATH = "box_sizing.tcss" def compose(self): yield Static("I'm using border-box!", id="static1") yield Static("I'm using content-box!", id="static2") if __name__ == "__main__": app = BoxSizingApp() app.run()
BoxSizingApp
python
facebookresearch__faiss
tests/test_binary_io.py
{ "start": 3029, "end": 3911 }
class ____(unittest.TestCase): def __init__(self, *args, **kwargs): unittest.TestCase.__init__(self, *args, **kwargs) d = 32 nt = 200 nb = 1500 nq = 500 (self.xt, self.xb, self.xq) = make_binary_dataset(d, nb, nt, nq) def test_binary_from_float(self): d = self.xq.shape[1] * 8 float_index = faiss.IndexHNSWFlat(d, 16) index = faiss.IndexBinaryFromFloat(float_index) index.add(self.xb) D, I = index.search(self.xq, 3) fd, tmpnam = tempfile.mkstemp() os.close(fd) try: faiss.write_index_binary(index, tmpnam) index2 = faiss.read_index_binary(tmpnam) D2, I2 = index2.search(self.xq, 3) assert (I2 == I).all() assert (D2 == D).all() finally: os.remove(tmpnam)
TestBinaryFromFloat
python
automl__auto-sklearn
autosklearn/estimators.py
{ "start": 63301, "end": 66805 }
class ____(AutoSklearnEstimator, RegressorMixin): """ This class implements the regression task. """ def fit(self, X, y, X_test=None, y_test=None, feat_type=None, dataset_name=None): """Fit *Auto-sklearn* to given training set (X, y). Fit both optimizes the machine learning models and builds an ensemble out of them. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The training input samples. y : array-like, shape = [n_samples] or [n_samples, n_targets] The regression target. X_test : array-like or sparse matrix of shape = [n_samples, n_features] Test data input samples. Will be used to save test predictions for all models. This allows to evaluate the performance of Auto-sklearn over time. y_test : array-like, shape = [n_samples] or [n_samples, n_targets] The regression target. Will be used to calculate the test error of all models. This allows to evaluate the performance of Auto-sklearn over time. feat_type : list, optional (default=None) List of str of `len(X.shape[1])` describing the attribute type. Possible types are `Categorical` and `Numerical`. `Categorical` attributes will be automatically One-Hot encoded. dataset_name : str, optional (default=None) Create nicer output. If None, a string will be determined by the md5 hash of the dataset. Returns ------- self """ # Before running anything else, first check that the # type of data is compatible with auto-sklearn. Legal target # types are: continuous, continuous-multioutput, and the special cases: # multiclass : because [3.0, 1.0, 5.0] is considered as multiclass # binary: because [1.0, 0.0] is considered multiclass # AutoSklearn does not handle sparse y for now y = convert_if_sparse(y) target_type = type_of_target(y) supported_types = [ "continuous", "binary", "multiclass", "continuous-multioutput", ] if target_type not in supported_types: raise ValueError( "Regression with data of type {} is " "not supported. Supported types are {}. 
" "You can find more information about scikit-learn " "data types in: " "https://scikit-learn.org/stable/modules/multiclass.html" "".format(target_type, supported_types) ) # Fit is supposed to be idempotent! # But not if we use share_mode. super().fit( X=X, y=y, X_test=X_test, y_test=y_test, feat_type=feat_type, dataset_name=dataset_name, ) return self def predict(self, X, batch_size=None, n_jobs=1): """Predict regression target for X. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] Returns ------- y : array of shape = [n_samples] or [n_samples, n_outputs] The predicted values. """ return super().predict(X, batch_size=batch_size, n_jobs=n_jobs) def _get_automl_class(self): return AutoMLRegressor
AutoSklearnRegressor
python
run-llama__llama_index
llama-index-core/tests/node_parser/test_node_parser.py
{ "start": 176, "end": 1379 }
class ____(NodeParser): def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any ) -> List[BaseNode]: return super()._parse_nodes(nodes, show_progress, **kwargs) def test__postprocess_parsed_nodes_include_metadata(): np = _TestNodeParser() nodes = [] for i in range(3): node = TextNode(text=f"I am Node number {i}") node.metadata = {"node_number": i} nodes.append(node) ret = np._postprocess_parsed_nodes(nodes, {}) for i, node in enumerate(ret): assert node.metadata == {"node_number": i} def test__postprocess_parsed_nodes_include_metadata_parent_doc(): np = _TestNodeParser() doc = Document(text="I am root") doc.metadata = {"document_type": "root"} nodes = [] for i in range(3): node = TextNode(text=f"I am Node number {i}") node.metadata = {"node_number": i} node.relationships = {NodeRelationship.SOURCE: doc.as_related_node_info()} nodes.append(node) ret = np._postprocess_parsed_nodes(nodes, {}) for i, node in enumerate(ret): assert node.metadata == {"node_number": i, "document_type": "root"}
_TestNodeParser
python
dagster-io__dagster
python_modules/libraries/dagster-gcp/dagster_gcp/bigquery/io_manager.py
{ "start": 6928, "end": 13192 }
class ____(ConfigurableIOManagerFactory): """Base class for an I/O manager definition that reads inputs from and writes outputs to BigQuery. Examples: .. code-block:: python from dagster_gcp import BigQueryIOManager from dagster_bigquery_pandas import BigQueryPandasTypeHandler from dagster import Definitions, EnvVar class MyBigQueryIOManager(BigQueryIOManager): @staticmethod def type_handlers() -> Sequence[DbTypeHandler]: return [BigQueryPandasTypeHandler()] @asset( key_prefix=["my_dataset"] # my_dataset will be used as the dataset in BigQuery ) def my_table() -> pd.DataFrame: # the name of the asset will be the table name ... defs = Definitions( assets=[my_table], resources={ "io_manager": MyBigQueryIOManager(project=EnvVar("GCP_PROJECT")) } ) You can set a default dataset to store the assets using the ``dataset`` configuration value of the BigQuery I/O Manager. This dataset will be used if no other dataset is specified directly on an asset or op. .. code-block:: python defs = Definitions( assets=[my_table], resources={ "io_manager": MyBigQueryIOManager(project=EnvVar("GCP_PROJECT"), dataset="my_dataset") } ) On individual assets, you an also specify the dataset where they should be stored using metadata or by adding a ``key_prefix`` to the asset key. If both ``key_prefix`` and metadata are defined, the metadata will take precedence. .. code-block:: python @asset( key_prefix=["my_dataset"] # will be used as the dataset in BigQuery ) def my_table() -> pd.DataFrame: ... @asset( # note that the key needs to be "schema" metadata={"schema": "my_dataset"} # will be used as the dataset in BigQuery ) def my_other_table() -> pd.DataFrame: ... For ops, the dataset can be specified by including a "schema" entry in output metadata. .. code-block:: python @op( out={"my_table": Out(metadata={"schema": "my_schema"})} ) def make_my_table() -> pd.DataFrame: ... If none of these is provided, the dataset will default to "public". 
To only use specific columns of a table as input to a downstream op or asset, add the metadata ``columns`` to the :py:class:`~dagster.In` or :py:class:`~dagster.AssetIn`. .. code-block:: python @asset( ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})} ) def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame: # my_table will just contain the data from column "a" ... If you cannot upload a file to your Dagster deployment, or otherwise cannot `authenticate with GCP <https://cloud.google.com/docs/authentication/provide-credentials-adc>`_ via a standard method, you can provide a service account key as the ``gcp_credentials`` configuration. Dagster will store this key in a temporary file and set ``GOOGLE_APPLICATION_CREDENTIALS`` to point to the file. After the run completes, the file will be deleted, and ``GOOGLE_APPLICATION_CREDENTIALS`` will be unset. The key must be base64 encoded to avoid issues with newlines in the keys. You can retrieve the base64 encoded with this shell command: ``cat $GOOGLE_APPLICATION_CREDENTIALS | base64`` """ project: str = Field(description="The GCP project to use.") dataset: Optional[str] = Field( default=None, description=( "Name of the BigQuery dataset to use. If not provided, the last prefix before" " the asset name will be used." ), ) location: Optional[str] = Field( default=None, description=( "The GCP location. Note: When using PySpark DataFrames, the default" " location of the project will be used. A custom location can be specified in" " your SparkSession configuration." ), ) gcp_credentials: Optional[str] = Field( default=None, description=( "GCP authentication credentials. If provided, a temporary file will be created" " with the credentials and ``GOOGLE_APPLICATION_CREDENTIALS`` will be set to the" " temporary file. To avoid issues with newlines in the keys, you must base64" " encode the key. 
You can retrieve the base64 encoded key with this shell" " command: ``cat $GOOGLE_AUTH_CREDENTIALS | base64``" ), ) temporary_gcs_bucket: Optional[str] = Field( default=None, description=( "When using PySpark DataFrames, optionally specify a temporary GCS bucket to" " store data. If not provided, data will be directly written to BigQuery." ), ) timeout: Optional[float] = Field( default=None, description=( "When using Pandas DataFrames, optionally specify a timeout for the BigQuery" " queries (loading and reading from tables)." ), ) @staticmethod @abstractmethod def type_handlers() -> Sequence[DbTypeHandler]: ... @staticmethod def default_load_type() -> Optional[type]: return None def create_io_manager(self, context) -> Generator: mgr = DbIOManager( db_client=BigQueryClient(), io_manager_name="BigQueryIOManager", database=self.project, schema=self.dataset, type_handlers=self.type_handlers(), default_load_type=self.default_load_type(), ) if self.gcp_credentials: with setup_gcp_creds(self.gcp_credentials): yield mgr else: yield mgr
BigQueryIOManager
python
apache__airflow
providers/elasticsearch/tests/unit/elasticsearch/log/test_es_response.py
{ "start": 1494, "end": 2547 }
class ____: def test_initialization(self): test_list = [1, 2, 3] attr_list = AttributeList(test_list) assert attr_list._l_ == test_list test_tuple = (1, 2, 3) attr_list = AttributeList(test_tuple) assert attr_list._l_ == list(test_tuple) def test_index_access(self): test_list = [1, {"key1": "value1"}, 3] attr_list = AttributeList(test_list) assert attr_list[0] == 1 assert isinstance(attr_list[1], AttributeDict) assert attr_list[1].key1 == "value1" assert attr_list[2] == 3 def test_iteration(self): test_list = [1, {"key": "value"}, 3] attr_list = AttributeList(test_list) for i, item in enumerate(attr_list): if isinstance(test_list[i], dict): assert isinstance(item, AttributeDict) else: assert item == test_list[i] def test_boolean_representation(self): assert AttributeList([1, 2, 3]) assert not (AttributeList([]))
TestAttributeList
python
allegroai__clearml
clearml/backend_api/services/v2_13/queues.py
{ "start": 27791, "end": 28958 }
class ____(Response): """ Response of queues.delete endpoint. :param deleted: Number of queues deleted (0 or 1) :type deleted: int """ _service = "queues" _action = "delete" _version = "2.13" _schema = { "definitions": {}, "properties": { "deleted": { "description": "Number of queues deleted (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], } }, "type": "object", } def __init__(self, deleted: Optional[int] = None, **kwargs: Any) -> None: super(DeleteResponse, self).__init__(**kwargs) self.deleted = deleted @schema_property("deleted") def deleted(self) -> Optional[int]: return self._property_deleted @deleted.setter def deleted(self, value: Optional[int]) -> None: if value is None: self._property_deleted = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "deleted", six.integer_types) self._property_deleted = value
DeleteResponse
python
Netflix__metaflow
metaflow/plugins/airflow/airflow_utils.py
{ "start": 23779, "end": 28905 }
class ____(object): def __init__(self, file_path=None, graph_structure=None, metadata=None, **kwargs): self._dag_instantiation_params = AirflowDAGArgs(**kwargs) self._file_path = file_path self._metadata = metadata tree = lambda: defaultdict(tree) self.states = tree() self.metaflow_params = None self.graph_structure = graph_structure def set_parameters(self, params): self.metaflow_params = params def add_state(self, state): self.states[state.name] = state def to_dict(self): return dict( metadata=self._metadata, graph_structure=self.graph_structure, states={s: v.to_dict() for s, v in self.states.items()}, dag_instantiation_params=self._dag_instantiation_params.serialize(), file_path=self._file_path, metaflow_params=self.metaflow_params, ) def to_json(self): return json.dumps(self.to_dict()) @classmethod def from_dict(cls, data_dict): re_cls = cls( file_path=data_dict["file_path"], graph_structure=data_dict["graph_structure"], metadata=data_dict["metadata"], ) re_cls._dag_instantiation_params = AirflowDAGArgs.deserialize( data_dict["dag_instantiation_params"] ) for sd in data_dict["states"].values(): re_cls.add_state( AirflowTask.from_dict(sd, flow_name=data_dict["metadata"]["flow_name"]) ) re_cls.set_parameters(data_dict["metaflow_params"]) return re_cls @classmethod def from_json(cls, json_string): data = json.loads(json_string) return cls.from_dict(data) def _construct_params(self): from airflow.models.param import Param if self.metaflow_params is None: return {} param_dict = {} for p in self.metaflow_params: name = p["name"] del p["name"] param_dict[name] = Param(**p) return param_dict def compile(self): from airflow import DAG # Airflow 2.0.0 cannot import this, so we have to do it this way. # `XComArg` is needed for dynamic task mapping and if the airflow installation is of the right # version (+2.3.0) then the class will be importable. 
XComArg = get_xcom_arg_class() _validate_minimum_airflow_version() if self._metadata["contains_foreach"]: _validate_dynamic_mapping_compatibility() # We need to verify if KubernetesPodOperator is of version > 4.2.0 to support foreachs / dynamic task mapping. # If the dag uses dynamic Task mapping then we throw an error since the `resources` argument in the `KubernetesPodOperator` # doesn't work for dynamic task mapping for `KubernetesPodOperator` version < 4.2.0. # For more context check this issue : https://github.com/apache/airflow/issues/24669 _check_foreach_compatible_kubernetes_provider() params_dict = self._construct_params() # DAG Params can be seen here : # https://airflow.apache.org/docs/apache-airflow/2.0.0/_api/airflow/models/dag/index.html#airflow.models.dag.DAG # Airflow 2.0.0 Allows setting Params. dag = DAG(params=params_dict, **self._dag_instantiation_params.arguments) dag.fileloc = self._file_path if self._file_path is not None else dag.fileloc def add_node(node, parents, dag): """ A recursive function to traverse the specialized graph_structure datastructure. """ if type(node) == str: task = self.states[node].to_task() if parents: for parent in parents: # Handle foreach nodes. if self.states[node].is_mapper_node: task = task.expand(mapper_arr=XComArg(parent)) parent >> task return [task] # Return Parent # this means a split from parent if type(node) == list: # this means branching since everything within the list is a list if all(isinstance(n, list) for n in node): curr_parents = parents parent_list = [] for node_list in node: last_parent = add_node(node_list, curr_parents, dag) parent_list.extend(last_parent) return parent_list else: # this means no branching and everything within the list is not a list and can be actual nodes. curr_parents = parents for node_x in node: curr_parents = add_node(node_x, curr_parents, dag) return curr_parents with dag: parent = None for node in self.graph_structure: parent = add_node(node, parent, dag) return dag
Workflow
python
kamyu104__LeetCode-Solutions
Python/fizz-buzz-multithreaded.py
{ "start": 48, "end": 2007 }
class ____(object): def __init__(self, n): self.__n = n self.__curr = 0 self.__cv = threading.Condition() # printFizz() outputs "fizz" def fizz(self, printFizz): """ :type printFizz: method :rtype: void """ for i in xrange(1, self.__n+1): with self.__cv: while self.__curr % 4 != 0: self.__cv.wait() self.__curr += 1 if i % 3 == 0 and i % 5 != 0: printFizz() self.__cv.notify_all() # printBuzz() outputs "buzz" def buzz(self, printBuzz): """ :type printBuzz: method :rtype: void """ for i in xrange(1, self.__n+1): with self.__cv: while self.__curr % 4 != 1: self.__cv.wait() self.__curr += 1 if i % 3 != 0 and i % 5 == 0: printBuzz() self.__cv.notify_all() # printFizzBuzz() outputs "fizzbuzz" def fizzbuzz(self, printFizzBuzz): """ :type printFizzBuzz: method :rtype: void """ for i in xrange(1, self.__n+1): with self.__cv: while self.__curr % 4 != 2: self.__cv.wait() self.__curr += 1 if i % 3 == 0 and i % 5 == 0: printFizzBuzz() self.__cv.notify_all() # printNumber(x) outputs "x", where x is an integer. def number(self, printNumber): """ :type printNumber: method :rtype: void """ for i in xrange(1, self.__n+1): with self.__cv: while self.__curr % 4 != 3: self.__cv.wait() self.__curr += 1 if i % 3 != 0 and i % 5 != 0: printNumber(i) self.__cv.notify_all()
FizzBuzz
python
dask__distributed
distributed/spans.py
{ "start": 960, "end": 2991 }
class ____(TypedDict): collections: list[dict] @contextmanager def span(*tags: str) -> Iterator[str]: """Tag group of tasks to be part of a certain group, called a span. This context manager can be nested, thus creating sub-spans. If you close and re-open a span context manager with the same tag, you'll end up with two separate spans. Every cluster defines a global "default" span when no span has been defined by the client; the default span is automatically closed and reopened when all tasks associated to it have been completed; in other words the cluster is idle save for tasks that are explicitly annotated by a span. Note that, in some edge cases, you may end up with overlapping default spans, e.g. if a worker crashes and all unique tasks that were in memory on it need to be recomputed. You may capture the ID of a span on the client to match it with the :class:`~distributed.spans.Span` objects the scheduler: >>> client = Client() >>> with span("my workflow") as span_id: ... client.submit(lambda: "Hello world!").result() >>> client.cluster.scheduler.extensions["spans"].spans[span_id] Span<name=('my workflow',), id=5dc9b908-116b-49a5-b0d7-5a681f49a111> Notes ----- You may retrieve the current span with ``dask.get_annotations().get("span")``. You can do so in the client code as well as from inside a task. """ if not tags: raise ValueError("Must specify at least one span tag") annotation = dask.get_annotations().get("span") prev_tags = annotation["name"] if annotation else () # You must specify the full history of IDs, not just the parent, because # otherwise you would not be able to uniquely identify grandparents when # they have no tasks of their own. prev_ids = annotation["ids"] if annotation else () ids = tuple(str(uuid.uuid4()) for _ in tags) with dask.annotate(span={"name": prev_tags + tags, "ids": prev_ids + ids}): yield ids[-1]
SpanMetadata
python
python-markdown__markdown
markdown/blockprocessors.py
{ "start": 23336, "end": 24365 }
class ____(BlockProcessor): """ Process blocks that are empty or start with an empty line. """ def test(self, parent: etree.Element, block: str) -> bool: return not block or block.startswith('\n') def run(self, parent: etree.Element, blocks: list[str]) -> None: block = blocks.pop(0) filler = '\n\n' if block: # Starts with empty line # Only replace a single line. filler = '\n' # Save the rest for later. theRest = block[1:] if theRest: # Add remaining lines to master blocks for later. blocks.insert(0, theRest) sibling = self.lastChild(parent) if (sibling is not None and sibling.tag == 'pre' and len(sibling) and sibling[0].tag == 'code'): # Last block is a code block. Append to preserve whitespace. sibling[0].text = util.AtomicString( '{}{}'.format(sibling[0].text, filler) )
EmptyBlockProcessor
python
ansible__ansible
lib/ansible/module_utils/facts/hardware/dragonfly.py
{ "start": 834, "end": 1037 }
class ____(HardwareCollector): # Note: This uses the freebsd fact class, there is no dragonfly hardware fact class _fact_class = FreeBSDHardware _platform = 'DragonFly'
DragonFlyHardwareCollector
python
tensorflow__tensorflow
tensorflow/python/feature_column/feature_column_v2_test.py
{ "start": 2594, "end": 3200 }
class ____(feature_column_v2_types.FeatureColumn): """A base FeatureColumn useful to avoid boiler-plate in tests. Provides dummy implementations for abstract methods that raise ValueError in order to avoid re-defining all abstract methods for each test sub-class. """ @property def parents(self): raise ValueError('Should not use this method.') @classmethod def from_config(cls, config, custom_objects=None, columns_by_name=None): raise ValueError('Should not use this method.') def get_config(self): raise ValueError('Should not use this method.')
BaseFeatureColumnForTests
python
numba__numba
numba/tests/npyufunc/ufuncbuilding_usecases.py
{ "start": 609, "end": 976 }
class ____: pass def guadd_obj(a, b, c): Dummy() # to force object mode x, y = c.shape for i in range(x): for j in range(y): c[i, j] = a[i, j] + b[i, j] def guadd_scalar_obj(a, b, c): Dummy() # to force object mode x, y = c.shape for i in range(x): for j in range(y): c[i, j] = a[i, j] + b
Dummy
python
rapidsai__cudf
python/cudf/cudf/pandas/fast_slow_proxy.py
{ "start": 14791, "end": 16041 }
class ____(type): """ Metaclass used to dynamically find class attributes and classmethods of fast-slow proxy types. """ _fsproxy_slow_dir: list _fsproxy_slow_type: type _fsproxy_fast_type: type @property def _fsproxy_slow(self) -> type: return self._fsproxy_slow_type @property def _fsproxy_fast(self) -> type: return self._fsproxy_fast_type def __dir__(self): # Try to return the cached dir of the slow object, but if it # doesn't exist, fall back to the default implementation. try: return self._fsproxy_slow_dir except AttributeError: return type.__dir__(self) def __subclasscheck__(self, __subclass: type) -> bool: if super().__subclasscheck__(__subclass): return True if hasattr(__subclass, "_fsproxy_slow"): return issubclass(__subclass._fsproxy_slow, self._fsproxy_slow) return False def __instancecheck__(self, __instance: Any) -> bool: if super().__instancecheck__(__instance): return True elif hasattr(type(__instance), "_fsproxy_slow"): return issubclass(type(__instance), self) return False
_FastSlowProxyMeta
python
jpadilla__pyjwt
jwt/exceptions.py
{ "start": 200, "end": 327 }
class ____(InvalidTokenError): """Raised when a token cannot be decoded because it failed validation""" pass
DecodeError
python
neetcode-gh__leetcode
python/0002-add-two-numbers.py
{ "start": 151, "end": 725 }
class ____: def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode: dummy = ListNode() cur = dummy carry = 0 while l1 or l2 or carry: v1 = l1.val if l1 else 0 v2 = l2.val if l2 else 0 # new digit val = v1 + v2 + carry carry = val // 10 val = val % 10 cur.next = ListNode(val) # update ptrs cur = cur.next l1 = l1.next if l1 else None l2 = l2.next if l2 else None return dummy.next
Solution
python
kubernetes-client__python
kubernetes/client/models/v1_resource_claim_list.py
{ "start": 383, "end": 7002 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'items': 'list[ResourceV1ResourceClaim]', 'kind': 'str', 'metadata': 'V1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501 """V1ResourceClaimList - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._items = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): """Gets the api_version of this V1ResourceClaimList. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1ResourceClaimList. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1ResourceClaimList. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1ResourceClaimList. # noqa: E501 :type: str """ self._api_version = api_version @property def items(self): """Gets the items of this V1ResourceClaimList. # noqa: E501 Items is the list of resource claims. # noqa: E501 :return: The items of this V1ResourceClaimList. # noqa: E501 :rtype: list[ResourceV1ResourceClaim] """ return self._items @items.setter def items(self, items): """Sets the items of this V1ResourceClaimList. Items is the list of resource claims. # noqa: E501 :param items: The items of this V1ResourceClaimList. # noqa: E501 :type: list[ResourceV1ResourceClaim] """ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501 raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501 self._items = items @property def kind(self): """Gets the kind of this V1ResourceClaimList. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1ResourceClaimList. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1ResourceClaimList. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1ResourceClaimList. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1ResourceClaimList. 
# noqa: E501 :return: The metadata of this V1ResourceClaimList. # noqa: E501 :rtype: V1ListMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1ResourceClaimList. :param metadata: The metadata of this V1ResourceClaimList. # noqa: E501 :type: V1ListMeta """ self._metadata = metadata def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1ResourceClaimList): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1ResourceClaimList): return True return self.to_dict() != other.to_dict()
V1ResourceClaimList
python
optuna__optuna
optuna/terminator/erroreval.py
{ "start": 353, "end": 638 }
class ____(metaclass=abc.ABCMeta): """Base class for error evaluators.""" @abc.abstractmethod def evaluate( self, trials: list[FrozenTrial], study_direction: StudyDirection, ) -> float: pass @experimental_class("3.2.0")
BaseErrorEvaluator
python
run-llama__llama_index
llama-index-integrations/embeddings/llama-index-embeddings-huggingface-api/llama_index/embeddings/huggingface_api/pooling.py
{ "start": 130, "end": 2244 }
class ____(str, Enum): """Enum of possible pooling choices with pooling behaviors.""" CLS = "cls" MEAN = "mean" LAST = "last" # last token pooling def __call__(self, array: np.ndarray) -> np.ndarray: if self == self.CLS: return self.cls_pooling(array) elif self == self.LAST: return self.last_pooling(array) return self.mean_pooling(array) @classmethod @overload def cls_pooling(cls, array: np.ndarray) -> np.ndarray: ... @classmethod @overload # TODO: Remove this `type: ignore` after the false positive problem # is addressed in mypy: https://github.com/python/mypy/issues/15683 . def cls_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore ... @classmethod def cls_pooling( cls, array: "Union[np.ndarray, torch.Tensor]" ) -> "Union[np.ndarray, torch.Tensor]": if len(array.shape) == 3: return array[:, 0] if len(array.shape) == 2: return array[0] raise NotImplementedError(f"Unhandled shape {array.shape}.") @classmethod def mean_pooling(cls, array: np.ndarray) -> np.ndarray: if len(array.shape) == 3: return array.mean(axis=1) if len(array.shape) == 2: return array.mean(axis=0) raise NotImplementedError(f"Unhandled shape {array.shape}.") @classmethod @overload def last_pooling(cls, array: np.ndarray) -> np.ndarray: ... @classmethod @overload # TODO: Remove this `type: ignore` after the false positive problem # is addressed in mypy: https://github.com/python/mypy/issues/15683 . def last_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore ... @classmethod def last_pooling( cls, array: "Union[np.ndarray, torch.Tensor]" ) -> "Union[np.ndarray, torch.Tensor]": if len(array.shape) == 3: return array[:, -1] if len(array.shape) == 2: return array[-1] raise NotImplementedError(f"Unhandled shape {array.shape}.")
Pooling
python
numpy__numpy
numpy/ma/core.py
{ "start": 82068, "end": 217422 }
class ____(ndarray): """ An array class with possibly masked values. Masked values of True exclude the corresponding element from any computation. Construction:: x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True, ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, shrink=True, order=None) Parameters ---------- data : array_like Input data. mask : sequence, optional Mask. Must be convertible to an array of booleans with the same shape as `data`. True indicates a masked (i.e. invalid) data. dtype : dtype, optional Data type of the output. If `dtype` is None, the type of the data argument (``data.dtype``) is used. If `dtype` is not None and different from ``data.dtype``, a copy is performed. copy : bool, optional Whether to copy the input data (True), or to use a reference instead. Default is False. subok : bool, optional Whether to return a subclass of `MaskedArray` if possible (True) or a plain `MaskedArray`. Default is True. ndmin : int, optional Minimum number of dimensions. Default is 0. fill_value : scalar, optional Value used to fill in the masked values when necessary. If None, a default based on the data-type is used. keep_mask : bool, optional Whether to combine `mask` with the mask of the input data, if any (True), or to use only `mask` for the output (False). Default is True. hard_mask : bool, optional Whether to use a hard mask or not. With a hard mask, masked values cannot be unmasked. Default is False. shrink : bool, optional Whether to force compression of an empty mask. Default is True. order : {'C', 'F', 'A'}, optional Specify the order of the array. If order is 'C', then the array will be in C-contiguous order (last-index varies the fastest). If order is 'F', then the returned array will be in Fortran-contiguous order (first-index varies the fastest). 
If order is 'A' (default), then the returned array may be in any order (either C-, Fortran-contiguous, or even discontiguous), unless a copy is required, in which case it will be C-contiguous. Examples -------- >>> import numpy as np The ``mask`` can be initialized with an array of boolean values with the same shape as ``data``. >>> data = np.arange(6).reshape((2, 3)) >>> np.ma.MaskedArray(data, mask=[[False, True, False], ... [False, False, True]]) masked_array( data=[[0, --, 2], [3, 4, --]], mask=[[False, True, False], [False, False, True]], fill_value=999999) Alternatively, the ``mask`` can be initialized to homogeneous boolean array with the same shape as ``data`` by passing in a scalar boolean value: >>> np.ma.MaskedArray(data, mask=False) masked_array( data=[[0, 1, 2], [3, 4, 5]], mask=[[False, False, False], [False, False, False]], fill_value=999999) >>> np.ma.MaskedArray(data, mask=True) masked_array( data=[[--, --, --], [--, --, --]], mask=[[ True, True, True], [ True, True, True]], fill_value=999999, dtype=int64) .. note:: The recommended practice for initializing ``mask`` with a scalar boolean value is to use ``True``/``False`` rather than ``np.True_``/``np.False_``. The reason is :attr:`nomask` is represented internally as ``np.False_``. >>> np.False_ is np.ma.nomask True """ __array_priority__ = 15 _defaultmask = nomask _defaulthardmask = False _baseclass = ndarray # Maximum number of elements per axis used when printing an array. The # 1d case is handled separately because we need more values in this case. _print_width = 100 _print_width_1d = 1500 def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, subok=True, ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, shrink=True, order=None): """ Create a new masked array from scratch. Notes ----- A masked array can also be created by taking a .view(MaskedArray). """ # Process data. 
copy = None if not copy else True _data = np.array(data, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin) _baseclass = getattr(data, '_baseclass', type(_data)) # Check that we're not erasing the mask. if isinstance(data, MaskedArray) and (data.shape != _data.shape): copy = True # Here, we copy the _view_, so that we can attach new properties to it # we must never do .view(MaskedConstant), as that would create a new # instance of np.ma.masked, which make identity comparison fail if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant): _data = ndarray.view(_data, type(data)) else: _data = ndarray.view(_data, cls) # Handle the case where data is not a subclass of ndarray, but # still has the _mask attribute like MaskedArrays if hasattr(data, '_mask') and not isinstance(data, ndarray): _data._mask = data._mask # FIXME: should we set `_data._sharedmask = True`? # Process mask. # Type of the mask mdtype = make_mask_descr(_data.dtype) if mask is nomask: # Case 1. : no mask in input. # Erase the current mask ? 
if not keep_mask: # With a reduced version if shrink: _data._mask = nomask # With full version else: _data._mask = np.zeros(_data.shape, dtype=mdtype) # Check whether we missed something elif isinstance(data, (tuple, list)): try: # If data is a sequence of masked array mask = np.array( [getmaskarray(np.asanyarray(m, dtype=_data.dtype)) for m in data], dtype=mdtype) except (ValueError, TypeError): # If data is nested mask = nomask # Force shrinking of the mask if needed (and possible) if (mdtype == MaskType) and mask.any(): _data._mask = mask _data._sharedmask = False else: _data._sharedmask = not copy if copy: _data._mask = _data._mask.copy() # Reset the shape of the original mask if getmask(data) is not nomask: # gh-21022 encounters an issue here # because data._mask.shape is not writeable, but # the op was also pointless in that case, because # the shapes were the same, so we can at least # avoid that path if data._mask.shape != data.shape: data._mask.shape = data.shape else: # Case 2. : With a mask in input. # If mask is boolean, create an array of True or False # if users pass `mask=None` be forgiving here and cast it False # for speed; although the default is `mask=nomask` and can differ. 
if mask is None: mask = False if mask is True and mdtype == MaskType: mask = np.ones(_data.shape, dtype=mdtype) elif mask is False and mdtype == MaskType: mask = np.zeros(_data.shape, dtype=mdtype) else: # Read the mask with the current mdtype try: mask = np.array(mask, copy=copy, dtype=mdtype) # Or assume it's a sequence of bool/int except TypeError: mask = np.array([tuple([m] * len(mdtype)) for m in mask], dtype=mdtype) # Make sure the mask and the data have the same shape if mask.shape != _data.shape: (nd, nm) = (_data.size, mask.size) if nm == 1: mask = np.resize(mask, _data.shape) elif nm == nd: mask = np.reshape(mask, _data.shape) else: msg = (f"Mask and data not compatible:" f" data size is {nd}, mask size is {nm}.") raise MaskError(msg) copy = True # Set the mask to the new value if _data._mask is nomask: _data._mask = mask _data._sharedmask = not copy elif not keep_mask: _data._mask = mask _data._sharedmask = not copy else: if _data.dtype.names is not None: def _recursive_or(a, b): "do a|=b on each field of a, recursively" for name in a.dtype.names: (af, bf) = (a[name], b[name]) if af.dtype.names is not None: _recursive_or(af, bf) else: af |= bf _recursive_or(_data._mask, mask) else: _data._mask = np.logical_or(mask, _data._mask) _data._sharedmask = False # Update fill_value. if fill_value is None: fill_value = getattr(data, '_fill_value', None) # But don't run the check unless we have something to check. if fill_value is not None: _data._fill_value = _check_fill_value(fill_value, _data.dtype) # Process extra options .. if hard_mask is None: _data._hardmask = getattr(data, '_hardmask', False) else: _data._hardmask = hard_mask _data._baseclass = _baseclass return _data def _update_from(self, obj): """ Copies some attributes of obj to self. 
""" if isinstance(obj, ndarray): _baseclass = type(obj) else: _baseclass = ndarray # We need to copy the _basedict to avoid backward propagation _optinfo = {} _optinfo.update(getattr(obj, '_optinfo', {})) _optinfo.update(getattr(obj, '_basedict', {})) if not isinstance(obj, MaskedArray): _optinfo.update(getattr(obj, '__dict__', {})) _dict = {'_fill_value': getattr(obj, '_fill_value', None), '_hardmask': getattr(obj, '_hardmask', False), '_sharedmask': getattr(obj, '_sharedmask', False), '_isfield': getattr(obj, '_isfield', False), '_baseclass': getattr(obj, '_baseclass', _baseclass), '_optinfo': _optinfo, '_basedict': _optinfo} self.__dict__.update(_dict) self.__dict__.update(_optinfo) def __array_finalize__(self, obj): """ Finalizes the masked array. """ # Get main attributes. self._update_from(obj) # We have to decide how to initialize self.mask, based on # obj.mask. This is very difficult. There might be some # correspondence between the elements in the array we are being # created from (= obj) and us. Or there might not. This method can # be called in all kinds of places for all kinds of reasons -- could # be empty_like, could be slicing, could be a ufunc, could be a view. # The numpy subclassing interface simply doesn't give us any way # to know, which means that at best this method will be based on # guesswork and heuristics. To make things worse, there isn't even any # clear consensus about what the desired behavior is. For instance, # most users think that np.empty_like(marr) -- which goes via this # method -- should return a masked array with an empty mask (see # gh-3404 and linked discussions), but others disagree, and they have # existing code which depends on empty_like returning an array that # matches the input mask. # # Historically our algorithm was: if the template object mask had the # same *number of elements* as us, then we used *it's mask object # itself* as our mask, so that writes to us would also write to the # original array. 
This is horribly broken in multiple ways. # # Now what we do instead is, if the template object mask has the same # number of elements as us, and we do not have the same base pointer # as the template object (b/c views like arr[...] should keep the same # mask), then we make a copy of the template object mask and use # that. This is also horribly broken but somewhat less so. Maybe. if isinstance(obj, ndarray): # XX: This looks like a bug -- shouldn't it check self.dtype # instead? if obj.dtype.names is not None: _mask = getmaskarray(obj) else: _mask = getmask(obj) # If self and obj point to exactly the same data, then probably # self is a simple view of obj (e.g., self = obj[...]), so they # should share the same mask. (This isn't 100% reliable, e.g. self # could be the first row of obj, or have strange strides, but as a # heuristic it's not bad.) In all other cases, we make a copy of # the mask, so that future modifications to 'self' do not end up # side-effecting 'obj' as well. if (_mask is not nomask and obj.__array_interface__["data"][0] != self.__array_interface__["data"][0]): # We should make a copy. But we could get here via astype, # in which case the mask might need a new dtype as well # (e.g., changing to or from a structured dtype), and the # order could have changed. So, change the mask type if # needed and use astype instead of copy. if self.dtype == obj.dtype: _mask_dtype = _mask.dtype else: _mask_dtype = make_mask_descr(self.dtype) if self.flags.c_contiguous: order = "C" elif self.flags.f_contiguous: order = "F" else: order = "K" _mask = _mask.astype(_mask_dtype, order) else: # Take a view so shape changes, etc., do not propagate back. 
_mask = _mask.view() else: _mask = nomask self._mask = _mask # Finalize the mask if self._mask is not nomask: try: self._mask.shape = self.shape except ValueError: self._mask = nomask except (TypeError, AttributeError): # When _mask.shape is not writable (because it's a void) pass # Finalize the fill_value if self._fill_value is not None: self._fill_value = _check_fill_value(self._fill_value, self.dtype) elif self.dtype.names is not None: # Finalize the default fill_value for structured arrays self._fill_value = _check_fill_value(None, self.dtype) def __array_wrap__(self, obj, context=None, return_scalar=False): """ Special hook for ufuncs. Wraps the numpy array and sets the mask according to context. """ if obj is self: # for in-place operations result = obj else: result = obj.view(type(self)) result._update_from(self) if context is not None: result._mask = result._mask.copy() func, args, out_i = context # args sometimes contains outputs (gh-10459), which we don't want input_args = args[:func.nin] m = functools.reduce(mask_or, [getmaskarray(arg) for arg in input_args]) # Get the domain mask domain = ufunc_domain.get(func) if domain is not None: # Take the domain, and make sure it's a ndarray with np.errstate(divide='ignore', invalid='ignore'): # The result may be masked for two (unary) domains. # That can't really be right as some domains drop # the mask and some don't behaving differently here. 
d = domain(*input_args).astype(bool, copy=False) d = filled(d, True) if d.any(): # Fill the result where the domain is wrong try: # Binary domain: take the last value fill_value = ufunc_fills[func][-1] except TypeError: # Unary domain: just use this one fill_value = ufunc_fills[func] except KeyError: # Domain not recognized, use fill_value instead fill_value = self.fill_value np.copyto(result, fill_value, where=d) # Update the mask if m is nomask: m = d else: # Don't modify inplace, we risk back-propagation m = (m | d) # Make sure the mask has the proper size if result is not self and result.shape == () and m: return masked else: result._mask = m result._sharedmask = False return result def view(self, dtype=None, type=None, fill_value=None): """ Return a view of the MaskedArray data. Parameters ---------- dtype : data-type or ndarray sub-class, optional Data-type descriptor of the returned view, e.g., float32 or int16. The default, None, results in the view having the same data-type as `a`. As with ``ndarray.view``, dtype can also be specified as an ndarray sub-class, which then specifies the type of the returned object (this is equivalent to setting the ``type`` parameter). type : Python type, optional Type of the returned view, either ndarray or a subclass. The default None results in type preservation. fill_value : scalar, optional The value to use for invalid entries (None by default). If None, then this argument is inferred from the passed `dtype`, or in its absence the original array, as discussed in the notes below. See Also -------- numpy.ndarray.view : Equivalent method on ndarray object. Notes ----- ``a.view()`` is used two different ways: ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view of the array's memory with a different data-type. This can cause a reinterpretation of the bytes of memory. 
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just returns an instance of `ndarray_subclass` that looks at the same array (same shape, dtype, etc.) This does not cause a reinterpretation of the memory. If `fill_value` is not specified, but `dtype` is specified (and is not an ndarray sub-class), the `fill_value` of the MaskedArray will be reset. If neither `fill_value` nor `dtype` are specified (or if `dtype` is an ndarray sub-class), then the fill value is preserved. Finally, if `fill_value` is specified, but `dtype` is not, the fill value is set to the specified value. For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of bytes per entry than the previous dtype (for example, converting a regular array to a structured array), then the behavior of the view cannot be predicted just from the superficial appearance of ``a`` (shown by ``print(a)``). It also depends on exactly how ``a`` is stored in memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus defined as a slice or transpose, etc., the view may give different results. 
""" if dtype is None: if type is None: output = ndarray.view(self) else: output = ndarray.view(self, type) elif type is None: try: if issubclass(dtype, ndarray): output = ndarray.view(self, dtype) dtype = None else: output = ndarray.view(self, dtype) except TypeError: output = ndarray.view(self, dtype) else: output = ndarray.view(self, dtype, type) # also make the mask be a view (so attr changes to the view's # mask do no affect original object's mask) # (especially important to avoid affecting np.masked singleton) if getmask(output) is not nomask: output._mask = output._mask.view() # Make sure to reset the _fill_value if needed if getattr(output, '_fill_value', None) is not None: if fill_value is None: if dtype is None: pass # leave _fill_value as is else: output._fill_value = None else: output.fill_value = fill_value return output def __getitem__(self, indx): """ x.__getitem__(y) <==> x[y] Return the item described by i, as a masked array. """ # We could directly use ndarray.__getitem__ on self. # But then we would have to modify __array_finalize__ to prevent the # mask of being reshaped if it hasn't been set up properly yet # So it's easier to stick to the current version dout = self.data[indx] _mask = self._mask def _is_scalar(m): return not isinstance(m, np.ndarray) def _scalar_heuristic(arr, elem): """ Return whether `elem` is a scalar result of indexing `arr`, or None if undecidable without promoting nomask to a full mask """ # obviously a scalar if not isinstance(elem, np.ndarray): return True # object array scalar indexing can return anything elif arr.dtype.type is np.object_: if arr.dtype is not elem.dtype: # elem is an array, but dtypes do not match, so must be # an element return True # well-behaved subclass that only returns 0d arrays when # expected - this is not a scalar elif type(arr).__getitem__ == ndarray.__getitem__: return False return None if _mask is not nomask: # _mask cannot be a subclass, so it tells us whether we should # expect a scalar. 
It also cannot be of dtype object. mout = _mask[indx] scalar_expected = _is_scalar(mout) else: # attempt to apply the heuristic to avoid constructing a full mask mout = nomask scalar_expected = _scalar_heuristic(self.data, dout) if scalar_expected is None: # heuristics have failed # construct a full array, so we can be certain. This is costly. # we could also fall back on ndarray.__getitem__(self.data, indx) scalar_expected = _is_scalar(getmaskarray(self)[indx]) # Did we extract a single item? if scalar_expected: # A record if isinstance(dout, np.void): # We should always re-cast to mvoid, otherwise users can # change masks on rows that already have masked values, but not # on rows that have no masked values, which is inconsistent. return mvoid(dout, mask=mout, hardmask=self._hardmask) # special case introduced in gh-5962 elif (self.dtype.type is np.object_ and isinstance(dout, np.ndarray) and dout is not masked): # If masked, turn into a MaskedArray, with everything masked. if mout: return MaskedArray(dout, mask=True) else: return dout # Just a scalar elif mout: return masked else: return dout else: # Force dout to MA dout = dout.view(type(self)) # Inherit attributes from self dout._update_from(self) # Check the fill_value if is_string_or_list_of_strings(indx): if self._fill_value is not None: dout._fill_value = self._fill_value[indx] # Something like gh-15895 has happened if this check fails. # _fill_value should always be an ndarray. if not isinstance(dout._fill_value, np.ndarray): raise RuntimeError('Internal NumPy error.') # If we're indexing a multidimensional field in a # structured array (such as dtype("(2,)i2,(2,)i1")), # dimensionality goes up (M[field].ndim == M.ndim + # M.dtype[field].ndim). That's fine for # M[field] but problematic for M[field].fill_value # which should have shape () to avoid breaking several # methods. There is no great way out, so set to # first element. See issue #6723. 
if dout._fill_value.ndim > 0: if not (dout._fill_value == dout._fill_value.flat[0]).all(): warnings.warn( "Upon accessing multidimensional field " f"{indx!s}, need to keep dimensionality " "of fill_value at 0. Discarding " "heterogeneous fill_value and setting " f"all to {dout._fill_value[0]!s}.", stacklevel=2) # Need to use `.flat[0:1].squeeze(...)` instead of just # `.flat[0]` to ensure the result is a 0d array and not # a scalar. dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0) dout._isfield = True # Update the mask if needed if mout is not nomask: # set shape to match that of data; this is needed for matrices dout._mask = reshape(mout, dout.shape) dout._sharedmask = True # Note: Don't try to check for m.any(), that'll take too long return dout # setitem may put NaNs into integer arrays or occasionally overflow a # float. But this may happen in masked values, so avoid otherwise # correct warnings (as is typical also in masked calculations). @np.errstate(over='ignore', invalid='ignore') def __setitem__(self, indx, value): """ x.__setitem__(i, y) <==> x[i]=y Set item described by index. If value is masked, masks those locations. """ if self is masked: raise MaskError('Cannot alter the masked element.') _data = self._data _mask = self._mask if isinstance(indx, str): _data[indx] = value if _mask is nomask: self._mask = _mask = make_mask_none(self.shape, self.dtype) _mask[indx] = getmask(value) return _dtype = _data.dtype if value is masked: # The mask wasn't set: create a full version. if _mask is nomask: _mask = self._mask = make_mask_none(self.shape, _dtype) # Now, set the mask to its value. 
if _dtype.names is not None: _mask[indx] = tuple([True] * len(_dtype.names)) else: _mask[indx] = True return # Get the _data part of the new value dval = getattr(value, '_data', value) # Get the _mask part of the new value mval = getmask(value) if _dtype.names is not None and mval is nomask: mval = tuple([False] * len(_dtype.names)) if _mask is nomask: # Set the data, then the mask _data[indx] = dval if mval is not nomask: _mask = self._mask = make_mask_none(self.shape, _dtype) _mask[indx] = mval elif not self._hardmask: # Set the data, then the mask if (isinstance(indx, masked_array) and not isinstance(value, masked_array)): _data[indx.data] = dval else: _data[indx] = dval _mask[indx] = mval elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): indx = indx * umath.logical_not(_mask) _data[indx] = dval else: if _dtype.names is not None: err_msg = "Flexible 'hard' masks are not yet supported." raise NotImplementedError(err_msg) mindx = mask_or(_mask[indx], mval, copy=True) dindx = self._data[indx] if dindx.size > 1: np.copyto(dindx, dval, where=~mindx) elif mindx is nomask: dindx = dval _data[indx] = dindx _mask[indx] = mindx return # Define so that we can overwrite the setter. @property def dtype(self): return super().dtype @dtype.setter def dtype(self, dtype): super(MaskedArray, type(self)).dtype.__set__(self, dtype) if self._mask is not nomask: self._mask = self._mask.view(make_mask_descr(dtype), ndarray) # Try to reset the shape of the mask (if we don't have a void). # This raises a ValueError if the dtype change won't work. try: self._mask.shape = self.shape except (AttributeError, TypeError): pass @property def shape(self): return super().shape @shape.setter def shape(self, shape): super(MaskedArray, type(self)).shape.__set__(self, shape) # Cannot use self._mask, since it may not (yet) exist when a # masked matrix sets the shape. if getmask(self) is not nomask: self._mask.shape = self.shape def __setmask__(self, mask, copy=False): """ Set the mask. 
""" idtype = self.dtype current_mask = self._mask if mask is masked: mask = True if current_mask is nomask: # Make sure the mask is set # Just don't do anything if there's nothing to do. if mask is nomask: return current_mask = self._mask = make_mask_none(self.shape, idtype) if idtype.names is None: # No named fields. # Hardmask: don't unmask the data if self._hardmask: current_mask |= mask # Softmask: set everything to False # If it's obviously a compatible scalar, use a quick update # method. elif isinstance(mask, (int, float, np.bool, np.number)): current_mask[...] = mask # Otherwise fall back to the slower, general purpose way. else: current_mask.flat = mask else: # Named fields w/ mdtype = current_mask.dtype mask = np.asarray(mask) # Mask is a singleton if not mask.ndim: # It's a boolean : make a record if mask.dtype.kind == 'b': mask = np.array(tuple([mask.item()] * len(mdtype)), dtype=mdtype) # It's a record: make sure the dtype is correct else: mask = mask.astype(mdtype) # Mask is a sequence else: # Make sure the new mask is a ndarray with the proper dtype try: copy = None if not copy else True mask = np.array(mask, copy=copy, dtype=mdtype) # Or assume it's a sequence of bool/int except TypeError: mask = np.array([tuple([m] * len(mdtype)) for m in mask], dtype=mdtype) # Hardmask: don't unmask the data if self._hardmask: for n in idtype.names: current_mask[n] |= mask[n] # Softmask: set everything to False # If it's obviously a compatible scalar, use a quick update # method. elif isinstance(mask, (int, float, np.bool, np.number)): current_mask[...] = mask # Otherwise fall back to the slower, general purpose way. else: current_mask.flat = mask # Reshape if needed if current_mask.shape: current_mask.shape = self.shape return _set_mask = __setmask__ @property def mask(self): """ Current mask. """ # We could try to force a reshape, but that wouldn't work in some # cases. 
# Return a view so that the dtype and shape cannot be changed in place # This still preserves nomask by identity return self._mask.view() @mask.setter def mask(self, value): self.__setmask__(value) @property def recordmask(self): """ Get or set the mask of the array if it has no named fields. For structured arrays, returns a ndarray of booleans where entries are ``True`` if **all** the fields are masked, ``False`` otherwise: >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], ... dtype=[('a', int), ('b', int)]) >>> x.recordmask array([False, False, True, False, False]) """ _mask = self._mask.view(ndarray) if _mask.dtype.names is None: return _mask return np.all(flatten_structured_array(_mask), axis=-1) @recordmask.setter def recordmask(self, mask): raise NotImplementedError("Coming soon: setting the mask per records!") def harden_mask(self): """ Force the mask to hard, preventing unmasking by assignment. Whether the mask of a masked array is hard or soft is determined by its `~ma.MaskedArray.hardmask` property. `harden_mask` sets `~ma.MaskedArray.hardmask` to ``True`` (and returns the modified self). See Also -------- ma.MaskedArray.hardmask ma.MaskedArray.soften_mask """ self._hardmask = True return self def soften_mask(self): """ Force the mask to soft (default), allowing unmasking by assignment. Whether the mask of a masked array is hard or soft is determined by its `~ma.MaskedArray.hardmask` property. `soften_mask` sets `~ma.MaskedArray.hardmask` to ``False`` (and returns the modified self). See Also -------- ma.MaskedArray.hardmask ma.MaskedArray.harden_mask """ self._hardmask = False return self @property def hardmask(self): """ Specifies whether values can be unmasked through assignments. By default, assigning definite values to masked array entries will unmask them. When `hardmask` is ``True``, the mask will not change through assignments. 
See Also -------- ma.MaskedArray.harden_mask ma.MaskedArray.soften_mask Examples -------- >>> import numpy as np >>> x = np.arange(10) >>> m = np.ma.masked_array(x, x>5) >>> assert not m.hardmask Since `m` has a soft mask, assigning an element value unmasks that element: >>> m[8] = 42 >>> m masked_array(data=[0, 1, 2, 3, 4, 5, --, --, 42, --], mask=[False, False, False, False, False, False, True, True, False, True], fill_value=999999) After hardening, the mask is not affected by assignments: >>> hardened = np.ma.harden_mask(m) >>> assert m.hardmask and hardened is m >>> m[:] = 23 >>> m masked_array(data=[23, 23, 23, 23, 23, 23, --, --, 23, --], mask=[False, False, False, False, False, False, True, True, False, True], fill_value=999999) """ return self._hardmask def unshare_mask(self): """ Copy the mask and set the `sharedmask` flag to ``False``. Whether the mask is shared between masked arrays can be seen from the `sharedmask` property. `unshare_mask` ensures the mask is not shared. A copy of the mask is only made if it was shared. See Also -------- sharedmask """ if self._sharedmask: self._mask = self._mask.copy() self._sharedmask = False return self @property def sharedmask(self): """ Share status of the mask (read-only). """ return self._sharedmask def shrink_mask(self): """ Reduce a mask to nomask when possible. Parameters ---------- None Returns ------- result : MaskedArray A :class:`~ma.MaskedArray` object. Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) >>> x.mask array([[False, False], [False, False]]) >>> x.shrink_mask() masked_array( data=[[1, 2], [3, 4]], mask=False, fill_value=999999) >>> x.mask False """ self._mask = _shrink_mask(self._mask) return self @property def baseclass(self): """ Class of the underlying data (read-only). """ return self._baseclass def _get_data(self): """ Returns the underlying data, as a view of the masked array. 
If the underlying data is a subclass of :class:`numpy.ndarray`, it is returned as such. >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.data matrix([[1, 2], [3, 4]]) The type of the data can be accessed through the :attr:`baseclass` attribute. """ return ndarray.view(self, self._baseclass) _data = property(fget=_get_data) data = property(fget=_get_data) @property def flat(self): """ Return a flat iterator, or set a flattened version of self to value. """ return MaskedIterator(self) @flat.setter def flat(self, value): y = self.ravel() y[:] = value @property def fill_value(self): """ The filling value of the masked array is a scalar. When setting, None will set to a default based on the data type. Examples -------- >>> import numpy as np >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: ... np.ma.array([0, 1], dtype=dt).get_fill_value() ... np.int64(999999) np.int64(999999) np.float64(1e+20) np.complex128(1e+20+0j) >>> x = np.ma.array([0, 1.], fill_value=-np.inf) >>> x.fill_value np.float64(-inf) >>> x.fill_value = np.pi >>> x.fill_value np.float64(3.1415926535897931) Reset to default: >>> x.fill_value = None >>> x.fill_value np.float64(1e+20) """ if self._fill_value is None: self._fill_value = _check_fill_value(None, self.dtype) # Temporary workaround to account for the fact that str and bytes # scalars cannot be indexed with (), whereas all other numpy # scalars can. See issues #7259 and #7267. # The if-block can be removed after #7267 has been fixed. if isinstance(self._fill_value, ndarray): return self._fill_value[()] return self._fill_value @fill_value.setter def fill_value(self, value=None): target = _check_fill_value(value, self.dtype) if not target.ndim == 0: # 2019-11-12, 1.18.0 warnings.warn( "Non-scalar arrays for the fill value are deprecated. Use " "arrays with scalar values instead. 
The filled function " "still supports any array as `fill_value`.", DeprecationWarning, stacklevel=2) _fill_value = self._fill_value if _fill_value is None: # Create the attribute if it was undefined self._fill_value = target else: # Don't overwrite the attribute, just fill it (for propagation) _fill_value[()] = target # kept for compatibility get_fill_value = fill_value.fget set_fill_value = fill_value.fset def filled(self, fill_value=None): """ Return a copy of self, with masked values filled with a given value. **However**, if there are no masked values to fill, self will be returned instead as an ndarray. Parameters ---------- fill_value : array_like, optional The value to use for invalid entries. Can be scalar or non-scalar. If non-scalar, the resulting ndarray must be broadcastable over input array. Default is None, in which case, the `fill_value` attribute of the array is used instead. Returns ------- filled_array : ndarray A copy of ``self`` with invalid entries replaced by *fill_value* (be it the function argument or the attribute of ``self``), or ``self`` itself as an ndarray if there are no invalid entries to be replaced. Notes ----- The result is **not** a MaskedArray! Examples -------- >>> import numpy as np >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) >>> x.filled() array([ 1, 2, -999, 4, -999]) >>> x.filled(fill_value=1000) array([ 1, 2, 1000, 4, 1000]) >>> type(x.filled()) <class 'numpy.ndarray'> Subclassing is preserved. 
This means that if, e.g., the data part of the masked array is a recarray, `filled` returns a recarray: >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray) >>> m = np.ma.array(x, mask=[(True, False), (False, True)]) >>> m.filled() rec.array([(999999, 2), ( -3, 999999)], dtype=[('f0', '<i8'), ('f1', '<i8')]) """ m = self._mask if m is nomask: return self._data if fill_value is None: fill_value = self.fill_value else: fill_value = _check_fill_value(fill_value, self.dtype) if self is masked_singleton: return np.asanyarray(fill_value) if m.dtype.names is not None: result = self._data.copy('K') _recursive_filled(result, self._mask, fill_value) elif not m.any(): return self._data else: result = self._data.copy('K') try: np.copyto(result, fill_value, where=m) except (TypeError, AttributeError): fill_value = narray(fill_value, dtype=object) d = result.astype(object) result = np.choose(m, (d, fill_value)) except IndexError: # ok, if scalar if self._data.shape: raise elif m: result = np.array(fill_value, dtype=self.dtype) else: result = self._data return result def compressed(self): """ Return all the non-masked data as a 1-D array. Returns ------- data : ndarray A new `ndarray` holding the non-masked data is returned. Notes ----- The result is **not** a MaskedArray! Examples -------- >>> import numpy as np >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) >>> x.compressed() array([0, 1]) >>> type(x.compressed()) <class 'numpy.ndarray'> N-D arrays are compressed to 1-D. >>> arr = [[1, 2], [3, 4]] >>> mask = [[1, 0], [0, 1]] >>> x = np.ma.array(arr, mask=mask) >>> x.compressed() array([2, 3]) """ data = ndarray.ravel(self._data) if self._mask is not nomask: data = data.compress(np.logical_not(ndarray.ravel(self._mask))) return data def compress(self, condition, axis=None, out=None): """ Return `a` where condition is ``True``. If condition is a `~ma.MaskedArray`, missing values are considered as ``False``. 
Parameters ---------- condition : var Boolean 1-d array selecting which entries to return. If len(condition) is less than the size of a along the axis, then output is truncated to length of condition array. axis : {None, int}, optional Axis along which the operation must be performed. out : {None, ndarray}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. Returns ------- result : MaskedArray A :class:`~ma.MaskedArray` object. Notes ----- Please note the difference with :meth:`compressed` ! The output of :meth:`compress` has a mask, the output of :meth:`compressed` does not. Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.compress([1, 0, 1]) masked_array(data=[1, 3], mask=[False, False], fill_value=999999) >>> x.compress([1, 0, 1], axis=1) masked_array( data=[[1, 3], [--, --], [7, 9]], mask=[[False, False], [ True, True], [False, False]], fill_value=999999) """ # Get the basic components (_data, _mask) = (self._data, self._mask) # Force the condition to a regular ndarray and forget the missing # values. condition = np.asarray(condition) _new = _data.compress(condition, axis=axis, out=out).view(type(self)) _new._update_from(self) if _mask is not nomask: _new._mask = _mask.compress(condition, axis=axis) return _new def _insert_masked_print(self): """ Replace masked values with masked_print_option, casting all innermost dtypes to object. """ if masked_print_option.enabled(): mask = self._mask if mask is nomask: res = self._data else: # convert to object array to make filled work data = self._data # For big arrays, to avoid a costly conversion to the # object dtype, extract the corners before the conversion. 
print_width = (self._print_width if self.ndim > 1 else self._print_width_1d) for axis in range(self.ndim): if data.shape[axis] > print_width: ind = print_width // 2 arr = np.split(data, (ind, -ind), axis=axis) data = np.concatenate((arr[0], arr[2]), axis=axis) arr = np.split(mask, (ind, -ind), axis=axis) mask = np.concatenate((arr[0], arr[2]), axis=axis) rdtype = _replace_dtype_fields(self.dtype, "O") res = data.astype(rdtype) _recursive_printoption(res, mask, masked_print_option) else: res = self.filled(self.fill_value) return res def __str__(self): return str(self._insert_masked_print()) def __repr__(self): """ Literal string representation. """ if self._baseclass is np.ndarray: name = 'array' else: name = self._baseclass.__name__ # 2016-11-19: Demoted to legacy format if np._core.arrayprint._get_legacy_print_mode() <= 113: is_long = self.ndim > 1 parameters = { 'name': name, 'nlen': " " * len(name), 'data': str(self), 'mask': str(self._mask), 'fill': str(self.fill_value), 'dtype': str(self.dtype) } is_structured = bool(self.dtype.names) key = '{}_{}'.format( 'long' if is_long else 'short', 'flx' if is_structured else 'std' ) return _legacy_print_templates[key] % parameters prefix = f"masked_{name}(" dtype_needed = ( not np._core.arrayprint.dtype_is_implied(self.dtype) or np.all(self.mask) or self.size == 0 ) # determine which keyword args need to be shown keys = ['data', 'mask', 'fill_value'] if dtype_needed: keys.append('dtype') # array has only one row (non-column) is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) # choose what to indent each keyword with min_indent = 2 if is_one_row: # first key on the same line as the type, remaining keys # aligned by equals indents = {} indents[keys[0]] = prefix for k in keys[1:]: n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) indents[k] = ' ' * n prefix = '' # absorbed into the first indent else: # each key on its own line, indented by two spaces indents = dict.fromkeys(keys, ' ' * min_indent) 
prefix = prefix + '\n' # first key on the next line # format the field values reprs = {} reprs['data'] = np.array2string( self._insert_masked_print(), separator=", ", prefix=indents['data'] + 'data=', suffix=',') reprs['mask'] = np.array2string( self._mask, separator=", ", prefix=indents['mask'] + 'mask=', suffix=',') if self._fill_value is None: self.fill_value # initialize fill_value # noqa: B018 if (self._fill_value.dtype.kind in ("S", "U") and self.dtype.kind == self._fill_value.dtype.kind): # Allow strings: "N/A" has length 3 so would mismatch. fill_repr = repr(self.fill_value.item()) elif self._fill_value.dtype == self.dtype and not self.dtype == object: # Guess that it is OK to use the string as item repr. To really # fix this, it needs new logic (shared with structured scalars) fill_repr = str(self.fill_value) else: fill_repr = repr(self.fill_value) reprs['fill_value'] = fill_repr if dtype_needed: reprs['dtype'] = np._core.arrayprint.dtype_short_repr(self.dtype) # join keys with values and indentations result = ',\n'.join( f'{indents[k]}{k}={reprs[k]}' for k in keys ) return prefix + result + ')' def _delegate_binop(self, other): # This emulates the logic in # private/binop_override.h:forward_binop_should_defer if isinstance(other, type(self)): return False array_ufunc = getattr(other, "__array_ufunc__", False) if array_ufunc is False: other_priority = getattr(other, "__array_priority__", -1000000) return self.__array_priority__ < other_priority else: # If array_ufunc is not None, it will be called inside the ufunc; # None explicitly tells us to not call the ufunc, i.e., defer. return array_ufunc is None def _comparison(self, other, compare): """Compare self with other using operator.eq or operator.ne. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. 
For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked. """ omask = getmask(other) smask = self.mask mask = mask_or(smask, omask, copy=True) odata = getdata(other) if mask.dtype.names is not None: # only == and != are reasonably defined for structured dtypes, # so give up early for all other comparisons: if compare not in (operator.eq, operator.ne): return NotImplemented # For possibly masked structured arrays we need to be careful, # since the standard structured array comparison will use all # fields, masked or not. To avoid masked fields influencing the # outcome, we set all masked fields in self to other, so they'll # count as equal. To prepare, we ensure we have the right shape. broadcast_shape = np.broadcast(self, odata).shape sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True) sbroadcast._mask = mask sdata = sbroadcast.filled(odata) # Now take care of the mask; the merged mask should have an item # masked if all fields were masked (in one and/or other). mask = (mask == np.ones((), mask.dtype)) # Ensure we can compare masks below if other was not masked. if omask is np.False_: omask = np.zeros((), smask.dtype) else: # For regular arrays, just use the data as they come. sdata = self.data check = compare(sdata, odata) if isinstance(check, (np.bool, bool)): return masked if mask else check if mask is not nomask: if compare in (operator.eq, operator.ne): # Adjust elements that were masked, which should be treated # as equal if masked in both, unequal if masked in one. # Note that this works automatically for structured arrays too. # Ignore this for operations other than `==` and `!=` check = np.where(mask, compare(smask, omask), check) if mask.shape != check.shape: # Guarantee consistency of the shape, making a copy since the # the mask may need to get written to later. 
mask = np.broadcast_to(mask, check.shape).copy() check = check.view(type(self)) check._update_from(self) check._mask = mask # Cast fill value to np.bool if needed. If it cannot be cast, the # default boolean fill value is used. if check._fill_value is not None: try: fill = _check_fill_value(check._fill_value, np.bool) except (TypeError, ValueError): fill = _check_fill_value(None, np.bool) check._fill_value = fill return check def __eq__(self, other): """Check whether other equals self elementwise. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked. """ return self._comparison(other, operator.eq) def __ne__(self, other): """Check whether other does not equal self elementwise. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked. """ return self._comparison(other, operator.ne) # All other comparisons: def __le__(self, other): return self._comparison(other, operator.le) def __lt__(self, other): return self._comparison(other, operator.lt) def __ge__(self, other): return self._comparison(other, operator.ge) def __gt__(self, other): return self._comparison(other, operator.gt) def __add__(self, other): """ Add self to other, and return a new masked array. 
""" if self._delegate_binop(other): return NotImplemented return add(self, other) def __radd__(self, other): """ Add other to self, and return a new masked array. """ # In analogy with __rsub__ and __rdiv__, use original order: # we get here from `other + self`. return add(other, self) def __sub__(self, other): """ Subtract other from self, and return a new masked array. """ if self._delegate_binop(other): return NotImplemented return subtract(self, other) def __rsub__(self, other): """ Subtract self from other, and return a new masked array. """ return subtract(other, self) def __mul__(self, other): "Multiply self by other, and return a new masked array." if self._delegate_binop(other): return NotImplemented return multiply(self, other) def __rmul__(self, other): """ Multiply other by self, and return a new masked array. """ # In analogy with __rsub__ and __rdiv__, use original order: # we get here from `other * self`. return multiply(other, self) def __truediv__(self, other): """ Divide other into self, and return a new masked array. """ if self._delegate_binop(other): return NotImplemented return true_divide(self, other) def __rtruediv__(self, other): """ Divide self into other, and return a new masked array. """ return true_divide(other, self) def __floordiv__(self, other): """ Divide other into self, and return a new masked array. """ if self._delegate_binop(other): return NotImplemented return floor_divide(self, other) def __rfloordiv__(self, other): """ Divide self into other, and return a new masked array. """ return floor_divide(other, self) def __pow__(self, other): """ Raise self to the power other, masking the potential NaNs/Infs """ if self._delegate_binop(other): return NotImplemented return power(self, other) def __rpow__(self, other): """ Raise other to the power self, masking the potential NaNs/Infs """ return power(other, self) def __iadd__(self, other): """ Add other to self in-place. 
""" m = getmask(other) if self._mask is nomask: if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m elif m is not nomask: self._mask += m other_data = getdata(other) other_data = np.where(self._mask, other_data.dtype.type(0), other_data) self._data.__iadd__(other_data) return self def __isub__(self, other): """ Subtract other from self in-place. """ m = getmask(other) if self._mask is nomask: if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m elif m is not nomask: self._mask += m other_data = getdata(other) other_data = np.where(self._mask, other_data.dtype.type(0), other_data) self._data.__isub__(other_data) return self def __imul__(self, other): """ Multiply self by other in-place. """ m = getmask(other) if self._mask is nomask: if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m elif m is not nomask: self._mask += m other_data = getdata(other) other_data = np.where(self._mask, other_data.dtype.type(1), other_data) self._data.__imul__(other_data) return self def __ifloordiv__(self, other): """ Floor divide self by other in-place. """ other_data = getdata(other) dom_mask = _DomainSafeDivide().__call__(self._data, other_data) other_mask = getmask(other) new_mask = mask_or(other_mask, dom_mask) # The following 3 lines control the domain filling if dom_mask.any(): (_, fval) = ufunc_fills[np.floor_divide] other_data = np.where( dom_mask, other_data.dtype.type(fval), other_data) self._mask |= new_mask other_data = np.where(self._mask, other_data.dtype.type(1), other_data) self._data.__ifloordiv__(other_data) return self def __itruediv__(self, other): """ True divide self by other in-place. 
""" other_data = getdata(other) dom_mask = _DomainSafeDivide().__call__(self._data, other_data) other_mask = getmask(other) new_mask = mask_or(other_mask, dom_mask) # The following 3 lines control the domain filling if dom_mask.any(): (_, fval) = ufunc_fills[np.true_divide] other_data = np.where( dom_mask, other_data.dtype.type(fval), other_data) self._mask |= new_mask other_data = np.where(self._mask, other_data.dtype.type(1), other_data) self._data.__itruediv__(other_data) return self def __ipow__(self, other): """ Raise self to the power other, in place. """ other_data = getdata(other) other_data = np.where(self._mask, other_data.dtype.type(1), other_data) other_mask = getmask(other) with np.errstate(divide='ignore', invalid='ignore'): self._data.__ipow__(other_data) invalid = np.logical_not(np.isfinite(self._data)) if invalid.any(): if self._mask is not nomask: self._mask |= invalid else: self._mask = invalid np.copyto(self._data, self.fill_value, where=invalid) new_mask = mask_or(other_mask, invalid) self._mask = mask_or(self._mask, new_mask) return self def __float__(self): """ Convert to float. """ if self.size > 1: raise TypeError("Only length-1 arrays can be converted " "to Python scalars") elif self._mask: warnings.warn("Warning: converting a masked element to nan.", stacklevel=2) return np.nan return float(self.item()) def __int__(self): """ Convert to int. """ if self.size > 1: raise TypeError("Only length-1 arrays can be converted " "to Python scalars") elif self._mask: raise MaskError('Cannot convert masked element to a Python int.') return int(self.item()) @property def imag(self): """ The imaginary part of the masked array. This property is a view on the imaginary part of this `MaskedArray`. 
See Also -------- real Examples -------- >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.imag masked_array(data=[1.0, --, 1.6], mask=[False, True, False], fill_value=1e+20) """ result = self._data.imag.view(type(self)) result.__setmask__(self._mask) return result # kept for compatibility get_imag = imag.fget @property def real(self): """ The real part of the masked array. This property is a view on the real part of this `MaskedArray`. See Also -------- imag Examples -------- >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.real masked_array(data=[1.0, --, 3.45], mask=[False, True, False], fill_value=1e+20) """ result = self._data.real.view(type(self)) result.__setmask__(self._mask) return result # kept for compatibility get_real = real.fget def count(self, axis=None, keepdims=np._NoValue): """ Count the non-masked elements of the array along the given axis. Parameters ---------- axis : None or int or tuple of ints, optional Axis or axes along which the count is performed. The default, None, performs the count over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. If this is a tuple of ints, the count is performed on multiple axes, instead of a single axis or all the axes as before. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. Returns ------- result : ndarray or scalar An array with the same shape as the input array, with the specified axis removed. If the array is a 0-d array, or if `axis` is None, a scalar is returned. See Also -------- ma.count_masked : Count masked elements in array or along a given axis. 
Examples -------- >>> import numpy.ma as ma >>> a = ma.arange(6).reshape((2, 3)) >>> a[1, :] = ma.masked >>> a masked_array( data=[[0, 1, 2], [--, --, --]], mask=[[False, False, False], [ True, True, True]], fill_value=999999) >>> a.count() 3 When the `axis` keyword is specified an array of appropriate size is returned. >>> a.count(axis=0) array([1, 1, 1]) >>> a.count(axis=1) array([3, 0]) """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} m = self._mask # special case for matrices (we assume no other subclasses modify # their dimensions) if isinstance(self.data, np.matrix): if m is nomask: m = np.zeros(self.shape, dtype=np.bool) m = m.view(type(self.data)) if m is nomask: # compare to _count_reduce_items in _methods.py if self.shape == (): if axis not in (None, 0): raise np.exceptions.AxisError(axis=axis, ndim=self.ndim) return 1 elif axis is None: if kwargs.get('keepdims'): return np.array(self.size, dtype=np.intp, ndmin=self.ndim) return self.size axes = normalize_axis_tuple(axis, self.ndim) items = 1 for ax in axes: items *= self.shape[ax] if kwargs.get('keepdims'): out_dims = list(self.shape) for a in axes: out_dims[a] = 1 else: out_dims = [d for n, d in enumerate(self.shape) if n not in axes] # make sure to return a 0-d array if axis is supplied return np.full(out_dims, items, dtype=np.intp) # take care of the masked singleton if self is masked: return 0 return (~m).sum(axis=axis, dtype=np.intp, **kwargs) def ravel(self, order='C'): """ Returns a 1D version of self, as a view. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional The elements of `a` are read using this index order. 'C' means to index the elements in C-like order, with the last axis index changing fastest, back to the first axis index changing slowest. 'F' means to index the elements in Fortran-like index order, with the first index changing fastest, and the last index changing slowest. 
Note that the 'C' and 'F' options take no account of the memory layout of the underlying array, and only refer to the order of axis indexing. 'A' means to read the elements in Fortran-like index order if `m` is Fortran *contiguous* in memory, C-like order otherwise. 'K' means to read the elements in the order they occur in memory, except for reversing the data when strides are negative. By default, 'C' index order is used. (Masked arrays currently use 'A' on the data when 'K' is passed.) Returns ------- MaskedArray Output view is of shape ``(self.size,)`` (or ``(np.ma.product(self.shape),)``). Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.ravel() masked_array(data=[1, --, 3, --, 5, --, 7, --, 9], mask=[False, True, False, True, False, True, False, True, False], fill_value=999999) """ # The order of _data and _mask could be different (it shouldn't be # normally). Passing order `K` or `A` would be incorrect. # So we ignore the mask memory order. # TODO: We don't actually support K, so use A instead. We could # try to guess this correct by sorting strides or deprecate. if order in "kKaA": order = "F" if self._data.flags.fnc else "C" r = ndarray.ravel(self._data, order=order).view(type(self)) r._update_from(self) if self._mask is not nomask: r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape) else: r._mask = nomask return r def reshape(self, *s, **kwargs): """ Give a new shape to the array without changing its data. Returns a masked array containing the same data, but with a new shape. The result is a view on the original array; if this is not possible, a ValueError is raised. Parameters ---------- shape : int or tuple of ints The new shape should be compatible with the original shape. 
If an integer is supplied, then the result will be a 1-D array of that length. order : {'C', 'F'}, optional Determines whether the array data should be viewed as in C (row-major) or FORTRAN (column-major) order. Returns ------- reshaped_array : array A new view on the array. See Also -------- reshape : Equivalent function in the masked array module. numpy.ndarray.reshape : Equivalent method on ndarray object. numpy.reshape : Equivalent function in the NumPy module. Notes ----- The reshaping operation cannot guarantee that a copy will not be made, to modify the shape in place, use ``a.shape = s`` Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) >>> x masked_array( data=[[--, 2], [3, --]], mask=[[ True, False], [False, True]], fill_value=999999) >>> x = x.reshape((4,1)) >>> x masked_array( data=[[--], [2], [3], [--]], mask=[[ True], [False], [False], [ True]], fill_value=999999) """ result = self._data.reshape(*s, **kwargs).view(type(self)) result._update_from(self) mask = self._mask if mask is not nomask: result._mask = mask.reshape(*s, **kwargs) return result def resize(self, newshape, refcheck=True, order=False): """ .. warning:: This method does nothing, except raise a ValueError exception. A masked array does not own its data and therefore cannot safely be resized in place. Use the `numpy.ma.resize` function instead. This method is difficult to implement safely and may be deprecated in future releases of NumPy. """ # Note : the 'order' keyword looks broken, let's just drop it errmsg = "A masked array does not own its data "\ "and therefore cannot be resized.\n" \ "Use the numpy.ma.resize function instead." raise ValueError(errmsg) def put(self, indices, values, mode='raise'): """ Set storage-indexed locations to corresponding values. Sets self._data.flat[n] = values[n] for each n in indices. If `values` is shorter than `indices` then it will repeat. 
If `values` has some masked values, the initial mask is updated in consequence, else the corresponding values are unmasked. Parameters ---------- indices : 1-D array_like Target indices, interpreted as integers. values : array_like Values to place in self._data copy at target indices. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. 'raise' : raise an error. 'wrap' : wrap around. 'clip' : clip to the range. Notes ----- `values` can be a scalar or length 1 array. Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.put([0,4,8],[10,20,30]) >>> x masked_array( data=[[10, --, 3], [--, 20, --], [7, --, 30]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.put(4,999) >>> x masked_array( data=[[10, --, 3], [--, 999, --], [7, --, 30]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) """ # Hard mask: Get rid of the values/indices that fall on masked data if self._hardmask and self._mask is not nomask: mask = self._mask[indices] indices = narray(indices, copy=None) values = narray(values, copy=None, subok=True) values.resize(indices.shape) indices = indices[~mask] values = values[~mask] self._data.put(indices, values, mode=mode) # short circuit if neither self nor values are masked if self._mask is nomask and getmask(values) is nomask: return m = getmaskarray(self) if getmask(values) is nomask: m.put(indices, False, mode=mode) else: m.put(indices, values._mask, mode=mode) m = make_mask(m, copy=False, shrink=True) self._mask = m return def ids(self): """ Return the addresses of the data and mask areas. 
        Parameters
        ----------
        None

        Examples
        --------
        >>> import numpy as np
        >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
        >>> x.ids()
        (166670640, 166659832) # may vary

        If the array has no mask, the address of `nomask` is returned. This address
        is typically not close to the data in memory:

        >>> x = np.ma.array([1, 2, 3])
        >>> x.ids()
        (166691080, 3083169284) # may vary

        """
        if self._mask is nomask:
            # Unmasked array: the second address is the id of the shared
            # `nomask` singleton, not of a per-array mask buffer.
            return (self.ctypes.data, id(nomask))
        return (self.ctypes.data, self._mask.ctypes.data)

    def iscontiguous(self):
        """
        Return a boolean indicating whether the data is contiguous.

        Parameters
        ----------
        None

        Examples
        --------
        >>> import numpy as np
        >>> x = np.ma.array([1, 2, 3])
        >>> x.iscontiguous()
        True

        `iscontiguous` returns one of the flags of the masked array:

        >>> x.flags
          C_CONTIGUOUS : True
          F_CONTIGUOUS : True
          OWNDATA : False
          WRITEABLE : True
          ALIGNED : True
          WRITEBACKIFCOPY : False
        """
        # Only the data buffer is inspected; the mask's memory layout
        # is not taken into account.
        return self.flags['CONTIGUOUS']

    def all(self, axis=None, out=None, keepdims=np._NoValue):
        """
        Returns True if all elements evaluate to True.

        The output array is masked where all the values along the given axis
        are masked: if the output would have been a scalar and that all the
        values are masked, then the output is `masked`.

        Refer to `numpy.all` for full documentation.
        See Also
        --------
        numpy.ndarray.all : corresponding function for ndarrays
        numpy.all : equivalent function

        Examples
        --------
        >>> import numpy as np
        >>> np.ma.array([1,2,3]).all()
        True
        >>> a = np.ma.array([1,2,3], mask=True)
        >>> (a.all() is np.ma.masked)
        True

        """
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}

        # Reduce the mask along the same axis: an output element is masked
        # only when every input element contributing to it is masked.
        mask = _check_mask_axis(self._mask, axis, **kwargs)
        if out is None:
            # Masked entries are treated as True, the identity for `all`.
            d = self.filled(True).all(axis=axis, **kwargs).view(type(self))
            if d.ndim:
                d.__setmask__(mask)
            elif mask:
                return masked
            return d
        # Explicit output: compute into `out`, then transfer the reduced mask.
        self.filled(True).all(axis=axis, out=out, **kwargs)
        if isinstance(out, MaskedArray):
            if out.ndim or mask:
                out.__setmask__(mask)
        return out

    def any(self, axis=None, out=None, keepdims=np._NoValue):
        """
        Returns True if any of the elements of `a` evaluate to True.

        Masked values are considered as False during computation.

        Refer to `numpy.any` for full documentation.

        See Also
        --------
        numpy.ndarray.any : corresponding function for ndarrays
        numpy.any : equivalent function
        """
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}

        # An output element is masked only when all its inputs are masked.
        mask = _check_mask_axis(self._mask, axis, **kwargs)
        if out is None:
            # Masked entries are treated as False, the identity for `any`.
            d = self.filled(False).any(axis=axis, **kwargs).view(type(self))
            if d.ndim:
                d.__setmask__(mask)
            elif mask:
                d = masked
            return d
        # Explicit output: compute into `out`, then transfer the reduced mask.
        self.filled(False).any(axis=axis, out=out, **kwargs)
        if isinstance(out, MaskedArray):
            if out.ndim or mask:
                out.__setmask__(mask)
        return out

    def nonzero(self):
        """
        Return the indices of unmasked elements that are not zero.

        Returns a tuple of arrays, one for each dimension, containing the
        indices of the non-zero elements in that dimension. The corresponding
        non-zero values can be obtained with::

            a[a.nonzero()]

        To group the indices by element, rather than dimension, use
        instead::

            np.transpose(a.nonzero())

        The result of this is always a 2d array, with a row for each non-zero
        element.

        Parameters
        ----------
        None

        Returns
        -------
        tuple_of_arrays : tuple
            Indices of elements that are non-zero.
See Also -------- numpy.nonzero : Function operating on ndarrays. flatnonzero : Return indices that are non-zero in the flattened version of the input array. numpy.ndarray.nonzero : Equivalent ndarray method. count_nonzero : Counts the number of non-zero elements in the input array. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array(np.eye(3)) >>> x masked_array( data=[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], mask=False, fill_value=1e+20) >>> x.nonzero() (array([0, 1, 2]), array([0, 1, 2])) Masked elements are ignored. >>> x[1, 1] = ma.masked >>> x masked_array( data=[[1.0, 0.0, 0.0], [0.0, --, 0.0], [0.0, 0.0, 1.0]], mask=[[False, False, False], [False, True, False], [False, False, False]], fill_value=1e+20) >>> x.nonzero() (array([0, 2]), array([0, 2])) Indices can also be grouped by element. >>> np.transpose(x.nonzero()) array([[0, 0], [2, 2]]) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. Given an array `a`, the condition `a` > 3 is a boolean array and since False is interpreted as 0, ma.nonzero(a > 3) yields the indices of the `a` where the condition is true. >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) >>> a > 3 masked_array( data=[[False, False, False], [ True, True, True], [ True, True, True]], mask=False, fill_value=True) >>> ma.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) The ``nonzero`` method of the condition array can also be called. >>> (a > 3).nonzero() (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) """ return np.asarray(self.filled(0)).nonzero() def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ (this docstring should be overwritten) """ # !!!: implement out + test! 
        m = self._mask
        if m is nomask:
            # No mask: defer to the plain ndarray implementation and only
            # adjust the dtype of the result.
            result = super().trace(offset=offset, axis1=axis1, axis2=axis2,
                                   out=out)
            return result.astype(dtype)
        else:
            # Masked case: extract the diagonal (which carries the mask),
            # zero-fill the masked entries, and sum along the last axis so
            # that masked diagonal elements do not contribute.
            D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
            return D.astype(dtype).filled(0).sum(axis=-1, out=out)
    trace.__doc__ = ndarray.trace.__doc__

    def dot(self, b, out=None, strict=False):
        """
        a.dot(b, out=None)

        Masked dot product of two arrays. Note that `out` and `strict` are
        located in different positions than in `ma.dot`. In order to
        maintain compatibility with the functional version, it is
        recommended that the optional arguments be treated as keyword only.
        At some point that may be mandatory.

        Parameters
        ----------
        b : masked_array_like
            Inputs array.
        out : masked_array, optional
            Output argument. This must have the exact kind that would be
            returned if it was not used. In particular, it must have the
            right type, must be C-contiguous, and its dtype must be the
            dtype that would be returned for `ma.dot(a,b)`. This is a
            performance feature. Therefore, if these conditions are not
            met, an exception is raised, instead of attempting to be
            flexible.
        strict : bool, optional
            Whether masked data are propagated (True) or set to 0 (False)
            for the computation. Default is False.  Propagating the mask
            means that if a masked value appears in a row or column, the
            whole row or column is considered masked.

        See Also
        --------
        numpy.ma.dot : equivalent function
        """
        # Thin wrapper around the module-level `dot`, which implements the
        # mask-propagation logic.
        return dot(self, b, out=out, strict=strict)

    def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
        """
        Return the sum of the array elements over the given axis.

        Masked elements are set to 0 internally.

        Refer to `numpy.sum` for full documentation.
See Also -------- numpy.ndarray.sum : corresponding function for ndarrays numpy.sum : equivalent function Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.sum() 25 >>> x.sum(axis=1) masked_array(data=[4, 5, 16], mask=[False, False, False], fill_value=999999) >>> x.sum(axis=0) masked_array(data=[8, 5, 12], mask=[False, False, False], fill_value=999999) >>> print(type(x.sum(axis=0, dtype=np.int64)[0])) <class 'numpy.int64'> """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} _mask = self._mask newmask = _check_mask_axis(_mask, axis, **kwargs) # No explicit output if out is None: result = self.filled(0).sum(axis, dtype=dtype, **kwargs) rndim = getattr(result, 'ndim', 0) if rndim: result = result.view(type(self)) result.__setmask__(newmask) elif newmask: result = masked return result # Explicit output result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask return out def cumsum(self, axis=None, dtype=None, out=None): """ Return the cumulative sum of the array elements over the given axis. Masked values are set to 0 internally during the computation. However, their position is saved, and the result will be masked at the same locations. Refer to `numpy.cumsum` for full documentation. Notes ----- The mask is lost if `out` is not a valid :class:`ma.MaskedArray` ! Arithmetic is modular when using integer types, and no error is raised on overflow. 
        See Also
        --------
        numpy.ndarray.cumsum : corresponding function for ndarrays
        numpy.cumsum : equivalent function

        Examples
        --------
        >>> import numpy as np
        >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
        >>> marr.cumsum()
        masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33],
                     mask=[False, False, False,  True,  True,  True, False,
                           False, False, False],
               fill_value=999999)

        """
        # Masked entries contribute 0 to the running sum; the original mask
        # is re-applied to the result afterwards, so masked positions stay
        # masked even though they were summed through.
        result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
        if out is not None:
            if isinstance(out, MaskedArray):
                out.__setmask__(self.mask)
            return out
        result = result.view(type(self))
        result.__setmask__(self._mask)
        return result

    def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
        """
        Return the product of the array elements over the given axis.

        Masked elements are set to 1 internally for computation.

        Refer to `numpy.prod` for full documentation.

        Notes
        -----
        Arithmetic is modular when using integer types, and no error is
        raised on overflow.

        See Also
        --------
        numpy.ndarray.prod : corresponding function for ndarrays
        numpy.prod : equivalent function
        """
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}

        _mask = self._mask
        # An output element is masked only when every input element
        # contributing to it is masked.
        newmask = _check_mask_axis(_mask, axis, **kwargs)
        # No explicit output
        if out is None:
            # Masked entries are replaced by 1, the identity for a product.
            result = self.filled(1).prod(axis, dtype=dtype, **kwargs)
            rndim = getattr(result, 'ndim', 0)
            if rndim:
                result = result.view(type(self))
                result.__setmask__(newmask)
            elif newmask:
                # Fully-masked scalar reduction yields the `masked` singleton.
                result = masked
            return result
        # Explicit output
        result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)
        if isinstance(out, MaskedArray):
            outmask = getmask(out)
            if outmask is nomask:
                outmask = out._mask = make_mask_none(out.shape)
            outmask.flat = newmask
        return out
    product = prod

    def cumprod(self, axis=None, dtype=None, out=None):
        """
        Return the cumulative product of the array elements over the given axis.

        Masked values are set to 1 internally during the computation.
        However, their position is saved, and the result will be masked at
        the same locations.
Refer to `numpy.cumprod` for full documentation. Notes ----- The mask is lost if `out` is not a valid MaskedArray ! Arithmetic is modular when using integer types, and no error is raised on overflow. See Also -------- numpy.ndarray.cumprod : corresponding function for ndarrays numpy.cumprod : equivalent function """ result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) if out is not None: if isinstance(out, MaskedArray): out.__setmask__(self._mask) return out result = result.view(type(self)) result.__setmask__(self._mask) return result def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Returns the average of the array elements along given axis. Masked entries are ignored, and result elements which are not finite will be masked. Refer to `numpy.mean` for full documentation. See Also -------- numpy.ndarray.mean : corresponding function for ndarrays numpy.mean : Equivalent function numpy.ma.average : Weighted average. Examples -------- >>> import numpy as np >>> a = np.ma.array([1,2,3], mask=[False, False, True]) >>> a masked_array(data=[1, 2, --], mask=[False, False, True], fill_value=999999) >>> a.mean() 1.5 """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} if self._mask is nomask: result = super().mean(axis=axis, dtype=dtype, **kwargs)[()] else: is_float16_result = False if dtype is None: if issubclass(self.dtype.type, (ntypes.integer, ntypes.bool)): dtype = mu.dtype('f8') elif issubclass(self.dtype.type, ntypes.float16): dtype = mu.dtype('f4') is_float16_result = True dsum = self.sum(axis=axis, dtype=dtype, **kwargs) cnt = self.count(axis=axis, **kwargs) if cnt.shape == () and (cnt == 0): result = masked elif is_float16_result: result = self.dtype.type(dsum * 1. / cnt) else: result = dsum * 1. 
/ cnt if out is not None: out.flat = result if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: outmask = out._mask = make_mask_none(out.shape) outmask.flat = getmask(result) return out return result def anom(self, axis=None, dtype=None): """ Compute the anomalies (deviations from the arithmetic mean) along the given axis. Returns an array of anomalies, with the same shape as the input and where the arithmetic mean is computed along the given axis. Parameters ---------- axis : int, optional Axis over which the anomalies are taken. The default is to use the mean of the flattened array as reference. dtype : dtype, optional Type to use in computing the variance. For arrays of integer type the default is float32; for arrays of float types it is the same as the array type. See Also -------- mean : Compute the mean of the array. Examples -------- >>> import numpy as np >>> a = np.ma.array([1,2,3]) >>> a.anom() masked_array(data=[-1., 0., 1.], mask=False, fill_value=1e+20) """ m = self.mean(axis, dtype) if not axis: return self - m else: return self - expand_dims(m, axis) def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, mean=np._NoValue): """ Returns the variance of the array elements along given axis. Masked entries are ignored, and result elements which are not finite will be masked. Refer to `numpy.var` for full documentation. See Also -------- numpy.ndarray.var : corresponding function for ndarrays numpy.var : Equivalent function """ kwargs = {} if keepdims is not np._NoValue: kwargs['keepdims'] = keepdims # Easy case: nomask, business as usual if self._mask is nomask: if mean is not np._NoValue: kwargs['mean'] = mean ret = super().var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)[()] if out is not None: if isinstance(out, MaskedArray): out.__setmask__(nomask) return out return ret # Some data are masked, yay! 
        cnt = self.count(axis=axis, **kwargs) - ddof

        # Deviations from the (supplied or computed) mean.
        if mean is not np._NoValue:
            danom = self - mean
        else:
            danom = self - self.mean(axis, dtype, keepdims=True)

        if iscomplexobj(self):
            # For complex data, use |x - mean|^2.
            danom = umath.absolute(danom) ** 2
        else:
            danom *= danom
        dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
        # Apply the mask if it's not a scalar
        if dvar.ndim:
            # Mask positions where every input was masked or where the
            # effective count (after ddof) is not positive.
            dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
            dvar._update_from(self)
        elif getmask(dvar):
            # Make sure that masked is returned when the scalar is masked.
            dvar = masked
            if out is not None:
                if isinstance(out, MaskedArray):
                    out.flat = 0
                    out.__setmask__(True)
                elif out.dtype.kind in 'biu':
                    # Integer/boolean outputs cannot represent a masked
                    # scalar result.
                    errmsg = "Masked data information would be lost in one or "\
                             "more location."
                    raise MaskError(errmsg)
                else:
                    out.flat = np.nan
                return out
        # In case we have an explicit output
        if out is not None:
            # Set the data
            out.flat = dvar
            # Set the mask if needed
            if isinstance(out, MaskedArray):
                out.__setmask__(dvar.mask)
            return out
        return dvar
    var.__doc__ = np.var.__doc__

    def std(self, axis=None, dtype=None, out=None, ddof=0,
            keepdims=np._NoValue, mean=np._NoValue):
        """
        Returns the standard deviation of the array elements along given axis.

        Masked entries are ignored.

        Refer to `numpy.std` for full documentation.

        See Also
        --------
        numpy.ndarray.std : corresponding function for ndarrays
        numpy.std : Equivalent function
        """
        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}

        # std = sqrt(var); when `out` is supplied, `var` has already written
        # into it, so take the square root in place.
        dvar = self.var(axis, dtype, out, ddof, **kwargs)
        if dvar is not masked:
            if out is not None:
                np.power(out, 0.5, out=out, casting='unsafe')
                return out
            dvar = sqrt(dvar)
        return dvar

    def round(self, decimals=0, out=None):
        """
        Return each element rounded to the given number of decimals.

        Refer to `numpy.around` for full documentation.
See Also -------- numpy.ndarray.round : corresponding function for ndarrays numpy.around : equivalent function Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array([1.35, 2.5, 1.5, 1.75, 2.25, 2.75], ... mask=[0, 0, 0, 1, 0, 0]) >>> ma.round(x) masked_array(data=[1.0, 2.0, 2.0, --, 2.0, 3.0], mask=[False, False, False, True, False, False], fill_value=1e+20) """ result = self._data.round(decimals=decimals, out=out).view(type(self)) if result.ndim > 0: result._mask = self._mask result._update_from(self) elif self._mask: # Return masked when the scalar is masked result = masked # No explicit output: we're done if out is None: return result if isinstance(out, MaskedArray): out.__setmask__(self._mask) return out def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None, *, stable=False): """ Return an ndarray of indices that sort the array along the specified axis. Masked values are filled beforehand to `fill_value`. Parameters ---------- axis : int, optional Axis along which to sort. If None, the default, the flattened array is used. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional The sorting algorithm used. order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. endwith : {True, False}, optional Whether missing values (if any) should be treated as the largest values (True) or the smallest values (False) When the array contains unmasked values at the same extremes of the datatype, the ordering of these values and the masked values is undefined. fill_value : scalar or None, optional Value used internally for the masked values. If ``fill_value`` is not None, it supersedes ``endwith``. stable : bool, optional Only for compatibility with ``np.argsort``. Ignored. Returns ------- index_array : ndarray, int Array of indices that sort `a` along the specified axis. 
In other words, ``a[index_array]`` yields a sorted `a`. See Also -------- ma.MaskedArray.sort : Describes sorting algorithms used. lexsort : Indirect stable sort with multiple keys. numpy.ndarray.sort : Inplace sort. Notes ----- See `sort` for notes on the different sorting algorithms. Examples -------- >>> import numpy as np >>> a = np.ma.array([3,2,1], mask=[False, False, True]) >>> a masked_array(data=[3, 2, --], mask=[False, False, True], fill_value=999999) >>> a.argsort() array([1, 0, 2]) """ if stable: raise ValueError( "`stable` parameter is not supported for masked arrays." ) # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default if axis is np._NoValue: axis = _deprecate_argsort_axis(self) if fill_value is None: if endwith: # nan > inf if np.issubdtype(self.dtype, np.floating): fill_value = np.nan else: fill_value = minimum_fill_value(self) else: fill_value = maximum_fill_value(self) filled = self.filled(fill_value) return filled.argsort(axis=axis, kind=kind, order=order) def argmin(self, axis=None, fill_value=None, out=None, *, keepdims=np._NoValue): """ Return array of indices to the minimum values along the given axis. Parameters ---------- axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis fill_value : scalar or None, optional Value used to fill in the masked values. If None, the output of minimum_fill_value(self._data) is used instead. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. Returns ------- ndarray or scalar If multi-dimension input, returns a new ndarray of indices to the minimum values along the given axis. Otherwise, returns a scalar of index to the minimum values along the given axis. 
Examples -------- >>> import numpy as np >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) >>> x.shape = (2,2) >>> x masked_array( data=[[--, --], [2, 3]], mask=[[ True, True], [False, False]], fill_value=999999) >>> x.argmin(axis=0, fill_value=-1) array([0, 0]) >>> x.argmin(axis=0, fill_value=9) array([1, 1]) """ if fill_value is None: fill_value = minimum_fill_value(self) d = self.filled(fill_value).view(ndarray) keepdims = False if keepdims is np._NoValue else bool(keepdims) return d.argmin(axis, out=out, keepdims=keepdims) def argmax(self, axis=None, fill_value=None, out=None, *, keepdims=np._NoValue): """ Returns array of indices of the maximum values along the given axis. Masked values are treated as if they had the value fill_value. Parameters ---------- axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis fill_value : scalar or None, optional Value used to fill in the masked values. If None, the output of maximum_fill_value(self._data) is used instead. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. Returns ------- index_array : {integer_array} Examples -------- >>> import numpy as np >>> a = np.arange(6).reshape(2,3) >>> a.argmax() 5 >>> a.argmax(0) array([1, 1, 1]) >>> a.argmax(1) array([2, 2]) """ if fill_value is None: fill_value = maximum_fill_value(self._data) d = self.filled(fill_value).view(ndarray) keepdims = False if keepdims is np._NoValue else bool(keepdims) return d.argmax(axis, out=out, keepdims=keepdims) def sort(self, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *, stable=False): """ Sort the array, in-place Parameters ---------- a : array_like Array to be sorted. axis : int, optional Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional The sorting algorithm used. order : list, optional When `a` is a structured array, this argument specifies which fields to compare first, second, and so on. This list does not need to include all of the fields. endwith : {True, False}, optional Whether missing values (if any) should be treated as the largest values (True) or the smallest values (False) When the array contains unmasked values sorting at the same extremes of the datatype, the ordering of these values and the masked values is undefined. fill_value : scalar or None, optional Value used internally for the masked values. If ``fill_value`` is not None, it supersedes ``endwith``. stable : bool, optional Only for compatibility with ``np.sort``. Ignored. See Also -------- numpy.ndarray.sort : Method to sort an array in-place. argsort : Indirect sort. lexsort : Indirect stable sort on multiple keys. searchsorted : Find elements in a sorted array. Notes ----- See ``sort`` for notes on the different sorting algorithms. Examples -------- >>> import numpy as np >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Default >>> a.sort() >>> a masked_array(data=[1, 3, 5, --, --], mask=[False, False, False, True, True], fill_value=999999) >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Put missing values in the front >>> a.sort(endwith=False) >>> a masked_array(data=[--, --, 1, 3, 5], mask=[ True, True, False, False, False], fill_value=999999) >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # fill_value takes over endwith >>> a.sort(endwith=False, fill_value=3) >>> a masked_array(data=[1, --, --, 3, 5], mask=[False, True, True, False, False], fill_value=999999) """ if stable: raise ValueError( "`stable` parameter is not supported for masked arrays." 
) if self._mask is nomask: ndarray.sort(self, axis=axis, kind=kind, order=order) return if self is masked: return sidx = self.argsort(axis=axis, kind=kind, order=order, fill_value=fill_value, endwith=endwith) self[...] = np.take_along_axis(self, sidx, axis=axis) def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): """ Return the minimum along a given axis. Parameters ---------- axis : None or int or tuple of ints, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. If this is a tuple of ints, the minimum is selected over multiple axes, instead of a single axis or all the axes as before. out : array_like, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. fill_value : scalar or None, optional Value used to fill in the masked values. If None, use the output of `minimum_fill_value`. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. Returns ------- amin : array_like New array holding the result. If ``out`` was specified, ``out`` is returned. See Also -------- ma.minimum_fill_value Returns the minimum filling value for a given datatype. 
Examples -------- >>> import numpy.ma as ma >>> x = [[1., -2., 3.], [0.2, -0.7, 0.1]] >>> mask = [[1, 1, 0], [0, 0, 1]] >>> masked_x = ma.masked_array(x, mask) >>> masked_x masked_array( data=[[--, --, 3.0], [0.2, -0.7, --]], mask=[[ True, True, False], [False, False, True]], fill_value=1e+20) >>> ma.min(masked_x) -0.7 >>> ma.min(masked_x, axis=-1) masked_array(data=[3.0, -0.7], mask=[False, False], fill_value=1e+20) >>> ma.min(masked_x, axis=0, keepdims=True) masked_array(data=[[0.2, -0.7, 3.0]], mask=[[False, False, False]], fill_value=1e+20) >>> mask = [[1, 1, 1,], [1, 1, 1]] >>> masked_x = ma.masked_array(x, mask) >>> ma.min(masked_x, axis=0) masked_array(data=[--, --, --], mask=[ True, True, True], fill_value=1e+20, dtype=float64) """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} _mask = self._mask newmask = _check_mask_axis(_mask, axis, **kwargs) if fill_value is None: fill_value = minimum_fill_value(self) # No explicit output if out is None: result = self.filled(fill_value).min( axis=axis, out=out, **kwargs).view(type(self)) if result.ndim: # Set the mask result.__setmask__(newmask) # Get rid of Infs if newmask.ndim: np.copyto(result, result.fill_value, where=newmask) elif newmask: result = masked return result # Explicit output self.filled(fill_value).min(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask else: if out.dtype.kind in 'biu': errmsg = "Masked data information would be lost in one or more"\ " location." raise MaskError(errmsg) np.copyto(out, np.nan, where=newmask) return out def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): """ Return the maximum along a given axis. Parameters ---------- axis : None or int or tuple of ints, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. 
If this is a tuple of ints, the maximum is selected over multiple axes, instead of a single axis or all the axes as before. out : array_like, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. fill_value : scalar or None, optional Value used to fill in the masked values. If None, use the output of maximum_fill_value(). keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. Returns ------- amax : array_like New array holding the result. If ``out`` was specified, ``out`` is returned. See Also -------- ma.maximum_fill_value Returns the maximum filling value for a given datatype. Examples -------- >>> import numpy.ma as ma >>> x = [[-1., 2.5], [4., -2.], [3., 0.]] >>> mask = [[0, 0], [1, 0], [1, 0]] >>> masked_x = ma.masked_array(x, mask) >>> masked_x masked_array( data=[[-1.0, 2.5], [--, -2.0], [--, 0.0]], mask=[[False, False], [ True, False], [ True, False]], fill_value=1e+20) >>> ma.max(masked_x) 2.5 >>> ma.max(masked_x, axis=0) masked_array(data=[-1.0, 2.5], mask=[False, False], fill_value=1e+20) >>> ma.max(masked_x, axis=1, keepdims=True) masked_array( data=[[2.5], [-2.0], [0.0]], mask=[[False], [False], [False]], fill_value=1e+20) >>> mask = [[1, 1], [1, 1], [1, 1]] >>> masked_x = ma.masked_array(x, mask) >>> ma.max(masked_x, axis=1) masked_array(data=[--, --, --], mask=[ True, True, True], fill_value=1e+20, dtype=float64) """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} _mask = self._mask newmask = _check_mask_axis(_mask, axis, **kwargs) if fill_value is None: fill_value = maximum_fill_value(self) # No explicit output if out is None: result = self.filled(fill_value).max( axis=axis, out=out, **kwargs).view(type(self)) if result.ndim: # Set the mask result.__setmask__(newmask) # Get rid of Infs if newmask.ndim: 
np.copyto(result, result.fill_value, where=newmask) elif newmask: result = masked return result # Explicit output self.filled(fill_value).max(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask else: if out.dtype.kind in 'biu': errmsg = "Masked data information would be lost in one or more"\ " location." raise MaskError(errmsg) np.copyto(out, np.nan, where=newmask) return out def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): """ Return (maximum - minimum) along the given dimension (i.e. peak-to-peak value). .. warning:: `ptp` preserves the data type of the array. This means the return value for an input of signed integers with n bits (e.g. `np.int8`, `np.int16`, etc) is also a signed integer with n bits. In that case, peak-to-peak values greater than ``2**(n-1)-1`` will be returned as negative values. An example with a work-around is shown below. Parameters ---------- axis : {None, int}, optional Axis along which to find the peaks. If None (default) the flattened array is used. out : {None, array_like}, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. fill_value : scalar or None, optional Value used to fill in the masked values. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. Returns ------- ptp : ndarray. A new array holding the result, unless ``out`` was specified, in which case a reference to ``out`` is returned. Examples -------- >>> import numpy as np >>> x = np.ma.MaskedArray([[4, 9, 2, 10], ... 
[6, 9, 7, 12]]) >>> x.ptp(axis=1) masked_array(data=[8, 6], mask=False, fill_value=999999) >>> x.ptp(axis=0) masked_array(data=[2, 0, 5, 2], mask=False, fill_value=999999) >>> x.ptp() 10 This example shows that a negative value can be returned when the input is an array of signed integers. >>> y = np.ma.MaskedArray([[1, 127], ... [0, 127], ... [-1, 127], ... [-2, 127]], dtype=np.int8) >>> y.ptp(axis=1) masked_array(data=[ 126, 127, -128, -127], mask=False, fill_value=np.int64(999999), dtype=int8) A work-around is to use the `view()` method to view the result as unsigned integers with the same bit width: >>> y.ptp(axis=1).view(np.uint8) masked_array(data=[126, 127, 128, 129], mask=False, fill_value=np.uint64(999999), dtype=uint8) """ if out is None: result = self.max(axis=axis, fill_value=fill_value, keepdims=keepdims) result -= self.min(axis=axis, fill_value=fill_value, keepdims=keepdims) return result out.flat = self.max(axis=axis, out=out, fill_value=fill_value, keepdims=keepdims) min_value = self.min(axis=axis, fill_value=fill_value, keepdims=keepdims) np.subtract(out, min_value, out=out, casting='unsafe') return out def partition(self, *args, **kwargs): warnings.warn("Warning: 'partition' will ignore the 'mask' " f"of the {self.__class__.__name__}.", stacklevel=2) return super().partition(*args, **kwargs) def argpartition(self, *args, **kwargs): warnings.warn("Warning: 'argpartition' will ignore the 'mask' " f"of the {self.__class__.__name__}.", stacklevel=2) return super().argpartition(*args, **kwargs) def take(self, indices, axis=None, out=None, mode='raise'): """ Take elements from a masked array along an axis. This function does the same thing as "fancy" indexing (indexing arrays using arrays) for masked arrays. It can be easier to use if you need elements along a given axis. Parameters ---------- a : masked_array The source masked array. indices : array_like The indices of the values to extract. Also allow scalars for indices. 
axis : int, optional The axis over which to select values. By default, the flattened input array is used. out : MaskedArray, optional If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. Note that `out` is always buffered if `mode='raise'`; use other modes for better performance. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. Returns ------- out : MaskedArray The returned array has the same type as `a`. See Also -------- numpy.take : Equivalent function for ndarrays. compress : Take elements using a boolean mask. take_along_axis : Take elements by matching the array and the index arrays. Notes ----- This function behaves similarly to `numpy.take`, but it handles masked values. The mask is retained in the output array, and masked values in the input array remain masked in the output. Examples -------- >>> import numpy as np >>> a = np.ma.array([4, 3, 5, 7, 6, 8], mask=[0, 0, 1, 0, 1, 0]) >>> indices = [0, 1, 4] >>> np.ma.take(a, indices) masked_array(data=[4, 3, --], mask=[False, False, True], fill_value=999999) When `indices` is not one-dimensional, the output also has these dimensions: >>> np.ma.take(a, [[0, 1], [2, 3]]) masked_array(data=[[4, 3], [--, 7]], mask=[[False, False], [ True, False]], fill_value=999999) """ (_data, _mask) = (self._data, self._mask) cls = type(self) # Make sure the indices are not masked maskindices = getmask(indices) if maskindices is not nomask: indices = indices.filled(0) # Get the data, promoting scalars to 0d arrays with [...] 
so that # .view works correctly if out is None: out = _data.take(indices, axis=axis, mode=mode)[...].view(cls) else: np.take(_data, indices, axis=axis, mode=mode, out=out) # Get the mask if isinstance(out, MaskedArray): if _mask is nomask: outmask = maskindices else: outmask = _mask.take(indices, axis=axis, mode=mode) outmask |= maskindices out.__setmask__(outmask) # demote 0d arrays back to scalars, for consistency with ndarray.take return out[()] # Array methods copy = _arraymethod('copy') diagonal = _arraymethod('diagonal') flatten = _arraymethod('flatten') repeat = _arraymethod('repeat') squeeze = _arraymethod('squeeze') swapaxes = _arraymethod('swapaxes') T = property(fget=lambda self: self.transpose()) transpose = _arraymethod('transpose') @property def mT(self): """ Return the matrix-transpose of the masked array. The matrix transpose is the transpose of the last two dimensions, even if the array is of higher dimension. .. versionadded:: 2.0 Returns ------- result: MaskedArray The masked array with the last two dimensions transposed Raises ------ ValueError If the array is of dimension less than 2. See Also -------- ndarray.mT: Equivalent method for arrays """ if self.ndim < 2: raise ValueError("matrix transpose with ndim < 2 is undefined") if self._mask is nomask: return masked_array(data=self._data.mT) else: return masked_array(data=self.data.mT, mask=self.mask.mT) def tolist(self, fill_value=None): """ Return the data portion of the masked array as a hierarchical Python list. Data items are converted to the nearest compatible Python type. Masked values are converted to `fill_value`. If `fill_value` is None, the corresponding entries in the output list will be ``None``. Parameters ---------- fill_value : scalar, optional The value to use for invalid entries. Default is None. Returns ------- result : list The Python list representation of the masked array. 
Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) >>> x.tolist() [[1, None, 3], [None, 5, None], [7, None, 9]] >>> x.tolist(-999) [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] """ _mask = self._mask # No mask ? Just return .data.tolist ? if _mask is nomask: return self._data.tolist() # Explicit fill_value: fill the array and get the list if fill_value is not None: return self.filled(fill_value).tolist() # Structured array. names = self.dtype.names if names: result = self._data.astype([(_, object) for _ in names]) for n in names: result[n][_mask[n]] = None return result.tolist() # Standard arrays. if _mask is nomask: return [None] # Set temps to save time when dealing w/ marrays. inishape = self.shape result = np.array(self._data.ravel(), dtype=object) result[_mask.ravel()] = None result.shape = inishape return result.tolist() def tobytes(self, fill_value=None, order='C'): """ Return the array data as a string containing the raw bytes in the array. The array is filled with a fill value before the string conversion. Parameters ---------- fill_value : scalar, optional Value used to fill in the masked values. Default is None, in which case `MaskedArray.fill_value` is used. order : {'C','F','A'}, optional Order of the data item in the copy. Default is 'C'. - 'C' -- C order (row major). - 'F' -- Fortran order (column major). - 'A' -- Any, current order of array. - None -- Same as 'A'. See Also -------- numpy.ndarray.tobytes tolist, tofile Notes ----- As for `ndarray.tobytes`, information about the shape, dtype, etc., but also about `fill_value`, will be lost. 
Examples -------- >>> import numpy as np >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.tobytes() b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00' """ return self.filled(fill_value).tobytes(order=order) def tofile(self, fid, sep="", format="%s"): """ Save a masked array to a file in binary format. .. warning:: This function is not implemented yet. Raises ------ NotImplementedError When `tofile` is called. """ raise NotImplementedError("MaskedArray.tofile() not implemented yet.") def toflex(self): """ Transforms a masked array into a flexible-type array. The flexible type array that is returned will have two fields: * the ``_data`` field stores the ``_data`` part of the array. * the ``_mask`` field stores the ``_mask`` part of the array. Parameters ---------- None Returns ------- record : ndarray A new flexible-type `ndarray` with two fields: the first element containing a value, the second element containing the corresponding mask boolean. The returned record shape matches self.shape. Notes ----- A side-effect of transforming a masked array into a flexible `ndarray` is that meta information (``fill_value``, ...) will be lost. Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.toflex() array([[(1, False), (2, True), (3, False)], [(4, True), (5, False), (6, True)], [(7, False), (8, True), (9, False)]], dtype=[('_data', '<i8'), ('_mask', '?')]) """ # Get the basic dtype. 
ddtype = self.dtype # Make sure we have a mask _mask = self._mask if _mask is None: _mask = make_mask_none(self.shape, ddtype) # And get its dtype mdtype = self._mask.dtype record = np.ndarray(shape=self.shape, dtype=[('_data', ddtype), ('_mask', mdtype)]) record['_data'] = self._data record['_mask'] = self._mask return record torecords = toflex # Pickling def __getstate__(self): """Return the internal state of the masked array, for pickling purposes. """ cf = 'CF'[self.flags.fnc] data_state = super().__reduce__()[2] return data_state + (getmaskarray(self).tobytes(cf), self._fill_value) def __setstate__(self, state): """Restore the internal state of the masked array, for pickling purposes. ``state`` is typically the output of the ``__getstate__`` output, and is a 5-tuple: - class name - a tuple giving the shape of the data - a typecode for the data - a binary string for the data - a binary string for the mask. """ (_, shp, typ, isf, raw, msk, flv) = state super().__setstate__((shp, typ, isf, raw)) self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk)) self.fill_value = flv def __reduce__(self): """Return a 3-tuple for pickling a MaskedArray. """ return (_mareconstruct, (self.__class__, self._baseclass, (0,), 'b',), self.__getstate__()) def __deepcopy__(self, memo=None): from copy import deepcopy copied = MaskedArray.__new__(type(self), self, copy=True) if memo is None: memo = {} memo[id(self)] = copied for (k, v) in self.__dict__.items(): copied.__dict__[k] = deepcopy(v, memo) # as clearly documented for np.copy(), you need to use # deepcopy() directly for arrays of object type that may # contain compound types--you cannot depend on normal # copy semantics to do the right thing here if self.dtype.hasobject: copied._data[...] = deepcopy(copied._data) return copied def _mareconstruct(subtype, baseclass, baseshape, basetype,): """Internal function that builds a new MaskedArray from the information stored in a pickle. 
""" _data = ndarray.__new__(baseclass, baseshape, basetype) _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
MaskedArray
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_C.py
{ "start": 11521, "end": 13073 }
class ____(Benchmark): r""" Cross-in-Tray objective function. This class defines the Cross-in-Tray [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{CrossInTray}}(x) = - 0.0001 \left(\left|{e^{\left|{100 - \frac{\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi}}\right|} \sin\left(x_{1}\right) \sin\left(x_{2}\right)}\right| + 1\right)^{0.1} with :math:`x_i \in [-15, 15]` for :math:`i = 1, 2`. *Global optimum*: :math:`f(x) = -2.062611870822739` for :math:`x_i = \pm 1.349406608602084` for :math:`i = 1, 2` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N)) self.global_optimum = [(1.349406685353340, 1.349406608602084), (-1.349406685353340, 1.349406608602084), (1.349406685353340, -1.349406608602084), (-1.349406685353340, -1.349406608602084)] self.fglob = -2.062611870822739 def fun(self, x, *args): self.nfev += 1 return (-0.0001 * (abs(sin(x[0]) * sin(x[1]) * exp(abs(100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi))) + 1) ** (0.1))
CrossInTray
python
pypa__pip
src/pip/_vendor/rich/_log_render.py
{ "start": 308, "end": 3225 }
class ____: def __init__( self, show_time: bool = True, show_level: bool = False, show_path: bool = True, time_format: Union[str, FormatTimeCallable] = "[%x %X]", omit_repeated_times: bool = True, level_width: Optional[int] = 8, ) -> None: self.show_time = show_time self.show_level = show_level self.show_path = show_path self.time_format = time_format self.omit_repeated_times = omit_repeated_times self.level_width = level_width self._last_time: Optional[Text] = None def __call__( self, console: "Console", renderables: Iterable["ConsoleRenderable"], log_time: Optional[datetime] = None, time_format: Optional[Union[str, FormatTimeCallable]] = None, level: TextType = "", path: Optional[str] = None, line_no: Optional[int] = None, link_path: Optional[str] = None, ) -> "Table": from .containers import Renderables from .table import Table output = Table.grid(padding=(0, 1)) output.expand = True if self.show_time: output.add_column(style="log.time") if self.show_level: output.add_column(style="log.level", width=self.level_width) output.add_column(ratio=1, style="log.message", overflow="fold") if self.show_path and path: output.add_column(style="log.path") row: List["RenderableType"] = [] if self.show_time: log_time = log_time or console.get_datetime() time_format = time_format or self.time_format if callable(time_format): log_time_display = time_format(log_time) else: log_time_display = Text(log_time.strftime(time_format)) if log_time_display == self._last_time and self.omit_repeated_times: row.append(Text(" " * len(log_time_display))) else: row.append(log_time_display) self._last_time = log_time_display if self.show_level: row.append(level) row.append(Renderables(renderables)) if self.show_path and path: path_text = Text() path_text.append( path, style=f"link file://{link_path}" if link_path else "" ) if line_no: path_text.append(":") path_text.append( f"{line_no}", style=f"link file://{link_path}#{line_no}" if link_path else "", ) row.append(path_text) 
output.add_row(*row) return output if __name__ == "__main__": # pragma: no cover from pip._vendor.rich.console import Console c = Console() c.print("[on blue]Hello", justify="right") c.log("[on blue]hello", justify="right")
LogRender
python
keon__algorithms
algorithms/strings/rabin_karp.py
{ "start": 76, "end": 1557 }
class ____: def __init__(self, text, size_word): self.text = text self.hash = 0 self.size_word = size_word for i in range(0, size_word): #ord maps the character to a number #subtract out the ASCII value of "a" to start the indexing at zero self.hash += (ord(self.text[i]) - ord("a")+1)*(26**(size_word - i -1)) #start index of current window self.window_start = 0 #end of index window self.window_end = size_word def move_window(self): if self.window_end <= len(self.text) - 1: #remove left letter from hash value self.hash -= (ord(self.text[self.window_start]) - ord("a")+1)*26**(self.size_word-1) self.hash *= 26 self.hash += ord(self.text[self.window_end])- ord("a")+1 self.window_start += 1 self.window_end += 1 def window_text(self): return self.text[self.window_start:self.window_end] def rabin_karp(word, text): if word == "" or text == "": return None if len(word) > len(text): return None rolling_hash = RollingHash(text, len(word)) word_hash = RollingHash(word, len(word)) #word_hash.move_window() for i in range(len(text) - len(word) + 1): if rolling_hash.hash == word_hash.hash: if rolling_hash.window_text() == word: return i rolling_hash.move_window() return None
RollingHash
python
apache__airflow
task-sdk/src/airflow/sdk/execution_time/comms.py
{ "start": 31271, "end": 32704 }
class ____(BaseModel): """Add a new value to be redacted in task logs.""" # This is needed since calls to `mask_secret` in the Task process will otherwise only add the mask value # to the child process, but the redaction happens in the parent. # We cannot use `string | Iterable | dict here` (would be more intuitive) because bug in Pydantic # https://github.com/pydantic/pydantic/issues/9541 turns iterable into a ValidatorIterator value: JsonValue name: str | None = None type: Literal["MaskSecret"] = "MaskSecret" ToSupervisor = Annotated[ DeferTask | DeleteXCom | GetAssetByName | GetAssetByUri | GetAssetEventByAsset | GetAssetEventByAssetAlias | GetConnection | GetDagRunState | GetDRCount | GetPrevSuccessfulDagRun | GetPreviousDagRun | GetTaskRescheduleStartDate | GetTICount | GetTaskBreadcrumbs | GetTaskStates | GetVariable | GetXCom | GetXComCount | GetXComSequenceItem | GetXComSequenceSlice | PutVariable | RescheduleTask | RetryTask | SetRenderedFields | SetRenderedMapIndex | SetXCom | SkipDownstreamTasks | SucceedTask | ValidateInletsAndOutlets | TaskState | TriggerDagRun | DeleteVariable | ResendLoggingFD | CreateHITLDetailPayload | UpdateHITLDetail | GetHITLDetailResponse | MaskSecret, Field(discriminator="type"), ]
MaskSecret
python
readthedocs__readthedocs.org
readthedocs/search/api/v2/serializers.py
{ "start": 1567, "end": 1750 }
class ____(serializers.Serializer): title = serializers.SerializerMethodField() def get_title(self, obj): return list(getattr(obj, "title", []))
PageHighlightSerializer
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/elements.py
{ "start": 91609, "end": 92308 }
class ____( SingletonConstant, roles.ConstExprRole[bool], ColumnElement[bool] ): """Represent the ``false`` keyword, or equivalent, in a SQL statement. :class:`.False_` is accessed as a constant via the :func:`.false` function. """ __visit_name__ = "false" _traverse_internals: _TraverseInternalsType = [] _singleton: False_ if not TYPE_CHECKING: @util.memoized_property def type(self) -> TypeEngine[_T]: # noqa: A001 return type_api.BOOLEANTYPE def _negate(self) -> True_: return True_._singleton @classmethod def _instance(cls) -> False_: return False_._singleton False_._create_singleton()
False_
python
pytorch__pytorch
test/cpp/aoti_inference/compile_model.py
{ "start": 186, "end": 353 }
class ____(torch.nn.Module): def __init__(self, data): super().__init__() for key in data: setattr(self, key, data[key])
TensorSerializer
python
huggingface__transformers
tests/models/glm46v/test_video_processing_glm46v.py
{ "start": 1229, "end": 5451 }
class ____: def __init__( self, parent, batch_size=5, num_frames=8, num_channels=3, min_resolution=30, max_resolution=80, temporal_patch_size=2, patch_size=14, merge_size=2, do_resize=True, size=None, do_normalize=True, image_mean=IMAGENET_STANDARD_MEAN, image_std=IMAGENET_STANDARD_STD, do_convert_rgb=True, ): size = size if size is not None else {"longest_edge": 20, "shortest_edge": 10} self.parent = parent self.batch_size = batch_size self.num_frames = num_frames self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb self.temporal_patch_size = temporal_patch_size self.patch_size = patch_size self.merge_size = merge_size def prepare_video_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, "do_sample_frames": True, } def prepare_video_metadata(self, videos): video_metadata = [] for video in videos: if isinstance(video, list): num_frames = len(video) elif hasattr(video, "shape"): if len(video.shape) == 4: # (T, H, W, C) num_frames = video.shape[0] else: num_frames = 1 else: num_frames = self.num_frames metadata = { "fps": 2, "duration": num_frames / 2, "total_num_frames": num_frames, } video_metadata.append(metadata) return video_metadata def expected_output_video_shape(self, videos): grid_t = self.num_frames // self.temporal_patch_size hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size seq_len = 0 for video in videos: if isinstance(video, list) and isinstance(video[0], Image.Image): video = np.stack([np.array(frame) for frame in video]) elif hasattr(video, "shape"): pass else: video = np.array(video) if hasattr(video, "shape") and 
len(video.shape) >= 3: if len(video.shape) == 4: t, height, width = video.shape[:3] elif len(video.shape) == 3: height, width = video.shape[:2] t = 1 else: t, height, width = self.num_frames, self.min_resolution, self.min_resolution else: t, height, width = self.num_frames, self.min_resolution, self.min_resolution resized_height, resized_width = smart_resize( t, height, width, factor=self.patch_size * self.merge_size, min_pixels=self.size["shortest_edge"], max_pixels=self.size["longest_edge"], ) grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size seq_len += grid_t * grid_h * grid_w return [seq_len, hidden_dim] def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"): videos = prepare_video_inputs( batch_size=self.batch_size, num_frames=self.num_frames, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, return_tensors=return_tensors, ) return videos @require_torch @require_vision
Glm46VVideoProcessingTester
python
run-llama__llama_index
llama-index-instrumentation/src/llama_index_instrumentation/__init__.py
{ "start": 1116, "end": 3138 }
class ____(ABC): """ Apply the `dispatcher.span` decorator to implementations of abstract methods, as well as any methods previously decorated (in any base class) that are being overridden by a subclass. For example, if class `A` has abstract method `f`, and class `B` inherits from `A` and provides an implementation of `f`, then `B.f` will be decorated by the mixin. Furthermore, if `B` has a non-abstract method `g` that is decorated by `dispatcher.span` and new class `C` inherits from `B` and overrides `g`, then `C.g` will also be decorated by the mixin. Note that users can still manually apply `dispatcher.span` to the methods in their custom subclasses without creating duplicate spans because the `dispatcher.span` decorator should be idempotent. """ def __init_subclass__(cls, **kwargs: Any) -> None: super().__init_subclass__(**kwargs) abstract_methods: List[str] = [] decorated_methods: List[str] = [] for base_cls in inspect.getmro(cls): if base_cls is cls: continue for attr, method in base_cls.__dict__.items(): if not callable(method): continue if ( hasattr(method, "__isabstractmethod__") and method.__isabstractmethod__ # type: ignore ): abstract_methods.append(attr) elif hasattr(method, DISPATCHER_SPAN_DECORATED_ATTR): decorated_methods.append(attr) dispatcher = get_dispatcher(cls.__module__) for attr, method in cls.__dict__.items(): if ( not callable(method) or hasattr(method, "__isabstractmethod__") and method.__isabstractmethod__ # type: ignore ): continue if attr in abstract_methods or attr in decorated_methods: setattr(cls, attr, dispatcher.span(method))
DispatcherSpanMixin
python
django__django
tests/expressions/tests.py
{ "start": 64497, "end": 72238 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.n = Number.objects.create(integer=42, float=15.5) cls.n1 = Number.objects.create(integer=-42, float=-15.5) def test_lefthand_addition(self): # LH Addition of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F("integer") + 15, float=F("float") + 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3) ) def test_lefthand_subtraction(self): # LH Subtraction of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F("integer") - 15, float=F("float") - 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3) ) def test_lefthand_multiplication(self): # Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F("integer") * 15, float=F("float") * 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3) ) def test_lefthand_division(self): # LH Division of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F("integer") / 2, float=F("float") / 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3) ) def test_lefthand_modulo(self): # LH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=F("integer") % 20) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2) def test_lefthand_modulo_null(self): # LH Modulo arithmetic on integers. 
Employee.objects.create(firstname="John", lastname="Doe", salary=None) qs = Employee.objects.annotate(modsalary=F("salary") % 20) self.assertIsNone(qs.get().salary) def test_lefthand_bitwise_and(self): # LH Bitwise ands on integers Number.objects.filter(pk=self.n.pk).update(integer=F("integer").bitand(56)) Number.objects.filter(pk=self.n1.pk).update(integer=F("integer").bitand(-56)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64) def test_lefthand_bitwise_left_shift_operator(self): Number.objects.update(integer=F("integer").bitleftshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168) def test_lefthand_bitwise_right_shift_operator(self): Number.objects.update(integer=F("integer").bitrightshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11) def test_lefthand_bitwise_or(self): # LH Bitwise or on integers Number.objects.update(integer=F("integer").bitor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10) def test_lefthand_transformed_field_bitwise_or(self): Employee.objects.create(firstname="Max", lastname="Mustermann") with register_lookup(CharField, Length): qs = Employee.objects.annotate(bitor=F("lastname__length").bitor(48)) self.assertEqual(qs.get().bitor, 58) def test_lefthand_power(self): # LH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F("integer") ** 2, float=F("float") ** 1.5 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2) ) def test_lefthand_bitwise_xor(self): Number.objects.update(integer=F("integer").bitxor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 
26) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -26) def test_lefthand_bitwise_xor_null(self): employee = Employee.objects.create(firstname="John", lastname="Doe") Employee.objects.update(salary=F("salary").bitxor(48)) employee.refresh_from_db() self.assertIsNone(employee.salary) def test_lefthand_bitwise_xor_right_null(self): employee = Employee.objects.create(firstname="John", lastname="Doe", salary=48) Employee.objects.update(salary=F("salary").bitxor(None)) employee.refresh_from_db() self.assertIsNone(employee.salary) @unittest.skipUnless( connection.vendor == "oracle", "Oracle doesn't support bitwise XOR." ) def test_lefthand_bitwise_xor_not_supported(self): msg = "Bitwise XOR is not supported in Oracle." with self.assertRaisesMessage(NotSupportedError, msg): Number.objects.update(integer=F("integer").bitxor(48)) def test_right_hand_addition(self): # Right hand operators Number.objects.filter(pk=self.n.pk).update( integer=15 + F("integer"), float=42.7 + F("float") ) # RH Addition of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3) ) def test_right_hand_subtraction(self): Number.objects.filter(pk=self.n.pk).update( integer=15 - F("integer"), float=42.7 - F("float") ) # RH Subtraction of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3) ) def test_right_hand_multiplication(self): # RH Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=15 * F("integer"), float=42.7 * F("float") ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3) ) def test_right_hand_division(self): # RH Division of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=640 / 
F("integer"), float=42.7 / F("float") ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3) ) def test_right_hand_modulo(self): # RH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=69 % F("integer")) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) def test_righthand_power(self): # RH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update( integer=2 ** F("integer"), float=1.5 ** F("float") ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3) )
ExpressionOperatorTests
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
{ "start": 20344, "end": 20645 }
class ____(graphene.ObjectType): class Meta: interfaces = (GrapheneMessageEvent, GrapheneStepEvent) name = "AssetCheckEvaluationPlannedEvent" assetKey = graphene.NonNull(GrapheneAssetKey) checkName = graphene.NonNull(graphene.String)
GrapheneAssetCheckEvaluationPlannedEvent
python
spack__spack
lib/spack/spack/ci/common.py
{ "start": 13629, "end": 16976 }
class ____: """Turn a list of specs into a simple directed graph, that doesn't keep track of edge types.""" @classmethod def key(cls, spec: spack.spec.Spec) -> str: return spec.dag_hash() def __init__(self, specs: List[spack.spec.Spec]) -> None: # Build dictionary of nodes self.nodes: Dict[str, PipelineNode] = { PipelineDag.key(s): PipelineNode(s) for s in traverse.traverse_nodes(specs, deptype=dt.ALL_TYPES, root=True) } # Create edges for edge in traverse.traverse_edges( specs, deptype=dt.ALL_TYPES, root=False, cover="edges" ): parent_key = PipelineDag.key(edge.parent) child_key = PipelineDag.key(edge.spec) self.nodes[parent_key].children.add(child_key) self.nodes[child_key].parents.add(parent_key) def prune(self, node_key: str): """Remove a node from the graph, and reconnect its parents and children""" node = self.nodes[node_key] for parent in node.parents: self.nodes[parent].children.remove(node_key) self.nodes[parent].children |= node.children for child in node.children: self.nodes[child].parents.remove(node_key) self.nodes[child].parents |= node.parents del self.nodes[node_key] def traverse_nodes( self, direction: str = "children" ) -> Generator[Tuple[int, PipelineNode], None, None]: """Yields (depth, node) from the pipeline graph. Traversal is topologically ordered from the roots if ``direction`` is ``children``, or from the leaves if ``direction`` is ``parents``. 
The yielded depth is the length of the longest path from the starting point to the yielded node.""" if direction == "children": get_in_edges = lambda node: node.parents get_out_edges = lambda node: node.children else: get_in_edges = lambda node: node.children get_out_edges = lambda node: node.parents sort_key = lambda k: self.nodes[k].spec.name out_edges = {k: sorted(get_out_edges(n), key=sort_key) for k, n in self.nodes.items()} num_in_edges = {k: len(get_in_edges(n)) for k, n in self.nodes.items()} # Populate a queue with all the nodes that have no incoming edges nodes = deque( sorted( [(0, key) for key in self.nodes.keys() if num_in_edges[key] == 0], key=lambda item: item[1], ) ) while nodes: # Remove the next node, n, from the queue and yield it depth, n_key = nodes.pop() yield (depth, self.nodes[n_key]) # Remove an in-edge from every node, m, pointed to by an # out-edge from n. If any of those nodes are left with # 0 remaining in-edges, add them to the queue. for m in out_edges[n_key]: num_in_edges[m] -= 1 if num_in_edges[m] == 0: nodes.appendleft((depth + 1, m)) def get_dependencies(self, node: PipelineNode) -> List[PipelineNode]: """Returns a list of nodes corresponding to the direct dependencies of the given node.""" return [self.nodes[k] for k in node.children]
PipelineDag
python
mlflow__mlflow
mlflow/utils/autologging_utils/safety.py
{ "start": 35969, "end": 36131 }
class ____: def __init__(self, integration, id_): self.integration = integration self.id = id_ self.state = "running"
AutologgingSession
python
scrapy__scrapy
scrapy/contracts/__init__.py
{ "start": 3324, "end": 7681 }
class ____: contracts: dict[str, type[Contract]] = {} def __init__(self, contracts: Iterable[type[Contract]]): for contract in contracts: self.contracts[contract.name] = contract def tested_methods_from_spidercls(self, spidercls: type[Spider]) -> list[str]: is_method = re.compile(r"^\s*@", re.MULTILINE).search methods = [] for key, value in getmembers(spidercls): if callable(value) and value.__doc__ and is_method(value.__doc__): methods.append(key) return methods def extract_contracts(self, method: Callable) -> list[Contract]: contracts: list[Contract] = [] assert method.__doc__ is not None for line in method.__doc__.split("\n"): line = line.strip() if line.startswith("@"): m = re.match(r"@(\w+)\s*(.*)", line) if m is None: continue name, args = m.groups() args = re.split(r"\s+", args) contracts.append(self.contracts[name](method, *args)) return contracts def from_spider(self, spider: Spider, results: TestResult) -> list[Request | None]: requests: list[Request | None] = [] for method in self.tested_methods_from_spidercls(type(spider)): bound_method = spider.__getattribute__(method) try: requests.append(self.from_method(bound_method, results)) except Exception: case = _create_testcase(bound_method, "contract") results.addError(case, sys.exc_info()) return requests def from_method(self, method: Callable, results: TestResult) -> Request | None: contracts = self.extract_contracts(method) if contracts: request_cls = Request for contract in contracts: if contract.request_cls is not None: request_cls = contract.request_cls # calculate request args args, kwargs = get_spec(request_cls.__init__) # Don't filter requests to allow # testing different callbacks on the same URL. 
kwargs["dont_filter"] = True kwargs["callback"] = method for contract in contracts: kwargs = contract.adjust_request_args(kwargs) args.remove("self") # check if all positional arguments are defined in kwargs if set(args).issubset(set(kwargs)): request = request_cls(**kwargs) # execute pre and post hooks in order for contract in reversed(contracts): request = contract.add_pre_hook(request, results) for contract in contracts: request = contract.add_post_hook(request, results) self._clean_req(request, method, results) return request return None def _clean_req( self, request: Request, method: Callable, results: TestResult ) -> None: """stop the request from returning objects and records any errors""" cb = request.callback assert cb is not None @wraps(cb) def cb_wrapper(response: Response, **cb_kwargs: Any) -> None: try: output = cb(response, **cb_kwargs) output = list(cast("Iterable[Any]", iterate_spider_output(output))) except Exception: case = _create_testcase(method, "callback") results.addError(case, sys.exc_info()) def eb_wrapper(failure: Failure) -> None: case = _create_testcase(method, "errback") exc_info = failure.type, failure.value, failure.getTracebackObject() results.addError(case, exc_info) request.callback = cb_wrapper request.errback = eb_wrapper def _create_testcase(method: Callable, desc: str) -> TestCase: spider = method.__self__.name # type: ignore[attr-defined] class ContractTestCase(TestCase): def __str__(_self) -> str: # pylint: disable=no-self-argument return f"[{spider}] {method.__name__} ({desc})" name = f"{spider}_{method.__name__}" setattr(ContractTestCase, name, lambda x: x) return ContractTestCase(name)
ContractsManager
python
scipy__scipy
scipy/stats/tests/test_distributions.py
{ "start": 20127, "end": 22922 }
class ____: def test_pdf_no_overflow_warning(self): # The argument is large enough that x**2 will overflow to # infinity and 1/(1 + x**2) will be 0. This should not # trigger a warning. p = stats.cauchy.pdf(1e200) assert p == 0.0 # Reference values were computed with mpmath. @pytest.mark.parametrize( 'x, ref', [(0.0, -1.1447298858494002), (5e-324, -1.1447298858494002), (1e-34, -1.1447298858494002), (2.2e-16, -1.1447298858494002), (2e-8, -1.1447298858494006), (5e-4, -1.144730135849369), (0.1, -1.1546802167025683), (1.5, -2.3233848821910463), (2e18, -85.42408759475494), (1e200, -922.1787670834676), (_XMAX, -1420.7101556726175)]) def test_logpdf(self, x, ref): logp = stats.cauchy.logpdf([x, -x]) assert_allclose(logp, [ref, ref], rtol=1e-15) # Reference values were computed with mpmath. @pytest.mark.parametrize( 'x, ref', [(-5e15, 6.366197723675814e-17), (-5, 0.06283295818900118), (-1, 0.25), (0, 0.5), (1, 0.75), (5, 0.9371670418109989), (5e15, 0.9999999999999999)] ) @pytest.mark.parametrize( 'method, sgn', [(stats.cauchy.cdf, 1), (stats.cauchy.sf, -1)] ) def test_cdf_sf(self, x, ref, method, sgn): p = method(sgn*x) assert_allclose(p, ref, rtol=1e-15) # Reference values were computed with mpmath. @pytest.mark.parametrize('x, ref', [(4e250, -7.957747154594767e-252), (1e25, -3.1830988618379063e-26), (10.0, -0.03223967552667532), (0.0, -0.6931471805599453), (-10.0, -3.4506339556469654), (-7e45, -106.70696921963678), (-3e225, -520.3249880981778)]) def test_logcdf_logsf(self, x, ref): logcdf = stats.cauchy.logcdf(x) assert_allclose(logcdf, ref, rtol=5e-15) logsf = stats.cauchy.logsf(-x) assert_allclose(logsf, ref, rtol=5e-15) # Reference values were computed with mpmath. 
@pytest.mark.parametrize( 'p, ref', [(1e-20, -3.1830988618379067e+19), (1e-9, -318309886.1837906), (0.25, -1.0), (0.50, 0.0), (0.75, 1.0), (0.999999, 318309.88617359026), (0.999999999999, 318316927901.77966)] ) @pytest.mark.parametrize( 'method, sgn', [(stats.cauchy.ppf, 1), (stats.cauchy.isf, -1)]) def test_ppf_isf(self, p, ref, method, sgn): x = sgn*method(p) assert_allclose(x, ref, rtol=1e-15)
TestCauchy
python
sqlalchemy__sqlalchemy
test/orm/test_query.py
{ "start": 184142, "end": 186256 }
class ____(QueryTest, AssertsCompiledSQL): __dialect__ = "default" def test_hints(self): User = self.classes.User from sqlalchemy.dialects import mysql dialect = mysql.dialect() sess = fixture_session() self.assert_compile( sess.query(User).with_hint( User, "USE INDEX (col1_index,col2_index)" ), "SELECT users.id AS users_id, users.name AS users_name " "FROM users USE INDEX (col1_index,col2_index)", dialect=dialect, ) self.assert_compile( sess.query(User).with_hint( User, "WITH INDEX col1_index", "sybase" ), "SELECT users.id AS users_id, users.name AS users_name " "FROM users", dialect=dialect, ) ualias = aliased(User) self.assert_compile( sess.query(User, ualias) .with_hint(ualias, "USE INDEX (col1_index,col2_index)") .join(ualias, ualias.id > User.id), "SELECT users.id AS users_id, users.name AS users_name, " "users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM users INNER JOIN users AS users_1 " "USE INDEX (col1_index,col2_index) " "ON users_1.id > users.id", dialect=dialect, ) def test_statement_hints(self): User = self.classes.User sess = fixture_session() stmt = ( sess.query(User) .with_statement_hint("test hint one") .with_statement_hint("test hint two") .with_statement_hint("test hint three", "postgresql") ) self.assert_compile( stmt, "SELECT users.id AS users_id, users.name AS users_name " "FROM users test hint one test hint two", ) self.assert_compile( stmt, "SELECT users.id AS users_id, users.name AS users_name " "FROM users test hint one test hint two test hint three", dialect="postgresql", )
HintsTest
python
prabhupant__python-ds
data_structures/linked_list/remove_nth_node_from_end.py
{ "start": 0, "end": 616 }
class ____(): def __init__(self, val): self.val = val self.next = None def remove(head, n): res = head slow = head fast = head for i in range(n+1): fast = fast.next while fast: fast = fast.next slow = slow.next slow.next = slow.next.next return res def print_list(head): curr = head while curr: print(curr.val, end=" ") curr = curr.next head = Node(1) head.next = Node(2) head.next.next = Node(3) head.next.next.next = Node(4) head.next.next.next.next = Node(5) print_list(head) remove(head, 2) print_list(head)
Node
python
sphinx-doc__sphinx
sphinx/domains/python/__init__.py
{ "start": 11462, "end": 13482 }
class ____(PyObject): """Description of an attribute.""" option_spec = PyObject.option_spec.copy() option_spec.update({ 'abstract': directives.flag, 'abstractmethod': directives.flag, 'classmethod': directives.flag, 'type': directives.unchanged, }) def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]: fullname, prefix = super().handle_signature(sig, signode) typ = self.options.get('type') if typ: annotations = _parse_annotation(typ, self.env) signode += addnodes.desc_annotation( typ, '', addnodes.desc_sig_punctuation('', ':'), addnodes.desc_sig_space(), *annotations, ) return fullname, prefix def get_signature_prefix(self, sig: str) -> Sequence[nodes.Node]: prefix: list[addnodes.desc_sig_element] = [] if 'abstract' in self.options or 'abstractmethod' in self.options: prefix.extend(( addnodes.desc_sig_keyword('', 'abstract'), addnodes.desc_sig_space(), )) if 'classmethod' in self.options: prefix.extend(( addnodes.desc_sig_keyword('', 'class'), addnodes.desc_sig_space(), )) prefix.extend(( addnodes.desc_sig_keyword('', 'property'), addnodes.desc_sig_space(), )) return prefix def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str: name, _cls = name_cls try: clsname, attrname = name.rsplit('.', 1) if modname and self.config.add_module_names: clsname = f'{modname}.{clsname}' except ValueError: if modname: return _('%s (in module %s)') % (name, modname) else: return name return _('%s (%s property)') % (attrname, clsname)
PyProperty
python
dagster-io__dagster
python_modules/dagster/dagster/_daemon/auto_run_reexecution/event_log_consumer.py
{ "start": 988, "end": 9305 }
class ____(IntervalDaemon): def __init__( self, interval_seconds: int = _INTERVAL_SECONDS, event_log_fetch_limit: int = _EVENT_LOG_FETCH_LIMIT, ): super().__init__(interval_seconds=interval_seconds) self._event_log_fetch_limit = event_log_fetch_limit @classmethod def daemon_type(cls) -> str: return "EVENT_LOG_CONSUMER" @property def handle_updated_runs_fns( self, ) -> Sequence[ Callable[[IWorkspaceProcessContext, Sequence[RunRecord], logging.Logger], Iterator] ]: """List of functions that will be called with the list of run records that have new events.""" return [consume_new_runs_for_automatic_reexecution] def run_iteration(self, workspace_process_context: IWorkspaceProcessContext): instance = workspace_process_context.instance # get the persisted cursor for each event type persisted_cursors = _fetch_persisted_cursors(instance, DAGSTER_EVENT_TYPES, self._logger) # Get the current greatest event id before we query for the specific event types overall_max_event_id = instance.event_log_storage.get_maximum_record_id() events: list[EventLogEntry] = [] new_cursors: dict[ DagsterEventType, int ] = {} # keep these in memory until we handle the events for event_type in DAGSTER_EVENT_TYPES: yield cursor = persisted_cursors[event_type] if cursor is None: # if we don't have a cursor for this event type, start at the top of the event log and ignore older events. Otherwise enabling the daemon would result in retrying all old runs. 
cursor = overall_max_event_id or 0 events_by_log_id_for_type = instance.event_log_storage.get_logs_for_all_runs_by_log_id( after_cursor=cursor, dagster_event_type={event_type}, limit=self._event_log_fetch_limit, ) events.extend(events_by_log_id_for_type.values()) # calculate the new cursor for this event type new_cursors[event_type] = get_new_cursor( cursor, overall_max_event_id, self._event_log_fetch_limit, list(events_by_log_id_for_type.keys()), ) if events: run_ids = list({event.run_id for event in events}) run_records = instance.get_run_records(filters=RunsFilter(run_ids=run_ids)) # call each handler with the list of runs that have events for fn in self.handle_updated_runs_fns: try: yield from fn(workspace_process_context, run_records, self._logger) except Exception: DaemonErrorCapture.process_exception( sys.exc_info(), logger=self._logger, log_message=f"Error calling event event log consumer handler: {fn.__name__}", ) # persist cursors now that we've processed all the events through the handlers _persist_cursors(instance, new_cursors) def _create_cursor_key(event_type: DagsterEventType) -> str: check.inst_param(event_type, "event_type", DagsterEventType) return f"EVENT_LOG_CONSUMER_CURSOR-{event_type.value}" def _fetch_persisted_cursors( instance: DagsterInstance, event_types: Sequence[DagsterEventType], logger: logging.Logger ) -> dict[DagsterEventType, Optional[int]]: check.inst_param(instance, "instance", DagsterInstance) check.sequence_param(event_types, "event_types", of_type=DagsterEventType) # get the persisted cursor for each event type persisted_cursors = instance.daemon_cursor_storage.get_cursor_values( {_create_cursor_key(event_type) for event_type in event_types} ) fetched_cursors: dict[DagsterEventType, Optional[int]] = {} for event_type in event_types: raw_cursor_value = persisted_cursors.get(_create_cursor_key(event_type)) if raw_cursor_value is None: logger.warn(f"No cursor for event type {event_type}, ignoring older events") 
fetched_cursors[event_type] = None else: try: cursor_value = int(raw_cursor_value) except ValueError: logger.exception(f"Invalid cursor for event_type {event_type}: {raw_cursor_value}") raise fetched_cursors[event_type] = cursor_value return fetched_cursors def _persist_cursors(instance: DagsterInstance, cursors: Mapping[DagsterEventType, int]) -> None: check.inst_param(instance, "instance", DagsterInstance) check.mapping_param(cursors, "cursors", key_type=DagsterEventType, value_type=int) if cursors: instance.daemon_cursor_storage.set_cursor_values( { _create_cursor_key(event_type): str(cursor_value) for event_type, cursor_value in cursors.items() } ) def get_new_cursor( persisted_cursor: int, overall_max_event_id: Optional[int], fetch_limit: int, new_event_ids: Sequence[int], ) -> int: """Return the new cursor value for an event type, or None if one shouldn't be persisted. The cursor is guaranteed to be: - greater than or equal to any id in new_event_ids (otherwise we could process an event twice) - less than the id of any event of the desired type that hasn't been fetched yet (otherwise we could skip events) This method optimizes for moving the cursor as far forward as possible, using overall_max_event_id. """ check.int_param(persisted_cursor, "persisted_cursor") check.opt_int_param(overall_max_event_id, "overall_max_event_id") check.int_param(fetch_limit, "fetch_limit") check.sequence_param(new_event_ids, "new_event_ids", of_type=int) if overall_max_event_id is None: # We only get here if the event log was empty when we queried it for the overall max. if new_event_ids: # We only get here if some events snuck in after the max id query. Set the cursor # to the max event id of the new events. return max(new_event_ids) # Event log is empty, set the cursor to 0 so we pick up the next events. Otherwise we'd skip to # the latest event if no cursor was set. 
return 0 if not new_event_ids: # No new events, so we can skip to overall_max_event_id because we queried that first, so we # know there are no relevant events up to that id. return overall_max_event_id # these should be ordered, but we won't assume max_new_event_id = max(new_event_ids) check.invariant( max_new_event_id > persisted_cursor, f"The new cursor {max_new_event_id} should be greater than the previous {persisted_cursor}", ) num_new_events = len(new_event_ids) check.invariant( num_new_events <= fetch_limit, "Query returned more than the limit!", ) if num_new_events == fetch_limit: # We got back the limit number of events, so the only thing we can do is move the cursor # forward to the max event id of the new events. It's possible for the very next log id # to be the desired type. There's no way to skip ahead. return max_new_event_id else: # We got back fewer than the limit number of events, so we may be able to skip ahead. Since # we queried for overall_max_event_id before we queried for events of the desired type, we # know that there can be no events of the desired type with ids less than overall_max_event_id # that we haven't fetched yet (they would have been in this query, up until it reached the # fetch limit). Thus we can skip ahead to overall_max_event_id. if overall_max_event_id >= max_new_event_id: return overall_max_event_id # There's also a rare case where more events of our desired type snuck in after the query # for overall_max_event_id but before the specific event query. In this case, we just move # the cursor forward to the max event id of the new events. return max_new_event_id
EventLogConsumerDaemon
python
anthropics__anthropic-sdk-python
src/anthropic/types/completion_create_params.py
{ "start": 3936, "end": 4839 }
class ____(CompletionCreateParamsBase): stream: Required[Literal[True]] """Whether to incrementally stream the response using server-sent events. See [streaming](https://docs.claude.com/en/api/streaming) for details. """ CompletionRequestStreamingMetadata = MetadataParam """This is deprecated, `MetadataParam` should be used instead""" CompletionRequestNonStreamingMetadata = MetadataParam """This is deprecated, `MetadataParam` should be used instead""" CompletionRequestNonStreaming = CompletionCreateParamsNonStreaming """This is deprecated, `CompletionCreateParamsNonStreaming` should be used instead""" CompletionRequestStreaming = CompletionCreateParamsStreaming """This is deprecated, `CompletionCreateParamsStreaming` should be used instead""" CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
CompletionCreateParamsStreaming
python
django__django
tests/postgres_tests/test_search.py
{ "start": 642, "end": 3625 }
class ____: @classmethod def setUpTestData(cls): cls.robin = Scene.objects.create( scene="Scene 10", setting="The dark forest of Ewing" ) cls.minstrel = Character.objects.create(name="Minstrel") verses = [ ( "Bravely bold Sir Robin, rode forth from Camelot. " "He was not afraid to die, o Brave Sir Robin. " "He was not at all afraid to be killed in nasty ways. " "Brave, brave, brave, brave Sir Robin" ), ( "He was not in the least bit scared to be mashed into a pulp, " "Or to have his eyes gouged out, and his elbows broken. " "To have his kneecaps split, and his body burned away, " "And his limbs all hacked and mangled, brave Sir Robin!" ), ( "His head smashed in and his heart cut out, " "And his liver removed and his bowels unplugged, " "And his nostrils ripped and his bottom burned off," "And his --" ), ] cls.verses = [ Line.objects.create( scene=cls.robin, character=cls.minstrel, dialogue=verse, ) for verse in verses ] cls.verse0, cls.verse1, cls.verse2 = cls.verses cls.witch_scene = Scene.objects.create( scene="Scene 5", setting="Sir Bedemir's Castle" ) bedemir = Character.objects.create(name="Bedemir") crowd = Character.objects.create(name="Crowd") witch = Character.objects.create(name="Witch") duck = Character.objects.create(name="Duck") cls.bedemir0 = Line.objects.create( scene=cls.witch_scene, character=bedemir, dialogue="We shall use my larger scales!", dialogue_config="english", ) cls.bedemir1 = Line.objects.create( scene=cls.witch_scene, character=bedemir, dialogue="Right, remove the supports!", dialogue_config="english", ) cls.duck = Line.objects.create( scene=cls.witch_scene, character=duck, dialogue=None ) cls.crowd = Line.objects.create( scene=cls.witch_scene, character=crowd, dialogue="A witch! A witch!" ) cls.witch = Line.objects.create( scene=cls.witch_scene, character=witch, dialogue="It's a fair cop." 
) trojan_rabbit = Scene.objects.create( scene="Scene 8", setting="The castle of Our Master Ruiz' de lu la Ramper" ) guards = Character.objects.create(name="French Guards") cls.french = Line.objects.create( scene=trojan_rabbit, character=guards, dialogue="Oh. Un beau cadeau. Oui oui.", dialogue_config="french", )
GrailTestData
python
doocs__leetcode
solution/2100-2199/2139.Minimum Moves to Reach Target Score/Solution2.py
{ "start": 0, "end": 339 }
class ____: def minMoves(self, target: int, maxDoubles: int) -> int: ans = 0 while maxDoubles and target > 1: ans += 1 if target % 2 == 1: target -= 1 else: maxDoubles -= 1 target >>= 1 ans += target - 1 return ans
Solution
python
xlwings__xlwings
xlwings/constants.py
{ "start": 112321, "end": 112442 }
class ____: xlNext = 1 # from enum XlSearchDirection xlPrevious = 2 # from enum XlSearchDirection
SearchDirection
python
matplotlib__matplotlib
lib/matplotlib/colors.py
{ "start": 120683, "end": 121111 }
class ____(Normalize): """ Dummy replacement for `Normalize`, for the case where we want to use indices directly in a `~matplotlib.cm.ScalarMappable`. """ def __call__(self, value, clip=None): if np.iterable(value): return np.ma.array(value) return value def inverse(self, value): if np.iterable(value): return np.ma.array(value) return value
NoNorm
python
ethereum__web3.py
web3/exceptions.py
{ "start": 1247, "end": 1409 }
class ____(Web3Exception, TypeError): """ A web3.py exception wrapper for `TypeError`, for better control over exception handling. """
Web3TypeError
python
python-openxml__python-docx
tests/oxml/test_table.py
{ "start": 1444, "end": 13640 }
class ____: """Unit-test suite for `docx.oxml.table.CT_Tc` objects.""" @pytest.mark.parametrize( ("tr_cxml", "tc_idx", "expected_value"), [ ("w:tr/(w:tc/w:p,w:tc/w:p)", 0, 0), ("w:tr/(w:tc/w:p,w:tc/w:p)", 1, 1), ("w:tr/(w:trPr/w:gridBefore{w:val=2},w:tc/w:p,w:tc/w:p)", 0, 2), ("w:tr/(w:trPr/w:gridBefore{w:val=2},w:tc/w:p,w:tc/w:p)", 1, 3), ("w:tr/(w:trPr/w:gridBefore{w:val=4},w:tc/w:p,w:tc/w:p,w:tc/w:p,w:tc/w:p)", 2, 6), ], ) def it_knows_its_grid_offset(self, tr_cxml: str, tc_idx: int, expected_value: int): tr = cast(CT_Row, element(tr_cxml)) tc = tr.tc_lst[tc_idx] assert tc.grid_offset == expected_value def it_can_merge_to_another_tc( self, tr_: Mock, _span_dimensions_: Mock, _tbl_: Mock, _grow_to_: Mock, top_tc_: Mock ): top_tr_ = tr_ tc, other_tc = cast(CT_Tc, element("w:tc")), cast(CT_Tc, element("w:tc")) top, left, height, width = 0, 1, 2, 3 _span_dimensions_.return_value = top, left, height, width _tbl_.return_value.tr_lst = [tr_] tr_.tc_at_grid_offset.return_value = top_tc_ merged_tc = tc.merge(other_tc) _span_dimensions_.assert_called_once_with(tc, other_tc) top_tr_.tc_at_grid_offset.assert_called_once_with(left) top_tc_._grow_to.assert_called_once_with(width, height) assert merged_tc is top_tc_ @pytest.mark.parametrize( ("snippet_idx", "row", "col", "attr_name", "expected_value"), [ (0, 0, 0, "top", 0), (2, 0, 1, "top", 0), (2, 1, 1, "top", 0), (4, 2, 1, "top", 1), (0, 0, 0, "left", 0), (1, 0, 1, "left", 2), (3, 1, 0, "left", 0), (3, 1, 1, "left", 2), (0, 0, 0, "bottom", 1), (1, 0, 0, "bottom", 1), (2, 0, 1, "bottom", 2), (4, 1, 1, "bottom", 3), (0, 0, 0, "right", 1), (1, 0, 0, "right", 2), (4, 2, 1, "right", 3), ], ) def it_knows_its_extents_to_help( self, snippet_idx: int, row: int, col: int, attr_name: str, expected_value: int ): tbl = self._snippet_tbl(snippet_idx) tc = tbl.tr_lst[row].tc_lst[col] extent = getattr(tc, attr_name) assert extent == expected_value @pytest.mark.parametrize( ("snippet_idx", "row", "col", "row_2", "col_2", "expected_value"), 
[ (0, 0, 0, 0, 1, (0, 0, 1, 2)), (0, 0, 1, 2, 1, (0, 1, 3, 1)), (0, 2, 2, 1, 1, (1, 1, 2, 2)), (0, 1, 2, 1, 0, (1, 0, 1, 3)), (1, 0, 0, 1, 1, (0, 0, 2, 2)), (1, 0, 1, 0, 0, (0, 0, 1, 3)), (2, 0, 1, 2, 1, (0, 1, 3, 1)), (2, 0, 1, 1, 0, (0, 0, 2, 2)), (2, 1, 2, 0, 1, (0, 1, 2, 2)), (4, 0, 1, 0, 0, (0, 0, 1, 3)), ], ) def it_calculates_the_dimensions_of_a_span_to_help( self, snippet_idx: int, row: int, col: int, row_2: int, col_2: int, expected_value: tuple[int, int, int, int], ): tbl = self._snippet_tbl(snippet_idx) tc = tbl.tr_lst[row].tc_lst[col] other_tc = tbl.tr_lst[row_2].tc_lst[col_2] dimensions = tc._span_dimensions(other_tc) assert dimensions == expected_value @pytest.mark.parametrize( ("snippet_idx", "row", "col", "row_2", "col_2"), [ (1, 0, 0, 1, 0), # inverted-L horz (1, 1, 0, 0, 0), # same in opposite order (2, 0, 2, 0, 1), # inverted-L vert (5, 0, 1, 1, 0), # tee-shape horz bar (5, 1, 0, 2, 1), # same, opposite side (6, 1, 0, 0, 1), # tee-shape vert bar (6, 0, 1, 1, 2), # same, opposite side ], ) def it_raises_on_invalid_span( self, snippet_idx: int, row: int, col: int, row_2: int, col_2: int ): tbl = self._snippet_tbl(snippet_idx) tc = tbl.tr_lst[row].tc_lst[col] other_tc = tbl.tr_lst[row_2].tc_lst[col_2] with pytest.raises(InvalidSpanError): tc._span_dimensions(other_tc) @pytest.mark.parametrize( ("snippet_idx", "row", "col", "width", "height"), [ (0, 0, 0, 2, 1), (0, 0, 1, 1, 2), (0, 1, 1, 2, 2), (1, 0, 0, 2, 2), (2, 0, 0, 2, 2), (2, 1, 2, 1, 2), ], ) def it_can_grow_itself_to_help_merge( self, snippet_idx: int, row: int, col: int, width: int, height: int, _span_to_width_: Mock ): tbl = self._snippet_tbl(snippet_idx) tc = tbl.tr_lst[row].tc_lst[col] start = 0 if height == 1 else 1 end = start + height tc._grow_to(width, height, None) assert ( _span_to_width_.call_args_list == [ call(width, tc, None), call(width, tc, "restart"), call(width, tc, "continue"), call(width, tc, "continue"), ][start:end] ) def it_can_extend_its_horz_span_to_help_merge( self, 
top_tc_: Mock, grid_span_: Mock, _move_content_to_: Mock, _swallow_next_tc_: Mock ): grid_span_.side_effect = [1, 3, 4] grid_width, vMerge = 4, "continue" tc = cast(CT_Tc, element("w:tc")) tc._span_to_width(grid_width, top_tc_, vMerge) _move_content_to_.assert_called_once_with(tc, top_tc_) assert _swallow_next_tc_.call_args_list == [ call(tc, grid_width, top_tc_), call(tc, grid_width, top_tc_), ] assert tc.vMerge == vMerge def it_knows_its_inner_content_block_item_elements(self): tc = cast(CT_Tc, element("w:tc/(w:p,w:tbl,w:p)")) assert [type(e) for e in tc.inner_content_elements] == [CT_P, CT_Tbl, CT_P] @pytest.mark.parametrize( ("tr_cxml", "tc_idx", "grid_width", "expected_cxml"), [ ( "w:tr/(w:tc/w:p,w:tc/w:p)", 0, 2, "w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))", ), ( "w:tr/(w:tc/w:p,w:tc/w:p,w:tc/w:p)", 1, 2, "w:tr/(w:tc/w:p,w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))", ), ( 'w:tr/(w:tc/w:p/w:r/w:t"a",w:tc/w:p/w:r/w:t"b")', 0, 2, 'w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p/w:r/w:t"a",w:p/w:r/w:t"b"))', ), ( "w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p),w:tc/w:p)", 0, 3, "w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=3},w:p))", ), ( "w:tr/(w:tc/w:p,w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))", 0, 3, "w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=3},w:p))", ), ], ) def it_can_swallow_the_next_tc_help_merge( self, tr_cxml: str, tc_idx: int, grid_width: int, expected_cxml: str ): tr = cast(CT_Row, element(tr_cxml)) tc = top_tc = tr.tc_lst[tc_idx] tc._swallow_next_tc(grid_width, top_tc) assert tr.xml == xml(expected_cxml) @pytest.mark.parametrize( ("tr_cxml", "tc_idx", "grid_width", "expected_cxml"), [ # both cells have a width ( "w:tr/(w:tc/(w:tcPr/w:tcW{w:w=1440,w:type=dxa},w:p)," "w:tc/(w:tcPr/w:tcW{w:w=1440,w:type=dxa},w:p))", 0, 2, "w:tr/(w:tc/(w:tcPr/(w:tcW{w:w=2880,w:type=dxa},w:gridSpan{w:val=2}),w:p))", ), # neither have a width ( "w:tr/(w:tc/w:p,w:tc/w:p)", 0, 2, "w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))", ), # only second one has a width ( 
"w:tr/(w:tc/w:p,w:tc/(w:tcPr/w:tcW{w:w=1440,w:type=dxa},w:p))", 0, 2, "w:tr/(w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))", ), # only first one has a width ( "w:tr/(w:tc/(w:tcPr/w:tcW{w:w=1440,w:type=dxa},w:p),w:tc/w:p)", 0, 2, "w:tr/(w:tc/(w:tcPr/(w:tcW{w:w=1440,w:type=dxa},w:gridSpan{w:val=2}),w:p))", ), ], ) def it_adds_cell_widths_on_swallow( self, tr_cxml: str, tc_idx: int, grid_width: int, expected_cxml: str ): tr = cast(CT_Row, element(tr_cxml)) tc = top_tc = tr.tc_lst[tc_idx] tc._swallow_next_tc(grid_width, top_tc) assert tr.xml == xml(expected_cxml) @pytest.mark.parametrize( ("tr_cxml", "tc_idx", "grid_width"), [ ("w:tr/w:tc/w:p", 0, 2), ("w:tr/(w:tc/w:p,w:tc/(w:tcPr/w:gridSpan{w:val=2},w:p))", 0, 2), ], ) def it_raises_on_invalid_swallow(self, tr_cxml: str, tc_idx: int, grid_width: int): tr = cast(CT_Row, element(tr_cxml)) tc = top_tc = tr.tc_lst[tc_idx] with pytest.raises(InvalidSpanError): tc._swallow_next_tc(grid_width, top_tc) @pytest.mark.parametrize( ("tc_cxml", "tc_2_cxml", "expected_tc_cxml", "expected_tc_2_cxml"), [ ("w:tc/w:p", "w:tc/w:p", "w:tc/w:p", "w:tc/w:p"), ("w:tc/w:p", "w:tc/w:p/w:r", "w:tc/w:p", "w:tc/w:p/w:r"), ("w:tc/w:p/w:r", "w:tc/w:p", "w:tc/w:p", "w:tc/w:p/w:r"), ("w:tc/(w:p/w:r,w:sdt)", "w:tc/w:p", "w:tc/w:p", "w:tc/(w:p/w:r,w:sdt)"), ( "w:tc/(w:p/w:r,w:sdt)", "w:tc/(w:tbl,w:p)", "w:tc/w:p", "w:tc/(w:tbl,w:p/w:r,w:sdt)", ), ], ) def it_can_move_its_content_to_help_merge( self, tc_cxml: str, tc_2_cxml: str, expected_tc_cxml: str, expected_tc_2_cxml: str ): tc, tc_2 = cast(CT_Tc, element(tc_cxml)), cast(CT_Tc, element(tc_2_cxml)) tc._move_content_to(tc_2) assert tc.xml == xml(expected_tc_cxml) assert tc_2.xml == xml(expected_tc_2_cxml) @pytest.mark.parametrize(("snippet_idx", "row_idx", "col_idx"), [(0, 0, 0), (4, 0, 0)]) def it_raises_on_tr_above(self, snippet_idx: int, row_idx: int, col_idx: int): tbl = cast(CT_Tbl, parse_xml(snippet_seq("tbl-cells")[snippet_idx])) tc = tbl.tr_lst[row_idx].tc_lst[col_idx] with pytest.raises(ValueError, 
match="no tr above topmost tr"): tc._tr_above # fixtures ------------------------------------------------------- @pytest.fixture def grid_span_(self, request: FixtureRequest): return property_mock(request, CT_Tc, "grid_span") @pytest.fixture def _grow_to_(self, request: FixtureRequest): return method_mock(request, CT_Tc, "_grow_to") @pytest.fixture def _move_content_to_(self, request: FixtureRequest): return method_mock(request, CT_Tc, "_move_content_to") @pytest.fixture def _span_dimensions_(self, request: FixtureRequest): return method_mock(request, CT_Tc, "_span_dimensions") @pytest.fixture def _span_to_width_(self, request: FixtureRequest): return method_mock(request, CT_Tc, "_span_to_width", autospec=False) def _snippet_tbl(self, idx: int) -> CT_Tbl: """A <w:tbl> element for snippet at `idx` in 'tbl-cells' snippet file.""" return cast(CT_Tbl, parse_xml(snippet_seq("tbl-cells")[idx])) @pytest.fixture def _swallow_next_tc_(self, request: FixtureRequest): return method_mock(request, CT_Tc, "_swallow_next_tc") @pytest.fixture def _tbl_(self, request: FixtureRequest): return property_mock(request, CT_Tc, "_tbl") @pytest.fixture def top_tc_(self, request: FixtureRequest): return instance_mock(request, CT_Tc) @pytest.fixture def tr_(self, request: FixtureRequest): return instance_mock(request, CT_Row)
DescribeCT_Tc
python
MongoEngine__mongoengine
tests/test_dereference.py
{ "start": 134, "end": 39067 }
class ____(unittest.TestCase): @classmethod def setUpClass(cls): cls.db = connect(db="mongoenginetest") @classmethod def tearDownClass(cls): cls.db.drop_database("mongoenginetest") def test_list_item_dereference(self): """Ensure that DBRef items in ListFields are dereferenced.""" class User(Document): name = StringField() class Group(Document): members = ListField(ReferenceField(User)) User.drop_collection() Group.drop_collection() for i in range(1, 51): user = User(name="user %s" % i) user.save() group = Group(members=User.objects) group.save() group = Group(members=User.objects) group.save() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 len(group_obj._data["members"]) assert q == 1 len(group_obj.members) assert q == 2 _ = [m for m in group_obj.members] assert q == 2 # Document select_related with query_counter() as q: assert q == 0 group_obj = Group.objects.first().select_related() assert q == 2 _ = [m for m in group_obj.members] assert q == 2 # Queryset select_related with query_counter() as q: assert q == 0 group_objs = Group.objects.select_related() assert q == 2 for group_obj in group_objs: _ = [m for m in group_obj.members] assert q == 2 User.drop_collection() Group.drop_collection() def test_list_item_dereference_dref_false(self): """Ensure that DBRef items in ListFields are dereferenced.""" class User(Document): name = StringField() class Group(Document): members = ListField(ReferenceField(User, dbref=False)) User.drop_collection() Group.drop_collection() for i in range(1, 51): user = User(name="user %s" % i) user.save() group = Group(members=User.objects) group.save() group.reload() # Confirm reload works with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 _ = [m for m in group_obj.members] assert q == 2 assert group_obj._data["members"]._dereferenced # verifies that no additional queries gets executed # if we re-iterate over the ListField once it is # dereferenced _ = [m for 
m in group_obj.members] assert q == 2 assert group_obj._data["members"]._dereferenced # Document select_related with query_counter() as q: assert q == 0 group_obj = Group.objects.first().select_related() assert q == 2 _ = [m for m in group_obj.members] assert q == 2 # Queryset select_related with query_counter() as q: assert q == 0 group_objs = Group.objects.select_related() assert q == 2 for group_obj in group_objs: _ = [m for m in group_obj.members] assert q == 2 def test_list_item_dereference_orphan_dbref(self): """Ensure that orphan DBRef items in ListFields are dereferenced.""" class User(Document): name = StringField() class Group(Document): members = ListField(ReferenceField(User, dbref=False)) User.drop_collection() Group.drop_collection() for i in range(1, 51): user = User(name="user %s" % i) user.save() group = Group(members=User.objects) group.save() group.reload() # Confirm reload works # Delete one User so one of the references in the # Group.members list is an orphan DBRef User.objects[0].delete() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 _ = [m for m in group_obj.members] assert q == 2 assert group_obj._data["members"]._dereferenced # verifies that no additional queries gets executed # if we re-iterate over the ListField once it is # dereferenced _ = [m for m in group_obj.members] assert q == 2 assert group_obj._data["members"]._dereferenced User.drop_collection() Group.drop_collection() def test_list_item_dereference_dref_false_stores_as_type(self): """Ensure that DBRef items are stored as their type""" class User(Document): my_id = IntField(primary_key=True) name = StringField() class Group(Document): members = ListField(ReferenceField(User, dbref=False)) User.drop_collection() Group.drop_collection() user = User(my_id=1, name="user 1").save() Group(members=User.objects).save() group = Group.objects.first() assert Group._get_collection().find_one()["members"] == [1] assert group.members == [user] def 
test_handle_old_style_references(self): """Ensure that DBRef items in ListFields are dereferenced.""" class User(Document): name = StringField() class Group(Document): members = ListField(ReferenceField(User, dbref=True)) User.drop_collection() Group.drop_collection() for i in range(1, 26): user = User(name="user %s" % i) user.save() group = Group(members=User.objects) group.save() group = Group._get_collection().find_one() # Update the model to change the reference class Group(Document): members = ListField(ReferenceField(User, dbref=False)) group = Group.objects.first() group.members.append(User(name="String!").save()) group.save() group = Group.objects.first() assert group.members[0].name == "user 1" assert group.members[-1].name == "String!" def test_migrate_references(self): """Example of migrating ReferenceField storage""" # Create some sample data class User(Document): name = StringField() class Group(Document): author = ReferenceField(User, dbref=True) members = ListField(ReferenceField(User, dbref=True)) User.drop_collection() Group.drop_collection() user = User(name="Ross").save() group = Group(author=user, members=[user]).save() raw_data = Group._get_collection().find_one() assert isinstance(raw_data["author"], DBRef) assert isinstance(raw_data["members"][0], DBRef) group = Group.objects.first() assert group.author == user assert group.members == [user] # Migrate the model definition class Group(Document): author = ReferenceField(User, dbref=False) members = ListField(ReferenceField(User, dbref=False)) # Migrate the data for g in Group.objects(): # Explicitly mark as changed so resets g._mark_as_changed("author") g._mark_as_changed("members") g.save() group = Group.objects.first() assert group.author == user assert group.members == [user] raw_data = Group._get_collection().find_one() assert isinstance(raw_data["author"], ObjectId) assert isinstance(raw_data["members"][0], ObjectId) def test_recursive_reference(self): """Ensure that ReferenceFields can 
reference their own documents.""" class Employee(Document): name = StringField() boss = ReferenceField("self") friends = ListField(ReferenceField("self")) Employee.drop_collection() bill = Employee(name="Bill Lumbergh") bill.save() michael = Employee(name="Michael Bolton") michael.save() samir = Employee(name="Samir Nagheenanajar") samir.save() friends = [michael, samir] peter = Employee(name="Peter Gibbons", boss=bill, friends=friends) peter.save() Employee(name="Funky Gibbon", boss=bill, friends=friends).save() Employee(name="Funky Gibbon", boss=bill, friends=friends).save() Employee(name="Funky Gibbon", boss=bill, friends=friends).save() with query_counter() as q: assert q == 0 peter = Employee.objects.with_id(peter.id) assert q == 1 peter.boss assert q == 2 peter.friends assert q == 3 # Document select_related with query_counter() as q: assert q == 0 peter = Employee.objects.with_id(peter.id).select_related() assert q == 2 assert peter.boss == bill assert q == 2 assert peter.friends == friends assert q == 2 # Queryset select_related with query_counter() as q: assert q == 0 employees = Employee.objects(boss=bill).select_related() assert q == 2 for employee in employees: assert employee.boss == bill assert q == 2 assert employee.friends == friends assert q == 2 def test_list_of_lists_of_references(self): class User(Document): name = StringField() class Post(Document): user_lists = ListField(ListField(ReferenceField(User))) class SimpleList(Document): users = ListField(ReferenceField(User)) User.drop_collection() Post.drop_collection() SimpleList.drop_collection() u1 = User.objects.create(name="u1") u2 = User.objects.create(name="u2") u3 = User.objects.create(name="u3") SimpleList.objects.create(users=[u1, u2, u3]) assert SimpleList.objects.all()[0].users == [u1, u2, u3] Post.objects.create(user_lists=[[u1, u2], [u3]]) assert Post.objects.all()[0].user_lists == [[u1, u2], [u3]] def test_circular_reference(self): """Ensure you can handle circular references""" 
class Relation(EmbeddedDocument): name = StringField() person = ReferenceField("Person") class Person(Document): name = StringField() relations = ListField(EmbeddedDocumentField("Relation")) def __repr__(self): return "<Person: %s>" % self.name Person.drop_collection() mother = Person(name="Mother") daughter = Person(name="Daughter") mother.save() daughter.save() daughter_rel = Relation(name="Daughter", person=daughter) mother.relations.append(daughter_rel) mother.save() mother_rel = Relation(name="Daughter", person=mother) self_rel = Relation(name="Self", person=daughter) daughter.relations.append(mother_rel) daughter.relations.append(self_rel) daughter.save() assert "[<Person: Mother>, <Person: Daughter>]" == "%s" % Person.objects() def test_circular_reference_on_self(self): """Ensure you can handle circular references""" class Person(Document): name = StringField() relations = ListField(ReferenceField("self")) def __repr__(self): return "<Person: %s>" % self.name Person.drop_collection() mother = Person(name="Mother") daughter = Person(name="Daughter") mother.save() daughter.save() mother.relations.append(daughter) mother.save() daughter.relations.append(mother) daughter.relations.append(daughter) assert daughter._get_changed_fields() == ["relations"] daughter.save() assert "[<Person: Mother>, <Person: Daughter>]" == "%s" % Person.objects() def test_circular_tree_reference(self): """Ensure you can handle circular references with more than one level""" class Other(EmbeddedDocument): name = StringField() friends = ListField(ReferenceField("Person")) class Person(Document): name = StringField() other = EmbeddedDocumentField(Other, default=lambda: Other()) def __repr__(self): return "<Person: %s>" % self.name Person.drop_collection() paul = Person(name="Paul").save() maria = Person(name="Maria").save() julia = Person(name="Julia").save() anna = Person(name="Anna").save() paul.other.friends = [maria, julia, anna] paul.other.name = "Paul's friends" paul.save() 
maria.other.friends = [paul, julia, anna] maria.other.name = "Maria's friends" maria.save() julia.other.friends = [paul, maria, anna] julia.other.name = "Julia's friends" julia.save() anna.other.friends = [paul, maria, julia] anna.other.name = "Anna's friends" anna.save() assert ( "[<Person: Paul>, <Person: Maria>, <Person: Julia>, <Person: Anna>]" == "%s" % Person.objects() ) def test_generic_reference(self): class UserA(Document): name = StringField() class UserB(Document): name = StringField() class UserC(Document): name = StringField() class Group(Document): members = ListField(GenericReferenceField()) UserA.drop_collection() UserB.drop_collection() UserC.drop_collection() Group.drop_collection() members = [] for i in range(1, 51): a = UserA(name="User A %s" % i) a.save() b = UserB(name="User B %s" % i) b.save() c = UserC(name="User C %s" % i) c.save() members += [a, b, c] group = Group(members=members) group.save() group = Group(members=members) group.save() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 _ = [m for m in group_obj.members] assert q == 4 _ = [m for m in group_obj.members] assert q == 4 for m in group_obj.members: assert "User" in m.__class__.__name__ # Document select_related with query_counter() as q: assert q == 0 group_obj = Group.objects.first().select_related() assert q == 4 _ = [m for m in group_obj.members] assert q == 4 _ = [m for m in group_obj.members] assert q == 4 for m in group_obj.members: assert "User" in m.__class__.__name__ # Queryset select_related with query_counter() as q: assert q == 0 group_objs = Group.objects.select_related() assert q == 4 for group_obj in group_objs: _ = [m for m in group_obj.members] assert q == 4 _ = [m for m in group_obj.members] assert q == 4 for m in group_obj.members: assert "User" in m.__class__.__name__ def test_generic_reference_orphan_dbref(self): """Ensure that generic orphan DBRef items in ListFields are dereferenced.""" class UserA(Document): name = 
StringField() class UserB(Document): name = StringField() class UserC(Document): name = StringField() class Group(Document): members = ListField(GenericReferenceField()) UserA.drop_collection() UserB.drop_collection() UserC.drop_collection() Group.drop_collection() members = [] for i in range(1, 51): a = UserA(name="User A %s" % i) a.save() b = UserB(name="User B %s" % i) b.save() c = UserC(name="User C %s" % i) c.save() members += [a, b, c] group = Group(members=members) group.save() # Delete one UserA instance so that there is # an orphan DBRef in the GenericReference ListField UserA.objects[0].delete() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 _ = [m for m in group_obj.members] assert q == 4 assert group_obj._data["members"]._dereferenced _ = [m for m in group_obj.members] assert q == 4 assert group_obj._data["members"]._dereferenced UserA.drop_collection() UserB.drop_collection() UserC.drop_collection() Group.drop_collection() def test_list_field_complex(self): class UserA(Document): name = StringField() class UserB(Document): name = StringField() class UserC(Document): name = StringField() class Group(Document): members = ListField() UserA.drop_collection() UserB.drop_collection() UserC.drop_collection() Group.drop_collection() members = [] for i in range(1, 51): a = UserA(name="User A %s" % i) a.save() b = UserB(name="User B %s" % i) b.save() c = UserC(name="User C %s" % i) c.save() members += [a, b, c] group = Group(members=members) group.save() group = Group(members=members) group.save() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 _ = [m for m in group_obj.members] assert q == 4 _ = [m for m in group_obj.members] assert q == 4 for m in group_obj.members: assert "User" in m.__class__.__name__ # Document select_related with query_counter() as q: assert q == 0 group_obj = Group.objects.first().select_related() assert q == 4 _ = [m for m in group_obj.members] assert q == 
4 _ = [m for m in group_obj.members] assert q == 4 for m in group_obj.members: assert "User" in m.__class__.__name__ # Queryset select_related with query_counter() as q: assert q == 0 group_objs = Group.objects.select_related() assert q == 4 for group_obj in group_objs: _ = [m for m in group_obj.members] assert q == 4 _ = [m for m in group_obj.members] assert q == 4 for m in group_obj.members: assert "User" in m.__class__.__name__ UserA.drop_collection() UserB.drop_collection() UserC.drop_collection() Group.drop_collection() def test_map_field_reference(self): class User(Document): name = StringField() class Group(Document): members = MapField(ReferenceField(User)) User.drop_collection() Group.drop_collection() members = [] for i in range(1, 51): user = User(name="user %s" % i) user.save() members.append(user) group = Group(members={str(u.id): u for u in members}) group.save() group = Group(members={str(u.id): u for u in members}) group.save() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 _ = [m for m in group_obj.members] assert q == 2 for _, m in group_obj.members.items(): assert isinstance(m, User) # Document select_related with query_counter() as q: assert q == 0 group_obj = Group.objects.first().select_related() assert q == 2 _ = [m for m in group_obj.members] assert q == 2 for k, m in group_obj.members.items(): assert isinstance(m, User) # Queryset select_related with query_counter() as q: assert q == 0 group_objs = Group.objects.select_related() assert q == 2 for group_obj in group_objs: _ = [m for m in group_obj.members] assert q == 2 for k, m in group_obj.members.items(): assert isinstance(m, User) User.drop_collection() Group.drop_collection() def test_dict_field(self): class UserA(Document): name = StringField() class UserB(Document): name = StringField() class UserC(Document): name = StringField() class Group(Document): members = DictField() UserA.drop_collection() UserB.drop_collection() 
UserC.drop_collection() Group.drop_collection() members = [] for i in range(1, 51): a = UserA(name="User A %s" % i) a.save() b = UserB(name="User B %s" % i) b.save() c = UserC(name="User C %s" % i) c.save() members += [a, b, c] group = Group(members={str(u.id): u for u in members}) group.save() group = Group(members={str(u.id): u for u in members}) group.save() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 _ = [m for m in group_obj.members] assert q == 4 _ = [m for m in group_obj.members] assert q == 4 for k, m in group_obj.members.items(): assert "User" in m.__class__.__name__ # Document select_related with query_counter() as q: assert q == 0 group_obj = Group.objects.first().select_related() assert q == 4 _ = [m for m in group_obj.members] assert q == 4 _ = [m for m in group_obj.members] assert q == 4 for k, m in group_obj.members.items(): assert "User" in m.__class__.__name__ # Queryset select_related with query_counter() as q: assert q == 0 group_objs = Group.objects.select_related() assert q == 4 for group_obj in group_objs: _ = [m for m in group_obj.members] assert q == 4 _ = [m for m in group_obj.members] assert q == 4 for k, m in group_obj.members.items(): assert "User" in m.__class__.__name__ Group.objects.delete() Group().save() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 _ = [m for m in group_obj.members] assert q == 1 assert group_obj.members == {} UserA.drop_collection() UserB.drop_collection() UserC.drop_collection() Group.drop_collection() def test_dict_field_no_field_inheritance(self): class UserA(Document): name = StringField() meta = {"allow_inheritance": False} class Group(Document): members = DictField() UserA.drop_collection() Group.drop_collection() members = [] for i in range(1, 51): a = UserA(name="User A %s" % i) a.save() members += [a] group = Group(members={str(u.id): u for u in members}) group.save() group = Group(members={str(u.id): u for u in 
members}) group.save() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 _ = [m for m in group_obj.members] assert q == 2 _ = [m for m in group_obj.members] assert q == 2 for k, m in group_obj.members.items(): assert isinstance(m, UserA) # Document select_related with query_counter() as q: assert q == 0 group_obj = Group.objects.first().select_related() assert q == 2 _ = [m for m in group_obj.members] assert q == 2 _ = [m for m in group_obj.members] assert q == 2 for k, m in group_obj.members.items(): assert isinstance(m, UserA) # Queryset select_related with query_counter() as q: assert q == 0 group_objs = Group.objects.select_related() assert q == 2 for group_obj in group_objs: _ = [m for m in group_obj.members] assert q == 2 _ = [m for m in group_obj.members] assert q == 2 for _, m in group_obj.members.items(): assert isinstance(m, UserA) UserA.drop_collection() Group.drop_collection() def test_generic_reference_map_field(self): class UserA(Document): name = StringField() class UserB(Document): name = StringField() class UserC(Document): name = StringField() class Group(Document): members = MapField(GenericReferenceField()) UserA.drop_collection() UserB.drop_collection() UserC.drop_collection() Group.drop_collection() members = [] for i in range(1, 51): a = UserA(name="User A %s" % i) a.save() b = UserB(name="User B %s" % i) b.save() c = UserC(name="User C %s" % i) c.save() members += [a, b, c] group = Group(members={str(u.id): u for u in members}) group.save() group = Group(members={str(u.id): u for u in members}) group.save() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 _ = [m for m in group_obj.members] assert q == 4 _ = [m for m in group_obj.members] assert q == 4 for _, m in group_obj.members.items(): assert "User" in m.__class__.__name__ # Document select_related with query_counter() as q: assert q == 0 group_obj = Group.objects.first().select_related() assert q == 4 _ = [m 
for m in group_obj.members] assert q == 4 _ = [m for m in group_obj.members] assert q == 4 for _, m in group_obj.members.items(): assert "User" in m.__class__.__name__ # Queryset select_related with query_counter() as q: assert q == 0 group_objs = Group.objects.select_related() assert q == 4 for group_obj in group_objs: _ = [m for m in group_obj.members] assert q == 4 _ = [m for m in group_obj.members] assert q == 4 for _, m in group_obj.members.items(): assert "User" in m.__class__.__name__ Group.objects.delete() Group().save() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 _ = [m for m in group_obj.members] assert q == 1 UserA.drop_collection() UserB.drop_collection() UserC.drop_collection() Group.drop_collection() def test_multidirectional_lists(self): class Asset(Document): name = StringField(max_length=250, required=True) path = StringField() title = StringField() parent = GenericReferenceField(default=None) parents = ListField(GenericReferenceField()) children = ListField(GenericReferenceField()) Asset.drop_collection() root = Asset(name="", path="/", title="Site Root") root.save() company = Asset(name="company", title="Company", parent=root, parents=[root]) company.save() root.children = [company] root.save() root = root.reload() assert root.children == [company] assert company.parents == [root] def test_dict_in_dbref_instance(self): class Person(Document): name = StringField(max_length=250, required=True) class Room(Document): number = StringField(max_length=250, required=True) staffs_with_position = ListField(DictField()) Person.drop_collection() Room.drop_collection() bob = Person.objects.create(name="Bob") bob.save() sarah = Person.objects.create(name="Sarah") sarah.save() room_101 = Room.objects.create(number="101") room_101.staffs_with_position = [ {"position_key": "window", "staff": sarah}, {"position_key": "door", "staff": bob.to_dbref()}, ] room_101.save() room = Room.objects.first().select_related() assert 
room.staffs_with_position[0]["staff"] == sarah assert room.staffs_with_position[1]["staff"] == bob def test_document_reload_no_inheritance(self): class Foo(Document): meta = {"allow_inheritance": False} bar = ReferenceField("Bar") baz = ReferenceField("Baz") class Bar(Document): meta = {"allow_inheritance": False} msg = StringField(required=True, default="Blammo!") class Baz(Document): meta = {"allow_inheritance": False} msg = StringField(required=True, default="Kaboom!") Foo.drop_collection() Bar.drop_collection() Baz.drop_collection() bar = Bar() bar.save() baz = Baz() baz.save() foo = Foo() foo.bar = bar foo.baz = baz foo.save() foo.reload() assert isinstance(foo.bar, Bar) assert isinstance(foo.baz, Baz) def test_document_reload_reference_integrity(self): """ Ensure reloading a document with multiple similar id in different collections doesn't mix them. """ class Topic(Document): id = IntField(primary_key=True) class User(Document): id = IntField(primary_key=True) name = StringField() class Message(Document): id = IntField(primary_key=True) topic = ReferenceField(Topic) author = ReferenceField(User) Topic.drop_collection() User.drop_collection() Message.drop_collection() # All objects share the same id, but each in a different collection topic = Topic(id=1).save() user = User(id=1, name="user-name").save() Message(id=1, topic=topic, author=user).save() concurrent_change_user = User.objects.get(id=1) concurrent_change_user.name = "new-name" concurrent_change_user.save() assert user.name != "new-name" msg = Message.objects.get(id=1) msg.reload() assert msg.topic == topic assert msg.author == user assert msg.author.name == "new-name" def test_list_lookup_not_checked_in_map(self): """Ensure we dereference list data correctly""" class Comment(Document): id = IntField(primary_key=True) text = StringField() class Message(Document): id = IntField(primary_key=True) comments = ListField(ReferenceField(Comment)) Comment.drop_collection() Message.drop_collection() c1 = 
Comment(id=0, text="zero").save() c2 = Comment(id=1, text="one").save() Message(id=1, comments=[c1, c2]).save() msg = Message.objects.get(id=1) assert 0 == msg.comments[0].id assert 1 == msg.comments[1].id def test_list_item_dereference_dref_false_save_doesnt_cause_extra_queries(self): """Ensure that DBRef items in ListFields are dereferenced.""" class User(Document): name = StringField() class Group(Document): name = StringField() members = ListField(ReferenceField(User, dbref=False)) User.drop_collection() Group.drop_collection() for i in range(1, 51): User(name="user %s" % i).save() Group(name="Test", members=User.objects).save() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 group_obj.name = "new test" group_obj.save() assert q == 2 def test_list_item_dereference_dref_true_save_doesnt_cause_extra_queries(self): """Ensure that DBRef items in ListFields are dereferenced.""" class User(Document): name = StringField() class Group(Document): name = StringField() members = ListField(ReferenceField(User, dbref=True)) User.drop_collection() Group.drop_collection() for i in range(1, 51): User(name="user %s" % i).save() Group(name="Test", members=User.objects).save() with query_counter() as q: assert q == 0 group_obj = Group.objects.first() assert q == 1 group_obj.name = "new test" group_obj.save() assert q == 2 def test_generic_reference_save_doesnt_cause_extra_queries(self): class UserA(Document): name = StringField() class UserB(Document): name = StringField() class UserC(Document): name = StringField() class Group(Document): name = StringField() members = ListField(GenericReferenceField()) UserA.drop_collection() UserB.drop_collection() UserC.drop_collection() Group.drop_collection() members = [] for i in range(1, 51): a = UserA(name="User A %s" % i).save() b = UserB(name="User B %s" % i).save() c = UserC(name="User C %s" % i).save() members += [a, b, c] Group(name="test", members=members).save() with query_counter() as q: 
assert q == 0 group_obj = Group.objects.first() assert q == 1 group_obj.name = "new test" group_obj.save() assert q == 2 def test_objectid_reference_across_databases(self): # mongoenginetest - Is default connection alias from setUp() # Register Aliases register_connection("testdb-1", "mongoenginetest2") class User(Document): name = StringField() meta = {"db_alias": "testdb-1"} class Book(Document): name = StringField() author = ReferenceField(User) # Drops User.drop_collection() Book.drop_collection() user = User(name="Ross").save() Book(name="MongoEngine for pros", author=user).save() # Can't use query_counter across databases - so test the _data object book = Book.objects.first() assert not isinstance(book._data["author"], User) book.select_related() assert isinstance(book._data["author"], User) def test_non_ascii_pk(self): """ Ensure that dbref conversion to string does not fail when non-ascii characters are used in primary key """ class Brand(Document): title = StringField(max_length=255, primary_key=True) class BrandGroup(Document): title = StringField(max_length=255, primary_key=True) brands = ListField(ReferenceField("Brand", dbref=True)) Brand.drop_collection() BrandGroup.drop_collection() brand1 = Brand(title="Moschino").save() brand2 = Brand(title="Денис Симачёв").save() BrandGroup(title="top_brands", brands=[brand1, brand2]).save() brand_groups = BrandGroup.objects().all() assert 2 == len([brand for bg in brand_groups for brand in bg.brands]) def test_dereferencing_embedded_listfield_referencefield(self): class Tag(Document): meta = {"collection": "tags"} name = StringField() class Post(EmbeddedDocument): body = StringField() tags = ListField(ReferenceField("Tag", dbref=True)) class Page(Document): meta = {"collection": "pages"} tags = ListField(ReferenceField("Tag", dbref=True)) posts = ListField(EmbeddedDocumentField(Post)) Tag.drop_collection() Page.drop_collection() tag = Tag(name="test").save() post = Post(body="test body", tags=[tag]) 
Page(tags=[tag], posts=[post]).save() page = Page.objects.first() assert page.tags[0] == page.posts[0].tags[0] def test_select_related_follows_embedded_referencefields(self): class Song(Document): title = StringField() class PlaylistItem(EmbeddedDocument): song = ReferenceField("Song") class Playlist(Document): items = ListField(EmbeddedDocumentField("PlaylistItem")) Playlist.drop_collection() Song.drop_collection() songs = [Song.objects.create(title="song %d" % i) for i in range(3)] items = [PlaylistItem(song=song) for song in songs] playlist = Playlist.objects.create(items=items) with query_counter() as q: assert q == 0 playlist = Playlist.objects.first().select_related() songs = [item.song for item in playlist.items] assert q == 2 if __name__ == "__main__": unittest.main()
FieldTest
python
doocs__leetcode
solution/2100-2199/2169.Count Operations to Obtain Zero/Solution.py
{ "start": 0, "end": 266 }
class ____: def countOperations(self, num1: int, num2: int) -> int: ans = 0 while num1 and num2: if num1 >= num2: num1 -= num2 else: num2 -= num1 ans += 1 return ans
Solution
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/genericType28.py
{ "start": 737, "end": 778 }
class ____(Class3[T_co]): ...
Class3_Child1
python
readthedocs__readthedocs.org
readthedocs/doc_builder/backends/mkdocs.py
{ "start": 2337, "end": 2427 }
class ____(BaseMkdocs): builder = "build" build_dir = "_readthedocs/html"
MkdocsHTML
python
pallets__itsdangerous
src/itsdangerous/exc.py
{ "start": 1619, "end": 1788 }
class ____(BadTimeSignature): """Raised if a signature timestamp is older than ``max_age``. This is a subclass of :exc:`BadTimeSignature`. """
SignatureExpired